package client

import (
	"fmt"
	"testing"
)

func TestClient_PinFile(t *testing.T) {
	cIdStr := "QmS2gWrAzp42swfjweNQgzHL4dC1UVR6a2rokPJxMfgPfM"
	why, isPinned, err := ipfsClient.IsPinned(cIdStr)
	if err != nil {
		panic(err)
	}
	if isPinned {
		panic("already pinned")
	}
	err = ipfsClient.PinCID(cIdStr)
	if err != nil {
		panic(err)
	}
	why, isPinned, err = ipfsClient.IsPinned(cIdStr)
	if err != nil {
		panic(err)
	}
	if !isPinned {
		panic("should pin file")
	}
	fmt.Println("File has been pinned with reason:", why)
}

func TestClient_UnPinFile(t *testing.T) {
	cIdStr := "QmS2gWrAzp42swfjweNQgzHL4dC1UVR6a2rokPJxMfgPfM"
	_, isPinned, err := ipfsClient.IsPinned(cIdStr)
	if err != nil {
		panic(err)
	}
	if !isPinned {
		err = ipfsClient.PinCID(cIdStr)
		if err != nil {
			panic(err)
		}
	}
	_, isPinned, err = ipfsClient.IsPinned(cIdStr)
	if err != nil {
		panic(err)
	}
	if !isPinned {
		panic("should pin file")
	}
	err = ipfsClient.UnPinCID(cIdStr)
	if err != nil {
		panic(err)
	}
	_, isPinned, err = ipfsClient.IsPinned(cIdStr)
	if err != nil {
		panic(err)
	}
	if isPinned {
		panic("should un-pin file")
	}
	fmt.Println("Success!!")
}

func TestClient_IsPinned(t *testing.T) {
	cIdStr := "<KEY>"
	why, isPinned, err := ipfsClient.IsPinned(cIdStr)
	if err != nil {
		panic(err)
	}
	fmt.Println(why, isPinned, err)
}
package main

import (
	"fmt"
)

func main() {
	fmt.Println("The answer to Part One is: ")
}
import {
  DestroyedService,
  NGT_OBJECT_3D_CONTROLLER_PROVIDER,
} from '@angular-three/core';
import {
  ChangeDetectorRef,
  InjectionToken,
  Optional,
  Provider,
} from '@angular/core';
import { takeUntil } from 'rxjs';

import { NgtSobaLineController } from './line.controller';

export const NGT_SOBA_LINE_WATCHED_CONTROLLER = new InjectionToken(
  'Watched Line Controller'
);

export const NGT_SOBA_LINE_CONTROLLER_PROVIDER: Provider[] = [
  NGT_OBJECT_3D_CONTROLLER_PROVIDER,
  DestroyedService,
  {
    provide: NGT_SOBA_LINE_WATCHED_CONTROLLER,
    deps: [
      [new Optional(), NgtSobaLineController],
      ChangeDetectorRef,
      DestroyedService,
    ],
    useFactory: sobaLineWatchedControllerFactory,
  },
];

export function sobaLineWatchedControllerFactory(
  controller: NgtSobaLineController | null,
  cdr: ChangeDetectorRef,
  destroyed: DestroyedService
) {
  if (!controller) return null;

  controller.change$.pipe(takeUntil(destroyed)).subscribe(() => {
    cdr.markForCheck();
  });

  return controller;
}
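A minimal usage sketch of this provider in a host component. The component name, selector and import path below are illustrative assumptions, not taken from the source; only the imported tokens and the controller class come from the file above.

import { Component, Inject } from '@angular/core';
import { NgtSobaLineController } from './line.controller';
import {
  NGT_SOBA_LINE_CONTROLLER_PROVIDER,
  NGT_SOBA_LINE_WATCHED_CONTROLLER,
} from './line-controller.provider'; // hypothetical path to the file above

@Component({
  selector: 'ngt-soba-line-consumer', // illustrative name
  template: '',
  providers: [NGT_SOBA_LINE_CONTROLLER_PROVIDER],
})
export class LineConsumerComponent {
  constructor(
    // Resolves to the parent NgtSobaLineController (with change detection
    // already wired to its change$ stream by the factory), or null when no
    // parent controller is present.
    @Inject(NGT_SOBA_LINE_WATCHED_CONTROLLER)
    readonly watchedController: NgtSobaLineController | null
  ) {}
}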
Burlington, New Jersey History The council of West Jersey Proprietors purchased roughly 30 miles (48 km) of riverfront land in 1676 from the Lenape Native Americans. Burlington was founded on part of that land by English settlers (primarily Quakers) in 1677. It served as the capital of the province until 1702, when West Jersey and East Jersey were combined into a single Crown Colony. Burlington takes its name (including the county name) from the English east-coast town of Bridlington, of which Burlington was a district. It is now amalgamated into the larger Bridlington town. The Quakers formally established their congregation in 1678. Initially, they met in private homes; between 1683 and 1687, Francis Collings constructed a hexagonal meeting house of brick. Over the next century, the membership grew substantially and a larger building was needed. The present meeting house on High Street was built in 1783 in front of the old meeting house and cemetery. The cemetery predated the first building. A tablet commemorates that the Lenape chief King Ockanickon, a loyal friend of the English settlers, was buried here in 1681. The oldest gravestone is inscribed "D.B. 1726." Many notable Quakers are buried here. One of the oldest buildings in Burlington is known as the Revell House. Originally built in 1685 for George Hutchinson, it stood on East Pearl Street. The property was purchased by Thomas Revell, one of the original Anglo-European settlers. Local tradition associates this house with the young Benjamin Franklin, who received gingerbread from the household while traveling from Boston to Philadelphia. In the early 20th century, the house was purchased by the Annis Stockton Chapter of the DAR for use as their clubhouse. The Colonial Burlington Foundation acquired and restored it in the 1950s. 18th century Many institutions established in the 18th century continue to function in the 21st century. After the Quakers, the second oldest religious congregation in Burlington were the Anglicans (later known as Episcopalians). Their original church, Old St. Mary's, is the oldest church in Burlington and New Jersey. The congregation was founded in 1702 by George Keith and John Talbot. Talbot became the first minister and laid the cornerstone for the church in 1703. He served as the church's rector until 1725. The congregation prospered, and the church became the see of the Anglican bishops of New Jersey. After the Revolution, the Episcopal Church in the United States was established. In 1846, under the leadership of Bishop and Rector George Washington Doane, construction was begun on New St. Mary's. This early Gothic Revival architecture church was designed by Richard Upjohn, who also designed Trinity Church at the foot of Wall Street in Lower Manhattan. In the late 20th century, this building was designated as a National Historic Landmark (NHL). Bishop Doane founded an Episcopal girls' boarding school, St. Mary's, in Burlington in 1838, at a time when interest in girls' education led to development of schools for them in many areas. Girls from families up and down the East Coast came to study there, from as far as New England, Virginia, and upstate New York. St. Mary's provided a classical education, as well as classes in arts and music. The Library Company of Burlington was organized in 1757 as a "free" library open to the public as well as members. There were 60 members of the original Library Company, each paying ten shillings per year to support the institution. 
The Library received a Charter from King George II of Great Britain in 1758. The Library's books were kept in members' homes for a few years: Thomas Rodman's at 446 South High Street and, after 1767, Robert Smith's at 218 High Street. In 1789 the Library moved to its own building. In the early 21st century, the Library is housed in a stone building that was built on West Union Street in 1864. The Burlington Library is the oldest continuously operating library in New Jersey and the nation's seventh oldest. The Endeavor Fire Company was organized in 1795. It was one of the four companies in the Burlington Fire Department when it was organized almost a century later. Endeavor was the first permanent firefighting organization in Burlington and remains one of the oldest fire companies under its original name in the state. By 1882, the company had relocated to its present building, which was erected in 1852 as a Market House. Burlington has been the home of many notable people including John Lawrence, a politician and his son, Captain James Lawrence. The elder Lawrence served in the State Assembly, as Mayor of Burlington, New Jersey in 1769, and as a member of the Provincial Council from 1771 to 1775. He was suspected of being loyal to the British during the Revolution, which ended his career. His son was born on October 1, 1781, and became a legend during the War of 1812 with the command "Don't Give Up the Ship." Lawyer and writer, James Fenimore Cooper, who wrote The Last of the Mohicans, was also from Burlington. His father was a merchant there before buying land and developing Cooperstown, New York after the Revolution. 19th century As education for girls and young women became emphasized in the 19th century, Bishop George Washington Doane founded St. Mary's Hall in 1837 in association with the Episcopal diocese as the first Episcopal boarding school offering a classical education for girls and the first such school in New Jersey. In the 20th century, a boys' school was added. It is now known as Doane Academy and is a private, co-educational school for grades from Pre-K through 12th. The building at 301 High Street houses the oldest continuously operating pharmacy in New Jersey. Originally a dwelling, the ground floor was converted to commercial use around 1845 by William Allinson, a druggist, local historian, and leading Quaker abolitionist. He used the building as a center of anti-slavery activity. John Greenleaf Whittier denounced slavery from the doorstep, and local tradition holds that fugitive slaves hid in tunnels under the building in their passage on the Underground Railroad. New Jersey ended slavery, but many fugitives wanted to go further north, beyond the reach of slave catchers. During the 19th century, Burlington City was known for the quality and quantity of its manufacturing. The shoe industry rivaled shipbuilding and canning in prominence. The 1850 United States Census indicates that the largest number of men were employed in the shoe industry, followed closely by carpentry and bricklaying. J. Frank Budd got his start in the shoe business at a Burlington shoe company just after the Civil War. In 1887, J.F. Budd broke ground for a children's "shoeworks" at the corner of Penn and Dilwyn streets. The company employed approximately 325 people and operated six days a week for ten hours a day. The J.F. Budd Baby Shoe Company billed itself as the "largest baby shoe plant in the world." The commercial activity provided revenues for the City's cultural activity. 
In 1839, a Lyceum was erected as a venue for lectures, concerts, and public meetings. It served in that capacity until 1851, when it was turned over to the city to be used as the City Hall. The municipal offices' move was concurrent with the adoption of a new City charter. The Oneida Boat Club was organized in 1873 by a group of 10 members. It is named for one of the original Five Nations of the Iroquois Confederacy, based in New York. Over the next few years, membership in the club grew rapidly. In 1876, they dedicated their newly built clubhouse on the banks of the Delaware River at York Street. The Oneida is the oldest continuously operating boat club located on the Delaware River. During the 19th century, the City of Burlington developed in a grid pattern from the main crossroads of High and Broad streets. Blocks of attached rowhouses built in the latest architectural style characterize the city as a 19th-century town. Ferries carried traffic across the Delaware River to Pennsylvania before bridges were built. 20th century Burlington's waterfront park along the river was developed as a result of urban renewal and flood control projects in the late 1960s and 1970s. The shoreline improvements—revetments, walkways, etc.—span the city's Delaware riverfront from the Burlington-Bristol Bridge to Assiscunk Creek. The remains of former waterfront industries, ferry terminals, and docks were demolished. Development of an open, grassy park with a tree-lined waterfront esplanade has reconnected city residents to the riverfront for recreation. This also ensures that business properties are not at risk during floods and reduces damages. In this period, the United States federal and state governments began to value their historic assets more highly, and efforts were made to preserve structures that were significant to the layered history of places. In addition to recognition of individual structures, such as the National Historic Landmark St. Mary's Church, the city has two historic districts listed on the National Register of Historic Places, with multiple contributing buildings: the Burlington Historic District includes structures from both the eighteenth and nineteenth centuries. It is adjacent to the city's High Street Historic District. The Oneida Clubhouse narrowly escaped demolition during the urban renewal campaign. It was saved and renovated. As the new esplanade was built on fill that added land between the building and river's edge, it created a landlocked clubhouse for the boat club. Burlington Coat Factory was founded in 1924 as a wholesaler of ladies' coats and outerwear. The modern company was formed in 1972 when Monroe Milstein purchased a warehouse in the outskirts of the city of Burlington. He started selling coats and outerwear there at discount prices. The company gradually added other apparel, including suits, shoes, and accessories, and has branched out to include baby items and linens, all at discount prices. The company's corporate headquarters was moved from the city to Burlington Township in 1988. The Burlington Coat Factory relocated to a new store site in the fall of 2008. Geography According to the United States Census Bureau, the city had a total area of 3.782 square miles (9.793 km²), including 3.063 square miles (7.932 km²) of land and 0.719 square miles (1.861 km²) of water (19.00%). Unincorporated communities, localities and place names located partially or completely within the city include Burlington Island and East Burlington. 
Burlington borders Burlington Township in Burlington County and both Bristol and Bristol Township across the Delaware River in Pennsylvania. The Burlington-Bristol Bridge crosses the Delaware River, connecting Burlington to Bristol. 2010 Census As of the 2010 United States Census, there were 9,920 people, 3,858 households, and 2,438.256 families residing in the city. The population density was 3,239.1 per square mile (1,250.6/km²). There were 4,223 housing units at an average density of 1,378.9 per square mile (532.4/km²). The racial makeup of the city was 58.92% (5,845) White, 32.98% (3,272) Black or African American, 0.18% (18) Native American, 2.03% (201) Asian, 0.04% (4) Pacific Islander, 2.29% (227) from other races, and 3.56% (353) from two or more races. Hispanic or Latino of any race were 6.50% (645) of the population. There were 3,858 households out of which 27.3% had children under the age of 18 living with them, 37.6% were married couples living together, 20.0% had a female householder with no husband present, and 36.8% were non-families. 30.8% of all households were made up of individuals, and 13.9% had someone living alone who was 65 years of age or older. The average household size was 2.53 and the average family size was 3.18. In the city, the population was spread out with 23.9% under the age of 18, 8.6% from 18 to 24, 25.5% from 25 to 44, 26.4 % from 45 to 64, and 15.7% who were 65 years of age or older. The median age was 38.9 years. For every 100 females there were 87.7 males. For every 100 females ages 18 and older there were 83.8 males. The Census Bureau's 2006–2010 American Community Survey showed that (in 2010 inflation-adjusted dollars) median household income was $48,317 (with a margin of error of +/− $3,334) and the median family income was $62,049 (+/− $6,446). Males had a median income of $43,146 (+/− $7,469) versus $40,929 (+/− $3,562) for females. The per capita income for the borough was $24,612 (+/− $1,541). About 10.6% of families and 11.8% of the population were below the poverty line, including 13.0% of those under age 18 and 7.4% of those age 65 or over. 2000 Census As of the 2000 United States Census there were 9,736 people, 3,898 households, and 2,522 families residing in the city. The population density was 3,245.1 people per square mile (1,253.0/km²). There were 4,181 housing units at an average density of 1,393.6 per square mile (538.1/km²). The racial makeup of the city was 68.18% White, 26.62% African American, 0.27% Native American, 1.28% Asian, 0.01% Pacific Islander, 1.29% from other races, and 2.34% from two or more races. Hispanic or Latino of any race were 3.41% of the population. There were 3,898 households out of which 27.8% had children under the age of 18 living with them, 41.6% were married couples living together, 17.8% had a female householder with no husband present, and 35.3% were non-families. 29.9% of all households were made up of individuals and 14.3% had someone living alone who was 65 years of age or older. The average household size was 2.48 and the average family size was 3.09. In the city the population was spread out with 23.9% under the age of 18, 7.7% from 18 to 24, 29.8% from 25 to 44, 21.8% from 45 to 64, and 16.8% who were 65 years of age or older. The median age was 38 years. For every 100 females, there were 90.2 males. For every 100 females age 18 and over, there were 84.3 males. The median income for a household in the city was $43,115, and the median income for a family was $47,969. 
Males had a median income of $38,012 versus $28,022 for females. The per capita income for the city was $20,208. About 5.4% of families and 8.0% of the population were below the poverty line, including 11.2% of those under age 18 and 7.0% of those age 65 or over. Local government The City of Burlington is governed within the Faulkner Act (formally known as the Optional Municipal Charter Law) under the Mayor-Council form of municipal government (Plan 4), implemented based on the recommendations of a Charter Study Commission as of January 1, 1992. The governing body consists of a mayor and a seven-member Common Council, all elected on a partisan basis in balloting held as part of the November general election. The Mayor serves a four-year term of office. The Common Council consists of seven members, each serving four-year terms of office: three at-large Councilmembers are elected to represent the entire city, while four are elected from single-member districts, known as wards. The three at-large and mayoral seats are up for election in one cycle, and the ward seats are elected two years later. As of 2019, the Mayor of Burlington City is Democrat Barry W. Conaway, whose term of office ends December 31, 2019. Members of the City Council are Council President Ila Marie Lollar (Ward 4; D, 2021), Vice President David Babula (At-Large; D, 2019), George Chachis (Ward 1; D, 2021), Helen F. Hatala (Ward 3; D, 2021), Jeanette M. Mercuri (At-Large; D, 2019), Thomas J. Swan (Ward 2; R, 2021) and Suzanne E. Woodard (At-Large; D, 2019). In January 2016, the City Council appointed George Chachis to fill the Ward 1 seat expiring in 2017 that had been held by Barry Conaway until he was appointed as mayor. Federal, state and county representation Burlington City is located in New Jersey's 3rd congressional district. It is part of New Jersey's 7th state legislative district. Prior to the 2010 Census, Burlington City had been part of the 4th Congressional District. Based on the 2010 census and population changes, the New Jersey Redistricting Commission changed the boundaries, to take effect in January 2013. For the 116th United States Congress, New Jersey's 3rd Congressional District is represented by Andy Kim (D, Bordentown). New Jersey is represented in the United States Senate by Democrats Cory Booker (Newark, term ends 2021) and Bob Menendez (Paramus, term ends 2025). For the 2018–2019 session (Senate, General Assembly), the 7th Legislative District of the New Jersey Legislature is represented in the State Senate by Troy Singleton (D, Palmyra) and in the General Assembly by Herb Conaway (D, Moorestown) and Carol A. Murphy (D, Mount Laurel). Burlington County is governed by a board of chosen freeholders, whose five members are elected at-large in partisan elections to three-year terms of office on a staggered basis, with either one or two seats coming up for election each year; at an annual reorganization meeting, the board selects a director and deputy director from among its members. As of 2018, Burlington County Board of Chosen Freeholders are Director Kate Gibbs (R, Lumberton Township, term as freeholder and as director ends December 31, 2018), Deputy Director Linda Hughes (R, Evesham Township, term as freeholder and as deputy director ends 2018) Tom Pullion (D, Edgewater Park, 2020), Balvir Singh (D, Burlington Township, 2020), and Latham Tiver (R, Southampton Township, 2019). Burlington County's Constitutional Officers are County Clerk Tim Tyler (R, Fieldsboro, 2018), Sheriff Jean E. 
Stanfield (R, Westampton, 2019) and Surrogate Mary Ann O'Brien (R, Medford, 2021). Politics As of March 23, 2011, there were a total of 5,765 registered voters in Burlington City, of which 2,813 (48.8% vs. 33.3% countywide) were registered as Democrats, 795 (13.8% vs. 23.9%) were registered as Republicans and 2,150 (37.3% vs. 42.8%) were registered as Unaffiliated. There were 7 voters registered to other parties. Among the city's 2010 Census population, 58.1% (vs. 61.7% in Burlington County) were registered to vote, including 76.4% of those ages 18 and over (vs. 80.3% countywide). In the 2012 presidential election, Democrat Barack Obama received 3,138 votes here (72.0% vs. 58.1% countywide), ahead of Republican Mitt Romney with 1,146 votes (26.3% vs. 40.2%) and other candidates with 35 votes (0.8% vs. 1.0%), among the 4,356 ballots cast by the city's 6,097 registered voters, for a turnout of 71.4% (vs. 74.5% in Burlington County). In the 2008 presidential election, Democrat Barack Obama received 3,285 votes here (69.9% vs. 58.4% countywide), ahead of Republican John McCain with 1,308 votes (27.8% vs. 39.9%) and other candidates with 55 votes (1.2% vs. 1.0%), among the 4,697 ballots cast by the city's 6,117 registered voters, for a turnout of 76.8% (vs. 80.0% in Burlington County). In the 2004 presidential election, Democrat John Kerry received 2,819 votes here (64.2% vs. 52.9% countywide), ahead of Republican George W. Bush with 1,486 votes (33.8% vs. 46.0%) and other candidates with 37 votes (0.8% vs. 0.8%), among the 4,390 ballots cast by the city's 5,832 registered voters, for a turnout of 75.3% (vs. 78.8% in the whole county). In the 2013 gubernatorial election, Republican Chris Christie received 1,422 votes here (50.9% vs. 61.4% countywide), ahead of Democrat Barbara Buono with 1,284 votes (46.0% vs. 35.8%) and other candidates with 30 votes (1.1% vs. 1.2%), among the 2,793 ballots cast by the city's 6,115 registered voters, yielding a 45.7% turnout (vs. 44.5% in the county). In the 2009 gubernatorial election, Democrat Jon Corzine received 1,622 ballots cast (59.6% vs. 44.5% countywide), ahead of Republican Chris Christie with 881 votes (32.4% vs. 47.7%), Independent Chris Daggett with 129 votes (4.7% vs. 4.8%) and other candidates with 48 votes (1.8% vs. 1.2%), among the 2,723 ballots cast by the city's 6,010 registered voters, yielding a 45.3% turnout (vs. 44.9% in the county). Education The City of Burlington Public School District serves students in pre-kindergarten through twelfth grade. The district is one of 31 former Abbott districts statewide, which are now referred to as "SDA Districts" based on the requirement for the state to cover all costs for school building and renovation projects in these districts under the supervision of the New Jersey Schools Development Authority. As of the 2014-15 school year, the district and its five schools had an enrollment of 1,634 students and 171.9 classroom teachers (on an FTE basis), for a student–teacher ratio of 9.5:1. The schools in the district (with 2014-15 enrollment data from the National Center for Education Statistics) are Elias Boudinot Elementary School (93 students; in grades K-2), Captain James Lawrence Elementary School (173; PreK-2), Samuel Smith Elementary School (318; PreK-2), Wilbur Watts Intermediate School (413; 3-6) and Burlington City High School (705; 7-12). 
The district's high school serves as a receiving school for students in grade nine through twelve from Edgewater Park Township, as part of a sending/receiving relationship with the Edgewater Park School District. Students from Burlington City, and from all of Burlington County, are eligible to attend the Burlington County Institute of Technology, a countywide public school district that serves the vocational and technical education needs of students at the high school and post-secondary level at its campuses in Medford and Westampton Township. Doane Academy, a co-educational, Episcopal college-preparatory school, was founded as St. Mary's Hall, a boarding school for girls, by George Washington Doane in 1837. The name was shortened from St. Mary's Hall-Doane Academy in March 2008. All Saints Catholic Grade School (Pre-K though 8th grade) closed in June 2006 with several other Catholic schools in the Roman Catholic Diocese of Trenton due to low enrollment, after 75 years of operation, based on recommendations issued in 2005 to help improve diocese finances. Roads and highways As of May 2010, the city had a total of 42.76 miles (68.82 km) of roadways, of which 35.71 miles (57.47 km) were maintained by the municipality, 4.36 miles (7.02 km) by Burlington County, 2.30 miles (3.70 km) by the New Jersey Department of Transportation, and 0.39 miles (0.63 km) by the Burlington County Bridge Commission. Burlington is served directly by U.S. Route 130 and New Jersey Route 413. Interstate 95, Interstate 295 and the New Jersey Turnpike all pass fairly close to the city and are easily accessible from Burlington. The Burlington-Bristol Bridge, part of Route 413, crosses the Delaware River, connecting Burlington to Bristol Township, Pennsylvania, and is operated by the Burlington County Bridge Commission. Construction of the bridge started on April 1, 1930, and the bridge opened to traffic on May 1, 1931. The bridge carries NJ 413 and Pennsylvania Route 413. Public transportation NJ Transit provides bus service in the city between Trenton and Philadelphia on the 409 and 418 routes and between Burlington and Camden on the 413 and 419 routes. The NJ River Line light rail system provides transportation between the Trenton Transit Center in Trenton and the Walter Rand Transportation Center (and other stations) in Camden, with stops at Burlington South and Burlington Towne Centre.
package io.sovaj.heartbeat.monitors;

import io.sovaj.heartbeat.api.AbstractMonitor;
import io.sovaj.heartbeat.api.TestElement;
import io.sovaj.heartbeat.api.Type;

/**
 * This class checks whether the running JVM version is the supported one.
 */
public class JVMMonitor extends AbstractMonitor {

    /**
     * Expected JVM version.
     */
    private String supportedJvmVersion;

    /**
     * Default constructor. Direct use is not recommended, except via Spring.
     */
    public JVMMonitor() {
        super("JVM Version", Type.JVM);
    }

    /**
     * Creates a JVMMonitor.
     *
     * @param version expected JVM version; must be of the form x.y with x and y numeric.
     */
    public JVMMonitor(String version) {
        super("JVM Version " + version, Type.JVM);
        // Validate the version format (dot escaped so only "x.y" is accepted):
        if (version == null || !version.matches("^\\d+\\.\\d+$")) {
            throw new IllegalArgumentException("illegal JVM version number: " + version);
        }
        supportedJvmVersion = version;
    }

    /**
     * {@inheritDoc}
     */
    public void doMonitor(TestElement monitoredElement) {
        final String runningOnJvm = System.getProperty("java.version");
        if (runningOnJvm.startsWith(supportedJvmVersion)) {
            monitoredElement.setTestIsOk();
        } else {
            final String msg = "The version of the JDK is not correct. The application is running on "
                    + runningOnJvm + " instead of " + supportedJvmVersion;
            monitoredElement.setTestIsKo(msg);
        }
    }

    /**
     * @param version the supportedJvmVersion to set
     */
    public void setSupportedJvmVersion(String version) {
        this.supportedJvmVersion = version;
    }
}
A Critical Review of Polymer-based Composite Automotive Bumper Systems

An automobile bumper is a structural component that contributes to vehicle crashworthiness or occupant protection during front or rear collisions. The bumper system also protects the hood, trunk, fuel, exhaust and cooling systems, as well as safety-related equipment. A brief description of bumper components and a critical review of polymer-based bumper systems, with the specific methodology used, are provided. This article advocates proper bumper design and material selection. The authors also discuss bumper components from the standpoint of the materials and their manufacturing processes.
//
//  AppDelegate.h
//  IosBeepingCoreLibTest
//
//  Created by <NAME> (<EMAIL>) on 08/07/14.
//  Copyright (c) 2014 Voctro Labs. All rights reserved.
//

#import <UIKit/UIKit.h>
#import "BeepingCore.h"

@interface AppDelegate : UIResponder <UIApplicationDelegate>

@property (strong, nonatomic) UIWindow *window;
@property (strong, nonatomic) BeepingCore *myBeepingCore;

@end
CINCINNATI - The Division III basketball game between Mount St. Joseph's and Hiram College on Sunday was far from ordinary, and it wasn't just because of the packed arena, the basketball luminary in attendance and the NCAA's decision to move the game ahead of schedule. The game was special because of one freshman forward, number 22, Lauren Hill, who made her college basketball debut while battling an inoperable brain tumor that has given her just months left to live. Hill had long dreamed of playing college basketball, of fulfilling a hope she had had since middle school. She needed just 17 seconds to make her dream come true, albeit differently than she initially imagined. The freshman forward made an uncontested left-handed layup for the opening basket after her team won the tip. Her tumor has forced the right-hander to shoot with her left hand because it has affected her coordination. Her shot brought a standing ovation from a sellout crowd at Xavier University's 10,000-seat arena and was among many emotional moments for Hill, who received love and support from the moment she walked out for warmups. Her audience included former Tennessee women's coach Pat Summitt. Her coach said normally 50 people attend their games. Hill has a brain tumor the size of a lemon, and it is growing daily. She was diagnosed last fall after suffering from vertigo and dizziness while playing for her high school team, reports CBS News correspondent Vladimir Duthiers. Despite her condition, she committed this year to playing basketball, a game she first fell in love with in the 6th grade. "She's chasing a dream," her father, Brent Hill, told CBS News' Steve Hartman. "And she wants people to see that - that they can do that." Her parents said she actually asked the doctor: "Can I at least still play basketball?" Her attitude is remarkable -- the only tears a CBS News crew ever saw when interviewing her were of joy when she read about all the people who were supporting her charity, "The Cure Starts Now." Curing pediatric brain cancer is one of her two top priorities. The other is simply to live long enough to play in her first college game. "I wanted to wear that jersey and feel like a superhero again because that's what I feel when I put on the jersey and that number," said Lauren. Everyone where she lives, near Cincinnati, Ohio, knows her jersey number -- 22. Everyone at Mount Saint Joseph University, where she's a freshman, knows of her remarkable commitment to this team. Believe it or not, even though Hill now has just weeks to live, she still gets up at 5:30 a.m. for basketball practice. Even though she can't even do most of the drills anymore, she still tries. She said she's not scared of dying. "But the people I worry about are the people that I'm leaving behind," Hill said.
President Oscar Arias will meet Wednesday with four Honduran presidential candidates to deliver a chilling message: Their country – which now borders on the poorest in the Western Hemisphere – will tip further into poverty and isolation if the San José Agreement remains unsigned. Cut off from the rest of the world and losing much of the foreign aid it generally receives, Honduras was blacklisted by the international community after its president was removed from office at gunpoint on June 28. Now, nearly three months after the coup, deposed President Manuel Zelaya is still waiting on the outskirts of his country as the San José Agreement (which would restore him to power) goes unsigned. Craig Kelly, a top official within the U.S. State Department, joined Arias in the announcement. The U.S. government has said it will not recognize the results of Honduras' elections as long as the accord drafted by Arias isn't met. Elvin Santos of the Liberal Party, Porfirio Lobo of the National Party, Felícito Avila of the Christian Democracy Party, and Bernard Martínez of the Innovation and Unity Party have been invited to Casa Presidencial on Wednesday. The Honduran elections are scheduled for Nov. 29, with the victor expected to take office in January.
import * as React from 'react'

export interface ArticleProps {
  article: {
    title: string
    category: string
    description: string
    image: string
    author: string
    date: number
  }
  setArticle: React.Dispatch<
    React.SetStateAction<{
      title: string
      category: string
      description: string
      image: string
      author: string
      date: number
    }>
  >
}

export const Article: React.FC<ArticleProps> = ({ article, setArticle }: ArticleProps) => {
  const handleInputChange = (e: any, identifier: any) => {
    const newArticle = { ...article }
    newArticle[identifier] = e.target.value
    setArticle(newArticle)
  }

  return (
    <>
      <h2>Compose your article</h2>
      <div className='separation-line'>
        <hr />
      </div>
      <div className='article-container'>
        <label className='label'>Image</label>
        <input
          className='input'
          onChange={(e) => handleInputChange(e, 'image')}
          placeholder='Pass in a URL'
        />
        <label className='label'>Title</label>
        <input className='input' onChange={(e) => handleInputChange(e, 'title')} />
        <label className='label'>Description</label>
        <input className='input' onChange={(e) => handleInputChange(e, 'description')} />
        <label className='label'>Category</label>
        <input className='input' onChange={(e) => handleInputChange(e, 'category')} />
        <label className='label'>Author</label>
        <input className='input' onChange={(e) => handleInputChange(e, 'author')} />
        <label className='label'>Date</label>
        <input
          className='input'
          onChange={(e) => handleInputChange(e, 'date')}
          placeholder='Pass in a timestamp'
        />
      </div>
    </>
  )
}
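A minimal parent-component sketch showing how the article state expected by this component might be wired up with useState. The component name, file path and initial values are illustrative assumptions, not part of the source.

import * as React from 'react'
import { Article } from './Article' // hypothetical path to the component above

export const ComposePage: React.FC = () => {
  // Shape matches the ArticleProps interface declared above.
  const [article, setArticle] = React.useState({
    title: '',
    category: '',
    description: '',
    image: '',
    author: '',
    date: Date.now(),
  })

  return <Article article={article} setArticle={setArticle} />
}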
/* Class11_Sub45_Sub6 - Decompiled by JODE
 * Visit http://jode.sourceforge.net/
 */

final class Class11_Sub45_Sub6 extends Class11_Sub45 {
    static int anInt8774;
    String[] aStringArray8775;
    int anInt8776;
    Class298 aClass298_8777;
    int anInt8778;
    Class213[] aClass213Array8779;
    String aString8780;
    int[] anIntArray8781;
    static Class84 aClass84_8782 = new Class84();
    int[] anIntArray8783;
    int anInt8784;
    int anInt8785;
    static int anInt8786;
    static Class253 aClass253_8787;

    static final Class11_Sub45_Sub19 method3439(int i) {
        try {
            if (i != 0)
                return null;
            anInt8786++;
            return Class32.aClass11_Sub45_Sub19_418;
        } catch (RuntimeException runtimeexception) {
            throw Class205.method1298(runtimeexception, "bl.A(" + i + ')');
        }
    }

    public static void method3440(int i) {
        do {
            try {
                aClass84_8782 = null;
                aClass253_8787 = null;
                if (i >= 15)
                    break;
                method3439(-116);
            } catch (RuntimeException runtimeexception) {
                throw Class205.method1298(runtimeexception, "bl.C(" + i + ')');
            }
            break;
        } while (false);
    }

    static final Class11_Sub10 method3441(int i) {
        try {
            anInt8774++;
            Class11_Sub10 class11_sub10 = Class11_Sub2_Sub2.method3333(false);
            ((Class11_Sub10) class11_sub10).aClass370_5394 = null;
            ((Class11_Sub10) class11_sub10).anInt5395 = 0;
            ((Class11_Sub10) class11_sub10).aClass11_Sub20_Sub1_5397 = new Packet(5000);
            if (i > -101)
                return null;
            return class11_sub10;
        } catch (RuntimeException runtimeexception) {
            throw Class205.method1298(runtimeexception, "bl.B(" + i + ')');
        }
    }

    public Class11_Sub45_Sub6() {
        /* empty */
    }
}
Join KPCC's AirTalk with host Larry Mantle weekdays for lively and in-depth discussions of city news, politics, science, the arts, entertainment, and more. Call-in number: 866-893-5722

For years, candidates for kidney transplants have had to bide their time on the waiting list until someone who's a match donates a kidney. However, new research out this week sheds light on what many experts are calling a groundbreaking procedure that could allow eligible candidates to receive a kidney from a donor who isn't compatible. The study from the New England Journal of Medicine details a procedure called "desensitization," which basically alters a patient's immune system so that it will accept a kidney from a donor who isn't a match. For many people, getting a kidney from an incompatible donor isn't an option because they have antibodies that will attack a transplanted organ. In the process of desensitization, doctors filter out antibodies from the patient's blood and introduce different antibodies for protection while the immune system regenerates those antibodies. For some reason that is still unknown, the regenerated antibodies aren't as likely to attack the transplanted organ. For many transplant candidates, the procedure could mean the difference between getting a kidney and having to spend the rest of their lives on dialysis, which can cost $70,000 a year for life. The desensitization procedure costs $30,000 and a transplant around $100,000, so many experts say it's cheaper in the long run (a rough comparison is sketched below).

Guests:

Dr. Dorry Segev, abdominal transplant surgeon and associate professor of surgery at the Johns Hopkins University School of Medicine; he's the lead author of the study "Survival Benefit with Kidney Transplants from HLA-Incompatible Live Donors"

Dr. Krista Lentine, transplant nephrologist, medical director of living donor evaluation, and professor of medicine at St. Louis University
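As a rough back-of-the-envelope comparison, using only the figures quoted above and setting aside ongoing post-transplant medication costs (not given here): desensitization plus transplant comes to roughly $30,000 + $100,000 = $130,000 up front, while dialysis at $70,000 a year passes that total in just under two years, which is the sense in which the procedure is called cheaper in the long run.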
def testGetExists2(self):
    ds = self.dummy_dataset()
    unit = ds.get(0x300A00B2, None).value
    self.assertEqual(
        unit, 'unit001',
        "dataset.get() did not return existing member by long tag")
/**
 * Restore shelved files from a pending change into a workspace. </p>
 *
 * 'p4 unshelve' retrieves the shelved files from a pending changelist and
 * copies them into a pending changelist on the invoking user's workspace.
 * Unshelving files from a pending changelist is restricted by the user's
 * permissions on the files. A successful unshelve operation places the shelved
 * files on the user's workspace with the same open action and pending
 * integration history as if it had originated from that user and client. </p>
 *
 * @see PerforceTask
 * @see ClientTask
 */
public class UnshelveTask extends ClientTask {

    /**
     * The source pending changelist that contains the shelved files.
     */
    protected String fromChangelist = String.valueOf(IChangelist.DEFAULT);

    /**
     * The target changelist to receive the shelved files.
     */
    protected String toChangelist = String.valueOf(IChangelist.DEFAULT);

    /** Force the unshelve operation. */
    protected boolean forceUnshelve = false;

    /**
     * Previews what would be unshelved without actually changing any files or
     * metadata.
     */
    protected boolean preview = false;

    /**
     * Default constructor.
     */
    public UnshelveTask() {
        super();
        commandOptions = new UnshelveFilesOptions(forceUnshelve, preview);
    }

    /**
     * Sets the from changelist.
     *
     * @param fromChangelist
     *            the new from changelist
     */
    public void setFromChangelist(String fromChangelist) {
        this.fromChangelist = fromChangelist;
    }

    /**
     * Sets the to changelist.
     *
     * @param toChangelist
     *            the new to changelist
     */
    public void setToChangelist(String toChangelist) {
        this.toChangelist = toChangelist;
    }

    /**
     * Sets the force unshelve.
     *
     * @param forceUnshelve
     *            the new force unshelve
     */
    public void setForceUnshelve(boolean forceUnshelve) {
        ((UnshelveFilesOptions) commandOptions).setForceUnshelve(forceUnshelve);
    }

    /**
     * Sets the preview.
     *
     * @param preview
     *            the new preview
     */
    public void setPreview(boolean preview) {
        ((UnshelveFilesOptions) commandOptions).setPreview(preview);
    }

    /**
     * Execute the Perforce unshelve command with source changelist, target
     * changelist and options. Log the returned file specs.
     * <p>
     * Restore shelved files from a pending change into a workspace.
     * <p>
     * Unshelving files from a pending changelist is restricted by the user's
     * permissions on the files. A successful unshelve operation places the
     * shelved files on the user's workspace with the same open action and
     * pending integration history as if it had originated from that user and
     * client.
     *
     * @see PerforceTask#execP4Command()
     */
    protected void execP4Command() throws BuildException {
        try {
            fileSpecs = FileSpecBuilder.makeFileSpecList(getFiles());
            retFileSpecs = getP4Client().unshelveFiles(fileSpecs,
                    parseChangelist(fromChangelist),
                    parseChangelist(toChangelist),
                    ((UnshelveFilesOptions) commandOptions));
            logFileSpecs(retFileSpecs);
        } catch (P4JavaException e) {
            throw new BuildException(e.getLocalizedMessage(), e, getLocation());
        } catch (P4JavaError e) {
            throw new BuildException(e.getLocalizedMessage(), e, getLocation());
        } catch (Throwable t) {
            throw new BuildException(t.getLocalizedMessage(), t, getLocation());
        }
    }
}
Hard diffraction in photoproduction with Pythia 8 We present a new framework for modeling hard diffractive events in photoproduction, implemented in the general purpose event generator Pythia 8. The model is an extension of the model for hard diffraction with dynamical gap survival in pp and ppbar collisions proposed in 2015, now also allowing for other beam types. It thus relies on several existing ideas: the Ingelman-Schlein approach, the framework for multiparton interactions and the recently developed framework for photoproduction in gamma p, gamma gamma, ep and $e^+e^-$ collisions. The model proposes an explanation for the observed factorization breaking in photoproduced diffractive dijet events at HERA, showing an overall good agreement with data. The model is also applicable to ultraperipheral collisions with pp and pPb beams, and predictions are made for such events at the LHC. Introduction Diffractive excitations represent large fractions of the total cross section in a wide range of collisions. A part of these has been seen to have a hard scale, as in e.g. the case of diffractive dijet production. These hard diffractive events allow for a perturbative calculation of the scattering subprocess, but still require some phenomenological modeling. This includes modeling of the Pomeron, expected to be responsible for the color-neutral momentum transfer between the beam and the diffractive system X. In the framework of collinear factorization, a diffractive parton distribution function (dPDF) may be defined. This can further be factorized into a Pomeron flux and a PDF, describing the flux of Pomerons from the beam and the parton density within the Pomeron, respectively. Here we focus mainly on photoproduced diffractive dijets in ep collisions. This scattering process can be separated into different subsystems, visualized in Fig. 1. The initial state consists of an electron and a proton, with the former radiating off a (virtual) photon. If the photon is highly virtual, we are in the range of deep inelastic scattering (DIS), while a photon with low enough virtuality can be considered (quasi-)real. This is the photoproduction regime. No clear distinction between the two regimes exists, however, and photons of intermediate virtuality require careful consideration to avoid double-counting. A special feature in the photoproduction regime is that there is a non-negligible probability for the photon to fluctuate into a hadronic state. These resolved photons open up for all possible hadron-hadron processes, including diffractive ones. The next subsystem shown in Fig. 1 is the photon-proton scattering system. Here, diffraction could in principle occur on both sides if the photon is resolved. In direct photoproduction (and in DIS) the diffractive system can only be present on the photon side, as no Pomeron flux can be defined for point-like photons. In this article the emphasis will be on Pomeron emission from the proton. The final subsystem is the hard scattering generated inside the diffractive system X. For direct photoproduction (and DIS) this includes the photon as an incoming parton, see Fig. 1 (a). In the resolved case, Fig. 1 (b), a parton is extracted from the hadronic photon, which then proceeds to initiate the hard scattering along with a parton extracted from the Pomeron. In both cases a beam remnant is left behind from the Pomeron, while resolved photoproduction also gives rise to a beam remnant from the hadronic photon. 
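The factorization just described can be written schematically (a standard Ingelman-Schlein form for a Pomeron emitted off the proton; the explicit flux and Pomeron-PDF parametrizations are the HERA-fitted ones referred to later):

$$ f_i^{D}(x, Q^2; x_{\mathbb{P}}, t) = f_{\mathbb{P}/p}(x_{\mathbb{P}}, t)\, f_{i/\mathbb{P}}\!\left(\frac{x}{x_{\mathbb{P}}}, Q^2\right), $$

where $x_{\mathbb{P}}$ is the fraction of the proton momentum carried by the Pomeron and $t$ the squared momentum transfer at the proton vertex.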
Multiple scatterings or multiparton interactions (MPIs) are expected between the remnants, but also in the larger photon-proton system. The particles produced by the latter type of MPIs may destroy the diffractive signature, the rapidity gap between the diffractive system and the elastically scattered proton (or meson, depending on the side of the diffractive system). The model for photoproduced diffractive dijets presented here is based on the general-purpose event generator Pythia 8. It combines the existing frameworks for photoproduction and hard diffraction, the latter originally introduced for purely hadronic collisions. The new model thus allows for event generation of photon-induced hard diffraction with different beam configurations. The model is highly dependent on the components of Pythia 8. The relevant onesthe model for MPIs, photoproduction and hard diffraction -are described in the following sections. The first measurements of diffractive dijets was done by the UA8 experiment at the SppS collider at CERN. Later on, similar events have been observed in ep collisions at HERA, in pp collisions at the Tevatron, and nowadays also in pp collisions at the LHC. Similarly, diffractively produced W ± and Z 0 bosons have been observed at the Tevatron. All of these processes are expected to be calculable within a perturbative framework, such as the Ingelman-Schlein picture. A model for such hard diffractive events was included in Pythia 8, based on the Ingelman-Schlein approach and the rapidity gap survival idea of Bjorken. The model proposed an explanation of the observed factorization breaking in hard diffractive pp collisions -the observation that with the Pomeron PDFs and fluxes derived from HERA DIS data, the factorization-based calculation was an order of magnitude above the measurement. The suppression factor required on top of the dPDF-based calculation, was dynamically generated by requiring no additional MPIs in the pp (or pp) system. The model predicted production rates in agreement with pp and pp measurements, albeit some differential distributions did show room for improvement when comparing to Tevatron data. The latest preliminary analysis on diffractive dijets by CMS finds a very good agreement between the model and data in all differential distributions. First evidence of factorization breaking for diffractive dijets in ep collisions was observed by an H1 measurement, where a suppression factor of 0.6 was required to describe the dijet data in the photoproduction region, whereas the analysis for the DIS region was, by construction, well described by the factorization-based model without a corresponding suppression factor. Advances in the formulation of the dPDFs improved the description of data in the DIS regime, but the discrepancies remained in the photoproduction limit. Several analyses have been performed by H1 and ZEUS for diffractive dijet production, all requiring a suppression factor between 0.5 − 0.9 in order for the factorization-based calculations to describe data. The extension of the hard diffraction model in this article, to collisions with (intermediate) photons, makes it possible to explain the factorization-breaking in the photoproduction regime. The model is also applicable to the DIS regime, but here no further suppression is added since the highly virtual photons do not have any partonic structure that would give rise to the MPIs. 
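In terms of the dynamical gap survival discussed above, the suppression can be summarized schematically as

$$ \sigma^{D}_{\mathrm{observed}} \simeq \langle S^2 \rangle\, \sigma^{D}_{\mathrm{IS}}, \qquad \langle S^2 \rangle = \mathcal{P}(\text{no additional MPI in the } pp \text{ or } \gamma p \text{ system}), $$

where $\sigma^{D}_{\mathrm{IS}}$ denotes the dPDF-based (Ingelman-Schlein) prediction; in the model the survival probability is generated event-by-event from the MPI machinery rather than being imposed as a universal constant.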
Furthermore, the framework can also be applied to diffractive photoproduction in purely hadronic collisions, usually referred to as ultra-peripheral collisions (UPCs). The model predicts a substantial suppression for diffractive dijets in UPCs at the LHC. The article is structured as follows: After the introduction in sec. 1, we briefly describe in sec. 2 the event generation procedure in Pythia 8. We then proceed in sec. 3 to the photoproduction framework available in Pythia 8 and continue to a short description of the hard diffraction model in sec. 4. We present results with our model compared to data from HERA on diffractive dijets in photoproduction in sec. 5, and show some predictions for photoproduction in UPCs at the LHC in sec. 6. We end with sec. 7 where we summarize our work and provide an outlook for further studies. Event generation with Pythia 8 Recently, Pythia 8 has undergone a drastic expansion. Where the earlier version, Pythia 6, was designed to accommodate several types of collisions (lepton-lepton, hadron-hadron and leptonhadron, excluding nuclei), the rewrite to C++ focused mainly on the hadronic physics at the Tevatron and the LHC. While the LHC will run for years to come, there are several future collider projects under consideration. A common feature between the projected colliders is that they will be using lepton beams either primarily (linear e + e − colliders: CLIC and ILC or Electron-Ion Collider (EIC) ), or as a first phase towards a hadronic collider (FCC ). To enable studies related to these future colliders, Pythia 8 has been extended to handle many processes involving lepton beams. Another major facility has been the extension from pp to pA and AA collisions with the inclusion of the Angantyr model for heavy ion collisions. Combining the heavy-ion machinery with the recent developments related to lepton beams will also allow simulations of eA collisions and ultra-peripheral AA collisions. Work in this direction has been started within the Pythia collaboration. The Pythia 6 description of lepton-lepton and lepton-hadron collisions included a sophisticated model for merging of the DIS regime (high-virtuality photons) and the photoproduction regime (low-virtuality photons). This, however, created upwards of 25 different event classes, each of which had to be set up differently. The model for the transition from photoproduction to DIS turned out not to agree so well with data, and the division of the different event classes was somewhat artificial. The aim for the Pythia 8 implementation of these processes has been to reduce the number of hard-coded event classes and increase robustness. The present framework, however, does not yet include a smooth merging of the high-and low-virtuality events and therefore the events with intermediate virtualities are not addressed. Work towards such a combined framework is currently ongoing. In addition, there is progress towards improving the parton showers for DIS events (see e.g. and ). In this paper we focus on the photoproduction regime, which is mature and well tested for hard-process events with virtuality 1 GeV against LEP and HERA data. The generation of non-diffractive (ND) pp or pp events proceeds with the following steps. First, the incoming beams are set up with (possible) PDFs at a given (user-defined) energy. Then the hard scattering of interest is generated based on the matrix element (ME) of the process and the PDFs. 
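This hard-scattering step rests on standard collinear factorization; schematically, for two incoming hadron beams,

$$ \sigma_{\mathrm{hard}} = \sum_{i,j} \int \mathrm{d}x_1\, \mathrm{d}x_2\; f_i(x_1, \mu_F^2)\, f_j(x_2, \mu_F^2)\; \mathrm{d}\hat{\sigma}_{ij}(x_1 x_2 s, \mu_F^2, \mu_R^2), $$

with $f_i$, $f_j$ the PDFs of the incoming beams evaluated at the factorization scale $\mu_F$ and $\mathrm{d}\hat{\sigma}_{ij}$ the partonic cross section.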
The generated partonic system is then evolved with a parton shower (PS), in Pythia 8 using the interleaved evolution of both initial and final state showers (ISR, FSR) and MPIs. The splitting probabilities for the FSR and ISR are obtained from the standard collinear DGLAP evolution equations. The ISR probabilities also depend on the PDFs of the incoming beams, as the evolution is backwards from a high scale, set by the hard process, to a lower scale. Similarly, the MPI probabilities depend on the PDFs of the incoming beams, and these have to be adjusted whenever an MPI has removed a parton from the beam. Colour reconnection (CR) is allowed after the evolution to mimic the finite-colour effects that are not taken into account in the infinite-colour PS. After the partonic evolution, a minimal number of partons are added as beam remnants in order to conserve colour, flavour and the total momentum of the event. Lastly, the generated partons are hadronized using the Lund string model along with decays of unstable particles. In ep events, Pythia 8 operates with two regimes: the DIS regime, where the electron emits a highly virtual photon ($Q^2 \gg 1~\mathrm{GeV}^2$), and the photoproduction regime, where the photon is (quasi-)real ($Q^2 \lesssim 1~\mathrm{GeV}^2$). Currently no description is available for intermediate-virtuality photons. In DIS events, the hard scattering occurs between the incoming lepton and a parton from the hadron beam by an exchange of a virtual photon (or another EW boson). The photon can thus be considered devoid of any internal structure. In the photoproduction regime, the photon flux can be factorized from the hard scattering, such that the intermediate photon can be regarded as a particle initiating the hard scattering. In this regime, both point-like and hadron-like states of the photon occur. This significantly increases the complexity of the event generation, thus the photoproduction regime is thoroughly described in the next section.

The photoproduction framework

The (quasi-)real photon contains a point-like, direct part without substructure as well as a hadron-like part with internal structure. The latter part, the resolved photon, dominates the total cross section of the physical photon. The total cross section is expected to contain all types of hadronic collisions, including elastic (el), single- and double-diffractive (SD, DD) and inelastic ND collisions (combined schematically below). The ND collisions contain both hard and soft events, where the former can be calculated perturbatively, while the latter are modeled using the MPI framework in Pythia 8. Elastic and diffractive collisions require a phenomenological model for the hadronic photon. The ND processes were first introduced in Pythia 8.215, with a cross section given as a fraction of the total cross section, $\sigma_{\mathrm{ND}} = f\,\sigma_{\mathrm{tot}}$ with $f < 1$. The framework for photoproduction has since been expanded to include all soft QCD processes using the Schuler-Sjöstrand model in Pythia 8.235, and with this the cross sections for each of the event classes are calculated separately. The full description of these event classes is postponed to a forthcoming paper, as we here concentrate on diffractive processes with a hard scale. Between the two versions, the $\gamma p$ and $\gamma\gamma$ frameworks were extended to ep and $e^+e^-$ by the introduction of a photon flux within a lepton, now giving a complete description of all photoproduction events in $\gamma p$, $\gamma\gamma$, ep and $e^+e^-$ collisions in the latest release, 8.240.
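Schematically, the event classes of the resolved photon listed above combine as

$$ \sigma_{\mathrm{tot}}^{\gamma p} \simeq \sigma_{\mathrm{el}} + \sigma_{\mathrm{SD}} + \sigma_{\mathrm{DD}} + \sigma_{\mathrm{ND}}, $$

with the ND part originally fixed as the fraction $\sigma_{\mathrm{ND}} = f\,\sigma_{\mathrm{tot}}$ and, from Pythia 8.235 onwards, each class computed separately from the Schuler-Sjöstrand model.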
Furthermore, an option to provide an external photon flux has been included, allowing the user to study photoproduction also in UPCs, where the virtuality of the intermediate photon is always small and thus the photoproduction framework directly applicable. An internal setup for these cases is under way. The resolved photon is usually split into two parts: one describing a fluctuation of the photon into a low-mass meson and the other describing a fluctuation into a $q\bar{q}$ pair of higher virtuality. The former is usually treated according to a vector-meson dominance (VMD) model, where the photon is a superposition of the lightest vector mesons (usually $\rho^0$, $\omega$ and $\phi$), whereas the latter, the anomalous part of the photon, is treated as "the remainder", $\mathrm{anom} = \mathrm{tot} - \mathrm{direct} - \mathrm{VMD}$. A generalization of the VMD exists (the GVMD model) which takes into account also higher-mass mesons with the same quantum numbers as photons. Note, however, that if the resonances are broad and closely spaced, they would look like a smooth continuum. The event generation for the direct photons begins by sampling the hard scattering between the incoming photon and a parton (or another direct photon in the case of $\gamma\gamma$), e.g. $\gamma q \to qg$. The subsequent parton-shower generation always includes FSR and, in the $\gamma p$ case, also ISR for the hadronic beam. The whole photon momentum goes into the hard process, $x \sim 1$, as direct photons do not have any internal structure. Hence there is no energy left for MPIs and no photon remnant is left behind. The hadronization is then performed with the Lund string model as usual. For resolved photons, a model for the partonic content of the hadronic photon, the photon PDF, needs to be taken into account. This PDF includes both the VMD and the anomalous contributions, the latter being calculable within perturbative QCD, the former requiring a non-perturbative input. As in the case of protons, the non-perturbative input is fixed in a global QCD analysis using experimental data. There are several PDF analyses available for photons using mainly data from LEP, but some also exploiting HERA data to constrain the gluonic part of the PDF. Ideally one would have a PDF for each of the VMD states; in practice one uses the same parametrization for all, or approximates these with pion PDFs. After the setup of the photon PDFs, the hard-collision kinematics has to be chosen. Here, a parton from the photon PDF initiates the hard process, carrying a fraction of the photon momentum, $x_i < 1$, with parton $i$ being extracted from the photon. Thus energy is still available in the fluctuation after the initial hard process, opening up for additional MPIs along with ISR and FSR in the subsequent evolution. As with other hadronic processes, a remnant is left behind, with its structure being derived from the flavor content of the original meson or $q\bar{q}$ state and the kicked-out partons. As in pp collisions, the PS splitting probabilities with resolved photons are based on the DGLAP equations. The DGLAP equation governing the scale evolution of resolved photon PDFs can be written as

$$ \frac{\partial f_{i/\gamma}(x_i, Q^2)}{\partial \log Q^2} = \frac{\alpha_{\mathrm{em}}}{2\pi}\, e_i^2\, P_{i\gamma}(x_i) + \frac{\alpha_{\mathrm{s}}}{2\pi} \sum_j \int_{x_i}^{1} \frac{\mathrm{d}z}{z}\, P_{ij}(z)\, f_{j/\gamma}\!\left(\frac{x_i}{z}, Q^2\right), $$

where $f_{i(j)/\gamma}$ corresponds to the PDF of the photon, $x_i$ the fractional momentum of the photon carried by parton $i$, $\alpha_{\mathrm{em}}$, $\alpha_{\mathrm{s}}$ the electromagnetic and strong couplings, $e_i$ the charge of parton $i$ and $P_{ij}$, $P_{i\gamma}$ the DGLAP and $\gamma \to q\bar{q}$ splitting kernels, respectively. The term proportional to $P_{i\gamma}$ gives rise to the anomalous part of the photon PDF. In Pythia 8 the separation into VMD and anomalous contributions is not explicitly performed.
By the backwards evolution of ISR, however, a resolved parton can be traced back to the original photon by a γ → qq̄ branching at some scale Q². Post facto, an event where this happens for Q² > Q²_0 can then be associated with an anomalous photon state, and where not, with a VMD state. The dividing scale Q_0 is arbitrary to some extent, but would be of the order of the ρ⁰-meson mass. In the interleaved evolution of the parton showers and MPIs, additional MPIs and ISR splittings on the photon side become impossible below the scale where the photon became unresolved. This reduces the average number of MPIs for resolved photons compared to hadrons, and therefore has an impact also for the hard diffraction model as discussed in sec. 3.1.

MPIs with photons

When the photon becomes resolved it is possible to have several partonic interactions in the same event. MPIs in Pythia 8 are generated according to the leading-order (LO) QCD cross sections, albeit regularized by introducing a screening parameter p⊥0,

dσ̂/dp⊥² ∝ α_s²(p⊥²)/p⊥⁴ → α_s²(p⊥0² + p⊥²)/(p⊥0² + p⊥²)².

Note here that p⊥0 can be related to the size d of the colliding objects, p⊥0 ∼ 1/d, thus a different value of the screening parameter could be motivated if the photon has a different size than the proton. Further, one could imagine working with different matter profiles for both the proton and the photon, and possibly also for each of the components of the photon. For now the shape is kept common for all systems, but possibly with different scale factors, i.e. average radii. The screening parameter is allowed to vary with the center-of-mass energy √s,

p⊥0(√s) = p⊥0^ref (√s/√s_ref)^p,

with p⊥0^ref and p tunable parameters and √s_ref a reference scale. Thus both the parameters from the matter profile and the parameters related to p⊥0 require input from data. These parameters can be fixed by a global tune, with the Monash tune being the current default. The MPI parameters in this tune, however, are derived using only data from pp and pp̄ collisions. As the partonic structure and matter profile of resolved photons can be very different from that of protons, the values for the MPI parameters should be revisited for γγ and γp collisions. The limitation is that there are only a few data sets sensitive to the MPIs available for these processes, and therefore it is not possible to perform a global retune of all the relevant parameters. Thus we have chosen to use the same form of the impact-parameter profile as for protons and study only the p⊥0 parameters (which allow for different scale factors). For γγ collisions, LEP data is available for charged-hadron p⊥ spectra in different W bins, allowing studies of the energy dependence of p⊥0. In the γp case the HERA data for charged-hadron production is averaged over a rather narrow W_γp bin. Hence a similar study of the energy dependence is not possible for γp, and it becomes necessary to assume the same energy dependence for p⊥0 in γp as for pp collisions. The value of the p⊥0 parameter, however, can be retuned with the available data. A good description of the H1 data from HERA can be obtained with a slightly larger p⊥0^ref of 3.00 GeV (compared to the pp-tuned value of 2.28 GeV). Thus the γp tune is consistent with a smaller size of the photon, i.e. that the photon does not quite reach a typical hadron size during its fluctuation. The rule of thumb is that a larger screening parameter gives less MPI activity in an event; thus a smaller probability for MPIs with resolved photons is expected compared to proton-proton collisions.
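To make the effect of the screening parameter more tangible, the following Python sketch evaluates the regularized MPI weight and the energy rescaling of p⊥0 discussed above. It is an illustration only, not the Pythia 8 implementation: the reference values p⊥0^ref = 2.28 GeV and 3.00 GeV, the reference energy √s_ref = 7000 GeV and the exponent p = 0.215 are assumed inputs modelled on typical pp-tune values, and the running of α_s is omitted.

```python
import math

def pT0(sqrt_s, pT0_ref=2.28, sqrt_s_ref=7000.0, p=0.215):
    """Energy-dependent screening scale, p_T0(sqrt(s)) = p_T0^ref * (sqrt(s)/sqrt(s_ref))^p."""
    return pT0_ref * (sqrt_s / sqrt_s_ref) ** p

def mpi_weight(pT2, pT02):
    """Ratio of the regularized to the unregularized 2 -> 2 cross section,
    1/pT^4 -> 1/(pT0^2 + pT^2)^2, with the alpha_s factors left out for brevity."""
    return (pT2 / (pT02 + pT2)) ** 2

# A larger screening parameter damps the low-pT MPI cross section more strongly,
# so fewer additional interactions are generated.
for label, ref in [("pp-like, 2.28 GeV", 2.28), ("gamma-p-like, 3.00 GeV", 3.00)]:
    pt0 = pT0(200.0, pT0_ref=ref)          # W ~ 200 GeV, typical of HERA photoproduction
    weights = [round(mpi_weight(pt * pt, pt0 * pt0), 3) for pt in (1.0, 2.0, 5.0)]
    print(label, weights)
```

Running the loop shows the qualitative point made in the text: at fixed p⊥ the weight, and hence the MPI activity, is smaller for the larger γp-like reference value than for the pp-like one.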
As the model for hard diffraction is highly dependent on the MPI framework, we expect that the increased screening parameter gives less gap suppression in photoproduction than what was found in the proton-proton study. This is simply because there is a larger probability for the event to have no additional MPIs when the p⊥0^ref value is larger. Furthermore, since the ISR splittings may collapse the resolved photon into an unresolved state and, by construction, the direct-photon induced processes do not give rise to additional interactions, the role of MPIs is suppressed for photoproduction compared to purely hadronic collisions. Also, the invariant mass of the photon-proton system in the photoproduction data from HERA is typically an order of magnitude smaller than that in previously considered (anti-)proton-proton data, which further reduces the probability for MPIs. Anticipating results to be shown below, this is in accordance with what is seen in diffractive dijet production at HERA, where the suppression factor is much smaller than that at the Tevatron.

Photon flux in different beam configurations

In the photoproduction regime one can factorize the flux of photons from the hard-process cross section. For lepton beams a virtuality-dependent flux is used,

f_γ/l(x, Q²) = (α_em/2π) [(1 + (1 − x)²)/x · 1/Q² − 2 m_l² x/Q⁴],

where x is the momentum fraction of the photon w.r.t. the lepton. Integration from the kinematically allowed minimum virtuality up to the maximum Q²_max allowed by the photoproduction framework yields the well-known Weizsäcker-Williams flux

f_γ/l(x) = (α_em/2π) [(1 + (1 − x)²)/x · log(Q²_max/Q²_min) − 2 m_l² x (1/Q²_min − 1/Q²_max)],

where m_l is the mass of the lepton. In pp collisions the electric form factor arising from the finite size of the proton, or equivalently the requirement that the proton should not break up by the photon-emission recoil, needs to be taken into account. A good approximation of a Q²-differential flux is given by

f_γ/p(x, Q²) = (α_em/2π) [(1 + (1 − x)²)/x] (1/Q²) [1/(1 + Q²/Q²_0)]⁴,

where Q²_0 = 0.71 GeV². Integration over the virtuality provides the flux derived by Drees and Zeppenfeld,

f_γ/p(x) = (α_em/2π) [(1 + (1 − x)²)/x] [ln A − 11/6 + 3/A − 3/(2A²) + 1/(3A³)],

where A = 1 + Q²_0/Q²_min and Q²_min is the minimum scale limited by the kinematics of the photon emission. Due to the form factor the photon flux drops rapidly with increasing virtuality and becomes negligible already at Q² ∼ 2 GeV². This ensures that the photons from protons are well within the photoproduction regime and there is no need to introduce any cut on the maximal photon virtuality. In the case of heavy ions it is more convenient to work in impact-parameter space. The size of a heavy nucleus is a better defined quantity than it is for protons, so the impact parameter b of the collision can be used to reject the events where additional hadronic interactions would overwhelm the electromagnetic interaction. Simply rejecting the events for which the minimal impact parameter, b_min, is smaller than the sum of the radii of the colliding nuclei (or of the colliding hadron and nucleus for pA) provides a b-integrated flux,

f_γ/A(x) = (2 α_em Z²)/(x π) [ξ K₀(ξ) K₁(ξ) − (ξ²/2)(K₁²(ξ) − K₀²(ξ))],

where Z is the charge of the emitting nucleus, K_i are the modified Bessel functions of the second kind and ξ = b_min x m_N, where x is a per-nucleon energy fraction and m_N a per-nucleon mass. The downside of working in the impact-parameter space is that the virtuality cannot be sampled according to the flux, as virtuality and impact parameter are conjugate variables. For heavy ions, however, the maximal virtuality is very small (of the order of 60 MeV), and can be safely neglected for the considered applications. The different photon fluxes are shown in Fig. 2.
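The flux expressions collected above are simple enough to evaluate directly; the Python sketch below does so for the lepton (Weizsäcker-Williams), proton (Drees-Zeppenfeld) and b-integrated heavy-ion cases. The kinematic limits Q²_min, Q²_max and b_min must be supplied by the caller, and the code is a numerical illustration of the reconstructed formulas rather than the actual Pythia 8 implementation.

```python
import math
from scipy.special import k0, k1   # modified Bessel functions K0 and K1

ALPHA_EM = 1.0 / 137.036
HBARC = 0.1973                      # GeV * fm, converts b_min in fm to natural units

def flux_lepton_ww(x, q2min, q2max, m_lepton=0.000511):
    """Integrated Weizsacker-Williams flux for a photon with momentum fraction x of a lepton."""
    pref = ALPHA_EM / (2.0 * math.pi)
    return pref * ((1.0 + (1.0 - x) ** 2) / x * math.log(q2max / q2min)
                   - 2.0 * m_lepton ** 2 * x * (1.0 / q2min - 1.0 / q2max))

def flux_proton_dz(x, q2min, q2_0=0.71):
    """Drees-Zeppenfeld flux for a photon emitted by a proton (dipole form factor)."""
    a = 1.0 + q2_0 / q2min
    pref = ALPHA_EM / (2.0 * math.pi) * (1.0 + (1.0 - x) ** 2) / x
    return pref * (math.log(a) - 11.0 / 6.0 + 3.0 / a
                   - 3.0 / (2.0 * a ** 2) + 1.0 / (3.0 * a ** 3))

def flux_nucleus(x, z_charge, b_min_fm, m_nucleon=0.9383):
    """b-integrated flux from a nucleus of charge Z, rejecting impact parameters below b_min."""
    xi = b_min_fm * x * m_nucleon / HBARC
    return (2.0 * ALPHA_EM * z_charge ** 2 / (x * math.pi)
            * (xi * k0(xi) * k1(xi) - 0.5 * xi ** 2 * (k1(xi) ** 2 - k0(xi) ** 2)))

# Illustrative values at x = 0.01: electron beam, proton beam, and a lead nucleus (Z = 82)
# with b_min roughly the sum of the proton and Pb radii.
print(flux_lepton_ww(0.01, q2min=1e-6, q2max=1.0),
      flux_proton_dz(0.01, q2min=1e-4),
      flux_nucleus(0.01, z_charge=82, b_min_fm=7.8))
```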
When extending the photoproduction regime from pure photon-induced processes to collisions where the photon is emitted by a beam particle, some additions are needed. In direct photoproduction, the partonic processes can be generated by using the photon flux directly in the factorized cross-section formula, similar to what is done with the PDFs in a usual hadronic collision. In resolved photoproduction, a PDF for the partons from the photons emitted from the beam particle is needed. This can be found by convoluting the photon flux from the beam particle b, f_γ/b(x), with the photon PDFs, f_i/γ(x, Q²), where Q² refers to the scale at which the resolved photon is probed. This scale can be linked to the scale of the hard(est) process, e.g. the p⊥ of the leading jet in jet-production processes. The convolution yields

f_i/b(x_i, Q²) = ∫_{x_i}^{1} (dx_γ/x_γ) f_γ/b(x_γ) f_i/γ(x_i/x_γ, Q²),

with x_i the energy fraction of the beam-particle momentum carried by parton i and x_γ the energy fraction of the photon w.r.t. the beam. In practice the intermediate photon kinematics is sampled according to the appropriate flux during the event generation, thus taking care of the convolution on the fly.

Hard diffraction in Pythia 8

The Pythia model for hard diffractive events in pp collisions was introduced as an explanation for the factorization breaking between diffractive DIS at HERA and the Tevatron. The model can be applied to any process with sufficiently hard scales, including production of dijets, Z⁰, W±, H etc. It begins with the Ingelman-Schlein picture, where the diffractive cross section factorizes into a Pomeron-particle cross section and a Pomeron flux. Based on this ansatz a tentative probability for diffraction is defined as the ratio of the diffractive PDF (dPDF) to the inclusive PDF,

P^D = f^D_i/p(x_i, Q²) / f_i/p(x_i, Q²),

as it is assumed that the proton PDF can be split into a diffractive and a non-diffractive part, with f_i/p describing the PDF of the proton and f^D_i/p being the diffractive part of the proton PDF, defined as a convolution of the Pomeron flux in a proton (f_P/p) and the Pomeron PDFs (f_i/P). The probabilities for side A or B to be the diffractive system are given as P^D_{A,B}, and each relies on the variables of the opposite side. This tentative probability is then used to classify an event as preliminarily diffractive or non-diffractive. If non-diffractive, the events are handled as usual non-diffractive ones. If diffractive, the interleaved evolution of ISR, FSR and MPIs is applied, but only events surviving without additional MPIs are considered as fully diffractive events. The reasoning behind this is that additional MPIs in the pp system would destroy the rapidity gap between the diffractive system and the elastically scattered proton. The gap survives if no further MPIs occur, and the event can be experimentally identified as being diffractive, e.g. with the large-rapidity-gap method. This no-MPI requirement suppresses the probability for diffraction with respect to the tentative dPDF-based probability, and can thus be seen as a gap-survival factor. Unlike other methods of gap survival (e.g. [9]), this method is performed on an event-by-event basis and thus is inherently a dynamical effect. Furthermore, it does not include any new parameters, but relies solely on the existing and well-tested (for pp/pp̄) MPI framework. Once the system is classified as diffractive, the full interleaved evolution is performed in the Pp subsystem. Here the model does not restrict the number of MPIs, as these will not destroy the rapidity gap between the scattered proton and the Pomeron remnant.
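The classification logic of the hard diffraction model can be summarized in a few lines of Python. The PDF objects and the evolution routine below are hypothetical stand-ins (they are not the Pythia 8 API); the sketch only illustrates the order of the steps: compute the tentative dPDF/PDF ratio, make a preliminary classification, and demote tentatively diffractive events that acquire additional MPIs during the interleaved evolution.

```python
import random

def tentative_diffraction_prob(x, q2, pdf_inclusive, pdf_diffractive):
    """P^D = f^D_{i/p}(x, Q^2) / f_{i/p}(x, Q^2); both PDFs are assumed to be callables."""
    return pdf_diffractive(x, q2) / pdf_inclusive(x, q2)

def classify_event(event, pdf_inclusive, pdf_diffractive, evolve):
    """Return 'diffractive' or 'non-diffractive' for one hard-scattering event.

    evolve(event, system) is a stand-in for the interleaved ISR/FSR/MPI evolution
    and is assumed to return the number of additional MPIs it generated.
    """
    p_diff = tentative_diffraction_prob(event.x, event.q2, pdf_inclusive, pdf_diffractive)
    if random.random() > p_diff:
        return "non-diffractive"            # handled as a normal inclusive event
    n_mpi = evolve(event, system="full")    # evolve the full (gamma)p or pp system
    if n_mpi > 0:
        return "non-diffractive"            # extra MPIs would fill the rapidity gap
    evolve(event, system="Pomeron-proton")  # MPIs inside the P-p subsystem are unrestricted
    return "diffractive"
```

The rejection in the middle step is what the text refers to as the dynamical gap-survival factor: no new parameter enters, only the probability that the evolution produces no additional MPIs.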
Hard diffraction with photons

In this article we extend the hard diffraction model to collisions involving one or two (intermediate) photons. The extension is straightforward. Changing the proton PDF in the expressions above to a photon PDF on one side, it is possible to describe hard diffraction in γp interactions. Changing it on both sides, the model is extended to γγ collisions. Thus the tentative probability above remains valid in events with (intermediate) photons, with the change p → γ. Connecting the event generation with an appropriate photon flux allows one to study hard diffraction in both ep and e+e− collisions as well as in ultra-peripheral collisions of protons and nuclei. The differential cross section of the hard scattering (X_h) in a diffractive system X, e.g. the dijet system within the diffractive system, for direct (dir) and resolved (res) photoproduction can then schematically be written as

dσ^dir_{AB→X_h B} = f_γ/A(x_γ) ⊗ f_P/B(x_P, t) ⊗ f_j/P(z_P, Q²) ⊗ dσ̂_{γj→X_h},
dσ^res_{AB→X_h B} = f_γ/A(x_γ) ⊗ f_i/γ(x_i, Q²) ⊗ f_P/B(x_P, t) ⊗ f_j/P(z_P, Q²) ⊗ dσ̂_{ij→X_h},

with beam A emitting a photon, beam B emitting a Pomeron, AB → X_h B denoting that the diffractive system is present on side A, and ⊗ denoting the convolutions over the respective momentum fractions. Changing A → B in these expressions thus results in a diffractive system on side B. In the above, f_γ/A denotes the photon flux from beam A, f_i/γ the photon PDF, while f_P/B and f_j/P are the Pomeron flux and PDF, respectively. dσ̂_{(i)j→X_h} are the partonic cross sections calculated from the hard-scattering MEs. The full diffractive system X also contains partons from MPIs and beam remnants that have to be taken into account as well; thus the expressions above only represent the hard subprocess part of the diffractive system. Presently, neither the double-diffractive process AB → X^A_h X^B_h nor the central-diffractive process AB → A X_h B are modelled, and the Pomeron can only be extracted from protons and resolved photons. As the model is based on dPDFs and the dynamical gap survival derived from the MPI framework inside Pythia 8, the extension does not require any further modelling or parameters. The dynamical gap survival is present only in the cases where the photon fluctuates into a hadronic state. Hence the tentative probability equals the final probability for diffraction in direct photoproduction and in the DIS regime, where no MPIs occur. In resolved photoproduction, the dynamical gap survival suppresses the tentative probability for diffraction, offering an explanation for the discrepancies between next-to-leading-order (NLO) predictions for dijets in photoproduction and measured quantities at HERA. The observed factorization breaking is not as striking as in pp collisions, but the factorization-based calculation still overshoots the latest H1 analysis by roughly a factor of two. It should be noted that this extension allows for diffraction on both sides, i.e. the Pomeron can be extracted from the hadronic photon and/or the proton, see Fig. 3. Typically, the experiments only considered diffractive events where the diffractive system consists of a photon and a Pomeron, with a rapidity gap on the proton side (and a surviving proton, whether observed or not). The option to generate diffractive events on only one of the sides exists in Pythia 8, such as to avoid needless event generation.

Recent improvements in dPDFs

Since the publication of the hard diffraction model for pp/pp̄, several improvements have been made for dPDFs. Work has been put into the inclusion of NLO corrections to the splitting kernels describing the evolution of the partons inside the Pomeron.
Other work includes more recent fits to combined HERA data, or includes additional data samples in experiment-specific fits, so as to constrain some of the distributions in the dPDFs. A subset of these new dPDFs have been added to Pythia 8 recently and are briefly introduced below. Specifically, two new sets of dPDFs have been introduced, along with the Pomeron fluxes used in these fits. The first set, the GKG18 dPDFs by Goharipour et al., consists of two LO and two NLO dPDFs fitted to two different combined HERA data sets available, using the xFitter tool recently extended to dPDFs. In addition, we consider an analysis released by the ZEUS collaboration offering three NLO dPDFs fitted to a larger sample of data. One of these, denoted ZEUS SJ, includes also diffractive DIS dijet data in order to have better constraints on the gluon dPDF. Using PDFs derived at NLO is not perfectly consistent with the LO matrix elements available in Pythia 8, but since the ZEUS SJ dPDF analysis is the only one of the considered dPDF analyses including dijet data, it is interesting to compare the results to other dPDFs. Both the GKG18 and the ZEUS SJ fits use the following parametrization for the Pomeron flux,

f_P/p(x_P, t) = A_P e^{B_P t} / x_P^{2α_P(t) − 1},

with α_P(t) = α_P(0) + α'_P t and A_P, B_P being parameters included in the fits. The dPDFs are typically parametrized as

z f_i/P(z, Q²_0) = A_i z^{B_i} (1 − z)^{C_i},

again with A_i, B_i, C_i being parameters to be determined in the fits. The dPDFs are then evolved using standard DGLAP evolution.

Diffractive dijets in the photoproduction range

The production of dijets in a diffractive system is particularly interesting, as it provides valuable information on the validity of the factorization theorems widely used in particle physics. These factorization theorems are not expected to hold in the case of diffractive dijets arising from resolved photoproduction, as this process essentially is a hadron-hadron collision, where the hard-scattering factorization fails. Both H1 and ZEUS have measured the production of diffractive dijets in both the photoproduction and the DIS range. We here limit ourselves to showing results from two analyses, the H1 2007 and ZEUS 2008 analyses on diffractive dijets. Other analyses have been presented, including several examining only the DIS regime, but as the analysis codes or even the data itself have not always been preserved, we limit ourselves to reconstructing only a subset of these analyses. We aim to validate and provide the analyses used in this article within the Rivet framework. Both experiments have data on ep collisions at √s = 318 GeV using 27.5 GeV electrons and 920 GeV protons, with the proton moving in the +z direction. Both use the large-rapidity-gap method for selecting diffractive systems. The experimental cuts in the two analyses are shown in table 1. In the H1 analysis we concentrate on the differential cross sections as a function of four variables: the invariant mass of the photon-proton system (W), the transverse energy of the leading jet (E*_⊥^jet1, evaluated in the photon-proton rest frame) and the momentum fractions z_P^obs and x_γ^obs, both constructed from the measured jets as

x_γ^obs = Σ_jets (E^jet − p_z^jet) / (2 y E_e),    z_P^obs = Σ_jets (E^jet + p_z^jet) / (2 x_P E_p),

where E_e (E_p) is the energy of the beam electron (proton) and the summation includes the two leading jets, i.e. the two with highest E_⊥. The inelasticity y and the Pomeron momentum fraction w.r.t. the proton, x_P, are determined from the hadronic final state. In the ZEUS analysis the momentum fractions z_P^obs and x_γ^obs are defined in terms of the transverse energy and pseudorapidity of the jets, equivalent to the definitions above if the jets are massless.
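At hadron level, the two estimators can be computed directly from the two leading jets; the short Python sketch below does this under the conventions just stated (proton along +z, jets given as (E, p_z) pairs in the laboratory frame, y and x_P reconstructed externally). The numerical values in the example are invented purely for illustration.

```python
def x_gamma_obs(jets, y, e_electron=27.5):
    """x_gamma^obs = sum over the two leading jets of (E - p_z), divided by 2*y*E_e."""
    return sum(e - pz for e, pz in jets[:2]) / (2.0 * y * e_electron)

def z_pomeron_obs(jets, x_pomeron, e_proton=920.0):
    """z_P^obs = sum over the two leading jets of (E + p_z), divided by 2*x_P*E_p."""
    return sum(e + pz for e, pz in jets[:2]) / (2.0 * x_pomeron * e_proton)

# Two leading jets as (E, p_z) in GeV, with the proton moving in the +z direction.
jets = [(30.0, 22.0), (15.0, 11.0)]
print(x_gamma_obs(jets, y=0.5))             # ~0.44: a resolved-photon-like configuration
print(z_pomeron_obs(jets, x_pomeron=0.05))  # ~0.85: most of the Pomeron momentum in the dijets
```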
In a LO parton-level calculation these definitions would exactly correspond to the momentum fraction of the partons inside the photon (x_γ) and the Pomeron (z_P). Due to the underlying event, parton-shower emissions and hadronization effects, however, the connection between the measured z_P^obs and x_γ^obs and the actual x_γ and z_P is slightly smeared, but the definitions above still serve as decent hadron-level estimates of these quantities. In place of W the ZEUS analysis provides the differential cross section in terms of the invariant mass of the photon-Pomeron system, M_X. There are several theoretical uncertainties affecting the distributions of the diffractive events. Here we focus on the most important ones:

- Renormalization- and factorization-scale variations, estimating the uncertainties of the LO description in Pythia 8.
- dPDF variations, affecting especially the z_P^obs distribution and, indirectly, the number of events through the cuts on the squared momentum transfer t, the momentum fraction of the beam carried by the Pomeron x_P, and the mass of the scattered (and possibly excited) proton M_Y.
- p⊥0^ref variations, affecting the gap-survival factor.

Other relevant parameters and distributions have also been varied, showing little or no effect on the final distributions. Remarkably, one of these was the choice of photon PDF. Pythia 8 uses the CJKL parametrization as a default, both in the hard process and in the shower and remnant description. As the MPI and ISR generation in the current photoproduction framework require some further approximations for the PDFs, which are not universal and thus cannot be determined for an arbitrary PDF set, only the hard-process generation is affected by a change of photon PDF. Thus the effect of a different photon PDF on the various observables is not fully addressed with the present framework. The hard-process generation should, however, provide the leading photon-PDF dependence. We find only a minimal change to the final distributions when changing to either the SaS, GRV or GS-G sets provided with LHAPDF5. There are two reasons for the weak dependence on photon PDFs. Firstly, the cuts applied by the experimental analyses presented here force x_γ to be rather large, where the photon PDFs are relatively well constrained by the LEP data. Secondly, the no-MPI requirement rejects mainly events from the low-x_γ region, where the differences between the mentioned photon PDFs are more pronounced. Two other analyses from HERA have also been used to check the current framework, giving results similar to the analyses presented here. For our baseline setup we show comparisons to both the H1 and ZEUS analyses, while for the more detailed variations we focus on comparisons to ZEUS.

Table 1: Kinematical cuts used in the experimental analyses by H1 and ZEUS (columns: H1 2007, ZEUS 2008). An asterisk (*) indicates that the observable is evaluated in the photon-proton rest frame. x_P, M_Y and t are found in the rest frame of the hadronic system X, while the remaining observables are found in the laboratory frame.

Baseline results

In figs. 6 and 7 we show the results obtained with Pythia 8 along with the experimental measurements. We show two simulated samples, one based on the dPDFs alone without the dynamical gap survival (the "PDF" sample, dashed lines), and one including the dynamical gap survival (the "MPI" sample, solid lines). The results show that the "PDF" sample is too large compared to data in all distributions except for x_γ^obs, thus showing evidence of factorization breaking.
The "MPI" sample, however, seems to give a reasonably good description of the data, as the MC/data ratio is smaller for the "MPI" sample than for the "PDF" sample, thus hinting that it is the additional probability for multiparton interactions between the photon remnant and the proton that causes the factorization breaking. A χ²-test has been performed in order to quantify which of the models does better. Here, we have performed three different tests: using only either of the H1 or ZEUS datasets, or using both, cf. table 2. It is evident that the "MPI" model including the gap-survival effect does a better job than the "PDF" model without it, within our baseline setup. The calculation of the χ² values includes all differential cross sections provided by the experimental analyses, excluding the additional x_γ^obs-binned distributions in the ZEUS analysis to avoid counting the same data twice. Error correlations are not provided and so are not considered. In general, most distributions are well described by the model including the dynamical gap survival. The invariant-mass distributions for the photon-Pomeron system (M_X) and for the photon-proton system (W) in figs. 6 and 7 (a) are both sensitive to the form of the photon flux from leptons. Both data sets are well compatible with the MPI samples, indicating that the standard Weizsäcker-Williams formula provides a good description of the flux. It is, however, evident that in some observables the shape of the data is poorly described. Examples are z_P^obs and x_γ^obs, figs. 6, 7 (b, c). The former is sensitive to the dPDFs used in the event generation. The baseline samples use the LO H1 Fit B flux and dPDF, fitted to data that is mainly sensitive to quarks. As the Pomeron is assumed to be primarily of gluonic content, it is expected that the vast majority of the dijets arise from gluon-induced processes. Thus a poorly constrained gluon dPDF is expected to give discrepancies in distributions sensitive to this parameter, such as z_P. In both the H1 and ZEUS analyses z_P^obs is overestimated in the low end, while being underestimated in the high-z_P^obs end. If the measured jets are dominantly gluon-induced, then it is expected that changing from the H1 LO Fit B dPDF to the ZEUS SJ fit should improve the z_P^obs distribution, as the low-z_P^obs gluons are suppressed in this dPDF. The latter observable, x_γ^obs, is similarly underestimated in the low end and overestimated in the high end. The tight cut on x_P together with the requirement of high-E_⊥ jets reduces the contribution from lower values of x_γ^obs. This suppresses the resolved contribution and therefore increases the relative contribution from direct processes, which typically are close to x_γ^obs = 1. The additional no-MPI requirement further suppresses the already low resolved contribution, and we end up not being able to describe the shape of x_γ^obs. As already discussed, the discrepancy cannot be explained by the uncertainties in the photon PDFs, as the sensitivity to different PDF analyses was found to be very low. The issue seems to be a problem with the relative normalizations of the direct and resolved contributions. This is evident from Fig. 8, where the ZEUS analysis conveniently splits the data into two regions, a direct- and a resolved-enhanced region, with the division at x_γ^obs = 0.75. Here, the model underestimates the resolved-enriched part of the cross section and overestimates the direct-enriched part, confirming what we already observed in figs. 6, 7 (c).
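Since the experimental error correlations are not available, the χ² quoted earlier in this section is a plain uncorrelated sum over bins. A minimal version of such a comparison, with invented bin contents purely for illustration, is sketched below in Python.

```python
def chi2(mc, data, sigma):
    """Uncorrelated chi^2 between a binned MC prediction and binned data."""
    assert len(mc) == len(data) == len(sigma)
    return sum((m - d) ** 2 / s ** 2 for m, d, s in zip(mc, data, sigma))

# Hypothetical bin contents for one differential cross section (arbitrary units):
data       = [12.0, 9.5, 6.1, 3.2]
sigma      = [1.1, 0.9, 0.7, 0.5]
sample_pdf = [21.0, 16.8, 10.9, 5.9]   # dPDF-based prediction without gap suppression
sample_mpi = [13.1, 10.2, 6.9, 3.6]    # prediction including the dynamical gap survival
print(chi2(sample_pdf, data, sigma), chi2(sample_mpi, data, sigma))
```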
Future measurements could shed more light on this issue, especially experimental setups in which the events passing the kinematical cuts would not be dominated by the direct contribution. In the experimental analyses considered here, a similar observation was made when comparing to an NLO calculation: the shape of x_γ^obs was well described by the NLO calculation (corresponding to our "PDF" selection) in the direct-enhanced region, but applying a constant suppression factor for the resolved contribution undershot the data at x_γ^obs < 0.75, similar to what we observe. It is worth pointing out that both poorly described distributions, x_γ^obs and z_P^obs, are constructed from the jet kinematics. Therefore further studies of the jet reconstruction and the jet distributions could offer some insight into the observed discrepancies. The jet variable E_⊥ can be used to check if the amount of activity within the diffractive system is properly described. As this system contains a Pomeron, it might very well be that the MPI parameters here could be different from the MPI parameters in the γp system. It seems that using the same parameters for the Pp system as for γp slightly overestimates the high-E_⊥ tail. This indicates that there might be too much MPI activity in the events, thus requiring a slightly larger p⊥0^ref value in the diffractive system than in the γp system. The argument for a different p⊥0^ref value for γp as compared to pp can also be applied here: if the Pomeron has a smaller size than the proton, then the p⊥0^ref value can be increased. Having too much MPI activity in the Pp system may also push the x_γ^obs distribution towards higher values, as the E_⊥ of the jets may increase due to the underlying event. A full discussion of the MPI parameters in the diffractive system in pp collisions has been provided elsewhere, but this has not been pursued further here.

Scale variations

To probe the uncertainties in the choice of the renormalization and factorization scales, μ_R and μ_F, we employ the usual method of varying the scales up and down by a factor of two. Each is probed individually, such that one scale is kept fixed while the other is varied. Only the scales at matrix-element level are varied; the shower and MPI scales have been excluded from these variations. Each variation gives rise to an uncertainty band, and in Fig. 9 we show the envelope using the maximal value obtained from either of the two uncertainty bands. The envelope is dominated by the renormalization scale, giving the largest uncertainty in most of the figures shown, which is not unusual in a LO calculation. Note, however, that the scale uncertainty in the high-x_γ^obs bin actually reaches the upper error of the data point, essentially hinting that the model is able to describe the direct-enhanced region within the theoretical uncertainties. The resolved region, however, cannot be fully accounted for within these theoretical uncertainties.

Variations of the dPDFs

As explained above, the considered observables are sensitive to the dPDFs, especially the fractional momentum carried by the parton from the Pomeron, z_P^obs. We here investigate if the increased amount of diffractive DIS data in the GKG LO dPDFs will provide a better description of the data than the less constrained H1 LO Fit B dPDF. We also show results obtained when using the NLO dPDF and flux from ZEUS SJ, as this dPDF includes data on diffractive dijets that is directly sensitive to the gluon distribution.
Note, however, that a combination of NLO PDFs and LO matrix elements is still only accurate to LO, and mixing different orders may give different results compared to a situation where the matrix elements and the PDF determination are consistently at the same perturbative order. In Fig. 10 we show results using two of the new dPDFs, ZEUS NLO SJ and GKG LO Fit A, without the gap-suppression factor. At first glance, the new dPDFs improve the overall description of the data without a further need for suppression. Overall the new dPDFs seem to suppress the distributions as compared to the H1 Fit B LO dPDF, with the ZEUS SJ dPDF performing slightly better than GKG LO Fit A, as seen e.g. in the z_P^obs distribution. Here, the ZEUS SJ dPDF flattens out at high z_P^obs as compared to the GKG and H1 dPDFs, having a slightly larger x_g distribution in this regime. The distributions that the baseline study did not fully describe are not described by the new dPDFs either. In particular, the x_γ^obs distribution is still underestimated at x_γ^obs < 0.75, which underlines the discrepancies in the relative normalization between the direct and resolved contributions. The E_⊥^jet1 distribution is now well described with the GKG set. With the ZEUS SJ set the normalization is improved compared to the H1 Fit B, but the shape of the distribution is similarly off. A separation of M_X into the two regimes, Fig. 11, shows that the direct-enhanced region is well described with the ZEUS SJ dPDFs. The GKG set improves the normalization, but the shape of the distribution is still not compatible. The resolved region, however, is too suppressed with both of these, so the relative normalizations of the two contributions remain an unresolved issue. Adding the gap-suppression factor on top of this, Fig. 12, further suppresses the already suppressed resolved-enhanced region, worsening the agreement with the data in this regime. Little effect is seen in the direct-enhanced region, as expected. These results thus put forward the question of whether the gap suppression is necessary if the dPDFs are refined and improved with additional diffractive data. The improvements seen especially with the ZEUS SJ dPDF in both the x_γ^obs and z_P^obs distributions might hint towards this. As discussed earlier, this might partly follow from the tight cuts applied in the ZEUS analysis, which do not leave much room for MPIs in the γp system. Also, one should keep in mind that using NLO dPDFs with LO matrix elements might lead to different results compared to a full NLO calculation.

Variations of the screening parameter

The gap-suppression method used here is highly sensitive to the model parameters of the MPI framework. Here we especially look at the screening parameter p⊥0^ref. A smaller value of p⊥0^ref results in more MPIs, thus we expect that the gap suppression will be larger if we decrease p⊥0^ref to its pp value, as a smaller fraction of the events will survive the no-MPI requirement. This effect is exactly what is seen in Fig. 13. The "PDF" samples are not affected, but the pp-tuned p⊥0^ref value (in red) causes a stronger suppression, best seen in the ratio plots, where the solid red curve, the "MPI" sample with p⊥0^ref = 2.28 GeV, lies below the solid blue curve with p⊥0^ref = 3.00 GeV. The value of p⊥0^ref has some effect on the shape of the distributions, mainly because a higher M_X allows for more MPI activity, and thus a smaller fraction of events survives the no-MPI requirement.
This means that the gap suppression increases with increasing energy available in the system, i.e. with increasing M_X, as seen in Fig. 13 (a), where the ratio plot shows a suppression factor of approximately 0.9 in the low-M_X bin and 0.6 in the high-M_X bin.

Gap suppression factors

Several models have been proposed to explain the factorization breaking in diffractive hadronic collisions. Many of these employ an overall suppression factor, often relying primarily on the impact parameter of the collision. Some also include a suppression w.r.t. a kinematical variable, such as the p_⊥ of the diffractive dijets. But to our knowledge, the model of dynamical gap survival is the first of its kind to evaluate the gap survival on an event-by-event basis. This means it takes into account the kinematics of the entire event, and is thus also able to provide a gap-suppression factor differential in any observable. In the model presented here, the ratio of the "MPI" to the "PDF" sample equals the gap-survival factor, as the two samples only differ by the no-MPI requirement that defines the model's notion of a fully diffractive event. The theoretical uncertainties not directly related to the MPI probability (e.g. scale variations) are expected to cancel in such a ratio. Even though many experimental analyses present similar ratios by using an NLO calculation as a baseline, such a ratio is not a measurable quantity, as it always requires a theory-based estimate for the unsuppressed result. These ratios, however, are useful for demonstrating the effects arising from different models such as our dynamical rapidity-gap survival. In order to estimate the factorization-breaking effect in data w.r.t. our model, we also show the ratio between the data and the "PDF" sample. In Fig. 14 we show the gap suppression differential in the observables M_X and E_⊥^jet1 from the ZEUS analysis, and in Fig. 15 we show the gap suppression differential in the observables W and E*_⊥^jet1 from the H1 analysis. These distributions demonstrate some of the main features of our dynamical rapidity-gap survival model. We show the ratio of data to the "PDF" sample (black dots) and the ratio of the "MPI" to the "PDF" sample (solid blue curve). The latter ratio is exactly the gap-suppression factor predicted by the model. The shapes of the gap-suppression factors agree reasonably well with the suppression factors derived from the data (the black dots), albeit the shape of Fig. 14 (b) is off in the high-E_⊥ end, as already mentioned in the baseline results. The model predicts a slowly decreasing suppression in E_⊥^(*)jet1, while the suppression increases towards larger M_X and W. This increase follows as the larger diffractive masses are correlated with larger invariant masses of the γp system, where there is more room for MPIs at fixed jet E_⊥. This results in a larger fraction of the events having additional MPIs, thus a smaller fraction of the events survives as diffractive. Similarly, high-E_⊥ jets take away more momentum than low-E_⊥ jets, again leaving less room for MPIs to take place. Thus we do not predict a flat overall suppression, as has often been applied in the experimental analyses. Suppression factors in the range 0.7−0.9 are predicted in the shown observables. Given the uncertainty on the "PDF" sample, this is in agreement with the suppression factors of approximately 0.5−0.9, as observed by H1 and ZEUS. A somewhat contradictory result was observed in an earlier study, in which the ZEUS dijet data
was found consistent with the purely factorization-based NLO calculation when using the ZEUS SJ dPDFs. The experimental cuts applied in the ZEUS analysis, as compared to the analysis from H1, force x_γ^obs to very large values, where the suppression from the MPIs does not have a large effect. Thus the ZEUS measurement requires less suppression than what is needed in the H1 measurement. The shown distributions, however, are still marred by the large theoretical uncertainties. One way to reduce these theoretical uncertainties would be to consider the ratio of photoproduced dijets to ones from DIS, as done e.g. in the recent H1 analysis.

Photoproduction in ultra-peripheral collisions

Because of the more than an order of magnitude larger √s at the LHC, the accessible invariant masses of the γp system are much larger than what could be studied at HERA. This allows us to study the factorization-breaking effects in hard-diffractive photoproduction in a previously unexplored kinematical region. Such measurements would fill the gap between the rather mild suppression observed at HERA and the striking effect observed in pp̄ and pp collisions at the Tevatron and the LHC. This would provide important constraints for different models and thus valuable information about the underlying physics. Besides the results we present here, predictions for these processes have been computed in a framework based on a factorized NLO perturbative QCD calculation with two methods of gap-survival probabilities, one with an overall suppression and one where the suppression is only present for resolved photons. The authors there expect that the two scenarios can be distinguished at the LHC, especially in the x_γ^obs distribution. The model presented in this work should thus be comparable to the latter suppression scheme. Similar processes have also been considered in other work. In principle these measurements could be done in all kinds of hadronic and nuclear collisions, since all fast-moving charged particles generate a flux of photons. There are, however, some differences worth covering. In pp collisions, the photons can be provided by either of the beam particles with equal probability. The flux of photons is a bit softer for protons than for leptons, but still clearly harder than for nuclei. Experimentally it might be difficult to distinguish the photon-induced diffraction from "regular" double diffraction in pp, since both processes would leave a similar signature with rapidity gaps on both sides. In pPb collisions the heavy nucleus is the dominant source of photons, as the flux is amplified by the squared charge of the emitting nucleus, Z². Thus the photon-induced diffraction should overwhelm the QCD-originating colorless exchanges (Pomerons and Reggeons). Similarly, in PbPb collisions the photon fluxes are large and would thus overwhelm the Regge exchanges. The latter case is currently not possible to model with Pythia 8, however, as in addition to regular MPIs one should also take into account the further interactions between the resolved photon and the other nucleons, which could destroy the rapidity gap. Since these are currently not implemented in the photoproduction framework, we leave the PbPb case for a future study.

pPb collisions

The setup for the photoproduction in pPb collisions is the same as our default setup for ep, albeit with the photon flux now provided by the b-integrated heavy-ion flux given above.
We here neglect the contribution where the proton would provide the photon flux, such that all photons arise from the nucleus. The jets are reconstructed with the anti-k_T algorithm using R = 1.0, as implemented in the FastJet package. The applied cuts are presented in table 3 and are very similar to the ones used in the HERA analyses. The experimentally reachable lower cut on E_⊥ is not set in stone, however. This depends on how well the jets can be reconstructed in this process. On the one hand, the underlying-event activity is greatly reduced in UPCs as compared to pp collisions, thus possibly allowing for a decrease of the reachable jet E_⊥. On the other hand, the increased W might require an increase of the minimum E_⊥ w.r.t. the HERA analyses. The feasibility of such a measurement has recently been demonstrated in a preliminary ATLAS study which measured inclusive dijets in ultra-peripheral PbPb collisions at the LHC. The resulting differential cross sections for diffractive dijets from UPCs in pPb collisions are presented in Fig. 16. Similarly to sec. 4 we show the results differential in W, M_X, x_γ^obs and z_P^obs. The "PDF" samples (dashed lines) are without the gap suppression and the "MPI" samples (solid lines) are with the gap suppression. The lower panels show the ratio of the two, corresponding to the rapidity-gap suppression factor predicted by the model. As discussed earlier, the value of the p⊥0 screening parameter in γp collisions was constrained by HERA data only in a narrow W bin around 200 GeV. As the UPC events at the LHC will extend to much higher values of W, the poorly constrained energy dependence of p⊥0 will generate some theoretical uncertainty for the predictions. To get a handle on this uncertainty we show samples with both the pp-tuned (red lines) and ep-tuned (blue lines) values for p⊥0. The predicted gap-suppression factor is rather flat as a function of z_P^obs at around 0.7. The suppression factor is, however, strongly dependent on W and M_X, as also observed in the HERA comparisons. It is more pronounced at the LHC thanks to the extended range in W, with the average suppression being roughly two times larger than at HERA. A similar strong dependence is also seen in x_γ^obs. As concluded earlier, the increasing suppression with W follows from the fact that the probability for MPIs is increased with a higher W, due to the increased cross sections for the QCD processes. Thus a larger number of tentatively diffractive events are rejected due to the additional MPIs. Similarly, decreasing x_γ^obs leaves more room for the MPIs to take place, since the momentum extracted from the photon for the primary jet production is decreased. A reduction of the p⊥0^ref value from 3.00 GeV to 2.28 GeV increases the MPI probability, thus having a twofold effect. Firstly, it increases the jet cross section in the "PDF" sample, as the additional MPIs allowed with the lower reference value increase the energy inside the jet cone. Secondly, the enhanced MPI probability rejects a larger number of tentatively diffractive events, thus giving a larger gap-suppression effect. Collectively, these effects lead to 20−30% larger gap-suppression factors as compared to the γp value for p⊥0^ref.

pp collisions

The kinematical cuts applied in pp equal those used for pPb. Due to the increased √s and the harder photon spectrum from protons compared to heavy ions, the W range probed is extended to even larger values.
When keeping the jet kinematics fixed this leaves more room for MPIs in the γp system, while also increasing the relative contribution from resolved photons. Thus the predicted gap-suppression factors are further increased here, as compared to the pPb and ep cases, cf. Fig. 17. At extreme kinematics (high M_X, low x_γ^obs) the gap-suppression factors are almost as large as what has been found in hadronic diffractive pp events. The pp suppression factors should provide an estimate of the upper limit for photoproduction, as the latter includes the (unsuppressed) direct contribution. The suppression factors show a similar sensitivity to the value of p⊥0^ref as in pPb collisions, such that the lower value gives more suppression. Notice that the cross sections are calculated assuming that the photon is emitted from the beam with positive p_z. A particularly interesting observable is the x_γ^obs distribution. Due to the extended W reach, the dijet production starts to be sensitive also to the low-x part of the photon PDFs. Here, the photon PDF analyses find that the gluon distributions rise rapidly with decreasing x, the same tendency as seen in proton PDFs. This generates the observed rise of the cross section towards low values of x_γ^obs when the MPI rejection is not applied. However, the contribution from the low-x_γ^obs region is significantly reduced when the rejection is applied, as these events have a high probability for MPIs. Note, however, that there are large differences in the gluon distributions between different photon PDF analyses in this region. Thus a variation of the photon PDF in the hard scattering could here have some effect on the predicted gap-suppression factor, even though only a very mild impact was seen in the HERA comparisons. But as most of these events with a soft gluon in the hard scattering will be rejected due to the presence of additional MPIs, the predicted cross sections shown in Fig. 17 are expected to be rather stable against such variations. Further uncertainty again arises from the dPDFs. But as the purpose of the shown UPC results is to demonstrate the gap-survival effects, we do not discuss the sensitivity to dPDF variations here explicitly.

Conclusions

In this paper we present a model for explaining the factorization-breaking effects seen in photoproduction events at HERA. The model, implemented in the general-purpose event generator Pythia 8, is an extension of the hard diffraction model to photoproduction. It is a novel combination of several existing ideas, and it is the first model of its kind with a dynamical gap suppression based on the kinematics of the entire event. The starting point is the Ingelman-Schlein approach, where the cross section is factorized into a Pomeron flux and a PDF, convoluted with the hard-scattering cross section. The Pomeron flux and PDF are extracted from HERA data, but if used out of the box these give order-of-magnitude larger cross sections in pure hadron-hadron collisions, while the differences in photoproduction are around a factor of two at most. Thus, factorization was observed to be broken in diffractive events with a hard scale. The dynamical model extended here explains this factorization breaking with additional MPI activity filling the rapidity gap used for experimental detection of the diffractive events. Thus the MPI framework of Pythia 8 is used as an additional suppression factor on an event-by-event basis, giving cross sections very similar to what is seen in data, both in pp and pp̄ events.
As low-virtuality photons are allowed to fluctuate into a hadronic state, MPIs are also possible in these systems. Thus the same mechanism is responsible for the factorization breaking in photoproduction events in ep collisions, and also here the model predicts cross sections similar to what is seen in ep data. We present results obtained with the model compared to experimental data from H1 and ZEUS for diffractive dijet photoproduction. The agreement with the data is improved when the MPI rejection is applied, supporting the idea behind the factorization-breaking mechanism. However, the kinematical cuts applied by the experiments reduce the contribution from resolved photons, so the observed suppression is rather mild with the HERA kinematics, especially for the ZEUS data. The improvements in the dPDFs raise the question of whether such a suppression is actually needed, as the new dPDFs seem to describe the data fairly well without it, especially in the direct-enhanced region of phase space. Furthermore, there are several theoretical uncertainties that hamper the interpretation of the data, and the description is far from perfect for all considered distributions. Many of these theoretical uncertainties could be reduced by considering ratios of diffractive dijets in the DIS and photoproduction regimes, but this has not been pursued here as the description of DIS in Pythia 8 is not yet complete. As an additional example of the range of the model, we present predictions for diffractive dijets in ultra-peripheral pp and pPb collisions at the LHC. In these processes a quasi-real photon emitted from a proton or nucleus interacts with a proton from the other beam. Due to the larger invariant masses of the γp system in these processes, the contribution from resolved photons is significantly increased. Thus UPCs are an excellent place to study the gap suppression in photoproduction. The results demonstrate that a measurement of photoproduced diffractive dijet cross sections in pp collisions would provide very strong constraints on our dynamical rapidity-gap survival model, as the effects are much more pronounced than with HERA kinematics. The distinct features of the model are well accessible within the kinematical limits for UPCs at the LHC. If such a measurement is not feasible due to the pure QCD background, a measurement in pPb collisions would be sufficient to confirm the factorization breaking in diffractive photoproduction and provide constraints on the underlying mechanism. Future work consists of opening up for different photon PDFs in the photoproduction framework, improving the DIS description in Pythia 8 and merging the two regimes in a consistent manner. The first allows for probing additional theoretical uncertainties of the photoproduction framework, the second allows for probing the double ratios of photoproduction to DIS cross sections for diffractive dijets. The merging of the two regimes would allow for full event generation at all photon virtualities, as needed for future collider studies. Similarly, a combination of the current model and the Angantyr model for heavy ions is planned, such that eA collisions and UPCs in AA collisions could be probed as well.

This project has received funding from the European Research Council (ERC) under the European Union's Horizon 2020 research and innovation program (grant agreement No 668679), and in part by the Swedish Research Council, contract number 2016-05996, and the Marie Skłodowska-Curie Innovative Training Network MCnetITN3 (grant agreement 722104).
Further support is provided by the Carl Zeiss Foundation and the Academy of Finland, Project 308301.
<filename>src/main/java/io/github/dnloop/inventorycom/repository/MeasureRepository.java package io.github.dnloop.inventorycom.repository; import io.github.dnloop.inventorycom.model.Measure; import org.springframework.data.domain.Page; import org.springframework.data.domain.Pageable; import org.springframework.data.jpa.repository.JpaRepository; import org.springframework.data.jpa.repository.Modifying; import org.springframework.data.jpa.repository.Query; import java.util.Optional; public interface MeasureRepository extends JpaRepository<Measure, Integer> { @Query("SELECT measure FROM Measure measure" + " WHERE measure.deleted = 0") Page<Measure> findAll(Pageable pageable); @Query("SELECT measure FROM Measure measure" + " WHERE measure.id = :id" + " AND measure.deleted = 0") Optional<Measure> findById(int id); @Query("SELECT measure FROM Measure measure" + " WHERE measure.deleted = 1" + " ORDER BY measure.deletedAt") Page<Measure> findAllDeleted(Pageable pageable); @Query("SELECT measure FROM Measure measure" + " WHERE measure.id = :id" + " AND measure.deleted = 1") Optional<Measure> findDeleted(int id); /** * Returns the number of assigned measures to a Product Detail. */ @Query("SELECT COUNT (productDetail.id)" + " FROM ProductDetail productDetail" + " WHERE productDetail.measureId = :measureId" + " AND productDetail.deleted = 0") Integer existsInProductDetail(int measureId); /** * Method to delete a single measure. * <p> * By using this method we ensure Product Detail is not in an invalid state. If * a record is 'deleted' we assign a default 'unavailable' (00) value. */ @Modifying @Query("UPDATE Measure measure" + " SET measure.deleted = 1" + " WHERE measure.id = :measureId") void delete(int measureId); }
Effects of ageing and smoking on pulmonary computed tomography scans using parametric response mapping Chronic obstructive pulmonary disease (COPD) is an obstructive lung disease often caused by cigarette smoke, and characterised by inflammation and abnormalities of the large and small airways (i.e. those with an internal diameter <2mm), as well as by alveolar destruction (emphysema). Recent evidence suggests that small airway disease precedes emphysema and, therefore, it may be useful to identify the presence and extent of small airway disease and emphysema in early COPD, or preferably, even before the onset of disease. Parametric response mapping can distinguish small airway disease, emphysema and parenchymal disease on pulmonary CT http://ow.ly/Neuw6
There's no need to fear arsenic poisoning if you grew up in the Argentinian Andes -- hundreds of years of drinking arsenic-laced groundwater will have left you with a genetic tolerance for it. Geneticists from Lund and Uppsala universities had noticed that certain plants and bacteria could live in environments with lots of arsenic, with natural selection favouring a gene known to improve their ability to metabolise the poison. Curious to see if humans could also gain some kind of arsenic immunity, they looked at a group of people who they knew would have been exposed to the poison over many generations -- the indigenous peoples of the Argentinian part of the Andes. Sure enough, a higher-than-average proportion of the people they studied possessed the AS3MT gene, which lets them flush out toxins faster than "normal" people. The genetic samples tested for the AS3MT gene came from 346 residents of the small, isolated town of San Antonio de los Cobres, located more than 3,700m above sea level in the Andes. Not only does the bedrock in the surrounding area contain a lot of arsenic which gets into the groundwater, but mining operations from the era of Spanish colonisation onwards have released even more arsenic -- so both modern people and mummies dating back 7,000 years have had high levels of arsenic found in their hair and internal organs. Their research has been published in the journal Environmental Health Perspectives. Since arsenic pollution is a potentially dangerous side effect of many heavy manufacturing and mining industries, a better understanding of how people can be protected from the effects of arsenic and other poisons could help save lives. While the AS3MT gene is rare among most populations, most people can nevertheless develop a similar kind of tolerance to arsenic if they take tiny doses of it for many years. Famously, Mithridates the Great of Pontus was said to have been terrified that someone would try to assassinate him with arsenic, and took small doses with his meals to build up a tolerance. When, in 63 BCE, the Roman Republic finally beat back his armies and closed in on his palace, the story goes that he tried to poison himself to avoid capture -- but his immunity stopped the poison working. So he had a servant kill him with a sword instead.
<reponame>easyopsapis/easyops-api-go<filename>protorepo-user_service/mongo/search_one.pb.go<gh_stars>1-10 // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: search_one.proto package mongo import ( fmt "fmt" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" types "github.com/gogo/protobuf/types" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // //SearchOne请求 type SearchOneRequest struct { // //表名 Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection" form:"collection"` // //查询条件 Query *types.Struct `protobuf:"bytes,2,opt,name=query,proto3" json:"query" form:"query"` // //fields Fields []string `protobuf:"bytes,3,rep,name=fields,proto3" json:"fields" form:"fields"` // //sort Sort []string `protobuf:"bytes,4,rep,name=sort,proto3" json:"sort" form:"sort"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SearchOneRequest) Reset() { *m = SearchOneRequest{} } func (m *SearchOneRequest) String() string { return proto.CompactTextString(m) } func (*SearchOneRequest) ProtoMessage() {} func (*SearchOneRequest) Descriptor() ([]byte, []int) { return fileDescriptor_87c00e5ad1953228, []int{0} } func (m *SearchOneRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SearchOneRequest.Unmarshal(m, b) } func (m *SearchOneRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SearchOneRequest.Marshal(b, m, deterministic) } func (m *SearchOneRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_SearchOneRequest.Merge(m, src) } func (m *SearchOneRequest) XXX_Size() int { return xxx_messageInfo_SearchOneRequest.Size(m) } func (m *SearchOneRequest) XXX_DiscardUnknown() { xxx_messageInfo_SearchOneRequest.DiscardUnknown(m) } var xxx_messageInfo_SearchOneRequest proto.InternalMessageInfo func (m *SearchOneRequest) GetCollection() string { if m != nil { return m.Collection } return "" } func (m *SearchOneRequest) GetQuery() *types.Struct { if m != nil { return m.Query } return nil } func (m *SearchOneRequest) GetFields() []string { if m != nil { return m.Fields } return nil } func (m *SearchOneRequest) GetSort() []string { if m != nil { return m.Sort } return nil } // //SearchOne返回 type SearchOneResponse struct { // //记录 Doc *types.Struct `protobuf:"bytes,1,opt,name=doc,proto3" json:"doc" form:"doc"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SearchOneResponse) Reset() { *m = SearchOneResponse{} } func (m *SearchOneResponse) String() string { return proto.CompactTextString(m) } func (*SearchOneResponse) ProtoMessage() {} func (*SearchOneResponse) Descriptor() ([]byte, []int) { return fileDescriptor_87c00e5ad1953228, []int{1} } func (m *SearchOneResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SearchOneResponse.Unmarshal(m, b) } func (m *SearchOneResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SearchOneResponse.Marshal(b, m, deterministic) } func (m *SearchOneResponse) XXX_Merge(src 
proto.Message) { xxx_messageInfo_SearchOneResponse.Merge(m, src) } func (m *SearchOneResponse) XXX_Size() int { return xxx_messageInfo_SearchOneResponse.Size(m) } func (m *SearchOneResponse) XXX_DiscardUnknown() { xxx_messageInfo_SearchOneResponse.DiscardUnknown(m) } var xxx_messageInfo_SearchOneResponse proto.InternalMessageInfo func (m *SearchOneResponse) GetDoc() *types.Struct { if m != nil { return m.Doc } return nil } // //SearchOneApi返回 type SearchOneResponseWrapper struct { // //返回码 Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code" form:"code"` // //返回码解释 CodeExplain string `protobuf:"bytes,2,opt,name=codeExplain,proto3" json:"codeExplain" form:"codeExplain"` // //错误详情 Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error" form:"error"` // //返回数据 Data *SearchOneResponse `protobuf:"bytes,4,opt,name=data,proto3" json:"data" form:"data"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SearchOneResponseWrapper) Reset() { *m = SearchOneResponseWrapper{} } func (m *SearchOneResponseWrapper) String() string { return proto.CompactTextString(m) } func (*SearchOneResponseWrapper) ProtoMessage() {} func (*SearchOneResponseWrapper) Descriptor() ([]byte, []int) { return fileDescriptor_87c00e5ad1953228, []int{2} } func (m *SearchOneResponseWrapper) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SearchOneResponseWrapper.Unmarshal(m, b) } func (m *SearchOneResponseWrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SearchOneResponseWrapper.Marshal(b, m, deterministic) } func (m *SearchOneResponseWrapper) XXX_Merge(src proto.Message) { xxx_messageInfo_SearchOneResponseWrapper.Merge(m, src) } func (m *SearchOneResponseWrapper) XXX_Size() int { return xxx_messageInfo_SearchOneResponseWrapper.Size(m) } func (m *SearchOneResponseWrapper) XXX_DiscardUnknown() { xxx_messageInfo_SearchOneResponseWrapper.DiscardUnknown(m) } var xxx_messageInfo_SearchOneResponseWrapper proto.InternalMessageInfo func (m *SearchOneResponseWrapper) GetCode() int32 { if m != nil { return m.Code } return 0 } func (m *SearchOneResponseWrapper) GetCodeExplain() string { if m != nil { return m.CodeExplain } return "" } func (m *SearchOneResponseWrapper) GetError() string { if m != nil { return m.Error } return "" } func (m *SearchOneResponseWrapper) GetData() *SearchOneResponse { if m != nil { return m.Data } return nil } func init() { proto.RegisterType((*SearchOneRequest)(nil), "mongo.SearchOneRequest") proto.RegisterType((*SearchOneResponse)(nil), "mongo.SearchOneResponse") proto.RegisterType((*SearchOneResponseWrapper)(nil), "mongo.SearchOneResponseWrapper") } func init() { proto.RegisterFile("search_one.proto", fileDescriptor_87c00e5ad1953228) } var fileDescriptor_87c00e5ad1953228 = []byte{ // 379 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x6a, 0xe3, 0x30, 0x10, 0xc6, 0xf1, 0xda, 0x0e, 0x44, 0xd9, 0x3f, 0x89, 0x60, 0x77, 0x45, 0x58, 0x70, 0xd0, 0x42, 0x49, 0x0f, 0x75, 0xa0, 0xa5, 0xd0, 0x16, 0x4a, 0xc1, 0xd0, 0x6b, 0x0b, 0xca, 0xa1, 0xc7, 0xe2, 0xc8, 0x8a, 0x63, 0x70, 0x3c, 0x8e, 0x2c, 0x43, 0xfb, 0xaa, 0x3d, 0x98, 0x3e, 0x83, 0x9f, 0xa0, 0x78, 0x94, 0x12, 0x43, 0xa0, 0xa7, 0x44, 0xf3, 0xfd, 0xbe, 0x99, 0x6f, 0x06, 0x93, 0x71, 0xa5, 0x62, 0x2d, 0x37, 0xcf, 0x50, 0xa8, 0xb0, 0xd4, 0x60, 0x80, 0xfa, 0x5b, 0x28, 0x52, 0x98, 0x9e, 0xa5, 0x99, 0xd9, 0xd4, 0xab, 0x50, 0xc2, 0x76, 0x91, 0x42, 0x0a, 0x0b, 0x54, 0x57, 
0xf5, 0x1a, 0x5f, 0xf8, 0xc0, 0x7f, 0xd6, 0x35, 0xfd, 0x97, 0x02, 0xa4, 0xb9, 0x3a, 0x50, 0x95, 0xd1, 0xb5, 0x34, 0x56, 0xe5, 0x6f, 0x0e, 0x19, 0x2f, 0x71, 0xd0, 0x63, 0xa1, 0x84, 0xda, 0xd5, 0xaa, 0x32, 0xf4, 0x92, 0x10, 0x09, 0x79, 0xae, 0xa4, 0xc9, 0xa0, 0x60, 0xce, 0xcc, 0x99, 0x0f, 0xa3, 0xdf, 0x6d, 0x13, 0x4c, 0xd6, 0xa0, 0xb7, 0x37, 0xfc, 0xa0, 0x71, 0xd1, 0x03, 0xe9, 0x1d, 0xf1, 0x77, 0xb5, 0xd2, 0xaf, 0xec, 0xdb, 0xcc, 0x99, 0x8f, 0xce, 0xff, 0x86, 0x76, 0x72, 0xf8, 0x39, 0x39, 0x5c, 0xe2, 0xe4, 0x68, 0xdc, 0x36, 0xc1, 0x77, 0xdb, 0x0a, 0x79, 0x2e, 0xac, 0x8f, 0x9e, 0x92, 0xc1, 0x3a, 0x53, 0x79, 0x52, 0x31, 0x77, 0xe6, 0xce, 0x87, 0xd1, 0xa4, 0x6d, 0x82, 0x1f, 0x16, 0xb4, 0x75, 0x2e, 0xf6, 0x00, 0xfd, 0x4f, 0xbc, 0x0a, 0xb4, 0x61, 0x1e, 0x82, 0xbf, 0xda, 0x26, 0x18, 0x59, 0xb0, 0xab, 0x72, 0x81, 0x22, 0x7f, 0x20, 0x93, 0xde, 0x6e, 0x55, 0x09, 0x45, 0xa5, 0xe8, 0x35, 0x71, 0x13, 0x90, 0xb8, 0xd5, 0x17, 0x19, 0x7f, 0xb6, 0x4d, 0x40, 0x6c, 0xc7, 0x04, 0x24, 0x17, 0x9d, 0x87, 0xbf, 0x3b, 0x84, 0x1d, 0x35, 0x7c, 0xd2, 0x71, 0x59, 0x2a, 0xdd, 0x25, 0x92, 0x90, 0x28, 0x6c, 0xec, 0xf7, 0x13, 0x75, 0x55, 0x2e, 0x50, 0xa4, 0x57, 0x64, 0xd4, 0xfd, 0xde, 0xbf, 0x94, 0x79, 0x9c, 0x15, 0x78, 0xa8, 0x61, 0xf4, 0xa7, 0x6d, 0x02, 0x7a, 0x60, 0xf7, 0x22, 0x17, 0x7d, 0x94, 0x9e, 0x10, 0x5f, 0x69, 0x0d, 0x9a, 0xb9, 0xe8, 0xe9, 0xdd, 0x10, 0xcb, 0x5c, 0x58, 0x99, 0xde, 0x12, 0x2f, 0x89, 0x4d, 0xcc, 0x3c, 0xdc, 0x8f, 0x85, 0xf8, 0xcd, 0x84, 0x47, 0xa9, 0xfb, 0x01, 0x3b, 0x9e, 0x0b, 0xb4, 0xad, 0x06, 0x78, 0x88, 0x8b, 0x8f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x50, 0xb5, 0x7b, 0x7e, 0x02, 0x00, 0x00, }
By John W. Sewell John W. Sewell is president of the Overseas Development Council. AFRICA presents the greatest development challenge of this decade. The immediate problem is famine. Millions of Africans will go hungry in 1984 because of what may be the most severe drought of this century. In response, the international donor community is mobilizing additional aid for the region. The Reagan administration has requested, and received, $90 million for emergency food relief. Congress wants to allocate an additional $60 million. This will save some people from immediate starvation, but the real problem is chronic underdevelopment. The challenge is to strengthen the fragile African economies so they are able to deal with natural and external shocks in the future. African economic growth has lagged behind the rest of the developing world since independence, and this lagging development is costly in human terms. The average African has an income of $41 a month, and if current trends continue he or she can expect to earn less next year. Roughly 1 in 4 Africans is literate. The probability of a child's dying before the age of one is 10 times greater in Africa than in the United States. Africa's population is growing faster than food production. Endemic poverty means that when African economies are disrupted by natural or external events, there is little cushion to absorb the shock. Yet the region's long-run economic potential is good. The population of Africa is half again as large as that of the US - the intellectual and physical potential alone is enormous. Africa is also rich in natural resources; properly harnessed, the land, minerals, oil, gas, and renewable energy resources at the region's disposal are substantial tools for growth. To use these resources effectively, however, African policymakers must overcome three sets of problems. The first obstacle is the harsh international environment. Two oil price shocks, global recession, and declining terms of trade have hit African economies hard. During recession, the demand of industrialized countries for African exports shrinks and prices decline. Declining export earnings make it increasingly difficult to import needed food and more expensive oil. Most countries are too poor to borrow commercially, and the decline in public revenue erodes the ability of governments to make investments needed for development. African countries also face severe environmental constraints. Vast areas are incapable of supporting the food and energy needs of a growing population because of frequent drought, the spread of desert, and a variety of plant, animal, and human diseases. African forests are disappearing as the need for fuel wood rises, and this contributes to erosion and poor soil quality. Without tools to increase food production per acre, African farmers struggle to increase the production by shortening fallow times and increasing the use of marginally productive land. The long-term result is soil degradation, food that is less nutritious, and still lower yields per acre. Finally, African leaders have created their own problems. Unwise domestic policies rely too strongly on government intervention, misallocate resources, and hamper private-sector activity. In many African countries the agricultural sector has been neglected, despite the fact that the bulk of the population lives in rural areas. Despite the problems, the World Bank has projected brighter prospects for African economic growth, if two conditions are met. 
First, African leaders already beleaguered by rising national expectations in the face of continued poverty must make some very tough policy reforms. Second, the donor community must offer far greater financial support. To encourage policy reform and support the policies in action, the Reagan administration has proposed a new bilateral aid program for African countries that adopt growth-oriented economic reforms. The thrust of the Economic Policy Initiative (EPI) is to increase incentives to private producers, particularly in the agricultural sector. The administration is to be commended for focusing on the right kinds of issues. Policy reforms that increase food and export crop production are obviously a vital ingredient for African development. Regrettably, the EPI fails to address two crucial issues: donor-recipient policy coordination and aid levels. The administration expects the World Bank to take the lead in strengthening donor coordination in order to provide broader support for African policy reform. Yet, in deciding to create a new bilateral program, while cutting US funds channeled through existing international institutions, such as IDA, the bank's soft-loan facility, the administration weakens the leadership role of the World Bank. It may also be overestimating the ability of the US to persuade African leaders to make politically and socially difficult policy decisions. The amount requested under EPI ($500 million over five years) is nowhere near the US share of what the World Bank estimates Africa needs to achieve a positive growth rate over the next decade. To be sure, the US alone cannot provide all the aid that Africa needs for development: An international effort is required. The US can, however, lead the way by increasing bilateral and multilateral assistance. It can also help by mobilizing its resources toward creative and innovative solutions to Africa's problems. The longstanding objective of US foreign policy is the alleviation of poverty and human suffering abroad. Americans can be proud of their quick response to the emergency food needs of Africa. A greater achievement would be if emergency relief were no longer needed. The EPI is a step in the right direction, but much more can and should be done. Africa asks itself: Where is the aid money?
# Generated by Django 3.0.4 on 2020-06-16 20:27

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('burners', '0002_auto_20200405_0051'),
    ]

    operations = [
        migrations.CreateModel(
            name='CombustionAnalysisTest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('stack_temperature', models.DecimalField(decimal_places=2, max_digits=5)),
                ('carbon_dioxide', models.DecimalField(decimal_places=2, max_digits=4)),
                ('efficiency', models.DecimalField(decimal_places=2, max_digits=4)),
                ('excess_air', models.DecimalField(decimal_places=2, max_digits=2)),
                ('oxigen', models.DecimalField(decimal_places=2, max_digits=4)),
                ('carbon_monoxide', models.DecimalField(decimal_places=2, max_digits=4)),
                ('burner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Burner', to='burners.Burner')),
            ],
        ),
    ]
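For context, the following is a minimal sketch of the model definition that this auto-generated migration would correspond to. It is an assumption reconstructed from the CreateModel call above, not the project's actual source: the field names and options mirror the migration, the 'oxigen' spelling is kept because it is the real column name, and the Burner model is assumed to live in the existing burners app.

# Hypothetical models.py counterpart to the migration above (a sketch, not the
# project's actual source). Field names and options mirror the CreateModel call.
from django.db import models


class CombustionAnalysisTest(models.Model):
    date = models.DateTimeField(auto_now_add=True)
    stack_temperature = models.DecimalField(max_digits=5, decimal_places=2)
    carbon_dioxide = models.DecimalField(max_digits=4, decimal_places=2)
    efficiency = models.DecimalField(max_digits=4, decimal_places=2)
    # max_digits=2 with decimal_places=2 only allows values below 1.00.
    excess_air = models.DecimalField(max_digits=2, decimal_places=2)
    oxigen = models.DecimalField(max_digits=4, decimal_places=2)  # spelling kept to match the migration
    carbon_monoxide = models.DecimalField(max_digits=4, decimal_places=2)
    burner = models.ForeignKey(
        'burners.Burner',       # assumed to be defined in the burners app
        on_delete=models.CASCADE,
        related_name='Burner',  # unusual related_name, kept from the migration
    )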
Diversity, Composition, and Geographical Distribution of Microbial Communities in California Salt Marsh Sediments ABSTRACT The Pacific Estuarine Ecosystem Indicators Research Consortium seeks to develop bioindicators of toxicant-induced stress and bioavailability for wetland biota. Within this framework, the effects of environmental and pollutant variables on microbial communities were studied at different spatial scales over a 2-year period. Six salt marshes along the California coastline were characterized using phospholipid fatty acid (PLFA) analysis and terminal restriction fragment length polymorphism (TRFLP) analysis. Additionally, 27 metals, six currently used pesticides, total polychlorinated biphenyls and polycyclic aromatic hydrocarbons, chlordanes, nonachlors, dichlorodiphenyldichloroethane, and dichlorodiphenyldichloroethylene were analyzed. Sampling was performed over large (between salt marshes), medium (stations within a marsh), and small (different channel depths) spatial scales. Regression and ordination analysis suggested that the spatial variation in microbial communities exceeded the variation attributable to pollutants. PLFA analysis and TRFLP canonical correspondence analysis (CCA) explained 74 and 43% of the variation, respectively, and both methods attributed 34% of the variation to tidal cycles, marsh, year, and latitude. After accounting for spatial variation using partial CCA, we found that metals had a greater effect on microbial community composition than organic pollutants had. Organic carbon and nitrogen contents were positively correlated with PLFA biomass, whereas total metal concentrations were positively correlated with biomass and diversity. Higher concentrations of heavy metals were negatively correlated with branched PLFAs and positively correlated with methyl- and cyclo-substituted PLFAs. The strong relationships observed between pollutant concentrations and some of the microbial indicators indicated the potential for using microbial community analyses in assessments of the ecosystem health of salt marshes.
The intersection of risk assessment and neurobehavioral toxicity. Neurobehavioral toxicology is now established as a core discipline of the environmental health sciences. Despite its recognized scientific prowess, stemming from its deep roots in psychology and neuroscience and its acknowledged successes, it faces additional demands and challenges. The latter, in fact, are a product of its achievements because success at one level leads to new and higher expectations. Now the discipline is counted upon to provide more definitive and extensive risk assessments than in the past. These new demands are the basis for the appraisals presented in the SGOMSEC 11 workshop. They extend beyond what would be offered in a primer of methodology. Instead, these appraisals are framed as issues into which what are usually construed as methodologies have been embedded. After nearly three decades of research in many parts of the world, neurobehavioral toxicity is now acknowledged as a significant outcome of chemical exposure. In contrast to the view prevailing even in the recent past, many observers now concede that its health and economic costs may exceed even those of cancer, the prototype for risk assessment, by substantial amounts. This new perspective has been accompanied by a surge of efforts designed to promote effective test methods, to explore the responsible mechanisms, to design applicable risk assessment procedures, and to determine the consequent policy implications. The process of recognition did not proceed as smoothly as expected, given the resonant scientific foundations provided by the behavioral neurosciences. One of these, behavioral pharmacology, the discipline that emerged in the 1950s in response to the introduction of chemotherapy for psychological disorders, provided a readily adaptable technology for exploring adverse effects. Workplace exposure criteria, such as threshold limit values (TLVs), had long relied on behavioral criteria such as work efficiency and alertness to danger to infer hazard. Perhaps the problem lay in how easily misunderstandings can arise about the definition and measurement of behavior. Although the discipline has generated an abundant literature and established a robust scientific footing, translating such efforts into policy decisions remains perplexing, mainly because of the difficulties posed by how to express them in risk terms. The conventional prototype for risk assessment is cancer, but numerous dissimilarities between neurobehavioral toxicity and carcinogenesis render it a rather imperfect model. Because behavior is often cited as the integrated product of a highly complex system, with numerous modes of expression, it should come as no surprise that it may be altered in equally diverse ways by xenobiotic influences and that the significance of any but the most blatant behavioral change eludes simplistic measures and interpretation. After all, behavior is a dynamic and plastic phenomenon. It would be deceptive to compare it to functions that are much more rigid and deterministic such as those of the cardiovascular system. Scientists unaccustomed to phenomena as malleable as behavior sometimes find it difficult to grasp both its essential lawfulness and the degree to which, concurrently, it may undergo critical modifications without displaying any overt abnormalities. Some consider behavioral changes to be analogous to alterations in software which, by proper reprogramming, may be overcome without major difficulties. 
Others may claim that behavioral deficiencies attributed, for example, to elevated exposure to metals, are more likely the product of deficiencies in social conditions. Such claims tend to erode when confronted jointly by data from properly conducted animal research and from epidemiological studies that deliberately and carefully weigh and balance the influence of potentially confounding social variables. Several of the joint chapters and indIvidual papers review these issues. A brpad, permeating issue derives from one of the original aims of SGOM-SEC: to make its contributions pertinent to countries lacking advanced industrial economies and resources. Chemicals and chemical production facilities tend to be transferred to such countries without an accompanying transfer of the technology of toxicology and environmental health science. This discrepancy results in unsafe control practices, excessive exposure levels, and, ultimately, mass chemical disasters. SGOM-SEC 11 strove to confront this issue by describing a range of methods from the relatively simple to the rather complex and by illustrating the different contexts in which different methods are appropriate. But even in advanced industrial societies, policy analysts, regulators, and others with decision-making responsibilities are confronted with irksome questions about neurobehavioral toxicity. In that arena, the challenges range from how to determine whether the potential for neurotoxicity exists to how to translate such potential into policy. SGOMSEC 11 was also designed to learn from the history of neurobehavioral toxicology. It sometimes proved difficult to convince toxicologists from other specialties and policy makers that even substances already dispersed in the human environment require careful evaluation of their neurobehavioral toxicity, despite no cogent evidence of adverse effects at environmental levels. Once a substance is widely distributed in the communal, or even the industrial environment, barriers to its removal are riveted in place. Especially if the arguments for its control are based, not on immediate threats to life but on a less tangible behavioral metric, inertia exerts a potent force. The arguments for premarket testing for neurobehavioral toxicity flow from such experiences. The Choice of a Focus on Behavior The adjective neurobehavioral is commonly applied because the nervous system determines the contours of its ultimate product, behavior. Any measure of nervous system status or function incurs immense complexities. Behavior's credentials as a valid toxicity index are often questioned because its determinants converge from many paths. The consequences of a specific neurochemical aberration such as a shift in receptor density, for example, may b'e expressed behaviorally in almost limitless ways depending on the specific end points and indices chosen for measurement and the constitutional capacities and behavioral history of the individual organism. Consider the numerous behaviors linked to the neurotransmitter dopamine: a variety of cognitive functions, mediation of reinforcement processes, tremor and other indices of motor function, sexual performance and motivation, and even speciesspecific behaviors. Naturally, the most appealing situation is one in which neurochemical findings could be correlated with behavioral data, but most behaviors are joined to more than one neurotransmitter system and embrace more than a single brain structure. 
Such multiple connections explain why neurochemistry, morphology, and even electrophysiology would normally be introduced only at the later stages of assessment. Because it arises from multiple sources, behavior might be viewed as a confusing index of toxicity. That potential for confusion, however, is also an argument in its favor. If it is subject to such a wide array of influences, the argument goes, it can then serve as an apical tool for testing general toxicity. If such evidence emerges, more specific behavioral or other measures can be applied to narrow the contributing variables or mechanisms. The opposing argument claims that, because behavior reflects the integration of a highly redundant system in which compensatory mechanisms may obscure a deficit in any particular functional domain, it is not a sensitive measure of adverse effects in all circumstances. Both arguments, despite their apparently conflicting stances, invoke equivalent conclusions: toxic potential should be assessed by choosing behavioral end points that offer the greatest breadth and precision of information. It should be recognized that the appeal of simplicity and economy may prove deceptive and even costly if they merely multiply the intrinsic ambiguities of risk assessment. SGOMSEC 11 aimed to deal explicitly with such supramethodological issues while offering critical reviews of the prevailing approaches. The final design of SGOMSEC 11 divided the issues into four sections: neurobehavioral toxicity in humans, neurobehavioral toxicity in animals, model agents, and risk assessment. Anyone familiar with the discipline appreciates that these rubrics do not describe fixed boundaries, but convenient dassifications. In fact, the extensive overlap between these categories proved to be an advantage because members of one group could be enlisted, in preparing the joint report, to assist another group when their special qualifications were required. The outline below provides a list of topics for which individual papers were commissioned. Each of the participants was asked to feature three points: How did we get to the current status of the topic? How can we relate it to risk assessment? 'What methodological advances should we seek to make a firmer connection with policy? Identification of Neurobehavioral Toxicity in Human Populations This section was designed to explicate the ways in which information about hazard and risk might be procured from human populations. In some past instances, this information came from clinical observations, usually on the basis of extreme exposure levels. The current mode of defining risks depends mostly on the use of psychological test instruments, but questions remain about their relevance and suitability. Clinical Data as a Basis for Hazard Identification Many of the neurobehavioral toxicants now viewed as hazardous to humans originally earned recognition through the observations of clinicians. These toxicants came to their attention because of signs and symptoms overtly expressed by patients. What are the lessons to be learned from this history? What tools should clinicians be prepared to deploy in such instances? Is hazard identification the only role fulfilled by dinical observations? Is there a series of steps, undertaken in a clinical context, that might lead to a firmer basis for identifying and estimating risk once such observations are validated? How can dinical observations be translated efficiently into epidemiological studies? Can a useful guide be designed for doing so? 
Is a tiered strategy, that is, one that builds systematically from one set of observations to another more complex set the most appropriate one to adopt, or does such staging of questions tend to delay the risk assessment process? Are there useful examples of such a progression? Designing and ValidatingTest Batteries Beginning in about the early 1970s, psychological test batteries began to be applied to the definition and assessment of adverse consequences stemming from exposure to central nervous system-active agents such as volatile organic solvents. By now, a plethora of test collections has penetrated the literature. Although these batteries possess many elements in common, they also diverge in philosophy and design. What are the strengths and weaknesses of the present array of batteries? How might they be improved while maintaining their advantages of ease of use and broad acceptance? Would they still be suitable for critical applications in less advanced countries? What about their suitability for longitudinal assessments? How well do they evaluate sensory and motor function? The most widely adopted batteries are anchored in diagnosis. Their roots lie in neuropsychology and the assessment of brain damage and psychopathology. Should other approaches be considered? Test batteries are generally constructed to use brief samples of behavior to screen for adverse effects in populations such as workers. Is the breadth of test items in the typical battery a problem? What are the advantages and disadvantages of adopting a more intense focus? This approach might be used for pilot and astronaut selection or to represent translations from complex performance in animals. Do such approaches hold any lessons for the evaluation of neurobehavioral toxicity? Translation of Symptoms into TestVariables A problem now looming for neuropsychology and neurobehavioral toxicology is the collection of quasi-clinical, often vaguely defined syndromes labeled as Multiple Chemical Sensitivity, Sick Building Syndrome, and Chronic Fatigue Syndrome. All are reflections of patient complaints lacking consistent objective verification such as that provided, say, by dinical chemistry profiles. As a result, many clinicians and biomedical scientists tend to view such complaints skeptically, or find themselves unable to propose any course of action. Does part of the problem arise from the emphasis by neuropsychology on diagnosis rather than on functional variables or on labeling of deficits rather than on determinations of how effectively the individual functions in his or her environment? How can such data be collected or synthesized or estimated? Are there especially suitable experimental designs for such questions, such as singlesubject designs? What alternatives to current assessment procedures hold promise? Would they be suitable for longitudinal evaluations such as those that might become necessary for monitoring the aftermath of a poisoning episode? Developmental Neurotoxicity The period of early brain development is a precarious stage because insults inflicted during this time seem to ramify in many directions, often first becoming perceptible only after reaching a particular epoch of the life cycle. As a consequence, a full evaluation Evaluations in laboratory animals fulfill two purposes. First, for new chemicals, these evaluations should make it possible to determine whether an agent presents a significant hazard. They also allow exploration of the potential dimensions of the hazard. 
Finally, they may make it possible to distill quantitative risk estimates for humans, in parallel with the way in which bioassay data are used in cancer risk assessment. Tumors, however, are presumed to reflect processes that will occur in human hosts. Neurobehavioral deficits in animals are less directly translatable into human functions. What should be the role of animal research and in what ways can it serve the ultimate purpose of risk assessment? Many critics attack the validity of extrapolating behavioral data from animals to humans. Indeed, behavior seems to be highly species-specific and exquisitely adapted to the organism's and its species survival needs. Although such critics grant the universality of the genetic code, they are less willing to grant the universality of the neural mechanisms governing the operation of nervous systems in different species. In this framework, humans are viewed as beyond extrapolation, with human behavior accorded the status of some emergent phenomenon disconnected from the brain structures they share with other species. No one denies that the structural differences -between rodent and human brains and the differences in behavioral repertoires vitiate any facile and superficial extrapolations. But the underlying functional mechanisms of the brain, and their expression in behavior, are shared by these organisms. Rat behavior can be used as a model of human behavior if a model is defined as a system possessing essentially the same functional properties as the one it simulates, except in a simplified version. Deficits in human behavior ascribed to neurotoxicants tend to manifest themselves in fundamental functional properties shared with other species. Labels such as attention, emotional responsivity, sensory processing, motor coordination, learning disabilities, and others are not specifically human properties of behavior. Human language is distinctive, of course, but its acquisition displays a pattern common to many other behaviors that follow a developmental sequence in which environmental and constitutional variables merge continuously. The primary source of confidence in the power of extrapolation though is a body of findings that supports the congruence of human and animal responses to neurotoxicants. Natural Populadtions as Sentinels Safety evaluation of environmental chemicals has been broadened to include ecological risk assessment. The U.S. EPA's Science Advisory Board report, Reducing Risk, is one instance of this growing appreciation, but the impact of chemical pollution on natural populations rose to a subject of widespread concern after Rachel Carson's seminal book. We now acknowledge that a major element in this impact derives from disruptions in behavior; one example is a reported diminution of nest attentiveness by birds in the Great Lakes. What are the indicators that up to now have proven useful in natural populations? In which directions should improvement in these methods be pointed? What is the extent of concordance between such observations and human health effects or with laboratory animal studies? How can ecological observations be converted into the kinds of quantified variables characteristic of laboratory experiments without losing essential information? Laboratory Approaches: Scope and Selection ofEnd Points For new chemicals, laboratory assays provide the first filtering stage for potential toxicity. 
Currently, a standardized set of observations, such as a functional observation battery (FOB), is used to probe for neurobehavioral effects. Certain regulatory bodies have also required measures of motor activity, perhaps accompanied by neuropathology at this stage. These criteria are acknowledged as broadly suggestive rather than as definitive, especially at the point when dose-response modeling enters the risk assessment process. For many purposes, the clinical examination, as in humans, will represent the first initiative, and often the first clues that a neurotoxic agent has appeared on the scene. Can a standardized protocol be designed that will prove feasible, in settings lacking other resources, and sensitive as well? How should such a protocol be modified for examinations in the field, as for wild animal populations? If a more comprehensive evaluation is sought, what should be its constituents? What considerations should guide the selection of experimental parameters? What research should be conducted to help refine such a process? What constraints are imposed by the extrapolation issue? How vital is it to assure that observations in animals reflect analogous functions in humans? Is it more important to select end points that reflect the functional capacities of the particular species? What economies of approach are feasible when resources are limited? Does the strategy of tiering, in which assessments branch to increasingly specific and complex assessments, make sense in such situations? How might low cost and sensitivity be combined? What should be the priorities in such a process? Developmental Neurotoxicants Exposure to chemicals during early development often inflicts toxic consequences rather different from the consequences inflicted on mature nervous systems. In addition to the modes of damage, however, differences arise in how the damage may be expressed. For example, it may emerge only after a prolonged latency, perhaps as late as senescence. Or, it may appear in different guises at different phases of the life cycle. U.S. EPA and other regulatory bodies have prescribed standardized protocols for assessing developmental neurotoxicity. Do these protocols offer support for a comprehensive, quantitative risk assessment? If not, how should they be modified? Are they efficiently designed and are some elements of these protocols possibly redundant? For example, does the absence of functional impairment at a particular exposure level preclude morphological aberrations at that level? Or must all potential sources of information be examined? Model Agents The agents discussed in this section offer cogent history lessons. Organic solvents and chlorinated hydrocarbons were widely used for many years without much concern over their possibly adverse effects. By the time these properties had been identified in a painfully slow process, the agents had already pervaded the environment or had become so essential that their removal, even if technically possible, became impractical. Methylmercury and lead had been recognized as neurotoxicants long before their current prominence, but an appreciation of their more abstruse expression at low exposure levels required an abundance of resources and investigator dedication in the face of sometimes monumental skepticism. Current neurobehavioral toxicology largely owes its standing to these agents because they exemplify the power of behavioral end points.
We asked the participants to review what we have learned from investigations of agents now established as prototypes. For example, would a retrospective analysis of the literature built around such model agents provide guidance for how to approach new agents? What would have been the most appropriate testing schemes and toxic end points, and which assessment strategy would have yielded maximum information at the least cost? Those enumerated below all owe their original identification as neurobehavioral toxicants to observations in humans, typically at high doses. What might have been the outcome had these agents first been examined as new chemicals? Which end points would have proven to be sensitive? To what degree, for each agent, have we observed a convergence between progress in human and animal research? Lead Lead was recognized as a hazard even in antiquity but was frequently ignored. Only with the accumulating, incremental evidence provided by methodological refinements did we progress to the present situation. The current Centers for Disease Control (CDC) guidelines denote blood levels above 10 µg/dl as a potential index of excessive exposure, a sharp fall from the standards prevailing only a short time ago. Animal and human data show periods both of convergence and divergence but, on the whole, took parallel paths. Attaining convergence, the current situation, required improvements in both sets of methodologies, but the animal data proved critical because of the criticisms aimed at the epidemiological studies. In essence, investigators learned how to ask the appropriate questions. It was not a process that would have succeeded without the inevitable but instructive blunders. Methylmercury Not long ago, methylmercury was viewed only as a hazardous chemical confined to narrow purposes and distribution. A chain of mass chemical disasters gradually altered this view, but the extrapolation from mass disasters to broad implications for public health came slowly. On the basis of knowledge acquired from these disasters, 26 states in the United States have posted fish advisories. Animal research contributed significantly to our understanding of the underlying mechanisms of toxicity, but the risk issues are still being played out, primarily with the human disaster data. How has animal research illuminated the human risk perspective? What has it taught about the approach to unevaluated chemicals? What lessons should be drawn about the longitudinal monitoring of human populations? Do the animal data allow reasonable dose extrapolation? Organochlorine Pesticides and Related Compounds Compounds ranging from dichlorodiphenyltrichloroethane (DDT) to 2,4-dichlorophenoxyacetic acid (2,4-D) to the polychlorinated biphenyls (PCBs) to 2,3,7,8-tetrachlorodibenzo-p-dioxin (TCDD) have been implicated in neurotoxicity. Especially for the last two classes of chemicals, recognition of their potential neurotoxic properties emerged only gradually, perhaps because it was submerged by concerns about carcinogenicity. What is the current perspective about the health risks of these compounds, and what lessons does its evolution provide for how other classes of chemicals should be examined? Such substances are also now implicated as environmental estrogens with a new spectrum of neurobehavioral issues to address, some of which may even be lurking in data we already possess. Solvents Volatile organic solvents became an early focus of human neurobehavioral toxicology.
Their neurotoxic properties have always been recognized, even in setting exposure standards in the workplace. Wider recognition of these properties, especially in the absence of gross dysfunction, is attributable to the application of psychological testing methods. Because methodological advances moved in parallel with improvements in study design, the solvents literature has provided guidance for similar questions. The evolution of this research area to its current state should offer lessons on how to cope with related issues such as those stemming from chemical sensitivity syndromes. As with lead, animal models came on the scene only after solvent neurotoxicity had been well established. The same degree of parallelism seen with lead has yet to be achieved and awaits the application of equally sensitive behavioral criteria. Quantification, Modeling, and Definition of Risk The ultimate goal of neurobehavioral toxicology, apart from its inherent contributions to basic science, is formulating risk. Although, by tradition, toxicity data are transformed into values such as NOAELs, this is simply a regulatory convenience rather than a risk assessment. The conversion of neurobehavioral data into quantitative risk assessments presents numerous challenges. Cancer risk assessment, the prototype, is based on premises that cannot be applied to neurobehavioral toxicity. Among these are the assumption of a unitary biological process, cumulative dose as a valid exposure parameter, and the irrelevance of acute animal toxicity data for the prediction of carcinogenic potential. Translation of Neurobehavioral Data into Risk Figues Another legacy of the cancer risk model is its dependence on quantal data. Such measures are easier to handle for risks expressed in probabilistic terms, but most neurobehavioral measures are continuous rather than discrete. One result of this disparity is that risk for systemic outcomes is typically framed in terms such as NOAELs. Furthermore, many effects are graded over time, so that they present features best expressed, perhaps, as 3-dimensional surfaces. What A unique assortment of questions is posed by developmental neurotoxicity because the process of development itself offers inherent enigmas. Species extrapolation in this context, despite fundamental commonalities among species, poses an additional layer of uncertainty upon those already confronting risk evaluations based on species comparisons. Is the prevailing strategy adequate for even gross prediction or do its deficiencies herald further errors or even disasters? Neurobehavioral Epidemiology How do neurobehavioral end points coincide with the requirements of epidemiology? Rather than cases, for example, the data may consist of dose-effect relationships in which the effect may be expressed as alterations in a spectrum of deficits, or, because of individual patterns of susceptibility, individuals may differ in their relative responsiveness to different end points. What would be an appropriate epidemiological framework for assessing neurobehavioral toxicity? Setting Exposure Standards: A Decision Process Most observers recognize that, barring rejection of an agent at the earliest stage of risk assessment, a broad but necessarily superficial appraisal of potential neurobehavioral toxicity may be insufficient for quantitative risk assessment or even for identifying critical end points that are not easily appraised with simpler techniques. 
Under what conditions should a superficial appraisal be relied upon to formulate risk? Assume that further investigations beyond the simplest may have to be conducted. Can a cogent design for a sequential strategy be formulated? What are satisfactory starting and stopping points? One model of a quasi-tiered approach is the assessment of developmental neurotoxicity, a model imposed simply by the inability to reach definitive conclusions about the impact of exposure at one particular age from results determined at another age. What should be the major decision points in evaluations not aimed at developmental questions or in evaluations of developmental toxicity? Is it more efficient to begin with the later decision points than to proceed, say, from simple to complex in several stages? That is, would the later decision points embody, as well, the earlier ones? Are there decision rules that can be constructed to guide such a process? Can decision nodes be established at which certain paths can be taken for more definitive conclusions? Tiered testing schemes generally proceed from simple to complex criteria. This direction generally implies corresponding dimensions such as from cheap to expensive, from crude to sensitive, from high-dose to lowdose effects, from acute to chronic effects, from adult exposure to developmental toxicity, from hazard identification to quantitative risk assessment. Such progressions reveal where the problem lies in a tiered testing approach: If merely the absence of toxicity in tier 1 procedures is legally required for approval of substances that may invade the environment and expose humans and animals, new substances will be tested by relatively simple and insensitive tests following acute high-dose administration in adult animals. Would such a strategy be adequate to offer protection against the recurrence of situations such as those described under Model Systems? Will more scientific battles have to be fought in 10 years to prompt an assessment of the neurobehavioral toxicity of substances introduced today? Summary Neurobehavioral toxicology is now established as a core discipline of the environmental health sciences. Despite its recognized scientific prowess, stemming from its deep roots in psychology and neuroscience and its acknowledged successes, it faces additional demands and challenges. The latter, in fact, are a product of its achievements because success at one level leads to new and higher expectations. Now the discipline is counted upon to provide more definitive and extensive risk assessments than in the past. These new demands are the basis for the appraisals presented in the SGOMSEC 11 workshop. They extend beyond what would be offered in a primer of methodology. Instead, these appraisals are framed as issues into which what are usually construed as methodologies have been embedded.
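To make the quantal-versus-continuous point raised under "Translation of Neurobehavioral Data into Risk Figures" more concrete, the following is a small, purely illustrative sketch (not part of the original workshop material) of how a continuous behavioral end point might be fit to a dose-effect model and converted into a benchmark-dose-style figure rather than a NOAEL. All doses, scores, the exponential model, and the 10% benchmark response are invented for illustration.

# Illustrative only: fit an invented continuous dose-effect data set and derive an
# approximate benchmark dose (BMD10) instead of reporting a NOAEL.
import numpy as np
from scipy.optimize import curve_fit

dose = np.array([0.0, 1.0, 3.0, 10.0, 30.0])       # hypothetical dose levels (mg/kg/day)
score = np.array([100.0, 98.0, 93.0, 80.0, 62.0])  # hypothetical mean performance scores


def exp_decline(d, e0, k):
    """Simple exponential decline of a continuous behavioral end point with dose."""
    return e0 * np.exp(-k * d)


(e0, k), _ = curve_fit(exp_decline, dose, score, p0=[100.0, 0.01])

# Benchmark response: the dose at which the fitted curve falls 10% below the control mean.
bmd10 = np.log(1.0 / 0.9) / k
print(f"fitted control mean = {e0:.1f}, decline rate = {k:.4f} per mg/kg/day")
print(f"approximate BMD10 = {bmd10:.1f} mg/kg/day")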
EmotionBox: A music-element-driven emotional music generation system based on music psychology With the development of deep neural networks, automatic music composition has made great progress. Although emotional music can evoke listeners' different auditory perceptions, only few research studies have focused on generating emotional music. This paper presents EmotionBox -a music-element-driven emotional music generator based on music psychology that is capable of composing music given a specific emotion, while this model does not require a music dataset labeled with emotions as previous methods. In this work, pitch histogram and note density are extracted as features that represent mode and tempo, respectively, to control music emotions. The specific emotions are mapped from these features through Russell's psychology model. The subjective listening tests show that the Emotionbox has a competitive performance in generating different emotional music and significantly better performance in generating music with low arousal emotions, especially peaceful emotion, compared with the emotion-label-based method. Introduction Computational modeling of polyphonic music has been deeply studied for decades (). Recently, with the development of deep learning, neural network systems for automatic music generation have made great progress on the quality and coherence of music (;Herremans and Chew, 2019;). As we know, emotion is of great importance in music since the music consistently elicits auditory responses from its listeners (Raynor and Meyer, 1958). Therefore,. Emotional music has significant implications for subjects such as music psychology, music composition, and performance. However, surprisingly, automatic systems rarely consider emotion when generating music, which lacks the ability to generate music that evokes a specific auditory response. To study the automatic music generation with music psychology, it is necessary to review the relation between music emotions and music elements. As mentioned by Parncutt, the relationship in Western tonal music between emotional valence (positive vs. negative) and music-structural factors, such as tempo (fast vs. slow) and mode (major vs. minor tonality), have been studied. Experimental results have illustrated that a fast tempo tends to make music sound happy while slow tempo has the opposite effect. In typical tonal musical excerpts, the experimental result showed that tempo was more determinant than the mode in forming happy-sad judgments (Gagnon and Peretz, 2003). Many experiments have demonstrated that musical excerpts written in the major or minor mode were judged to be positive or negative, respectively (Hevner, 1935(Hevner,, 1936. Recent psychological studies have shown that the happiness ratings were elevated for fast-tempo and major-key stimuli while sadness ratings were elevated for slow tempo and minor-key stimuli ((Hunter et al.,, 2010. Another study has revealed that mode and tempo were the most impactful cues in shaping emotions while sadness and joy were among the most accurately recognized emotions (Micallef Grimaud and Eerola, 2022). The effect of cues on emotions in music as combinations of multiple cues rather than as individual cues has also been discussed, as mixed cues might portray a complicated emotion. Most previous emotional music generation models were based on emotion labels (Ferreira and Whitehead, 2019;;), without taking into consideration the effect of music psychology. 
Moreover, labelbased methods require a huge music dataset labeled with different emotions, which need a lot of tedious work. Utilizing music psychology instead of the manual labels to train the emotional music generator and exploring the most suitable music elements for evoking the specific emotion are the main focuses in this paper. In this work, we extract two features from two music elements (i.e., tempo and mode) to supervise the deep neural network for generating music with a specific emotion. To the best of our knowledge, this is the first music-element-driven emotional symbolic music generation system based on a deep neural network. Related work Currently, deep learning algorithms have become mainstream methods in the field of music generation research. Music generation can be classified into two types: symbol domain generation (i.e., generating MIDIs or piano sheets ;) and audio domain generation (i.e., directly generating sound waves van den ;;). Recurrent Neural Network (RNN) or its variants have been widely used to model sequential data. Its outstanding temporal modeling ability makes it suitable for music generation. The first attempt is that Todd used RNN to generate monophonic melodies early in Todd. To solve the gradient vanishing problem of RNN, Eck et al. proposed an LSTM-based model in music generation for the first time (Eck and Schmidhuber, 2002). In Boulanger-Lewandowski et al., RNN combined with Restricted Boltzmann Machines was proposed to model polyphonic music, which is superior to the traditional model in various datasets. In 2016, the magenta team proposed the Melody RNN model which can generate long-term structures in songs. In 2017, anticipate RNN (Hadjeres and Nielsen, 2017) was used to generate music interactively with positional constraints. Moreover, Bi-axial LSTM (BALSTM) proposed by Johnson et al. are capable of generating polyphonic music while preserving translation invariance of the dataset. Recently, more advanced deep generative models, such as VAE (Hadjeres and Nielsen, 2017;), GAN (;), and Transformer (;Zhang, 2020), have gradually been used in music generation. The expressive generation has long been explored in the field of computer music, reviewed in Kirke and Miranda. With the development of deep learning, there are several previous attempts to generate emotional music based on deep neural networks. Ferreira et al. proposed a multiplicative long short-term memory (mLSTM) based model that can be directed to compose music with a specific emotion and analyze music emotions (Ferreira and Whitehead, 2019). mLSTM is a RNN architecture for sequence modeling that combines the factorized hidden-to-hidden transition of multiplicative RNN with the gating framework from the LSTM. However, only video game soundtracks are used in training and evaluation. In 2019, Zhao et al. extended the BALSTM network proposed in Mao and used the model in emotional music generation (). Recently, Ferreira et al. proposed a system called Bardo Composer, which generates music with different emotions for the tabletop role-playing games based on the mood of players (). However, all methods mentioned above are labelbased thus a large dataset labeled with emotions is needed. Moreover, to the best of our knowledge, no MIDI dataset labeled with emotion is available online. Labeling the dataset manually takes a lot of time and effort. In our work, we train the model on an open-source MIDI dataset without emotion labels. 
Data preprocessing Note representation The input of our proposed generation model consists of polyphonic MIDI files, which are composed of both melody and accompaniment. To present notes with expressive timing and dynamics, we use the performance encoding proposed in Oore et al., which consists of a vocabulary of NOTE-ON, NOTE-OFF, TIME-SHIFT, and VELOCITY events. The main purpose of encoding is to transform the music information in MIDI files into a suitable presentation for training the neural network. The pitch information in MIDI files ranges from 0 to 127, which is beyond the pitch range of a piano. In our work, pieces in the training set are all performed by piano. Thus, the pitch range is only presented from 21 to 108, which corresponds to A0 and C8 on piano, respectively. For each note, music dynamics is recorded in MIDI files, ranging from 0 to 127 to present how loud a note is. For convenience, we use velocity ranges from 0 to 32 to convey the dynamics. The range can be mapped from 0 to 127 when generating MIDI files. Finally, a MIDI excerpt is represented as a sequence of events from the following vocabulary of 240 different events: 88 NOTE-ON events: one for each of the 88 MIDI pitches. Each event starts a new note. 88 NOTE-OFF events: one for each of the 88 MIDI pitches. Each event releases a note. 32 TIME-SHIFT events: each event moves the time step forward by increments of 15 ms up to 1 s. 32 VELOCITY events: each event changes the velocity applied to all upcoming notes. Feature extraction In this work, the model is fed with two extracted musical features, namely pitch histogram and note density. All these calculations are done automatically by computers and thus no human labors are required. A pitch histogram () is an array of 12 integer values indexed by 12 semitones in a chromatic scale, showing the frequency of occurrence of each semitone in a music piece. An example of a pitch histogram in C major is shown in Table 1. According to music theory, notes with a sharp sign are not included in C major. Therefore, in this work, we set their corresponding value in pitch histogram as 0 so that they will never be played in a C major music. C, F, and G are the tonic, subdominant, and dominant in C major, respectively. They are the main elements in a C major music so their corresponding value in pitch histogram is set as 2, which means the probability of starting these notes is two times as much as other notes in C major. Pitch histograms can capture musical information regarding harmonic features of different scales. Note density is a number to record how many notes will be played within a time window (2 s in our work). Note density can present the speed information in each part of a music piece. Note density and pitch histogram are calculated at each time step. The motivation for this is that we can explicitly choose a pitch histogram and note density when creating samples, which provides us with two options to control the music generation. By changing the pitch histogram and note density, we can therefore alter the mode and tempo of the music, which ultimately leads to emotional difference. Russell emotion model There are various models for describing emotion and they can be mainly divided into four categories: discrete, dimensional, miscellaneous, and music-specific models (Eerola and Vuoskoski, 2012). This work is based on the simplified emotion model of Russell. 
Russell's circumplex model is a typical dimensional model, which uses two coordinate axes to represent the degree of valence and arousal, respectively. This emotion model is shown in Figure 1 (Russell's two-dimensional valence-arousal emotion space; the x-axis denotes valence while the y-axis denotes arousal). For simplicity, we only use the four basic emotions shown in the four quadrants. Our model is designed to generate music with these four basic emotions, namely happy, tensional, sad, and peaceful. The four emotions are located in four different quadrants, representing four varying degrees of valence and arousal. Emotion presentation As we have mentioned in the introduction, there is a strong connection between music elements and music emotional valence. Therefore, we combine note density and pitch histogram to control the tempo and mode of the generated sample. According to twelve-tone equal temperament, an octave is divided into 12 parts, all of which are equal on a logarithmic scale. So, we can choose the mode when generating music by changing the probability of each semitone. We use an array containing 12 integers to represent a pitch histogram. For example, C major is represented as [2, 0, 1, 0, 1, 2, 0, 2, 0, 1, 0, 1], where 2 represents the tonic, subdominant, and dominant while 1 represents the other notes in the scale. The pitch histogram of C minor is constructed in the same way from the notes of the C minor scale, according to music theory. A pitch histogram is used to control the valence of music. Note density indicates the number of notes that will be performed within 2 s (the time window is adjustable). We set the note density to 1 to represent slow music and to 5 to represent fast music. Note density is used to control the arousal of music. Combining mode and note density as two adjustable parameters, we aim to generate four categories of emotional music: happy (with the major scale and fast tempo), tensional (with the minor scale and fast tempo), peaceful (with the major scale and slow tempo), and sad (with the minor scale and slow tempo). Method Neural network architecture A recurrent neural network has excellent performance in modeling sequential data. A gated recurrent unit (GRU) is an improved version of the standard RNN. It was proposed to solve the vanishing gradient problem of a standard recurrent neural network during backpropagation. The gating mechanism enables the GRU to carry information from earlier time steps to later ones. The illustration of the GRU is shown in Figure 2. In our work, the GRU is used for temporal modeling. The model is shown in Figure 3. Input X represents the masked performance events while Input Z represents the pitch histogram and the note density. Masking means the last event of each event sequence is dropped and the rest of the event sequence is sent to the neural network as the input. The reason for this is to make the model generate the unmasked sequence recursively. Then, we can calculate the loss, i.e., the difference, between the generated unmasked sequence and the ground truth. If the length of an event sequence is T, the size of Input X (i.e., the masked performance events) will be (T − 1) × 1. Each performance event is converted to a 240-dimension vector by a 240 × 240 embedding layer. The 240-dimension vector was chosen for convenience. The pitch histogram is a (T − 1) × 12 vector and the note density is converted to a (T − 1) × 12 one-hot vector. A (T − 1) × 1 zero vector is used to increase the stability of the neural network. Therefore, the size of Input Z is (T − 1) × 25.
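As an illustration of the two controlling features just described, the following sketch computes a 12-bin pitch histogram and a note-density value from a MIDI file and maps the (mode, tempo) combination onto the four target emotions. It uses the pretty_midi package mentioned later in the paper; the function names, the natural-minor histogram, and the major/minor decision rule are my own simplifications, not the authors' released code.

# Sketch (not the authors' code): extract the two conditioning features and map
# them to one of the four target emotions from Russell's quadrants.
import numpy as np
import pretty_midi

C_MAJOR = np.array([2, 0, 1, 0, 1, 2, 0, 2, 0, 1, 0, 1])  # tonic/subdominant/dominant weighted 2
C_MINOR = np.array([2, 0, 1, 1, 0, 2, 0, 2, 1, 0, 1, 0])  # same idea for the natural minor scale (assumed)


def pitch_histogram(midi_path: str) -> np.ndarray:
    """Count how often each of the 12 pitch classes occurs in the file."""
    pm = pretty_midi.PrettyMIDI(midi_path)
    histogram = np.zeros(12)
    for instrument in pm.instruments:
        for note in instrument.notes:
            histogram[note.pitch % 12] += 1
    return histogram


def note_density(midi_path: str, window: float = 2.0) -> float:
    """Average number of note onsets per `window` seconds (2 s in the paper)."""
    pm = pretty_midi.PrettyMIDI(midi_path)
    onsets = [n.start for inst in pm.instruments for n in inst.notes]
    duration = max(onsets) if onsets else 0.0
    return len(onsets) * window / duration if duration > 0 else 0.0


def target_emotion(mode: str, density: int) -> str:
    """Map (mode, note density) to a quadrant of Russell's model, as in the paper."""
    fast = density >= 5
    if mode == "major":
        return "happy" if fast else "peaceful"
    return "tensional" if fast else "sad"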
The pitch histogram and note density are then concatenated with the 240-dimension vector. The size of the concatenated vector is (T − 1) × 265. The concatenated input is fed into a 265 × 512 fully connected layer and a rectified linear unit (ReLU) activation function. Then, this (T − 1) × 512 vector is sent into a three-layer, 512-unit GRU, with a 0.3 dropout applied after each of the first two GRU layers. The GRU output is then fed to a 240-unit linear layer. The output of the neural network is a T × 240 vector. The output represents the probability of each event at each time step. The cross-entropy loss between the generated sequence and the unmasked event sequence, namely the ground truth, is then calculated. The code of this work has been open-sourced on GitHub (https://github.com/KaitongZheng/EmotionBox).

(Figure 3: Diagram of the EmotionBox model architecture. "Input X" denotes a sequence of events and "Input Z" denotes the pitch histogram and note density.)

Emotional music generation
At the generating stage, we generate samples with different emotions by specifying a particular pitch histogram and note density. When the model generates music, the first event is randomly selected. The first event, pitch histogram, and note density are sent to the model to create new events recursively. The output of our model is the probability of each of the 240 events. If we only use greedy sampling to select the event with the largest probability, the sample may end up with some partial repetition, which means a small part of the music may repeat again and again. Therefore, we combine greedy sampling with stochastic sampling. We select a threshold ranging from 0 to 1. Whenever a new event is sampled, we produce a random number ranging from 0 to 1. If the random number is larger than the threshold, the event is sampled using the greedy algorithm, which means selecting the event with the largest probability. If not, the event is sampled based on the probability of each event, which produces a lot of uncertainty. When generating a new piece of emotional music, we can use temperature to alter the degree of uncertainty. Temperature is a hyperparameter used to control the randomness of predictions by scaling the logits before applying softmax. Lower temperature results in more predictable events, while higher temperature results in more surprising events. The temperature parameter is manually tuned by listening to the generated music. If the music is too random, the temperature is turned down. If the music is too repetitive, the temperature is turned up.

Experiment

Dataset
We selected a widely used dataset, piano-midi, to train our model (the training data can be found at http://www.piano-midi.de/). It includes 329 piano pieces from 23 classical composers. Each piece is a MIDI file capturing a classical piano performance with expressive dynamics and timing. The dataset is highly homogeneous because all of the pieces in it are classical music, and the solo instrument is consistently piano. We also used this emotion-labeled dataset, with the permission of the authors, to train a label-based model. The Pretty-Midi package was used to extract the note information from the MIDI files (Raffel and Ellis, 2014).

Training
At the training stage, the whole sequence of events is cut into 200-event-wide event sequences. The stride between event sequences is 10 events.
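Returning to the generation step described in the Emotional music generation subsection above, a minimal sketch of the threshold-controlled mix of greedy and stochastic sampling with temperature scaling could look as follows (an illustrative reimplementation, not the released EmotionBox code; the function and parameter names are assumptions):

import numpy as np

def sample_next_event(logits, temperature=1.0, threshold=0.5, rng=None):
    """Pick the next event index from unnormalized logits.

    A random number is drawn; if it exceeds `threshold` the most likely event is
    taken (greedy), otherwise the event is drawn from the temperature-scaled
    softmax distribution, mirroring the procedure described in the text."""
    rng = rng or np.random.default_rng()
    scaled = np.asarray(logits, dtype=float) / max(temperature, 1e-8)
    scaled -= scaled.max()                       # numerical stability
    probs = np.exp(scaled) / np.exp(scaled).sum()
    if rng.random() > threshold:
        return int(np.argmax(probs))             # greedy choice
    return int(rng.choice(len(probs), p=probs))  # stochastic choice

With this convention a threshold near 0 makes generation almost entirely greedy, while a threshold near 1 makes it almost entirely stochastic; lowering the temperature sharpens the softmax and makes the stochastic draws more predictable, matching the tuning behaviour described above.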
The network was trained using the ADAM optimizer with a cross-entropy loss between the predicted event and the ground-truth event. We used a learning rate of 0.0002, and the model was trained for 100 epochs with a batch size of 64. We implemented our models in PyTorch.

Comparison
We implemented a label-based model for comparison, as all previous emotional music generation models were based on emotion labels (Ferreira and Whitehead, 2019). In order to evaluate the performance of our proposed method against the label-based method, the structure of the label-based model remains unchanged except that the inputs Z of the model are substituted with emotion labels. One-hot coding is used to represent the four basic emotions. The neural network is trained to learn the mapping between music emotions and well-classified emotion labels. In the generation stage, the label-based model takes the emotion label as input.

Results and discussion
To evaluate the performance of music generation given a specific emotion, a subjective listening test was carried out to compare our proposed method with the label-based method. Similar to subjective listening tests used for analyzing the classification of different styles, three 6-s-long music samples were provided for each emotion and each model. The total number of music samples was 24 (3 samples × 4 emotions × 2 models). The samples were randomly selected and shuffled. Table 2 shows the average note density of the experimental stimuli. Twenty-six subjects took part in the test. For each sample, participants were asked which emotion they observed in the sample and had to choose one option from happy, peaceful, sad, and tensional. It is somewhat difficult for untrained participants to classify the emotion of music. Therefore, we provided a warming-up stage by playing four manually selected emotional music samples with their corresponding emotion labels. (The subjective listening test files can be found at https://github.com/KaitongZheng/EmotionBoxDEMO.)

Emotion classification
In this section, we calculated the accuracy of emotion classification for each of the four emotions and the two methods. The statistical results are shown in Figure 4. Figure 4 shows that our proposed model, without a database labeled with emotions, has comparable performance to the label-based model in terms of emotion classification accuracy. Among the four kinds of emotion, the results indicate that the music samples with tensional and happy emotions were recognized with the highest accuracy for both methods. These observations can be explained by an emotion psychology study which showed that valence can be distinguished more easily in high-arousal stimuli. The proposed method outperforms the label-based method on peaceful and sad samples, which greatly overcomes the shortcomings of the label-based method and yields a more balanced result. A two-way ANOVA was used with emotion (happy, sad, tensional, peaceful) and model (EmotionBox, label-based) set as within-subject factors to investigate how these two factors, in combination, affect the accuracy of the subjective experiments. For each subject, the accuracy of emotion classification was calculated for each emotion and model. The classification accuracy was calculated by dividing the number of samples that were correctly recognized by the number of samples tested for each emotion and model (three tested samples per emotion and model). Table 3 shows a post-hoc Bonferroni-adjusted pairwise comparison between the two methods within each emotion.
(Figure 4: The mean accuracy and SD of the subjective evaluation test for classifying generated music samples into emotion categories.)

Table 3 indicates that there are significant differences between the two methods on tensional and peaceful samples (p-values less than 0.05 indicate a statistically significant difference at the 5% confidence level and are presented in bold type in the table). The emotion classification accuracy of the label-based method is significantly higher on the tensional emotion, while it is significantly lower on the peaceful emotion. There are no significant differences between the two methods on happy and sad samples. The note density of the experimental stimuli can be used to explain why the proposed model achieved good performance for peaceful samples whereas the label-based model worked well for tensional samples. Table 2 shows that the tensional samples of the label-based model have a much higher note density than those of the EmotionBox. Therefore, the subjects are more likely to judge the former as tensional. On the other hand, the peaceful samples of the EmotionBox have a much lower note density than those of the label-based model. Therefore, the subjects are more likely to judge the former as peaceful. A post-hoc Bonferroni-adjusted pairwise comparison between the emotions of the EmotionBox was conducted. The result shows no statistically significant differences (p > 0.05) between these emotions. Another post-hoc Bonferroni-adjusted pairwise comparison between the emotions of the label-based method was also conducted. The result shows no statistically significant differences (p > 0.05) between happy and tensional, or between peaceful and sad. For the other pairs, there are statistically significant differences (p < 0.05). Combined with Figure 4, the results indicate that, for the label-based method, emotions with higher arousal such as happy and tensional are more likely to be distinguished than emotions with low arousal such as sad and peaceful. To investigate the performance of generating different emotional music within each model, we also counted the results of all combinations between the emotion specified at the generating stage and the emotion classified by subjects, as shown in Table 4. Table 4A shows that the arousal of music is more distinguishable than the valence. For example, in the first row, 28% of happy samples were classified as tensional samples, which have the same level of arousal but a different level of valence. However, a happy sample is rarely classified as a peaceful sample, as they have different levels of arousal. This experimental result agrees with the observation that tempo is more determinant than mode in forming happy-sad judgments, as reported by Gagnon and Peretz. In our work, the tempo and the mode are associated with the arousal and valence of music, respectively. The classification of arousal and valence will be discussed in the next section. From Table 4B, the classification accuracy is similar for high-arousal music. However, for low-arousal music, the classification accuracy in terms of both the arousal and valence of emotion decreases significantly. In the last row, 26 and 28% of peaceful samples were perceived as happy samples and tensional samples, respectively, which indicates that the label-based method performs poorly at generating music with a low-arousal emotion.

Arousal and valence classification
Our proposed method uses note density and pitch histogram as features to represent the arousal and valence of a specific emotion, respectively.
To investigate whether these two features are suitable for training the deep neural networks, we calculated the accuracy of arousal and valence classification, as shown in Figure 5.

(Figure 5: The mean accuracy and SD of the subjective evaluation test for classifying generated music samples into arousal and valence categories.)

If the emotion specified during the generating stage and the emotion classified by subjects have the same arousal or valence, the classification result is counted as correct. For example, if the emotion of a sample specified during the generating stage is happy while it is classified as tensional by subjects, the classification result is viewed as correct because happy and tensional have the same arousal. A two-way ANOVA was used with arousal and model set as within-subject factors to investigate how these two factors affect the accuracy of the subjective experiments. Table 5 shows a post-hoc Bonferroni-adjusted pairwise comparison between the two methods in terms of arousal and valence. It shows that the classification accuracy of the EmotionBox is significantly higher than that of the label-based method on low-arousal emotions. For the other three emotion categories, Table 5 shows no significant difference between the two methods. The tempo and the mode are related to the note density and the pitch histogram, respectively, in our work. Note density and pitch histogram in turn represent arousal and valence, respectively. Without the limitation of note density, the label-based method tends to generate music with a faster tempo, which results in a low classification accuracy for the samples with low-arousal emotions. This result means that note density is a suitable feature to control the arousal of music.

Limitations and outlook
However, there are still some limitations to the proposed method. First, the classification of valence is still challenging, which indicates that the valence of music cannot be represented solely by mode. A more appropriate representation of valence should be investigated in future work. Second, the generated music is more like an improvisation. The model learns how to play the next note according to the previous notes, whereas it has no idea about the structure of the music. The structure of music is important and needs to be considered in future work. The EmotionBox can be used to help composers create music with a specific emotion by providing various novel samples. By tuning the network's parameters, the EmotionBox can be a versatile assistant for creating music. The combination of intelligent music composition and the performance of music robots based on emotional computing is a promising approach for the future development of human-machine interaction, which provides a practical solution to eliminate the interaction barrier between humans and machines. Automatic emotional music may also be helpful for music therapy. Studies have shown neurological evidence that music effectively enhances auditory and language function through the human brain's plasticity. Music therapies that use music as a treatment for tinnitus can leverage the plasticity of the auditory cortex and thus reduce the impact of tinnitus. Some researchers have also shown that emotional music may support emotion recognition in children with ASD, and thus improve their social skills. Music therapy often needs to avoid repetitive music.
By tuning the networks parameters, the proposed method can generate non-repetitive music with a predefined emotion, which may be helpful for music therapy applications. Conclusion In this work, we propose a music-element-driven automatic emotional music generator based on music psychology. This model does not need any music datasets with emotion labels that the previous methods required. The note density and the pitch histogram are chosen to present the arousal and valence of music, respectively. Then, different combinations of arousal and valence will be mapped to different emotions according to the Russell emotion model. Based on the specific note density and pitch histogram, our proposed method will be able to evoke listeners' different auditory perceptions and emotions. Subjective experimental results indicate that our proposed method has a significantly better performance in generating music with low arousal emotions. The results of the subjective listening test also indicate that note density is a suitable presentation for the arousal of music while more research studies should be carried out to find a more appropriate feature to convey the valence of music. The proposed method may have unique values for some music therapy applications. Data availability statement The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation. Ethics statement The studies involving human participants were reviewed and approved by the Ethics Committee of the Institute of Acoustics Chinese Academy of Sciences. The patients/participants provided their written informed consent to participate in this study. Author contributions KZ: writing. RM, KZ, and JS: methodology. CZ and XL: supervision and editing. JS: writing-review. JC: database. JW: evaluation. XW: data analysis and modification. All authors contributed to the article and approved the submitted version.
def with_previous(iterable):
    """Yield (previous, current) tuples, starting from the second item."""
    iterator = iter(iterable)
    previous = next(iterator)
    for item in iterator:
        yield previous, item
        previous = item


# Example input (any sequence of numbers works here).
my_list = [1, 4, 9, 16, 25]

# Pairwise differences between consecutive items.
differences = [current - previous for previous, current in with_previous(my_list)]
print(differences)  # [3, 5, 7, 9]
import { ProductoCantidad } from './productoCantidad.model'; import { ProductosPedido } from './../pedido/ProductosPedido'; import { ProductoService } from './../producto/producto.service'; import { PersonaServices } from './../persona/persona.service'; import { CestaService } from './cesta.service'; import { ActivatedRoute, Router } from '@angular/router'; import { Component, OnInit } from '@angular/core'; import { Cesta } from './cesta.model'; import { Producto } from '../producto/producto.model'; import { Pedido } from '../pedido/pedido.model'; import { PedidoService } from '../pedido/pedido.service'; @Component({ selector: 'app-cesta', templateUrl: './cesta.component.html', styleUrls: ['./cesta.component.css'], }) export class CestaComponent implements OnInit { cesta!: Cesta; producto!: Producto; productos!: Producto[]; pedido!: Pedido; total!: number; imagenEscaparate: string; constructor( private activatedRoute: ActivatedRoute, private cestaService: CestaService, private personaService: PersonaServices, private productoService: ProductoService, private pedidoService: PedidoService, private router: Router ) {} ngOnInit(): void { this.cesta = new Cesta(); this.setCesta(); } setCesta() { this.cestaService .recuperarCesta(this.personaService.usuario.id) .subscribe((cesta) => { this.cesta = cesta; this.total = 0; console.log('numero de productos' + cesta?.productoCantidad.length); this.productos = []; for (var index in this.cesta.productoCantidad) { console.log('for' + this.cesta.productoCantidad[index].idProducto); let indice = this.cesta.productoCantidad[index].idProducto; let item = new Producto(); this.productoService.getProductoById(indice).subscribe((producto) => { item = producto; this.total = this.total + producto.precio * this.cesta.productoCantidad[index].cantidad; producto.productoCaracteristicas.imagenesProducto[0].imagen = 'http://localhost:8090/api/productos/producto/imagen/' + item.productoCaracteristicas?.imagenesProducto[0]?.imagen; this.productos.push(item); console.log('itemnombre ' + item.nombre); }); } console.log('tamaño' + this.productos.length); }); } recuperaProducto(idProducto: number): Producto { this.producto = new Producto(); if (idProducto) { this.productoService.getProductoById(idProducto).subscribe((producto) => { this.producto = producto; console.log(this.producto.nombre); }); } return this.producto; } getProductoNumero(): number { return this.productos.length; } getProductos(): Producto[] { return this.productos; } tramitarPedido() { this.pedido = new Pedido(); this.pedido.estado = 'nuevo'; this.pedido.idUsuario = this.personaService.usuario.id; this.pedido.total = this.total; let lista = new Array<ProductosPedido>(); for (var index in this.cesta.productoCantidad) { let productoPedido = new ProductosPedido(); productoPedido.nombre = this.productos.find( (x) => x.id === this.cesta.productoCantidad[index].idProducto )?.nombre || ''; productoPedido.cantidad = Number( this.cesta.productoCantidad[index].cantidad ); lista.push(productoPedido); } this.pedido.listaProcutosPedido = lista; console.log(this.pedido.listaProcutosPedido.length); this.pedidoService.setPedido(this.pedido).subscribe((respuesta) => { this.router.navigate(['/direccion/' + respuesta.id]); }); } eliminaElemnetoDeCesta(id: number) { if (this.cesta.productoCantidad.length == 1) { this.cesta.productoCantidad = []; } else { let nuevalistaProductoCantidad = new Array<ProductoCantidad>(); for (var index in this.cesta.productoCantidad) { if (this.cesta.productoCantidad[index].idProducto != id) { 
console.log(id + '/' + this.cesta.productoCantidad[index].id); let productoPedidoEnLista = new ProductoCantidad(); productoPedidoEnLista.idProducto = this.cesta.productoCantidad[index].idProducto; productoPedidoEnLista.cantidad = this.cesta.productoCantidad[index].cantidad; nuevalistaProductoCantidad.push(productoPedidoEnLista); } } this.cesta.productoCantidad = nuevalistaProductoCantidad; } console.log(this.cesta.productoCantidad.length); this.cestaService.incluirEnCesta(this.cesta).subscribe((respuesta) => { this.cesta= new Cesta(); this.cesta = respuesta; this.setCesta(); }); } }
import csv
import json
import os
import re
from io import StringIO
from xml.sax.saxutils import escape

import yaml

# Note: order_filenames() is assumed to be provided elsewhere in this package;
# it should return the Document markdown filenames in chapter order.


class MASVS:
    ''' Creates requirements list out of markdown files. '''

    def __init__(self, lang):
        # Requirements keyed by MSTG ID, built from the markdown tables.
        self.requirements = {}

        if lang == "en":
            target = "../Document"
        else:
            target = "../Document-{}".format(lang)

        for file in order_filenames(target):
            for line in open(os.path.join(target, file)):
                regex = re.compile(r'\*\*(\d\.\d+)\*\*\s\|\s{0,1}(.*?)\s{0,1}\|\s{0,1}(.*?)\s{0,1}\|\s{0,1}(.*?)\s{0,1}\|(\s{0,1}(.*?)\s{0,1}\|)?')
                m = re.search(regex, line)
                if m:
                    req = {}
                    num_id = m.group(1).strip()
                    mstg_id = m.group(2).replace(u"\u2011", "-")

                    req['id'] = num_id
                    req['category'] = mstg_id
                    req['text'] = m.group(3).strip()
                    if m.group(5):
                        req['L1'] = len(m.group(4).strip()) > 0
                        req['L2'] = len(m.group(5).strip()) > 0
                        req['R'] = False
                    else:
                        req['R'] = True
                        req['L1'] = False
                        req['L2'] = False

                    self.requirements[mstg_id] = req

    def to_json(self):
        ''' Returns a JSON-formatted string '''
        return json.dumps(self.requirements)

    def to_yaml(self):
        ''' Returns a YAML-formatted string '''
        return yaml.dump(self.requirements, allow_unicode=True, indent=4, default_flow_style=False, sort_keys=False)

    def to_xml(self):
        ''' Returns XML '''
        xml = '<requirements>\n'

        for id, r in self.requirements.items():
            xml += f"\t<requirement id='{r['id']}' category='{r['category']}' L1='{int(r['L1'])}' L2='{int(r['L2'])}' R='{int(r['R'])}'>\n\t\t{escape(r['text'])}\n\t</requirement>\n"

        xml += '</requirements>'
        return xml

    def to_csv(self):
        ''' Returns CSV '''
        si = StringIO()
        writer = csv.DictWriter(si, ['id', 'category', 'text', 'L1', 'L2', 'R'], extrasaction='ignore')
        writer.writeheader()
        rows = [r for id, r in self.requirements.items()]
        writer.writerows(rows)

        return si.getvalue()
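# Illustrative usage (a sketch, not part of the original file): build the English
# requirements list and export it. The output filename here is hypothetical.
if __name__ == "__main__":
    masvs = MASVS("en")
    with open("masvs_en.yaml", "w", encoding="utf-8") as f:
        f.write(masvs.to_yaml())
    print(f"Exported {len(masvs.requirements)} requirements")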
SARS-CoV-2 anti-nucleocapsid and anti-spike antibodies in an emergency department healthcare worker cohort: September 2020–April 2021

Background: Emergency department (ED) workers have an increased seroprevalence of SARS-CoV-2 antibodies. However, breakthrough infections in ED workers have led to a reduced workforce within a strained healthcare system. By measuring levels of IgG antibodies to the SARS-CoV-2 nucleocapsid and spike antigens in ED workers, we determined the incidence of infection and described the course of antibody levels. We also measured the antibody response to vaccination and examined factors associated with immunogenicity.

Methods: We conducted a prospective cohort study of ED workers at a single ED from September 2020 to April 2021. IgG antibodies to the SARS-CoV-2 nucleocapsid antigen were measured at baseline, 3, and 6 months, and IgG antibodies to the SARS-CoV-2 spike antigen were measured at 6 months.

Results: At baseline, we found 5 out of 139 (3.6%) participants with prior infection. At 6 months, 4 of the 5 had antibody results below the test manufacturer's positivity threshold. We identified one incident case of SARS-CoV-2 infection out of 130 seronegative participants (0.8%, 95% CI 0.02–4.2%). Of 131 vaccinated participants (125 BNT162b2, 6 mRNA-1273), all 131 tested positive for anti-spike antibodies. We identified predictors of anti-spike antibody levels: time since vaccination, prior COVID-19 infection, age, and vaccine type. Each additional week since vaccination was associated with an 11.1% decrease in anti-spike antibody levels (95% CI 6.2–15.8%).

Conclusion: ED workers experienced a low incidence of SARS-CoV-2 infection and developed antibodies in response to vaccines and prior infection. Antibody levels decreased markedly with time since infection or vaccination.

Background and importance
Since the beginning of the severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2) pandemic, researchers have estimated the cumulative incidence of infection by measuring IgG antibodies to SARS-CoV-2. Based on studies of anti-SARS-CoV-2 seroprevalence, frontline healthcare workers have an increased risk of infection as compared to non-healthcare workers. Emergency department (ED) workers may be particularly vulnerable as they provide care for undifferentiated patients whose COVID-19 status is unknown and perform high-risk procedures such as nasopharyngeal swab testing, endotracheal intubation, and cardiopulmonary resuscitation. Despite initial studies suggesting high seroprevalence, it is unclear if ED healthcare workers were at increased risk for contracting COVID-19 through the pandemic. In addition to their use in estimating the incidence of COVID-19, anti-SARS-CoV-2 antibody assays have also been used in the development of COVID-19 vaccines. Phase 1 trials of the BNT162b2 (Pfizer-BioNTech) vaccine used receptor-binding domain (RBD) or S1 IgG assays to measure vaccine immunogenicity. In December 2020, ED healthcare workers were offered the BNT162b2 (Pfizer-BioNTech) and the mRNA-1273 (Moderna) mRNA vaccines, which were both found to have high vaccine efficacy (greater than 90%) in preventing symptomatic COVID-19 infection. In the summer of 2021, breakthrough COVID infections, confirmed by SARS-CoV-2 PCR testing, in those fully vaccinated have been increasingly reported, including among frontline healthcare workers.
Breakthrough infections in frontline healthcare workers have led to a reduced workforce within an already strained healthcare system. As variants (Delta and Omicron) have spread throughout the United States, concerns about reduced vaccine efficacy have arisen. Despite initial reports of durable protection, it now appears that immunity may wane.

Objectives
By measuring levels of IgG antibodies to the SARS-CoV-2 nucleocapsid and spike antigens during the COVID-19 pandemic in a cohort of ED workers, we sought to determine the cumulative incidence of infection and describe the longitudinal course of antibody levels in those who were infected. Also, as ED healthcare workers received COVID-19 vaccinations, we measured post-vaccination antibody levels to examine factors associated with immunogenicity, including time since vaccination, age, and prior COVID-19 infection. We hypothesized that time since vaccination is independently associated with reduced antibody levels.

Study design and setting
This was a prospective cohort study of ED healthcare workers conducted at a single medical center from September 1, 2020, to April 3, 2021. The University of California, San Francisco, is an academic medical center with EDs at separate adult and pediatric sites. These EDs are staffed by a team of ED healthcare workers including nurses, physicians, advanced practice practitioners, and patient care technicians. This study was performed in accordance with the STROBE guidelines and was approved by the local institutional review board.

Selection of participants
Participants were enrolled voluntarily as previously described. Briefly, we enrolled ED healthcare workers (attending physicians, resident physicians, nurses, nurse practitioners, physician assistants, patient care technicians, and pharmacists). We excluded healthcare workers who were pregnant, immunocompromised, or who were planning to move or unable to attend study visits. Participants were recruited by email and informed consent was obtained electronically.

Procedures and outcomes
The original intent of the study was to measure anti-nucleocapsid antibody levels to determine the incidence of COVID-19 infection among ED healthcare workers. Participants attended three study visits: baseline (September 2020), three months (December 2020), and six months (ending March 2021). At each study visit, participants were interviewed and underwent venipuncture. All blood specimens were tested for immunoglobulin G (IgG) antibodies to the SARS-CoV-2 nucleocapsid antigen with a chemiluminescent immunoassay (Abbott Architect SARS-CoV-2 IgG; Abbott Laboratories, Abbott Park, IL) to identify prior infection with SARS-CoV-2. At baseline and 3 months, specimens that were positive for the anti-nucleocapsid antibody were also tested with a different chemiluminescent immunoassay for anti-spike antibodies (Diasorin Liaison SARS-CoV-2 S1/S2 IgG; Diasorin Inc., Cypress, CA) to minimize false positives. We obtained both qualitative and quantitative results for all antibody assays. The manufacturer's threshold for the anti-nucleocapsid antibody test is 1.4 arbitrary units (AU), whereas the United Kingdom Medicines & Healthcare products Regulatory Agency (MHRA) published a lower threshold of 0.49 AU. By the 6-month visit, 138/139 participants had received a COVID mRNA vaccine, and we decided to measure anti-spike antibody levels to evaluate the immunogenicity of vaccines in ED healthcare workers.
Vaccination with either BNT162b2 (Pfizer-BioNTech) or mRNA-1273 (Moderna) stimulates an anti-spike antibody response but not an anti-nucleocapsid antibody response; thus, testing for anti-nucleocapsid antibodies still allows identification of past infection, even in vaccinated persons. To quantify antibody responses post-vaccine, we also measured IgG antibodies to the SARS-CoV-2 spike protein (Abbott AdviseDx SARS-CoV-2 IgG II). This assay for anti-spike antibodies provides a quantitative signal that correlates linearly with the antibody concentration.

Exposures of interest
The survey obtained at each visit included clinical variables about exposure to COVID-19 patients and risk factors for community-based exposure. We also surveyed participants regarding vaccination type and dates of vaccination.

Primary data analysis
Continuous participant characteristics were summarized in the demographics and results tables with means and standard deviations (SDs) or medians and interquartile ranges (IQR) as appropriate. Categorical variables were presented as frequencies and percentages. To study the pattern of quantitative anti-nucleocapsid antibody levels over time, we identified all participants who had an anti-nucleocapsid antibody level above the MHRA threshold at any of the 3 study visits and presented their sequential levels along with additional details. We also used the cross-sectional 6-month data to compare weeks since second vaccine dose and anti-spike antibody levels. For this analysis, we used linear regression with the natural logarithm of the anti-spike antibody level as the outcome and weeks since second vaccine dose, age in decades, prior COVID-19 infection, and vaccine type as independent variables (based on a review of the prior literature). We excluded participants who had not yet had their second vaccine dose by their 6-month visit. We calculated the percentage change in antibody level for a 1-unit increase in the predictor as (exp(β) − 1) × 100%, where β is the linear regression coefficient. All analyses were conducted with STATA MP (version 16). The sample size was initially calculated for the baseline study and was described previously.

Results
Our study enrolled 139 of 360 (38.6%) eligible ED healthcare workers starting September 1, 2020. All 139 participants provided baseline demographic data, survey answers, and venous blood specimens. Of the 139 participants, 90 (64.7%) were female and 88 (63.3%) were white, with a median age of 36 (IQR 27-61) years. Most of the participants were nurses, attending physicians, or resident physicians. 97% of participants at baseline and 98% at 3 and 6 months reported using N95 respirators for high-risk patients, and 92-94% reported contact with at least one COVID-positive patient at each timepoint, while 50% reported contact with more than 10 COVID-positive patients (Table 1). In total, five participants (3.6%) were lost to 6-month follow-up due to the following: administrative leave (n = 1), graduating residency and leaving the area (n = 1), no longer on staff (n = 2), and no response (n = 1).

Anti-nucleocapsid antibody results
Antibody results from the first study visit were reported previously. During the first study visit, we identified 4/139 (2.9%) who were positive for anti-nucleocapsid antibodies based on the manufacturer's threshold of 1.4 AU (Table 2, Participants A-D). One participant (Table 2, Participant E) had an elevated, albeit below-threshold, quantitative result of 1.19.
This participant had a pre-study, documented positive chemiluminescent antibody assay, positive PCR for SARS-CoV-2, and clinical symptoms consistent with COVID-19 disease. Thus, we considered this participant seropositive, resulting in a first round seroprevalence of 5/139 (3.6%). Of the five who were classified as seropositive at baseline, four had 6-month results that were below the manufacturer's positivity threshold (Fig. 1). Of the 130 initially seronegative participants with follow-up, only one tested positive for antinucleocapsid antibodies at study visits 2 and 3. This incident case was an ED nurse working clinically through the winter of 2020 during which the community prevalence of acute COVID-19 in San Francisco reached its peak. This participant tested positive at the 3-month visit and had previously had a positive PCR and a consistent clinical syndrome. (Table 2, Participant F). The cumulative incidence of SARS-CoV-2 infection was therefore 1/130 (0.8%, 95% CI 0.02-4.2%) over the 6 months from 10/2020 to 3/2021. Using the MHRA alternative (lower) threshold for anti-nucleocapsid positivity, we identified 10 participants with a positive test at any of the 3 study visits. (Table 2). Five participants (A-E) were positive at baseline due to SARS-CoV-2 infection, and one (F) was the incident case identified at the 3-month visit. The remaining four participants (W-Z) are listed as "indeterminate" in Table 2. Participants with confirmed infection had steep decreases in the quantitative antibody measurement after infection. But these 4 participants showed lower but persistently elevated measurements. (Fig. 1) One of the four (Participant W) had an above-threshold anti-nucleocapsid antibody measurement at 6 months after 2 near-threshold levels at baseline and 3 months, while the anti-spike antibody results were negative. This participant was able to obtain pre-COVID plasma for testing, and it had a similarly elevated anti-nucleocapsid antibody measurement. We suspect this individual's elevated antibodies were due to prior infection with an endemic coronavirus and cross-reacting antibodies. Anti-spike antibody results Of the 139 baseline participants, 138 were vaccinated, with 130 receiving the BNT162b2 (Pfizer-BioNTech) vaccine and 8 receiving the MRNA-1273 (Moderna) vaccine. We measured anti-spike antibody at 6 months in 134, but 3 had not yet received their second dose, leaving 131 fully vaccinated participants (125 BNT162b2, 6 MRNA-1273). 131/131 (100%, 95% CI 97.3 to 100%) vaccinated participants tested positive for anti-spike antibodies. The median anti-spike antibody level was 4938.6 AU, IQR 3098.2-8333.5 AU. (Fig. 2) We modelled the logarithm of the anti-spike antibody level as function of four predictors: time since vaccination, prior COVID-19 infection, age, and vaccine type. (Table 3) Each additional week since vaccination was associated with an 11.1% decrease in anti-spike antibody levels. (95% CI 6.2 to 15.8%) and each additional decade in age was associated with a 10.8% decrease. Prior infection was associated with 78% higher antibody levels on univariable analysis and almost 3 times the level on multivariable analysis. However, only 5 of the 131 participants included in this analysis had been infected. Discussion We conducted a prospective cohort study evaluating anti-SARS-CoV-2 antibodies in ED healthcare workers from September 2020 to April 2021, spanning a total of six months. 
Overall, we observed a low baseline seroprevalence and identified only one incident case during our study period. Also, we observed a pattern of decline in antinucleocapsid antibodies in those with confirmed COVID-19 infection such that 4 out of 5 participants classified as seropositive at baseline had below-threshold results at the end of the study period. Previous research on risk of COVID-19 infection in frontline providers has been mixed, with some studies finding substantially higher rates of infection relative to the surrounding community. One factor that may contribute to our study's low incidence is that nearly all participants (98%) reported routinely using N95 respirators. Another factor may be the low incidence of COVID-19 in the community served. Since the beginning of the pandemic, approximately 4.1% of our community has been infected, similar to our observed seroprevalence. Essentially all of our cohort was vaccinated during the study period and had positive anti-spike antibodies post-vaccination. However, we determined that increasing time since vaccination was significantly associated with lower antibody levels, which may have implications for immunity in healthcare workers. In a recent case-control study, reduced antibody levels were associated with breakthrough infections in those who were vaccinated. Khoury et al. reported that the decay in patients' neutralizing antibody titer over the first 250 days post vaccine led to significant loss of protection against infection but still protected patients from severe disease. This time frame is consistent with our regression results, which suggested a 6-16% decay in antibody levels per week post vaccination. Our results are consistent with other studies demonstrating healthcare workers with a previous COVID-19 infection had higher antibody titers compared to healthcare workers who were COVID-19 nave, and those finding that older age is associated with a less robust antibody response to vaccination. Our study is unique because we obtained both the anti-nucleocapsid antibody binary result (using manufacturer threshold) and the quantitative result. For those participants with a positive anti-nucleocapsid antibody or those vaccinated, we then obtained anti-spike antibody qualitative and quantitative results. Our dual antibody measurement strategy allowed us to differentiate prior COVID-19 infection (both positive) compared to vaccination (only anti-spike positive). We observed that, regardless of participant factors, including previous COVID-19 infection or vaccination, antibody levels decreased over time. The waning of antibody levels has important implication for population-based surveillance and research, including an understanding of the time interval required to assess whether participants have had previous infection. For example, if we only obtain binary results every 6 months, we will miss participants who were infected (e.g., Table 2, Participant E). Limitations Our study is subject to several limitations. We did not measure neutralizing antibodies or correlates of T-cell immunity. Instead, we used multiple commercial assays, which have been shown to predict neutralization activity against SARS-CoV-2. Also, previous research suggests that neutralizing antibody levels are highly predictive of immune protection against symptomatic infection. In addition, few study participants had evidence of COVID infection. 
Our study was conducted at a single center with a low community prevalence of COVID-19, and the low sero-incidence may not be generalizable to other ED workers serving higher prevalence communities. This highlights the need for future multicenter studies in broader populations. Conclusion In our study, even prior to vaccination, the risk of ED healthcare workers contracting SARS-CoV-2 was relatively low. We found that antibody levels decrease markedly over the 6-9 months after infection or vaccination and that the antibody response to vaccination appears to be inversely related to age. The clinical implications of these findings are not fully understood but suggest the need for vigilant surveillance of healthcare workers for evidence of waning immunity to infection and severe disease. Funding This study was funded by the University of California Office of the President. The funder did not have a role in study design, analysis, or reporting.
# Generated by Django 2.2.1 on 2019-05-31 15:24

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('work', '0005_auto_20190530_2019'),
    ]

    operations = [
        migrations.AlterField(
            model_name='projectspecs',
            name='preview',
            field=models.ImageField(default='default.png', upload_to='previews'),
        ),
    ]
<filename>src/main/java/com/applaudo/snacks/api/domain/Product.java package com.applaudo.snacks.api.domain; import java.math.BigDecimal; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.SequenceGenerator; import javax.persistence.Table; import javax.validation.constraints.DecimalMin; import javax.validation.constraints.Min; import javax.validation.constraints.NotNull; @Entity @Table(schema = "public", name = "product") public class Product { @Id @GeneratedValue(generator = "product_id_seq", strategy = GenerationType.AUTO) @SequenceGenerator(name = "product_id_seq", sequenceName = "public.product_id_seq", initialValue = 1, allocationSize = 1) @Column(name = "id") private Integer id; @NotNull @Column(name = "name") private String name; @NotNull @Column(name = "price") @DecimalMin(value = "0.0", inclusive = false) private BigDecimal price; @Min(0) @Column(name = "stock") private Integer stock = 0; @Min(0) @Column(name = "likes") private Integer likes = 0; /** * @return the id */ public Integer getId() { return id; } /** * @return the name */ public String getName() { return name; } /** * @return the price */ public BigDecimal getPrice() { return price; } /** * @return the stock */ public Integer getStock() { return stock; } /** * @return the likes */ public Integer getLikes() { return likes; } /** * @param id the id to set */ public void setId(Integer id) { this.id = id; } /** * @param name the name to set */ public void setName(String name) { this.name = name; } /** * @param price the price to set */ public void setPrice(BigDecimal price) { this.price = price; } /** * @param stock the stock to set */ public void setStock(Integer stock) { this.stock = stock; } /** * @param likes the likes to set */ public void setLikes(Integer likes) { this.likes = likes; } }
Eduard Hagenbach-Bischoff

Eduard Hagenbach-Bischoff (20 February 1833, in Basel – 23 December 1910, in Basel) was a Swiss physicist. The Hagenbach-Bischoff quota (a voting system) is named after him. The son of the theologian Karl Rudolf Hagenbach, he studied physics and mathematics in Basel (with Rudolf Merian), Berlin (with Heinrich Wilhelm Dove and Heinrich Gustav Magnus), Geneva, and Paris (with Jules Célestin Jamin), and obtained his Ph.D. in 1855 at Basel. He taught at the Gewerbeschule (vocational school) in Basel and was, after his habilitation, a professor of mathematics at the University of Basel for one year. From 1863 to 1906 he was a full professor of physics at Basel (successor of Gustav Heinrich Wiedemann). In 1874 he became director of the institute of physics at the newly founded “Bernoullianum” in Basel, and from 1874 to 1879 he was president of the Swiss Academy of Sciences.

Hagenbach-Bischoff was involved in the popularisation of science, and at the “Bernoullianum” he gave more than 100 popular talks, such as one in 1896 on the newly discovered X rays.
A Retrospective Cohort Analysis of the Impact of Osteoarthritis on Disability Leave, Workers' Compensation Claims, and Healthcare Payments

Objectives: Examine leave and wage replacements associated with short-term disability (STD) and workers' compensation (WC), and overall direct healthcare payments, among employees with osteoarthritis (OA) versus other chronically painful conditions, and quantify the impact of opioid use.

Methods: Analysis of employees with two or more STD or WC claims for OA or pre-specified chronically painful conditions (control) in the IBM MarketScan Research Databases (2014 to 2017).

Results: The OA cohort (n=144,355) had an estimated +1.2 STD days, +$152 STD payments, and +$1410 healthcare payments relative to the control cohort (n=392,639; P<0.001). WC days/payments were similar. Differences were partially driven by an association between opioid use, increased STD days/payments, and healthcare payments observed in pooled cohorts (P<0.001).

Conclusions: OA is associated with high STD days/payments and healthcare payments. Opioid use significantly contributes to these and this should be considered when choosing treatment.

The burden of OA has been consistently shown to increase with disease severity, including a strong association between severity and declining work productivity, increasing work absence, and increasing unemployment. Though their use is discouraged in treatment guidelines, opioids are one of the most common prescription medications provided to employees with OA in the U.S. Yet, opioids provide minimal improvements in pain and function for employees with OA and are associated with further increases in healthcare costs/utilization and lost wages. In the U.S., many employers provide short-term disability (STD) benefits for employees who are temporarily unable to work due to an illness, injury, pregnancy, or recovery from a medical procedure. 28,29 Most employers are also required to provide workers' compensation (WC) insurance that pays medical expenses and wage replacements to employees for injuries or illness that are caused by work-related activities. 30 These programs are beneficial to both employers and employees, as they provide employees with a guaranteed payment to cover the financial impact of injuries or illness and fulfill employers' obligation to compensate employees for lost time at work and healthcare costs. Currently, little is known about the relative impact of OA on these types of disability leave in the U.S. The impact of opioid use on these outcomes is also not well characterized. Specific objectives of this retrospective, observational cohort study were to compare STD and WC leave days/payments and direct healthcare payments between employees with OA versus other chronically painful conditions in a U.S. working adult population. The effect of opioid use on these specific outcomes was also assessed.

Data Source
This was a retrospective, non-interventional database analysis using anonymized patient-level claims data from the IBM MarketScan Research Databases (MarketScan Commercial Claims and Encounters [CCAE] and Health and Productivity Management [HPM] databases). Data are from a non-random sample of large employers' healthcare/disability insurance claims from employees geographically dispersed throughout the U.S. The CCAE database contains indicators for annual and monthly health benefits enrollment (including demographics, plan sponsor information, and health plan design attributes).
It also includes claims for inpatient, outpatient, and prescription pharmacy treatments. Treatment claims data include information on dates of services, one or more diagnoses (International Classification of Diseases and Related Health Problems, Ninth Revision [ICD-9] or Tenth Revision [ICD-10]), therapeutic class (for pharmacy claims), and payment details. The HPM database contains indicators of annual eligibility for, and claims for, STD and WC benefits. WC claims data include the primary diagnosis (ICD-9 or ICD-10) and date of injury or illness for which benefits were authorized, the number of lost workdays (if any), and the value of wage replacements and healthcare payments associated with the claim. STD claims data include the primary diagnosis (ICD-9 or ICD-10) and date of injury or illness for which benefits were authorized, the number of lost workdays, and the value of wage replacements.

Employee Sample
Unique individuals were identified in each database based on a common enrollee identification number. Individuals (aged 18 to 64 years) were considered for inclusion in the study based on their eligibility for benefits. They must have been the primary beneficiary (ie, the employee) and eligible for medical, pharmacy, STD, and WC benefits for all months from January 2014 through December 2017 (48 consecutive months).

Cohorts
Eligible employees were divided into two cohorts based on their treatment history for OA or other pre-specified painful conditions (ICD-9 and ICD-10 primary or secondary/additional diagnosis codes listed in Table 1). Employees were included in the OA cohort if they had two or more treatment claims with primary or supplemental diagnoses of OA. Employees were included in the other chronically painful conditions (control) cohort if they had two or more treatment claims with a diagnosis for a pre-specified painful condition at least 30 days apart. 31 Employees not meeting the criteria for either cohort were excluded. For the purposes of developing statistical weights and controlling for confounding characteristics (described below), we created an indicator variable for each of the conditions included in the control cohort. The index date for each employee was the first claim with an eligible diagnosis code. Each employee record was divided to provide a pre-index period (January 1, 2014 to index date) and a post-index observation period (index date to December 31, 2017).

Dependent Variables
Dependent variables were the cumulative lost workdays due to STD or WC (including 0 lost days for WC claims that only incurred medical or other payments), STD payments (wage replacement), WC payments (sum of wage replacements, medical, and other payments including legal fees and vocational rehabilitation), and healthcare payments (sum of inpatient, outpatient, and prescription drug claim payments) during the observation period. To ensure that we did not overestimate the influence of OA on STD outcomes (for example, after adjusting for the confounding influences of age and sex, we assumed no mechanism for OA to influence STD claims for conditions such as pregnancy or cancer), we only included STD claims with a diagnosis for OA, a different pre-specified painful condition, or for a condition found in the WC claims data (typically injuries and musculoskeletal conditions). This resulted in the exclusion of 64% of the STD claims, which accounted for 61% of STD lost workdays and 63% of STD payments.
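As a rough illustration of the cohort-assignment and index-date logic described above, the following schematic sketch shows one way the rules could be applied to a simplified claims table (this is not the study's code; the table layout, column names, and the placeholder diagnosis lists standing in for the Table 1 code sets are all assumptions):

import pandas as pd

# Hypothetical claims extract: one row per treatment claim.
claims = pd.DataFrame({
    "enrollee_id": [1, 1, 2, 2, 2],
    "service_date": pd.to_datetime(
        ["2015-03-01", "2015-07-15", "2014-06-01", "2014-06-20", "2016-01-05"]),
    "diagnosis": ["OA", "OA", "back_pain", "back_pain", "back_pain"],
})

OA_CODES = {"OA"}              # placeholder for the OA ICD code list in Table 1
CONTROL_CODES = {"back_pain"}  # placeholder for the pre-specified painful conditions


def assign_cohort(person):
    """Two or more OA claims -> OA cohort; otherwise two or more control-condition
    claims at least 30 days apart -> control cohort; otherwise excluded."""
    oa = person[person["diagnosis"].isin(OA_CODES)]
    if len(oa) >= 2:
        return "OA"
    ctrl = person[person["diagnosis"].isin(CONTROL_CODES)].sort_values("service_date")
    if len(ctrl) >= 2 and (ctrl["service_date"].iloc[-1] - ctrl["service_date"].iloc[0]).days >= 30:
        return "control"
    return "excluded"


cohorts = claims.groupby("enrollee_id").apply(assign_cohort)
# Index date: first claim with an eligible diagnosis (simplified here to the first claim),
# which splits each record into a pre-index and a post-index observation period.
index_dates = claims.groupby("enrollee_id")["service_date"].min()
print(cohorts.to_dict())
print(index_dates.to_dict())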
Prescription Medications Employees' prescribed use of acetaminophen, duloxetine, hyaluronic acid, tramadol, non-tramadol opioids, nonsteroidal antiinflammatory drugs (NSAIDs), and corticosteroids was identified by the U.S. Food and Drug Administration national drug code included with each prescription pharmacy claim. Employees with a prescription were coded as a 1 for that drug, and a 0 otherwise. For statistical weighting (described below), medications were assessed prior to the index period. For our final models, we included only medications prescribed for at least 3% of employees in both cohorts combined-opioids, NSAIDs, acetaminophen, and tramadol. Given our general interest in the relationship between opioid use, disability, and healthcare payments, we measured medication use at any time during the study period for use in our regression models. Health Plan Type We calculated an employee's health plan type based on the number of months prior to the observation period they were enrolled in a preferred provider organization, health maintenance organization, point-of-service, or high-deductible health plan. We then converted the number of months into a proportion of the preobservation period. While an employee could have been enrolled in more than one type of health plan, 88% were continuously enrolled in the same type of plan. Comorbidities We used diagnosis information (ICD-9 and ICD-10 codes) from inpatient and outpatient claims to create dichotomous variables indicating whether an employee received treatment for several comorbid conditions during the pre-observation period. These were obesity, diabetes mellitus, hypertension, hyperlipidemia, sleeping problems, anxiety, or depression (ICD codes listed in Table 1). Demographics Our models controlled for available employee and employer characteristics. Employer characteristics were limited to the industries of the plan sponsors included in the CCAE and HPM datasets that have both WC and STD data. Industries were durable goods manufacturing; non-durable goods manufacturing; transportation, communications and utilities; services; finance, insurance and real estate; and retail trade. Employee demographics were sex (male or female), age, and Census region (North Central, Northeast, South, and West). Indicators for whether an employee was unionized (yes or no) and whether they were salaried or paid hourly were also given. Statistical Method All analyses were conducted using Stata version 14.2 (StataCorp, TX). Statistical Weighting One challenge when conducting analyses of observational data is that ''assignment'' to either a treatment or control group is non-random and may be associated with the outcome of interest. 32 In this case, selection bias complicates the interpretation of effect sizes. We tried to address the risk of selection bias with inverse probability of treatment weighting (IPTW) to balance the measured characteristics across the OA and control cohorts. 33 Univariate Analysis We report the proportions of employees with a claim for WC, STD, or either type during the observation period in the supplemental material. We compared the differences across the cohorts using a test of independence from two-way contingency tables. We also report proportions for the most common diagnoses. Regression Analyses We conducted a series of multivariable regression models to estimate employees' outcomes during the observation period. 
Each outcome was estimated using a separate model that included an indicator of the cohort, indicators of prescription medication use, non-OA pain and other comorbid conditions, health plan information, industry, and employee demographics. Additionally, each model also controlled for employees' outcomes prior to the index period. For example, the model estimating WC lost workdays during the index period included a measure of WC lost workdays observed prior to the index period. For the WC and STD lost workdays and payments models, we treated the outcomes as over-dispersed count data and used a negative binomial estimator. Healthcare payments showed a strong positive skew and initial regression models produced non-normally distributed residuals. For these reasons, we transformed the outcome variable by the natural log and estimated using ordinary least squares regression. Because we were interested in whether use of prescription opioids influences WC and STD outcomes differently for the OA and control cohort, each model included an interaction term for the combination of cohort and opioids indicator variables. We report the overall OA cohort and opioids coefficients (to reflect the sample means of each group), and the linear combinations of the main and interaction action effects for the four sample populations represented by the interaction. We express the coefficients from the negative binomial estimators as incidence rate ratios (IRR; ie, the proportional difference in expected counts) for a one-unit change in the covariate and describe estimated counts for selected outcomes. RESULTS Based on the selection criteria, 1,023,712 employees were identified in the IBM MarketScan Research Databases as eligible for inclusion. Of these, 144,355 met the inclusion criteria in the OA cohort and 392,639 met the inclusion criteria for the control cohort (Fig. 1). Cohort Comparison Before Weighting Standardized differences between means describe the balance between cohorts. Maximum standardized differences of about 10% indicate a reasonable level of balance for a given covariate between cohorts. 32,34 Before weighting, the original OA and control cohorts were unbalanced on most of the covariates (Table 2). Importantly, the original OA cohort had higher pre-index use of prescription pain medications than the original control cohort, ranging from around 2.5times higher incidence of opioid (42% vs 17%) and acetaminophen (5% vs 2%) use to six-times higher incidence of tramadol (12% vs 2%) use during the observation period. Employees with OA were, on average, older and had higher rates of comorbidities, including many of the control cohort conditions. Cohort Comparison After Weighting IPTW improved the balance of the sample across the study covariates (Table 2). After IPTW, five diagnosis variables-indicators for abdominal pain, genitourinary pain, non-OA joint pain, back pain, and limb pain-had an absolute standardized difference of at least 10%. The log of pre-index medical treatment payments had a standardized difference of -20.7%, whereas the standardized difference of the untransformed variable was 0.9%. The change in the direction of the standardized difference after log transforming reflects a longer righthand skew within the OA cohort (skew 32.6) than in the control cohort (skew 22.6). The weighted mean index date across both cohorts occurred on March 29, 2015, suggesting an average observation period of about 33 months and a pre-index period of about 16 months. 
Of particular interest for the current study, after weighting, around one in five employees across both cohorts had at least one pre-index opioid (21%) or NSAID prescription (18%). The most common painful conditions for inclusion in the control group were non-OA joint pain, limb pain, and back pain. Taken together, 86% of the control cohort had at least one of these conditions, compared with 72% of the OA cohort. During the pre-index period, the weighted OA cohort had lower mean WC and STD days and WC and STD payments than the control cohort (Table 2). Healthcare payments for the OA cohort were skewed rightward, resulting in marginally higher mean payments. (Table 2 note: lower absolute standardized differences in means indicate greater balance between the cohorts for a given covariate; standardized differences no greater than 10% to 25% have been proposed as indicating acceptable balance.)

Univariate Analyses

Supplemental Table 1, http://links.lww.com/JOM/A996, shows the proportions of employees with any WC or STD claim during the observation period and with the most common diagnoses associated with each type of claim.

Regression Analyses

In the IPTW-weighted multiple regression models (Table 3), we were principally interested in coefficients for the OA cohort, use of opioids, and the interaction between these covariates. Given the OA cohort × opioids interaction included in the models, Table 4 reports the overall OA cohort and opioids coefficients and the linear combinations of the main and interaction effects for the four sample populations represented by the interaction.

Cohort Comparison

On average, employees in the OA cohort were estimated to have 12% fewer WC days and 16% lower WC payments than employees in the control cohort over the observation period (Table 4; payments were P < 0.05). Employees in both cohorts were estimated to have about 0.6 WC days and $184 to $219 in WC payments, with an overall average of $199 (±$25; Fig. 2A and B). Estimated incidence rates for STD days over the observation period were 90% higher in the OA cohort (Table 4; P < 0.001), while STD payments were about twice as high (Table 4; P < 0.001). The models estimated about 1.4 STD days and $160 in STD payments for the control cohort over the observation period, and 1.2 additional STD days and $152 in additional STD payments for the OA cohort (Fig. 2C and D).

Impact of Opioids

Opioid use was a significant predictor for all outcomes. Employees prescribed opioids had significantly higher estimated lost workdays and payments than employees without opioid prescriptions. On average, employees not prescribed opioids had about 0.4 WC days and about 0.8 STD days. Employees prescribed opioids had an additional 0.5 WC days and an additional 3.8 STD days (Fig. 2A and C). Employees prescribed opioids also had an additional $163 in WC payments, $520 in additional STD payments, and $12,239 in additional healthcare payments (Fig. 2B, D, and E). Since NSAIDs, acetaminophen, and tramadol were included as controls in the models, the strict opioid comparison was to employees not prescribed any of the most common pain management drugs. Wald chi-squared tests conducted after each model indicated that the coefficient for opioids was significantly higher (P ≤ 0.05 in each case) than the coefficients for the other drugs.
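The models summarized in Tables 3 and 4 were negative binomial regressions (for days and payments treated as counts) with an OA cohort × opioids interaction, fit on the IPTW-weighted sample in Stata. The sketch below continues the synthetic example and only approximates that structure: statsmodels' GLM with a fixed-dispersion negative binomial family stands in for nbreg, the weights are passed as frequency weights for simplicity, and all variable names remain invented. The last lines show how group-specific incidence rate ratios follow from exponentiating linear combinations of the main and interaction coefficients, and how a Wald-type test can compare the opioid coefficient with another drug's coefficient.

# Illustrative outcome model for STD days with a cohort-by-opioid interaction
# (continues the synthetic data frame from the earlier sketches).
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf

# Invented study-period medication flags and outcome for the illustration.
df["opioid"] = rng.integers(0, 2, n)
df["nsaid"] = rng.integers(0, 2, n)
df["acetaminophen"] = rng.integers(0, 2, n)
df["tramadol"] = rng.integers(0, 2, n)
df["std_days"] = rng.poisson(1.5, n)

formula = ("std_days ~ oa_cohort * opioid + nsaid + acetaminophen + tramadol"
           " + age + female + std_days_pre")
res = smf.glm(formula, data=df,
              family=sm.families.NegativeBinomial(alpha=1.0),  # dispersion fixed here; Stata's nbreg estimates it
              freq_weights=df["iptw"]).fit()

irr = np.exp(res.params)  # incidence rate ratios per one-unit change in each covariate

# Group-specific contrasts from linear combinations of main and interaction terms.
b = res.params
irr_oa_no_opioids = np.exp(b["oa_cohort"])                        # OA vs control among non-users
irr_oa_opioids = np.exp(b["oa_cohort"] + b["oa_cohort:opioid"])   # OA vs control among opioid users

# Wald-type comparison of the opioid coefficient against another drug's coefficient.
print(res.wald_test("opioid = tramadol"))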
Cohort and Opioids Interactions

Opioid use was a particularly strong predictor of higher STD days/payments and healthcare payments in the OA cohort (Table 4). Among employees not prescribed opioids, estimated incidence rates for STD days were about 43% higher in the OA cohort (P < 0.001). By comparison, the IRR was 150% higher among employees prescribed opioids. Comparable results for STD payments were of similar magnitudes. Among employees not prescribed opioids, estimated healthcare payments were about 4% lower for the OA cohort than for the control cohort. By comparison, among employees prescribed opioids, estimated healthcare payments were 24% higher in the OA cohort. This suggests that the positive and significant overall association between the OA cohort and healthcare payments is driven by employees prescribed opioids.

DISCUSSION

Findings from this retrospective, non-interventional database analysis demonstrate that U.S. employees with OA had an estimated 90% higher incidence of STD days, 96% higher STD payments, and 9% higher healthcare payments than a control cohort of employees with other chronically painful conditions. While WC lost workdays were generally uncommon, they were 12% fewer and associated with 16% lower WC payments among employees with OA. These data demonstrate the particular importance of OA as a cause of disability lost workdays, associated wage replacements, and healthcare payments in the context of other chronically painful conditions. A combined cohort analysis additionally showed all outcomes to be higher among employees who took opioids versus those who did not. The effect of opioid use was found to be a major driver of increased disability days and payments in employees with OA.

The negative association between OA, work productivity, and overall economic burden has been demonstrated in a number of studies, but usually in comparison with the general population of employees. 8,11,35-37 The absolute number of additional disability days and payments incurred by employees with OA varies considerably with methodology (region, population, data source, joints affected by OA, modeling, etc) and is not easily compared between studies. Using data from the 2009 U.S. National Health and Wellness Survey, DiBonaventura et al 12 showed rate ratios for absenteeism and presenteeism in employees with OA to range from 1.04 to 1.86 relative to those without OA, depending on disease severity. These findings are similar to our findings of around twice the risk of STD days and STD payments compared with employees with other chronically painful conditions. Our study showed a lower risk of WC days and payments (approximately 0.8 times the risk) among employees with OA versus other chronically painful conditions. However, we also found almost no WC claims for OA in either cohort.
Employees with chronic pain might be less likely to claim WC than employees in the general population due to the "healthy worker effect", whereby "unhealthy" employees (those with physical limitations) are less likely to take physically demanding jobs, and so are less likely to incur work-related injuries, and also find it easier to stay at work after an injury (eg, on light duties) because of the nature of their job and workplace. 38,39 This effect might be occurring more commonly in our cohort with other chronically painful conditions (mainly non-OA joint pain, back pain, or limb pain). It has been suggested that factors other than comorbidities, such as age, can have a considerable role to play in the costs associated with work-related injuries; however, age was controlled for in our models. 40 While the estimated mean annual number of disability days associated with OA may not be as high as for other chronic conditions, such as spinal injury or limb loss, the high prevalence of OA means that these lost days can have a large cumulative impact on employers. 6,41

We found that employees with OA incurred an extra $1410 (9%) in estimated healthcare payments (inpatient, outpatient, and prescription drug claims) compared with employees with other chronically painful conditions over a mean observation period of 33 months. This is in the context of the known high costs of treating chronic pain conditions, indicating that OA is associated with a particularly notable economic burden. 42 While the impact of OA versus the general employee population has been demonstrated, the relative impact versus other chronically painful conditions has not been well studied. 6,8,10,11,15,21,24,36,39,41,43 Prior to our study, we are aware only of Jetha et al, 44 who identified arthritis to be associated with the longest duration of disability (STD and long-term disability claims combined) when compared with seven other chronic conditions (diabetes, hypertension, coronary artery disease, depression, low back pain, chronic pulmonary disease, or cancer) in a large sample from a U.S. private insurance claims database.

Opioids continue to be prescribed to patients with OA despite the number of annual prescriptions declining in the U.S. as a result of efforts to address the opioid epidemic. 45 Treatment guidelines generally recommend against the use of opioids in patients with OA due to limited evidence of a positive impact on pain or function; however, treatment options are limited. 46 A major finding from our study was that, across cohorts, opioid use was associated with a significantly higher number of estimated disability days and payments of all types. The magnitude of the differences exceeded those between cohorts (2.4- and 2.2-times higher WC days and payments, respectively).

(Table 4 note: see Table 3 for the linear coefficients; results for WC and STD days and payments represent incidence rate ratios from negative binomial regression estimates, and results for healthcare payments are interpreted as the percentage change in the geometric mean.)

FIGURE 2. Estimated outcomes by cohort and opioid use (panels A to E). Values are means with 95% confidence intervals from the multivariate regression models; "opioids" indicates opioids prescribed during the study period; the control cohort comprises employees with a broad range of other (non-OA) chronically painful conditions.
The mechanisms by which opioid use is associated with negative outcomes in people with chronic pain (including pain due to OA) are likely multifactorial. Opioids have been shown to provide no additional benefit in pain-related function over nonopioid medications for people with back pain and OA and are associated with more adverse effects. 22 The known adverse effect profile and risk of addiction associated with opioids likely contribute to the negative work productivity outcomes observed in our analysis.

Notable strengths of our study include the large sample size and the long length of follow-up (mean observation of approximately 33 months), which exceeds that of most similar studies. The breadth of data captured is also a major strength. We believe that our comparison strategy (OA vs other chronically painful conditions) is unique, allowing the impact of OA on work productivity to be isolated in comparison to a large group of other chronically painful conditions.

Our study approach also has several limitations. Firstly, IBM MarketScan data are derived from a non-random sample of large employers' healthcare and disability insurance benefits and are not generalizable to the population of U.S. employees working for small or mid-sized employers. Secondly, our cohort does not include those with more severe and limiting chronic pain. We required continuous employment and benefit enrollment from 2014 to 2017, so those who subsequently left employment (and lost their insurance coverage) due to chronic pain are not included in our analysis. Thirdly, because data are based on treatment-seeking/benefit-claiming behavior, they may not be comprehensive with regard to health conditions that did not trigger treatment or a claim. We do not have records of reportable on-the-job injuries that did not result in claims for medical treatment or wage replacement. This may have resulted in an undercount of less serious or "near-miss" incidents where OA or opioids were contributing factors. Fourthly, supplemental sources of health condition data, such as self-reported health risk assessments, values obtained by lab tests, details of disease severity or duration, or data about patient characteristics such as socioeconomic status (eg, household income), race/ethnicity, and granular geographic location (eg, zip code or census block), are not available but may have associations with our outcomes. This could contribute to omitted variable bias. Additionally, WC and STD claims do not capture the full burden of illness, and lost workdays as a result of absenteeism or presenteeism (without an STD or WC claim) are not included. We are further aware that STD benefit offerings vary by state, insurer, industry, and company; however, these factors are not detailed in the source database.

A final but important limitation is related to the designation of other conditions qualifying for inclusion in the control cohort. This cohort was developed to allow the relative impact of OA to be measured and included a broad range of chronically painful conditions. These conditions may not be clinically or qualitatively similar to OA insofar as they increase the risks of WC or STD experience. The selections may also limit the ability to generate feasible propensity scores if the employees' underlying demographic profiles differ from employees with OA. It is notable that joint pain was the most common condition in both the control and OA cohorts.
In the control cohort, there is a possibility that some of the claims for chronic joint pain (and other conditions) represent undiagnosed OA. This would bias our findings towards the null, and therefore the true differences between the cohorts may be larger than stated.

CONCLUSION

In the context of increasing OA prevalence, our findings add to the growing evidence of a significant association between OA, opioid use, and increased work absence. OA was associated with a higher incidence of work productivity loss, and higher short-term disability and healthcare payments, than a comparator cohort including a broad selection of other chronically painful conditions. The use of opioids was a key driver in this finding. We highlight the importance of understanding and considering the therapies prescribed to employees with OA and chronic musculoskeletal pain so that their ability to work is supported.
package gui; import java.awt.Color; import java.awt.Font; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import javax.swing.JButton; import javax.swing.JLabel; import javax.swing.JOptionPane; import javax.swing.JPanel; import javax.swing.JPasswordField; import javax.swing.JScrollPane; import javax.swing.JTable; import javax.swing.JTextField; import javax.swing.ListSelectionModel; import javax.swing.table.DefaultTableModel; import LibExceptions.LibrarianNotExistException; import LibExceptions.addLibrarianExceptionGui; import LibExceptions.addUserLibraryExceptionGui; import Model.Librarian; import View.ViewModelHelper; public class PanelAdminAddLibrarians extends JPanel { private JTextField firstNameField; private JTextField lastNameField; private JTextField userIDField; private JPasswordField passWordField; private JPasswordField confirmPassWordField; private LibraryGui libraryGui; private JTable librariansTable; private DefaultTableModel librariansTableModel; private JButton btnBack; /** * Create the panel. */ public PanelAdminAddLibrarians(LibraryGui libraryGui) { this.libraryGui=libraryGui; setBounds(0, 0, 986, 694); setVisible(true); setBackground(new Color(192, 192, 192)); setLayout(null); JLabel lblLibrarians = new JLabel("Librarians"); lblLibrarians.setBounds(35, -1, 116, 46); lblLibrarians.setFont(new Font("Tahoma", Font.PLAIN, 18)); add(lblLibrarians); JLabel lblIDNew = new JLabel("User ID"); lblIDNew.setFont(new Font("Tahoma", Font.PLAIN, 14)); lblIDNew.setBounds(35, 112, 81, 40); add(lblIDNew); JLabel lblfirstName = new JLabel("First Name"); lblfirstName.setFont(new Font("Tahoma", Font.PLAIN, 14)); lblfirstName.setBounds(35, 72, 100, 40); add(lblfirstName); JLabel lbllastName = new JLabel("Last Name"); lbllastName.setFont(new Font("Tahoma", Font.PLAIN, 14)); lbllastName.setBounds(310, 72, 100, 40); add(lbllastName); JLabel lblpassWord = new JLabel("Password"); lblpassWord.setFont(new Font("Tahoma", Font.PLAIN, 14)); lblpassWord.setBounds(310, 112, 100, 40); add(lblpassWord); JLabel lblconfirmPassword = new JLabel("<PASSWORD> Password"); lblconfirmPassword.setFont(new Font("Tahoma", Font.PLAIN, 14)); lblconfirmPassword.setBounds(286, 152, 116, 40); add(lblconfirmPassword); //--Libririan's TextFields--- firstNameField = new JTextField(); firstNameField.setBounds(127, 82, 140, 20); firstNameField.setColumns(10); add(firstNameField); userIDField = new JTextField(); userIDField.setColumns(10); userIDField.setBounds(127, 122, 140, 20); add(userIDField); lastNameField = new JTextField(); lastNameField.setColumns(10); lastNameField.setBounds(405, 82, 140, 20); add(lastNameField); passWordField = new JPasswordField(); passWordField.setColumns(10); passWordField.setBounds(405, 122, 140, 20); add(passWordField); confirmPassWordField = new JPasswordField(); confirmPassWordField.setColumns(10); confirmPassWordField.setBounds(405, 162, 140, 20); add(confirmPassWordField); String[] headersNames = new String[] { "First name" , "Last name" ,"ID" }; librariansTableModel = new DefaultTableModel(){ public boolean isCellEditable(int rowIndex, int mColIndex) { return false; } }; librariansTable = new JTable(librariansTableModel); librariansTable.setLocation(74, 383); librariansTable.setEnabled(true); librariansTable.setSelectionMode(ListSelectionModel.SINGLE_SELECTION); librariansTable.setRowSelectionAllowed(true); librariansTableModel.setColumnIdentifiers(headersNames); librariansTable.setSize(700, 100); librariansTable.setVisible(true); JScrollPane pane; pane= new 
JScrollPane(librariansTable); pane.setBounds(35, 246, 652, 342); pane.setEnabled(false); add(pane); JButton librarianAddNewUser = new JButton("Add Librarian"); librarianAddNewUser.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { Librarian l=null; try { //check whether any required field was left empty String ErrMsg=""; if (firstNameField.getText().isEmpty()) ErrMsg="\nFirst Name"; if (lastNameField.getText().isEmpty()) ErrMsg+="\nLast Name"; if (userIDField.getText().isEmpty()) ErrMsg+="\nID"; if (passWordField.getText().isEmpty()) ErrMsg+="\nPassword"; if (confirmPassWordField.getText().isEmpty()) ErrMsg+="\nConfirm Password"; if (!ErrMsg.isEmpty()) throw new addUserLibraryExceptionGui("Missing or invalid data in the following field(s): " + ErrMsg); //check that passWordField matches confirmPassWordField if (!passWordField.getText().equals(confirmPassWordField.getText())) throw new addUserLibraryExceptionGui("Passwords do not match.\nPlease enter the same password in both fields"); //validate that the librarian's ID is not "Admin" in any form and is not already in the Library data if(libraryGui.getLibrarySys().getUserByID(userIDField.getText())!=null || userIDField.getText().toLowerCase().equals("admin")) throw new addUserLibraryExceptionGui("User with the same ID already exists"); l= new Librarian(firstNameField.getText(), lastNameField.getText(), userIDField.getText(), passWordField.getText()); libraryGui.getLibrarySys().addLibrarian(l); //throws addLibrarianExceptionGui.java libraryGui.DataWasChanged(); ClearFields(); Reloadelibrarians(); //JOptionPane confirming that the librarian was added successfully JOptionPane.showConfirmDialog(null, "Librarian " + l.toString()+" was added successfully", "Add librarian" ,JOptionPane.CLOSED_OPTION, JOptionPane.INFORMATION_MESSAGE); }catch (addUserLibraryExceptionGui exception1) { return; }catch(addLibrarianExceptionGui exception2){ return; } } }); librarianAddNewUser.setBounds(552, 212, 135, 23); add(librarianAddNewUser); JButton btRemoveReader = new JButton("Remove Librarian"); btRemoveReader.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { Librarian l=null; if (librariansTable.getSelectedRow()<0) { JOptionPane.showConfirmDialog(null, "Please select a librarian to remove" , "Remove selected librarian" ,JOptionPane.CLOSED_OPTION, JOptionPane.INFORMATION_MESSAGE); return; } try { String firstName= librariansTable.getValueAt(librariansTable.getSelectedRow(), 0).toString(); String lastName= librariansTable.getValueAt(librariansTable.getSelectedRow(), 1).toString(); String ID= librariansTable.getValueAt(librariansTable.getSelectedRow(), 2).toString(); if(firstName==null || lastName==null || ID==null) return; int input = JOptionPane.showConfirmDialog(null, "Do you want to remove " + firstName +" " + lastName +"?"
, "Remove Selected librarian",JOptionPane.YES_NO_OPTION, JOptionPane.QUESTION_MESSAGE); if (input == JOptionPane.NO_OPTION) return; l= new Librarian(firstName, lastName, ID, ""); libraryGui.getLibrarySys().removeLibrarian(l); //throws LibrarianNotExistException libraryGui.DataWasChanged(); Reloadelibrarians(); //JOptionPane that tell if librarian was removed successfully JOptionPane.showConfirmDialog(null, "Librarian " + l.toString()+" was removed successfully", "Remove librarian" ,JOptionPane.CLOSED_OPTION, JOptionPane.INFORMATION_MESSAGE); } catch (LibrarianNotExistException exception) { return; } } }); btRemoveReader.setBounds(35, 212, 135, 23); add(btRemoveReader); Reloadelibrarians(); //Return to the main panel of the user btnBack = new JButton("Back"); btnBack.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent arg0) { libraryGui.Home(); } }); btnBack.setBounds(845, 11, 89, 23); add(btnBack); } //Initialize all the components when the user enter to the panel public void InitPanel() { ClearFields(); } //Clear All the Fields in the panel private void ClearFields() { firstNameField.setText(""); lastNameField.setText(""); userIDField.setText(""); passWordField.setText(""); confirmPassWordField.setText(""); } //reload librarians from Library class to the table private void Reloadelibrarians() { ClearModelAndSetHeaders(new String[] {"First name" , "Last name" ,"ID"}); int i=0; for(Librarian l: libraryGui.getLibrarySys().getLibrarians()) { Object[] CurrentUser = new Object[] { l.getFirstName(), l.getLastName(), l.getId() }; librariansTableModel.insertRow(i++, CurrentUser); } ViewModelHelper.SortTableOn2Columns(librariansTable, 0, 1); } //Clear old rows and Headers from the table and give new Headers to the table private void ClearModelAndSetHeaders(String[] Headers) { while (librariansTableModel.getRowCount() > 0) { librariansTableModel.removeRow(0); } librariansTableModel.setColumnIdentifiers(Headers); librariansTable.getTableHeader().setReorderingAllowed(false);//for not allow to move columns by the headers } }
// Copyright 2017 <NAME>. All rights reserved. // Use of this source code is governed by the MIT license that can be found in // the LICENSE file. // //go:build static && php8 // +build static,php8 package gophp // #cgo LDFLAGS: -ldl -lm -lcurl -lpcre -lssl -lcrypto -lresolv -ledit -lz -lxml2 import "C"
package com.testfabrik.webmate.javasdk.testmgmt; import org.joda.time.DateTime; import java.util.Objects; /** * Information about a Test. */ public class TestInfo { private TestTemplateId id; private String name; private DateTime creationTime; private String description; private int version; private TestInfo() {} public TestInfo(TestTemplateId id, String name, DateTime creationTime, String description, int version) { this.id = id; this.name = name; this.creationTime = creationTime; this.description = description; this.version = version; } /** * Id of Test. * @return id of Test */ public TestTemplateId getId() { return id; } /** * Name of Test. * @return name of Test */ public String getName() { return name; } /** * Time when the Test was created. * @return creation time */ public DateTime getCreationTime() { return creationTime; } /** * Human readable description of Test. * @return human readable description */ public String getDescription() { return description; } /** * Version of Test document. * @return version of test. */ public int getVersion() { return version; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TestInfo testInfo = (TestInfo) o; return version == testInfo.version && id.equals(testInfo.id) && name.equals(testInfo.name) && creationTime.equals(testInfo.creationTime) && description.equals(testInfo.description); } @Override public int hashCode() { return Objects.hash(id, name, creationTime, description, version); } @Override public String toString() { return "TestInfo{" + "id=" + id + ", name='" + name + '\'' + ", creationTime=" + creationTime + ", description='" + description + '\'' + ", version=" + version + '}'; } }
The IT&T sector is the least proactive in attracting and retaining mature-aged workers, despite the nation's rapidly ageing workforce and growing skills shortage, according to the latest Hudson report survey.

The survey of 8345 employers across Australia shows that only 32.3 percent of employers in the IT sector and 23.8 percent in the telecomms sector are actively seeking to attract and retain mature-aged workers. The IT industry is the least inclined of all sectors to tap this pool of talent. In other industries, 46.9 percent of financial and insurance firms, 45.1 percent of professional services, and 44.3 percent of government all make recruiting mature-aged workers a priority. The other sectors surveyed were wholesale and distribution; construction, engineering and property; and advertising, marketing and media.

Martin Retschko, director of IT&T at Hudson, said IT&T employers should take serious note of these findings or risk losing competitive advantage. He said the industry is very short-term in its thinking and focused on skills that are immediately in demand. "There is an ageing workforce that has a wealth of knowledge and experience, but their skills may not be the ones that are immediately in demand," he said. "Some of these workers are being left behind because of a lack of training and development and a lack of thinking ahead about where the skills of tomorrow might be."

Retschko suggests employers should start looking at their recruitment forecasts alongside the projects they need to deliver over the next six to 12 months, and look at how they can use the resources they already have on board. Retaining mature-aged staff is important, Retschko says, not only to keep the corporate knowledge, but also to retain mentors and coaches in the workplace, which adds value to newer staff members. Flexible working options such as teleworking, job sharing and part-time work are the main ways to retain the older generation of workers, Retschko said. "There is still a fair bit of churn within the IT&T sector and these initiatives would slow that down," he said.

ACS vice president Catherine Jaktman said older workers should be viewed as valuable assets to any organization, adding it is critical that the industry embraces the opportunities to retain skilled workers. "Older IT workers provide the experience of working on challenging IT projects. They also know how to build successful teams of ICT people to deliver to a client," she said. "The IT&T industry is always going through changes with the advancement and take-up of new technology. Employers often look to new graduates as being more creative, often forgetting that it is the older workers who have experience with implementing technology changes."

Jaktman suggests employers should offer more flexible working conditions to mature workers, allowing a work-life balance. "Mobile phone and broadband developments in particular mean working from home has never been easier. An increase in the use of teleworking will allow people currently inhibited from participating in the workforce due to family responsibilities, age or disabilities, to offer their skills to the Australian economy."
package de.switajski.priebes.flexibleorders.web; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStream; import java.net.MalformedURLException; import java.util.Map; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.springframework.stereotype.Component; import org.springframework.web.servlet.view.AbstractView; import com.itextpdf.text.Document; import com.itextpdf.text.DocumentException; import com.itextpdf.text.pdf.PdfWriter; import de.switajski.priebes.flexibleorders.itextpdf.PdfConfiguration; import de.switajski.priebes.flexibleorders.itextpdf.PdfDocumentAppender; import de.switajski.priebes.flexibleorders.itextpdf.PdfDocumentAppenderFactory; import de.switajski.priebes.flexibleorders.itextpdf.PdfTemplateFactory; import de.switajski.priebes.flexibleorders.itextpdf.PdfUtils; import de.switajski.priebes.flexibleorders.itextpdf.dto.ReportInPdf; @Component public class PdfView extends AbstractView { PdfConfiguration config; public PdfView() { setContentType("application/pdf"); config = new PdfConfiguration(); } @Override protected final void renderMergedOutputModel( Map<String, Object> model, HttpServletRequest request, HttpServletResponse response) throws Exception { ReportInPdf report = (ReportInPdf) model.get(ReportInPdf.class.getSimpleName()); // IE workaround: write into byte array first. ByteArrayOutputStream baos = createTemporaryOutputStream(); // Apply preferences and build metadata. Document document = new PdfUtils().newDocument(); PdfWriter writer = newWriter(document, baos); prepareWriter(report, writer, request); buildPdfMetadata(model, document, request); PdfDocumentAppender appender = new PdfDocumentAppenderFactory(writer).create(report); // Build PDF document. document.open(); appender.append(document); document.close(); // Flush to HTTP response. writeToResponse(response, baos); } protected void buildPdfMetadata( Map<String, Object> model, Document document, HttpServletRequest request) {} protected PdfWriter newWriter(Document document, OutputStream os) throws DocumentException { return PdfWriter.getInstance(document, os); } protected void prepareWriter( ReportInPdf report, PdfWriter writer, HttpServletRequest request) throws DocumentException, MalformedURLException, IOException { writer.setViewerPreferences(getViewerPreferences()); writer.setPageEvent(new PdfTemplateFactory(config.logo()).create(report)); } protected int getViewerPreferences() { return PdfWriter.ALLOW_PRINTING | PdfWriter.PageLayoutSinglePage; } }
<gh_stars>100-1000 package sharding import ( "context" "errors" "sync" "sync/atomic" "time" "github.com/fagongzi/log" "github.com/fagongzi/util/json" "github.com/fagongzi/util/task" "github.com/infinivision/prophet" "seata.io/server/pkg/meta" "seata.io/server/pkg/transport" ) // Store is a container of fragments, which maintains a set of fragments type Store interface { // Meta returns the current store's metadata Meta() meta.StoreMeta // Cfg returns the configuration Cfg() Cfg // Start start all fragments managed by the store Start() // FragmentsState returns the state of the fragments FragmentsState() FragmentsState // GetStoreAddr returns the store address GetStoreAddr(storeID uint64) (string, error) // LeaderPeer returns the fragment's leader peer LeaderPeer(fid uint64) (prophet.Peer, error) // CreateFragment create a new fragment and save it to the local data CreateFragment() meta.Fragment // MustPutFragment update the store's fragment metadata MustPutFragment(frag meta.Fragment) // GetFragment returns a fragment replicatation from the store, // when `leader` is true, only return the leader replicatation GetFragment(fid uint64, leader bool) *PeerReplicate // ForeachReplicate do something on every `replicatations`, break if the funcation return false ForeachReplicate(func(*PeerReplicate) bool) // AddReplicate add a replicatation AddReplicate(*PeerReplicate) // AddPeer add a peer to the exist fragment AddPeer(fid uint64, peer prophet.Peer) // RemovePeer remove the peer from the exist fragment RemovePeer(uint64, prophet.Peer) // Transport returns the seata message transport Transport() transport.Transport // ShardingTransport returns the sharding message transport ShardingTransport() Transport // HandleShardingMsg handle the sharding message, maybe returns a response. 
HandleShardingMsg(data interface{}) interface{} // AddRM add a resource manager AddRM(rms meta.ResourceManagerSet) // RenewRMLease renew the resource manager's lease RenewRMLease(pid, sid string) } type store struct { sync.RWMutex cfg Cfg meta meta.StoreMeta replicates *sync.Map pd prophet.Prophet bootOnce *sync.Once pdStartedC chan struct{} runner *task.Runner storage storage trans Transport seataTrans transport.Transport resources map[string][]meta.ResourceManager ops uint64 } // NewStore returns store with cfg func NewStore(cfg Cfg) Store { s := new(store) s.cfg = cfg s.meta = meta.StoreMeta{ Addr: cfg.ShardingAddr, ClientAddr: cfg.Addr, Labels: cfg.Labels, } s.resources = make(map[string][]meta.ResourceManager) s.replicates = &sync.Map{} s.bootOnce = &sync.Once{} s.runner = task.NewRunner() if s.cfg.storeID != 0 { s.meta.ID = s.cfg.storeID } if s.cfg.storage != nil { s.storage = s.cfg.storage } if cfg.shardingTrans != nil { s.trans = cfg.shardingTrans } else { s.trans = newShardingTransport(s) } if cfg.seataTrans != nil { s.seataTrans = s.cfg.seataTrans } else { s.seataTrans = transport.NewTransport(cfg.TransWorkerCount, s.rmAddrDetecter, cfg.TransSendCB) } return s } func (s *store) Meta() meta.StoreMeta { return s.meta } func (s *store) Cfg() Cfg { return s.cfg } func (s *store) LeaderPeer(fid uint64) (prophet.Peer, error) { pr := s.GetFragment(fid, false) if pr == nil { return prophet.Peer{}, nil } leader, err := pr.tc.CurrentLeader() if err != nil { return prophet.Peer{}, err } var storeID uint64 for _, p := range pr.frag.Peers { if p.ID == leader { storeID = p.ContainerID break } } return prophet.Peer{ID: leader, ContainerID: storeID}, nil } func (s *store) GetStoreAddr(storeID uint64) (string, error) { c, err := s.pd.GetStore().GetContainer(storeID) if err != nil { return "", err } return c.(*ContainerAdapter).meta.Addr, nil } func (s *store) ForeachReplicate(fn func(*PeerReplicate) bool) { s.replicates.Range(func(key, value interface{}) bool { return fn(value.(*PeerReplicate)) }) } func (s *store) Transport() transport.Transport { return s.seataTrans } func (s *store) ShardingTransport() Transport { return s.trans } func (s *store) Start() { s.startProphet() log.Infof("begin to start store %d", s.meta.ID) s.trans.Start() log.Infof("peer transport start at %s", s.cfg.ShardingAddr) s.seataTrans.Start() log.Infof("seata transport start") s.startFragments() log.Infof("fragments started") _, err := s.runner.RunCancelableTask(s.runGCRMTask) if err != nil { log.Fatalf("run gc task failed with %+v", err) } _, err = s.runner.RunCancelableTask(s.runManualTask) if err != nil { log.Fatalf("run manual task failed with %+v", err) } _, err = s.runner.RunCancelableTask(s.runHBTask) if err != nil { log.Fatalf("run hb task failed with %+v", err) } _, err = s.runner.RunCancelableTask(s.runCheckConcurrencyTask) if err != nil { log.Fatalf("run check concurrency task failed with %+v", err) } for i := 0; i < s.cfg.PRWorkerCount; i++ { idx := uint64(i) _, err = s.runner.RunCancelableTask(func(ctx context.Context) { s.runPRTask(ctx, idx) }) if err != nil { log.Fatalf("run pr event loop task failed with %+v", err) } } } func (s *store) startFragments() error { err := s.storage.loadFragments(func(value []byte) (uint64, error) { frag := meta.Fragment{} json.MustUnmarshal(&frag, value) pr, err := createPeerReplicate(s, frag) if err != nil { return 0, err } s.AddReplicate(pr) return frag.ID, nil }) if err != nil { log.Fatalf("load fragments failed with %+v", err) } return nil } func (s *store) 
AddReplicate(pr *PeerReplicate) { pr.workerID = uint64(s.cfg.PRWorkerCount-1) & pr.id s.replicates.Store(pr.id, pr) } func (s *store) doRemovePR(id uint64) { s.replicates.Delete(id) } func (s *store) GetFragment(id uint64, leader bool) *PeerReplicate { if pr, ok := s.replicates.Load(id); ok { p := pr.(*PeerReplicate) if !leader || (leader && p.isLeader()) { return p } return nil } return nil } func (s *store) AddPeer(id uint64, peer prophet.Peer) { pr := s.GetFragment(id, true) if nil == pr { return } pr.Lock() defer pr.Unlock() pr.addPeer(peer) s.MustPutFragment(pr.frag) log.Infof("%s new peer %+v added", pr.tag, peer) } func (s *store) RemovePeer(id uint64, peer prophet.Peer) { pr := s.GetFragment(id, true) if nil == pr { return } pr.Lock() defer pr.Unlock() pr.removePeer(peer) s.MustPutFragment(pr.frag) s.trans.Send(peer.ContainerID, &meta.RemoveMsg{ ID: id, }) log.Infof("%s peer %+v removed", pr.tag, peer) } func (s *store) MustPutFragment(frag meta.Fragment) { err := s.storage.putFragment(frag) if err != nil { log.Fatalf("save frag %+v failed with %+v", frag, err) } } func (s *store) mustRemoveFragment(fid uint64) { err := s.storage.removeFragment(fid) if err != nil { log.Fatalf("remove frag %d failed with %+v", fid, err) } } func (s *store) AddRM(rms meta.ResourceManagerSet) { s.Lock() now := time.Now() for _, rm := range rms.ResourceManagers { rm.LastHB = now values := s.resources[rm.Resource] values = append(values, rm) s.resources[rm.Resource] = values log.Infof("%s added", rm.Tag()) } s.Unlock() } func (s *store) RenewRMLease(pid, sid string) { for _, rms := range s.resources { for idx, rm := range rms { if rm.RMSID == sid { rms[idx].ProxySID = pid rms[idx].LastHB = time.Now() } } } } func (s *store) rmAddrDetecter(fid uint64, resource string) (meta.ResourceManager, error) { s.RLock() rms, ok := s.resources[resource] if !ok { s.RUnlock() return meta.ResourceManager{}, errors.New("no available RM, not registered") } all := len(rms) log.Debugf("resource %s take available resource manager from %d resources", resource, all) if all == 0 { s.RUnlock() return meta.ResourceManager{}, errors.New("no available RM, rms == 0") } now := time.Now() c := 0 for { rm := rms[int(atomic.AddUint64(&s.ops, 1))%all] if now.Sub(rm.LastHB) <= s.cfg.RMLease { s.RUnlock() return rm, nil } c++ log.Debugf("resource %s take available resource manager of %s was not available", resource, rm.Tag()) if c >= all { break } } s.RUnlock() return meta.ResourceManager{}, errors.New("has no available RM") } // just for test type emptyStore struct { } func (s *emptyStore) Meta() meta.StoreMeta { return meta.StoreMeta{} } func (s *emptyStore) Cfg() Cfg { return Cfg{} } func (s *emptyStore) Start() {} func (s *emptyStore) FragmentsState() FragmentsState { return FragmentsState{} } func (s *emptyStore) GetStoreAddr(storeID uint64) (string, error) { return "", nil } func (s *emptyStore) LeaderPeer(fid uint64) (prophet.Peer, error) { return prophet.Peer{}, nil } func (s *emptyStore) CreateFragment() meta.Fragment { return meta.Fragment{} } func (s *emptyStore) MustPutFragment(meta.Fragment) {} func (s *emptyStore) GetFragment(fid uint64, leader bool) *PeerReplicate { return nil } func (s *emptyStore) ForeachReplicate(func(*PeerReplicate) bool) {} func (s *emptyStore) AddReplicate(*PeerReplicate) {} func (s *emptyStore) AddPeer(fid uint64, peer prophet.Peer) {} func (s *emptyStore) RemovePeer(uint64, prophet.Peer) {} func (s *emptyStore) Transport() transport.Transport { return nil } func (s *emptyStore) ShardingTransport() 
Transport { return nil } func (s *emptyStore) HandleShardingMsg(data interface{}) interface{} { return nil } func (s *emptyStore) AddRM(rms meta.ResourceManagerSet) {} func (s *emptyStore) RenewRMLease(pid, sid string) {}
package github.morgancentral99.central; public class Main { }
Update: The Silver Alert has been canceled around 2:30 p.m. Wednesday after the man was located. ALTUS, Okla. – A Silver Alert has been issued for a missing 82-year-old Oklahoma man. Robert Reid was last seen around 5 a.m. on Wednesday in the 1400 block of Ridgecrest in Altus. Reid left Altus to pick up someone in Hollis, but he never arrived. His vehicle is a white 2010 Dodge Caravan with Oklahoma tag E77894. If you know of Reid’s whereabouts, contact authorities.
The number was untraceable - almost. On Election Night in 2010, The Baltimore Sun's switchboard lit up with reports of a suspicious "robocall": it told voters to relax, that President Obama and Gov.Martin O'Malley had been successful, and that there was nothing left to do but wait for the results. Those who called us said they believed the call was a trick to keep Democratic voters home, and one person provided the number from their caller ID. Naturally, The Sun wanted to know who was behind the call; Democrats including Mayor Stephanie Rawlings-Blake immediately had tried to pin it on former Gov. Robert L. Ehrlich Jr.'s campaign, but his camp vehemently denied it and the Republican party was calling for an investigation. Some speculated that it was actually the Democrats, trying to make voters think Republicans were attempting to suppress their vote. The Sun was eventually able to pinpoint the election consultants behind it; the state prosecutor's office later filed criminal charges. Julius Henson is on trial this week, and Paul Schurick, campaign manager for Ehrlich, was convicted of four charges in the matter last year. Here's how the story came together: The first attempts to track the number, which had a DC area code, were not immediately successful; when it was dialed, the caller received a dial tone, and database and search engines turned up nothing. A reporter even paid a few bucks for two online services that claimed to be able to trace any number, but struck out there as well. [Side note: Be wary of such services!] When a reporter searched Google for web sites containing the number, there were a few hits - in the form of complaints on various web sites about past unsolicited election robocalls. The complaints came from around the country, and seemed to indicate the calls came from Democratic candidates. People reported unsolicited calls from the number regarding school board elections in Tallahassee, Fla., a candidate for mayor in Gadsden, Ala., the Democratic incumbent governor in Tennessee, a Democratic candidate for the New York state senate, and even someone purporting to be poet Maya Angelou. Another said they heard a voice that said he was a "lifelong Republican," but didn't elaborate on what the message was, and another said the call they received was about a "person hosting a town meeting for Democrats." The issue then was finding one of the candidates who had facilitated a call using this service. If they could say, or if The Sun could discern through campaign finance records, who they had paid for their robodials, a reporter could identify callers and press them further. Finally, a breakthrough came in a nonpartisan mayoral candidate from Gadsden, Ala., who referred The Sun to his campaign manager, a consultant from Birmingham who worked with Democrats. He said he had used a service called Robodial.org, because of its low rates. In addition, though the New York state senate candidate didn't immediately return my call, she had an expenditure for Robodial.org in her campaign finance reports. The theory about a reverse-psychology move by Democrats was gaining steam. In addition to the Democratic consultant from Birmingham and the Democratic New York candidate, Robodial.org's website explicitly said the company worked only with progressive and Democratic causes and candidates, and would not work with Republicans. 
The Sun sent an e-mail to Mark Hampton, the owner of Robodial.org, and told him that while he wanted to help Democrats exclusively, his service appeared to have been used to try to keep Democrats home. He was disturbed, and quickly got back to The Sun after checking his records. He said the calls - more than 50,000 of them, he said at the time - were paid for by a woman named Rhonda Russell, who was previously political director for the liberal group Progressive Maryland. He also said she had been a longtime customer. Were the Democrats behind the call after all? "The consultant who set up that call has been using our system for a couple of years, and in the past we understood that her calls were in support of Democratic candidates," Hampton told The Sun in an e-mail. "Apparently something has changed." But she had listed herself for purposes of the call as an employee of a firm called Universal Elections, and a reporter immediately went to the campaign finance database to see who had given money to that firm. It was Ehrlich's campaign that had directed more than $97,000 to two companies affiliated with Henson – Politics Today and Universal Elections. The Sun couldn't reach Henson immediately, but posted a story online. And Henson later in the day held an impromptu news conference outside his home in which he admitted that he was behind the calls. He said the intent was to motivate Republican voters, but records showed the calls went out to Democratic households. Soon after, he would be criminally charged, along with Ehrlich aide Schurick.
<reponame>lastweek/source-freebsd<filename>src/sys/dev/mmc/mmc_subr.c /*- * Copyright (c) 2006 <NAME>. All rights reserved. * Copyright (c) 2006 <NAME> <<EMAIL>> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Portions of this software may have been developed with reference to * the SD Simplified Specification. The following disclaimer may apply: * * The following conditions apply to the release of the simplified * specification ("Simplified Specification") by the SD Card Association and * the SD Group. The Simplified Specification is a subset of the complete SD * Specification which is owned by the SD Card Association and the SD * Group. This Simplified Specification is provided on a non-confidential * basis subject to the disclaimers below. Any implementation of the * Simplified Specification may require a license from the SD Card * Association, SD Group, SD-3C LLC or other third parties. * * Disclaimers: * * The information contained in the Simplified Specification is presented only * as a standard specification for SD Cards and SD Host/Ancillary products and * is provided "AS-IS" without any representations or warranties of any * kind. No responsibility is assumed by the SD Group, SD-3C LLC or the SD * Card Association for any damages, any infringements of patents or other * right of the SD Group, SD-3C LLC, the SD Card Association or any third * parties, which may result from its use. No license is granted by * implication, estoppel or otherwise under any patent or other rights of the * SD Group, SD-3C LLC, the SD Card Association or any third party. Nothing * herein shall be construed as an obligation by the SD Group, the SD-3C LLC * or the SD Card Association to disclose or distribute any technical * information, know-how or other confidential information to any third party. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <sys/param.h> #include <sys/systm.h> #include <sys/kernel.h> #include <sys/lock.h> #include <sys/mutex.h> #include <sys/time.h> #include <dev/mmc/bridge.h> #include <dev/mmc/mmc_private.h> #include <dev/mmc/mmc_subr.h> #include <dev/mmc/mmcreg.h> #include <dev/mmc/mmcbrvar.h> #include "mmcbus_if.h" #define CMD_RETRIES 3 #define LOG_PPS 5 /* Log no more than 5 errors per second. 
*/ int mmc_wait_for_cmd(device_t busdev, device_t dev, struct mmc_command *cmd, int retries) { struct mmc_request mreq; struct mmc_softc *sc; int err; do { memset(&mreq, 0, sizeof(mreq)); memset(cmd->resp, 0, sizeof(cmd->resp)); cmd->retries = 0; /* Retries done here, not in hardware. */ cmd->mrq = &mreq; if (cmd->data != NULL) cmd->data->mrq = &mreq; mreq.cmd = cmd; if (MMCBUS_WAIT_FOR_REQUEST(busdev, dev, &mreq) != 0) err = MMC_ERR_FAILED; else err = cmd->error; } while (err != MMC_ERR_NONE && retries-- > 0); if (err != MMC_ERR_NONE && busdev == dev) { sc = device_get_softc(busdev); if (sc->squelched == 0 && ppsratecheck(&sc->log_time, &sc->log_count, LOG_PPS)) { device_printf(sc->dev, "CMD%d failed, RESULT: %d\n", cmd->opcode, err); } } return (err); } int mmc_wait_for_app_cmd(device_t busdev, device_t dev, uint16_t rca, struct mmc_command *cmd, int retries) { struct mmc_command appcmd; struct mmc_softc *sc; int err; sc = device_get_softc(busdev); /* Squelch error reporting at lower levels, we report below. */ sc->squelched++; do { memset(&appcmd, 0, sizeof(appcmd)); appcmd.opcode = MMC_APP_CMD; appcmd.arg = (uint32_t)rca << 16; appcmd.flags = MMC_RSP_R1 | MMC_CMD_AC; if (mmc_wait_for_cmd(busdev, dev, &appcmd, 0) != 0) err = MMC_ERR_FAILED; else err = appcmd.error; if (err == MMC_ERR_NONE) { if (!(appcmd.resp[0] & R1_APP_CMD)) err = MMC_ERR_FAILED; else if (mmc_wait_for_cmd(busdev, dev, cmd, 0) != 0) err = MMC_ERR_FAILED; else err = cmd->error; } } while (err != MMC_ERR_NONE && retries-- > 0); sc->squelched--; if (err != MMC_ERR_NONE && busdev == dev) { if (sc->squelched == 0 && ppsratecheck(&sc->log_time, &sc->log_count, LOG_PPS)) { device_printf(sc->dev, "ACMD%d failed, RESULT: %d\n", cmd->opcode, err); } } return (err); } int mmc_switch(device_t busdev, device_t dev, uint16_t rca, uint8_t set, uint8_t index, uint8_t value, u_int timeout, bool status) { struct mmc_command cmd; struct mmc_softc *sc; int err; KASSERT(timeout != 0, ("%s: no timeout", __func__)); sc = device_get_softc(busdev); memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_SWITCH_FUNC; cmd.arg = (MMC_SWITCH_FUNC_WR << 24) | (index << 16) | (value << 8) | set; /* * If the hardware supports busy detection but the switch timeout * exceeds the maximum host timeout, use a R1 instead of a R1B * response in order to keep the hardware from timing out. */ if (mmcbr_get_caps(busdev) & MMC_CAP_WAIT_WHILE_BUSY && timeout > mmcbr_get_max_busy_timeout(busdev)) cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; else cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; /* * Pause re-tuning so it won't interfere with the busy state and also * so that the result of CMD13 will always refer to switching rather * than to a tuning command that may have snuck in between. */ sc->retune_paused++; err = mmc_wait_for_cmd(busdev, dev, &cmd, CMD_RETRIES); if (err != MMC_ERR_NONE || status == false) goto out; err = mmc_switch_status(busdev, dev, rca, timeout); out: sc->retune_paused--; return (err); } int mmc_switch_status(device_t busdev, device_t dev, uint16_t rca, u_int timeout) { struct timeval cur, end; int err; uint32_t status; KASSERT(timeout != 0, ("%s: no timeout", __func__)); /* * Note that when using a R1B response in mmc_switch(), bridges of * type MMC_CAP_WAIT_WHILE_BUSY will issue mmc_send_status() only * once and then exit the loop. 
*/ end.tv_sec = end.tv_usec = 0; for (;;) { err = mmc_send_status(busdev, dev, rca, &status); if (err != MMC_ERR_NONE) break; if (R1_CURRENT_STATE(status) == R1_STATE_TRAN) break; getmicrouptime(&cur); if (end.tv_sec == 0 && end.tv_usec == 0) { end.tv_usec = timeout; timevaladd(&end, &cur); } if (timevalcmp(&cur, &end, >)) { err = MMC_ERR_TIMEOUT; break; } } if (err == MMC_ERR_NONE && (status & R1_SWITCH_ERROR) != 0) return (MMC_ERR_FAILED); return (err); } int mmc_send_ext_csd(device_t busdev, device_t dev, uint8_t *rawextcsd) { struct mmc_command cmd; struct mmc_data data; int err; memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); memset(rawextcsd, 0, MMC_EXTCSD_SIZE); cmd.opcode = MMC_SEND_EXT_CSD; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.data = &data; data.data = rawextcsd; data.len = MMC_EXTCSD_SIZE; data.flags = MMC_DATA_READ; err = mmc_wait_for_cmd(busdev, dev, &cmd, CMD_RETRIES); return (err); } int mmc_send_status(device_t busdev, device_t dev, uint16_t rca, uint32_t *status) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_SEND_STATUS; cmd.arg = (uint32_t)rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(busdev, dev, &cmd, CMD_RETRIES); *status = cmd.resp[0]; return (err); }
/* * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2010-2011 Texas Instruments Incorporated, * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2010-2011 Texas Instruments Incorporated, * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Texas Instruments Incorporated nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #define OMAP_ABE_INIT_SM_ADDR 0x0 #define OMAP_ABE_INIT_SM_SIZE 0xC80 #define OMAP_ABE_S_DATA0_ADDR 0xC80 #define OMAP_ABE_S_DATA0_SIZE 0x8 #define OMAP_ABE_S_TEMP_ADDR 0xC88 #define OMAP_ABE_S_TEMP_SIZE 0x8 #define OMAP_ABE_S_PHOENIXOFFSET_ADDR 0xC90 #define OMAP_ABE_S_PHOENIXOFFSET_SIZE 0x8 #define OMAP_ABE_S_GTARGET1_ADDR 0xC98 #define OMAP_ABE_S_GTARGET1_SIZE 0x38 #define OMAP_ABE_S_GTARGET_DL1_ADDR 0xCD0 #define OMAP_ABE_S_GTARGET_DL1_SIZE 0x10 #define OMAP_ABE_S_GTARGET_DL2_ADDR 0xCE0 #define OMAP_ABE_S_GTARGET_DL2_SIZE 0x10 #define OMAP_ABE_S_GTARGET_ECHO_ADDR 0xCF0 #define OMAP_ABE_S_GTARGET_ECHO_SIZE 0x8 #define OMAP_ABE_S_GTARGET_SDT_ADDR 0xCF8 #define OMAP_ABE_S_GTARGET_SDT_SIZE 0x8 #define OMAP_ABE_S_GTARGET_VXREC_ADDR 0xD00 #define OMAP_ABE_S_GTARGET_VXREC_SIZE 0x10 #define OMAP_ABE_S_GTARGET_UL_ADDR 0xD10 #define OMAP_ABE_S_GTARGET_UL_SIZE 0x10 #define OMAP_ABE_S_GTARGET_BTUL_ADDR 0xD20 #define OMAP_ABE_S_GTARGET_BTUL_SIZE 0x8 #define OMAP_ABE_S_GCURRENT_ADDR 0xD28 #define OMAP_ABE_S_GCURRENT_SIZE 0x90 #define OMAP_ABE_S_GAIN_ONE_ADDR 0xDB8 #define OMAP_ABE_S_GAIN_ONE_SIZE 0x8 #define OMAP_ABE_S_TONES_ADDR 0xDC0 #define OMAP_ABE_S_TONES_SIZE 0x60 #define OMAP_ABE_S_VX_DL_ADDR 0xE20 #define OMAP_ABE_S_VX_DL_SIZE 0x60 #define OMAP_ABE_S_MM_UL2_ADDR 0xE80 #define OMAP_ABE_S_MM_UL2_SIZE 0x60 #define OMAP_ABE_S_MM_DL_ADDR 0xEE0 #define OMAP_ABE_S_MM_DL_SIZE 0x60 #define OMAP_ABE_S_DL1_M_OUT_ADDR 0xF40 #define OMAP_ABE_S_DL1_M_OUT_SIZE 0x60 #define OMAP_ABE_S_DL2_M_OUT_ADDR 0xFA0 #define OMAP_ABE_S_DL2_M_OUT_SIZE 0x60 #define OMAP_ABE_S_ECHO_M_OUT_ADDR 0x1000 #define OMAP_ABE_S_ECHO_M_OUT_SIZE 0x60 #define OMAP_ABE_S_SDT_M_OUT_ADDR 0x1060 #define OMAP_ABE_S_SDT_M_OUT_SIZE 0x60 #define OMAP_ABE_S_VX_UL_ADDR 0x10C0 #define OMAP_ABE_S_VX_UL_SIZE 0x60 #define OMAP_ABE_S_VX_UL_M_ADDR 0x1120 #define OMAP_ABE_S_VX_UL_M_SIZE 0x60 #define OMAP_ABE_S_BT_DL_ADDR 0x1180 #define OMAP_ABE_S_BT_DL_SIZE 0x60 #define OMAP_ABE_S_BT_UL_ADDR 0x11E0 #define OMAP_ABE_S_BT_UL_SIZE 0x60 #define OMAP_ABE_S_BT_DL_8K_ADDR 0x1240 #define OMAP_ABE_S_BT_DL_8K_SIZE 0x18 #define OMAP_ABE_S_BT_DL_16K_ADDR 0x1258 #define OMAP_ABE_S_BT_DL_16K_SIZE 0x28 #define OMAP_ABE_S_BT_UL_8K_ADDR 0x1280 #define OMAP_ABE_S_BT_UL_8K_SIZE 0x10 #define OMAP_ABE_S_BT_UL_16K_ADDR 0x1290 #define OMAP_ABE_S_BT_UL_16K_SIZE 0x20 #define OMAP_ABE_S_SDT_F_ADDR 0x12B0 #define OMAP_ABE_S_SDT_F_SIZE 0x60 #define OMAP_ABE_S_SDT_F_DATA_ADDR 0x1310 #define OMAP_ABE_S_SDT_F_DATA_SIZE 0x48 #define OMAP_ABE_S_MM_DL_OSR_ADDR 0x1358 #define OMAP_ABE_S_MM_DL_OSR_SIZE 0xC0 #define OMAP_ABE_S_24_ZEROS_ADDR 0x1418 #define OMAP_ABE_S_24_ZEROS_SIZE 0xC0 #define OMAP_ABE_S_DMIC1_ADDR 0x14D8 #define OMAP_ABE_S_DMIC1_SIZE 0x60 #define OMAP_ABE_S_DMIC2_ADDR 0x1538 #define OMAP_ABE_S_DMIC2_SIZE 0x60 #define OMAP_ABE_S_DMIC3_ADDR 0x1598 #define OMAP_ABE_S_DMIC3_SIZE 0x60 #define OMAP_ABE_S_AMIC_ADDR 0x15F8 #define OMAP_ABE_S_AMIC_SIZE 0x60 #define OMAP_ABE_S_DMIC1_L_ADDR 0x1658 #define OMAP_ABE_S_DMIC1_L_SIZE 0x60 #define OMAP_ABE_S_DMIC1_R_ADDR 0x16B8 #define OMAP_ABE_S_DMIC1_R_SIZE 0x60 #define OMAP_ABE_S_DMIC2_L_ADDR 0x1718 #define OMAP_ABE_S_DMIC2_L_SIZE 0x60 #define OMAP_ABE_S_DMIC2_R_ADDR 0x1778 #define OMAP_ABE_S_DMIC2_R_SIZE 0x60 #define OMAP_ABE_S_DMIC3_L_ADDR 0x17D8 #define OMAP_ABE_S_DMIC3_L_SIZE 0x60 #define OMAP_ABE_S_DMIC3_R_ADDR 0x1838 #define OMAP_ABE_S_DMIC3_R_SIZE 0x60 #define OMAP_ABE_S_BT_UL_L_ADDR 0x1898 #define OMAP_ABE_S_BT_UL_L_SIZE 0x60 #define OMAP_ABE_S_BT_UL_R_ADDR 0x18F8 #define OMAP_ABE_S_BT_UL_R_SIZE 0x60 #define 
OMAP_ABE_S_AMIC_L_ADDR 0x1958 #define OMAP_ABE_S_AMIC_L_SIZE 0x60 #define OMAP_ABE_S_AMIC_R_ADDR 0x19B8 #define OMAP_ABE_S_AMIC_R_SIZE 0x60 #define OMAP_ABE_S_ECHOREF_L_ADDR 0x1A18 #define OMAP_ABE_S_ECHOREF_L_SIZE 0x60 #define OMAP_ABE_S_ECHOREF_R_ADDR 0x1A78 #define OMAP_ABE_S_ECHOREF_R_SIZE 0x60 #define OMAP_ABE_S_MM_DL_L_ADDR 0x1AD8 #define OMAP_ABE_S_MM_DL_L_SIZE 0x60 #define OMAP_ABE_S_MM_DL_R_ADDR 0x1B38 #define OMAP_ABE_S_MM_DL_R_SIZE 0x60 #define OMAP_ABE_S_MM_UL_ADDR 0x1B98 #define OMAP_ABE_S_MM_UL_SIZE 0x3C0 #define OMAP_ABE_S_AMIC_96K_ADDR 0x1F58 #define OMAP_ABE_S_AMIC_96K_SIZE 0xC0 #define OMAP_ABE_S_DMIC0_96K_ADDR 0x2018 #define OMAP_ABE_S_DMIC0_96K_SIZE 0xC0 #define OMAP_ABE_S_DMIC1_96K_ADDR 0x20D8 #define OMAP_ABE_S_DMIC1_96K_SIZE 0xC0 #define OMAP_ABE_S_DMIC2_96K_ADDR 0x2198 #define OMAP_ABE_S_DMIC2_96K_SIZE 0xC0 #define OMAP_ABE_S_UL_VX_UL_48_8K_ADDR 0x2258 #define OMAP_ABE_S_UL_VX_UL_48_8K_SIZE 0x60 #define OMAP_ABE_S_UL_VX_UL_48_16K_ADDR 0x22B8 #define OMAP_ABE_S_UL_VX_UL_48_16K_SIZE 0x60 #define OMAP_ABE_S_UL_MIC_48K_ADDR 0x2318 #define OMAP_ABE_S_UL_MIC_48K_SIZE 0x60 #define OMAP_ABE_S_VOICE_8K_UL_ADDR 0x2378 #define OMAP_ABE_S_VOICE_8K_UL_SIZE 0x18 #define OMAP_ABE_S_VOICE_8K_DL_ADDR 0x2390 #define OMAP_ABE_S_VOICE_8K_DL_SIZE 0x10 #define OMAP_ABE_S_MCPDM_OUT1_ADDR 0x23A0 #define OMAP_ABE_S_MCPDM_OUT1_SIZE 0xC0 #define OMAP_ABE_S_MCPDM_OUT2_ADDR 0x2460 #define OMAP_ABE_S_MCPDM_OUT2_SIZE 0xC0 #define OMAP_ABE_S_MCPDM_OUT3_ADDR 0x2520 #define OMAP_ABE_S_MCPDM_OUT3_SIZE 0xC0 #define OMAP_ABE_S_VOICE_16K_UL_ADDR 0x25E0 #define OMAP_ABE_S_VOICE_16K_UL_SIZE 0x28 #define OMAP_ABE_S_VOICE_16K_DL_ADDR 0x2608 #define OMAP_ABE_S_VOICE_16K_DL_SIZE 0x20 #define OMAP_ABE_S_XINASRC_DL_VX_ADDR 0x2628 #define OMAP_ABE_S_XINASRC_DL_VX_SIZE 0x140 #define OMAP_ABE_S_XINASRC_UL_VX_ADDR 0x2768 #define OMAP_ABE_S_XINASRC_UL_VX_SIZE 0x140 #define OMAP_ABE_S_XINASRC_MM_EXT_IN_ADDR 0x28A8 #define OMAP_ABE_S_XINASRC_MM_EXT_IN_SIZE 0x140 #define OMAP_ABE_S_VX_REC_ADDR 0x29E8 #define OMAP_ABE_S_VX_REC_SIZE 0x60 #define OMAP_ABE_S_VX_REC_L_ADDR 0x2A48 #define OMAP_ABE_S_VX_REC_L_SIZE 0x60 #define OMAP_ABE_S_VX_REC_R_ADDR 0x2AA8 #define OMAP_ABE_S_VX_REC_R_SIZE 0x60 #define OMAP_ABE_S_DL2_M_L_ADDR 0x2B08 #define OMAP_ABE_S_DL2_M_L_SIZE 0x60 #define OMAP_ABE_S_DL2_M_R_ADDR 0x2B68 #define OMAP_ABE_S_DL2_M_R_SIZE 0x60 #define OMAP_ABE_S_DL2_M_LR_EQ_DATA_ADDR 0x2BC8 #define OMAP_ABE_S_DL2_M_LR_EQ_DATA_SIZE 0xC8 #define OMAP_ABE_S_DL1_M_EQ_DATA_ADDR 0x2C90 #define OMAP_ABE_S_DL1_M_EQ_DATA_SIZE 0xC8 #define OMAP_ABE_S_EARP_48_96_LP_DATA_ADDR 0x2D58 #define OMAP_ABE_S_EARP_48_96_LP_DATA_SIZE 0x78 #define OMAP_ABE_S_IHF_48_96_LP_DATA_ADDR 0x2DD0 #define OMAP_ABE_S_IHF_48_96_LP_DATA_SIZE 0x78 #define OMAP_ABE_S_VX_UL_8_TEMP_ADDR 0x2E48 #define OMAP_ABE_S_VX_UL_8_TEMP_SIZE 0x10 #define OMAP_ABE_S_VX_UL_16_TEMP_ADDR 0x2E58 #define OMAP_ABE_S_VX_UL_16_TEMP_SIZE 0x20 #define OMAP_ABE_S_VX_DL_8_48_LP_DATA_ADDR 0x2E78 #define OMAP_ABE_S_VX_DL_8_48_LP_DATA_SIZE 0x68 #define OMAP_ABE_S_VX_DL_8_48_HP_DATA_ADDR 0x2EE0 #define OMAP_ABE_S_VX_DL_8_48_HP_DATA_SIZE 0x38 #define OMAP_ABE_S_VX_DL_16_48_LP_DATA_ADDR 0x2F18 #define OMAP_ABE_S_VX_DL_16_48_LP_DATA_SIZE 0x68 #define OMAP_ABE_S_VX_DL_16_48_HP_DATA_ADDR 0x2F80 #define OMAP_ABE_S_VX_DL_16_48_HP_DATA_SIZE 0x28 #define OMAP_ABE_S_VX_UL_48_8_LP_DATA_ADDR 0x2FA8 #define OMAP_ABE_S_VX_UL_48_8_LP_DATA_SIZE 0x68 #define OMAP_ABE_S_VX_UL_48_8_HP_DATA_ADDR 0x3010 #define OMAP_ABE_S_VX_UL_48_8_HP_DATA_SIZE 0x38 #define OMAP_ABE_S_VX_UL_48_16_LP_DATA_ADDR 0x3048 #define 
OMAP_ABE_S_VX_UL_48_16_LP_DATA_SIZE 0x68 #define OMAP_ABE_S_VX_UL_48_16_HP_DATA_ADDR 0x30B0 #define OMAP_ABE_S_VX_UL_48_16_HP_DATA_SIZE 0x28 #define OMAP_ABE_S_BT_UL_8_48_LP_DATA_ADDR 0x30D8 #define OMAP_ABE_S_BT_UL_8_48_LP_DATA_SIZE 0x68 #define OMAP_ABE_S_BT_UL_8_48_HP_DATA_ADDR 0x3140 #define OMAP_ABE_S_BT_UL_8_48_HP_DATA_SIZE 0x38 #define OMAP_ABE_S_BT_UL_16_48_LP_DATA_ADDR 0x3178 #define OMAP_ABE_S_BT_UL_16_48_LP_DATA_SIZE 0x68 #define OMAP_ABE_S_BT_UL_16_48_HP_DATA_ADDR 0x31E0 #define OMAP_ABE_S_BT_UL_16_48_HP_DATA_SIZE 0x28 #define OMAP_ABE_S_BT_DL_48_8_LP_DATA_ADDR 0x3208 #define OMAP_ABE_S_BT_DL_48_8_LP_DATA_SIZE 0x68 #define OMAP_ABE_S_BT_DL_48_8_HP_DATA_ADDR 0x3270 #define OMAP_ABE_S_BT_DL_48_8_HP_DATA_SIZE 0x38 #define OMAP_ABE_S_BT_DL_48_16_LP_DATA_ADDR 0x32A8 #define OMAP_ABE_S_BT_DL_48_16_LP_DATA_SIZE 0x68 #define OMAP_ABE_S_BT_DL_48_16_HP_DATA_ADDR 0x3310 #define OMAP_ABE_S_BT_DL_48_16_HP_DATA_SIZE 0x28 #define OMAP_ABE_S_ECHO_REF_48_8_LP_DATA_ADDR 0x3338 #define OMAP_ABE_S_ECHO_REF_48_8_LP_DATA_SIZE 0x68 #define OMAP_ABE_S_ECHO_REF_48_8_HP_DATA_ADDR 0x33A0 #define OMAP_ABE_S_ECHO_REF_48_8_HP_DATA_SIZE 0x38 #define OMAP_ABE_S_ECHO_REF_48_16_LP_DATA_ADDR 0x33D8 #define OMAP_ABE_S_ECHO_REF_48_16_LP_DATA_SIZE 0x68 #define OMAP_ABE_S_ECHO_REF_48_16_HP_DATA_ADDR 0x3440 #define OMAP_ABE_S_ECHO_REF_48_16_HP_DATA_SIZE 0x28 #define OMAP_ABE_S_XINASRC_ECHO_REF_ADDR 0x3468 #define OMAP_ABE_S_XINASRC_ECHO_REF_SIZE 0x140 #define OMAP_ABE_S_ECHO_REF_16K_ADDR 0x35A8 #define OMAP_ABE_S_ECHO_REF_16K_SIZE 0x28 #define OMAP_ABE_S_ECHO_REF_8K_ADDR 0x35D0 #define OMAP_ABE_S_ECHO_REF_8K_SIZE 0x18 #define OMAP_ABE_S_DL1_EQ_ADDR 0x35E8 #define OMAP_ABE_S_DL1_EQ_SIZE 0x60 #define OMAP_ABE_S_DL2_EQ_ADDR 0x3648 #define OMAP_ABE_S_DL2_EQ_SIZE 0x60 #define OMAP_ABE_S_DL1_GAIN_OUT_ADDR 0x36A8 #define OMAP_ABE_S_DL1_GAIN_OUT_SIZE 0x60 #define OMAP_ABE_S_DL2_GAIN_OUT_ADDR 0x3708 #define OMAP_ABE_S_DL2_GAIN_OUT_SIZE 0x60 #define OMAP_ABE_S_DC_HS_ADDR 0x3768 #define OMAP_ABE_S_DC_HS_SIZE 0x8 #define OMAP_ABE_S_DC_HF_ADDR 0x3770 #define OMAP_ABE_S_DC_HF_SIZE 0x8 #define OMAP_ABE_S_VIBRA_ADDR 0x3778 #define OMAP_ABE_S_VIBRA_SIZE 0x30 #define OMAP_ABE_S_VIBRA2_IN_ADDR 0x37A8 #define OMAP_ABE_S_VIBRA2_IN_SIZE 0x30 #define OMAP_ABE_S_VIBRA2_ADDR_ADDR 0x37D8 #define OMAP_ABE_S_VIBRA2_ADDR_SIZE 0x8 #define OMAP_ABE_S_VIBRACTRL_FORRIGHTSM_ADDR 0x37E0 #define OMAP_ABE_S_VIBRACTRL_FORRIGHTSM_SIZE 0xC0 #define OMAP_ABE_S_RNOISE_MEM_ADDR 0x38A0 #define OMAP_ABE_S_RNOISE_MEM_SIZE 0x8 #define OMAP_ABE_S_CTRL_ADDR 0x38A8 #define OMAP_ABE_S_CTRL_SIZE 0x90 #define OMAP_ABE_S_VIBRA1_IN_ADDR 0x3938 #define OMAP_ABE_S_VIBRA1_IN_SIZE 0x30 #define OMAP_ABE_S_VIBRA1_TEMP_ADDR 0x3968 #define OMAP_ABE_S_VIBRA1_TEMP_SIZE 0xC0 #define OMAP_ABE_S_VIBRACTRL_FORLEFTSM_ADDR 0x3A28 #define OMAP_ABE_S_VIBRACTRL_FORLEFTSM_SIZE 0xC0 #define OMAP_ABE_S_VIBRA1_MEM_ADDR 0x3AE8 #define OMAP_ABE_S_VIBRA1_MEM_SIZE 0x58 #define OMAP_ABE_S_VIBRACTRL_STEREO_ADDR 0x3B40 #define OMAP_ABE_S_VIBRACTRL_STEREO_SIZE 0xC0 #define OMAP_ABE_S_AMIC_96_48_DATA_ADDR 0x3C00 #define OMAP_ABE_S_AMIC_96_48_DATA_SIZE 0x98 #define OMAP_ABE_S_DMIC0_96_48_DATA_ADDR 0x3C98 #define OMAP_ABE_S_DMIC0_96_48_DATA_SIZE 0x98 #define OMAP_ABE_S_DMIC1_96_48_DATA_ADDR 0x3D30 #define OMAP_ABE_S_DMIC1_96_48_DATA_SIZE 0x98 #define OMAP_ABE_S_DMIC2_96_48_DATA_ADDR 0x3DC8 #define OMAP_ABE_S_DMIC2_96_48_DATA_SIZE 0x98 #define OMAP_ABE_S_DBG_8K_PATTERN_ADDR 0x3E60 #define OMAP_ABE_S_DBG_8K_PATTERN_SIZE 0x10 #define OMAP_ABE_S_DBG_16K_PATTERN_ADDR 0x3E70 #define 
OMAP_ABE_S_DBG_16K_PATTERN_SIZE 0x20 #define OMAP_ABE_S_DBG_24K_PATTERN_ADDR 0x3E90 #define OMAP_ABE_S_DBG_24K_PATTERN_SIZE 0x30 #define OMAP_ABE_S_DBG_48K_PATTERN_ADDR 0x3EC0 #define OMAP_ABE_S_DBG_48K_PATTERN_SIZE 0x60 #define OMAP_ABE_S_DBG_96K_PATTERN_ADDR 0x3F20 #define OMAP_ABE_S_DBG_96K_PATTERN_SIZE 0xC0 #define OMAP_ABE_S_MM_EXT_IN_ADDR 0x3FE0 #define OMAP_ABE_S_MM_EXT_IN_SIZE 0x60 #define OMAP_ABE_S_MM_EXT_IN_L_ADDR 0x4040 #define OMAP_ABE_S_MM_EXT_IN_L_SIZE 0x60 #define OMAP_ABE_S_MM_EXT_IN_R_ADDR 0x40A0 #define OMAP_ABE_S_MM_EXT_IN_R_SIZE 0x60 #define OMAP_ABE_S_MIC4_ADDR 0x4100 #define OMAP_ABE_S_MIC4_SIZE 0x60 #define OMAP_ABE_S_MIC4_L_ADDR 0x4160 #define OMAP_ABE_S_MIC4_L_SIZE 0x60 #define OMAP_ABE_S_SATURATION_7FFF_ADDR 0x41C0 #define OMAP_ABE_S_SATURATION_7FFF_SIZE 0x8 #define OMAP_ABE_S_SATURATION_ADDR 0x41C8 #define OMAP_ABE_S_SATURATION_SIZE 0x8 #define OMAP_ABE_S_XINASRC_BT_UL_ADDR 0x41D0 #define OMAP_ABE_S_XINASRC_BT_UL_SIZE 0x140 #define OMAP_ABE_S_XINASRC_BT_DL_ADDR 0x4310 #define OMAP_ABE_S_XINASRC_BT_DL_SIZE 0x140 #define OMAP_ABE_S_BT_DL_8K_TEMP_ADDR 0x4450 #define OMAP_ABE_S_BT_DL_8K_TEMP_SIZE 0x10 #define OMAP_ABE_S_BT_DL_16K_TEMP_ADDR 0x4460 #define OMAP_ABE_S_BT_DL_16K_TEMP_SIZE 0x20 #define OMAP_ABE_S_VX_DL_8_48_OSR_LP_DATA_ADDR 0x4480 #define OMAP_ABE_S_VX_DL_8_48_OSR_LP_DATA_SIZE 0xE0 #define OMAP_ABE_S_BT_UL_8_48_OSR_LP_DATA_ADDR 0x4560 #define OMAP_ABE_S_BT_UL_8_48_OSR_LP_DATA_SIZE 0xE0 #define OMAP_ABE_S_MM_DL_44P1_ADDR 0x4640 #define OMAP_ABE_S_MM_DL_44P1_SIZE 0x300 #define OMAP_ABE_S_TONES_44P1_ADDR 0x4940 #define OMAP_ABE_S_TONES_44P1_SIZE 0x300 #define OMAP_ABE_S_MM_DL_44P1_XK_ADDR 0x4C40 #define OMAP_ABE_S_MM_DL_44P1_XK_SIZE 0x10 #define OMAP_ABE_S_TONES_44P1_XK_ADDR 0x4C50 #define OMAP_ABE_S_TONES_44P1_XK_SIZE 0x10 #define OMAP_ABE_S_SRC_44P1_MULFAC1_ADDR 0x4C60 #define OMAP_ABE_S_SRC_44P1_MULFAC1_SIZE 0x8 #define OMAP_ABE_S_SATURATION_EQ_ADDR 0x4C68 #define OMAP_ABE_S_SATURATION_EQ_SIZE 0x8 #define OMAP_ABE_S_BT_DL_48_8_LP_NEW_DATA_ADDR 0x4C70 #define OMAP_ABE_S_BT_DL_48_8_LP_NEW_DATA_SIZE 0x88 #define OMAP_ABE_S_BT_DL_8_48_OSR_LP_DATA_ADDR 0x4CF8 #define OMAP_ABE_S_BT_DL_8_48_OSR_LP_DATA_SIZE 0x3C8 #define OMAP_ABE_S_VX_UL_48_8_LP_NEW_DATA_ADDR 0x50C0 #define OMAP_ABE_S_VX_UL_48_8_LP_NEW_DATA_SIZE 0x88 #define OMAP_ABE_S_VX_UL_8_48_OSR_LP_DATA_ADDR 0x5148 #define OMAP_ABE_S_VX_UL_8_48_OSR_LP_DATA_SIZE 0x3C8
/// Compiles and executes the contents of a file. pub fn run_file(&self, path: &Path) -> Result<(), Error> { let mut f = File::open(path) .map_err(|e| IoError::new(IoMode::Open, path, e))?; let mut buf = String::new(); f.read_to_string(&mut buf) .map_err(|e| IoError::new(IoMode::Read, path, e))?; self.run_main(&buf, path.to_string_lossy().into_owned()) }
// Copyright 2019 Lawrence Livermore National Security, LLC and other
// Devil Ray Developers. See the top-level COPYRIGHT file for details.
//
// SPDX-License-Identifier: (BSD-3-Clause)

#include "test_config.h"
#include "gtest/gtest.h"

#include "t_utils.hpp"
#include <dray/dray.hpp>
#include <dray/filters/mesh_boundary.hpp>
#include <dray/rendering/renderer.hpp>
#include <dray/rendering/surface.hpp>
#include <dray/io/blueprint_reader.hpp>
#include <dray/math.hpp>

#include <fstream>
#include <mpi.h>

TEST (dray_volume_render, dray_volume_render_multidom)
{
  MPI_Comm comm = MPI_COMM_WORLD;
  dray::dray::mpi_comm(MPI_Comm_c2f(comm));

  std::string root_file = std::string (DATA_DIR) + "laghos_tg.cycle_000350.root";
  std::string output_path = prepare_output_dir ();
  std::string output_file = conduit::utils::join_file_path (output_path, "tg_mpi_faces");
  remove_test_image (output_file);

  dray::Collection dataset = dray::BlueprintReader::load (root_file);

  dray::MeshBoundary boundary;
  dray::Collection faces = boundary.execute(dataset);

  // Camera
  const int c_width = 512;
  const int c_height = 512;

  dray::Camera camera;
  camera.set_width (c_width);
  camera.set_height (c_height);
  camera.azimuth(20);
  camera.elevate(10);
  camera.reset_to_bounds (dataset.bounds());

  std::shared_ptr<dray::Surface> surface = std::make_shared<dray::Surface>(faces);
  surface->field("density");
  surface->draw_mesh (true);
  surface->line_thickness(.1);

  dray::Renderer renderer;
  renderer.add(surface);
  dray::Framebuffer fb = renderer.render(camera);

  if(dray::dray::mpi_rank() == 0)
  {
    fb.composite_background();
    fb.save (output_file);
    EXPECT_TRUE (check_test_image (output_file));
  }
}

int main(int argc, char* argv[])
{
  int result = 0;
  ::testing::InitGoogleTest(&argc, argv);
  MPI_Init(&argc, &argv);
  result = RUN_ALL_TESTS();
  MPI_Finalize();
  return result;
}
package net.mgsx.game.examples.td.tools; import com.badlogic.ashley.core.Entity; import net.mgsx.game.core.EditorScreen; import net.mgsx.game.examples.td.components.Platform; public class PlatformTool extends TileTool { public PlatformTool(EditorScreen editor) { super("Tile - Platform", editor); } @Override protected void edit(Entity entity) { if(Platform.components.has(entity)) entity.remove(Platform.class); else entity.add(getEngine().createComponent(Platform.class)); } }
A novel mutation of the GTP cyclohydrolase I gene in a patient with hereditary progressive dystonia/dopa-responsive dystonia. We report a 37-year-old Japanese woman with hereditary progressive dystonia with marked diurnal fluctuation and dopa-responsive dystonia. She developed dystonia in the lower limbs at the age of 11 years, followed by spasmodic torticollis and resting tremor of the feet, which responded remarkably to low doses of levodopa (100 mg/day). Concentrations of biopterin and neopterin in CSF were decreased. Polymerase chain reaction analysis of the guanosine 5'-triphosphate cyclohydrolase I gene revealed a novel mutation (Thr186→Lys).
1. Field of the Disclosure The disclosure relates in general to a kickstand and, more particularly, to a kickstand for a portable electronic device. 2. Background Art Over the years various kickstands have been marketed for use to support mobile phones and other portable electronic devices. Such conventional kickstands include HTC kickstands, Evo kickstands, 4G phone kickstands, and other types of kickstands. Many of these conventional prior kickstands are flimsy, awkward, and cumbersome, contain too much metal, are heavy, unstable, and/or defective. Furthermore, undesirably many of these conventional kickstands are not part of the mobile phone or other portable electronic device but are part of a separate protective carrying case which encloses or encases the mobile phone or other portable electronic device. It is, therefore, desirable to provide an improved kickstand for use with mobile phones and other portable electronic devices, which overcomes most, if not all of the preceding disadvantages. It is also desirable to create a drop-in kickstand mechanism assembly with minimal metallic components to achieve better weight reduction and minimal interference with antenna structures as well as provide functional capability for audio, high audio porting, and user interface with a hall-effect switch and magnet.
package com.emc.rpsp.fal.commons;

import lombok.AllArgsConstructor;

import javax.xml.bind.annotation.XmlRootElement;

@XmlRootElement
@AllArgsConstructor
public enum Capability {
    SYNC_REPLICATION("SyncReplication"),
    VOLUMES_GREATER_THAN_TWO_TERA_BYTES("VolumesGreaterThanTwoTeraBytes"),
    VIRTUAL_ACCESS("VirtualAccess"),
    VIRTUAL_ACCESS_WITH_ROLL("VirtualAccessWithRoll"),
    READ_ONLY_ON_REPLICA("ReadOnlyOnReplica"),
    FAKE_VOLUME_SIZE("FakeVolumeSize"),
    VOLUME_RESIZE("VolumeResize"),
    UNMAP_VOLUME_COMMAND("UnmapVolumeCommand"),
    UNKNOWN("Unknown");

    private String name;

    public String toString() {
        return name;
    }
}
// Resolve implements the database.Resolver interface. func (s *SubsliceResolvable) Resolve(ctx context.Context) (interface{}, error) { slice, err := database.Resolve(ctx, s.Slice.ID()) if err != nil { return nil, err } return slice.([]byte)[s.First : s.First+s.Count], nil }
import os

ci = input("Commit name : ")

gitPush = [
    "git",
    "git status",
    "git add .",
    "git commit -m \"{}\"".format(ci),
    "git push",
]

for x in gitPush:
    os.system(x)

# Written by <NAME>.
# If you have any errors, plz contact me " <EMAIL> ".
A Blended Process Model for Agile Software Development with Lean Concept

This research addresses a known set of issues with Agile Software Development through a unique form of solution. In fact, the research approach can be considered one of the first studies to propose a hybrid process paradigm that overcomes Agile process issues with the assistance of Lean manufacturing principles. The results show a significant improvement over normal Agile practice, which is indeed a unique and worthy finding for Agile practitioners. After years of being practiced in the industry, the Agile software development process possesses the standard characteristics of a process paradigm. However, due to its inherent high degree of flexibility and the exceptionally abstract nature of its process principles, the Agile process depends heavily upon project and people norms once it is implemented. Greater flexibility is a desirable attribute for a process if it is used by competent experts who can take productive decisions at the right moments. However, depending too much on expert knowledge for process and product adjustments is a questionable practice for a growing project with rapid changes to its development and releases. Software applications are complex and intangible products, which are difficult to manage. Hence, software lifecycle management has become one of the key research areas in software engineering. Due to the nature of software, researchers and practitioners focus on improving the software processes used to develop it. The underlying assumption is that there is a direct correlation between the quality of the process and the quality of the developed software. A software process can be considered a set of tools, methods, and practices used to produce a software product. These are the prime parameters, also known as Key Process Areas (KPAs), that differentiate process-based software development from ad-hoc programming. Identifying KPAs is one of the main considerations when a process model is to be improved. In this research, the KPAs of Agile practice were studied and reviewed for required improvements, taking published criticisms into account. In particular, the driving KPAs of Agile practice, such as the non-standardized process flow, reliance on key people, and immense flexibility, were the focus of this study. Lean principles were then examined for key practices that could be incorporated into classical Agile practice.
#include <algorithm>   // std::swap

/// in-place quicksort
/// good average-case performance
/// not stable
/// requires no extra memory beyond the recursion stack
void quickSort(int *ar, int st, int en){
    if(st >= en) return;
    int pivot = en;          // index currently holding the pivot value
    int pVal = ar[en];       // pivot value
    int id = st;
    while(id + 1 < pivot){
        if(ar[id] < pVal) id++;
        else{
            // move the large element into the right block and pull the pivot one step left
            std::swap(ar[id], ar[pivot]);
            std::swap(ar[id], ar[--pivot]);
        }
    }
    // exactly one unexamined element remains at index id (id == pivot - 1)
    int part = pivot;                     // final position of the pivot value
    if(id != pivot && ar[id] >= pVal){    // compare against the pivot *value*, not the index
        std::swap(ar[id], ar[pivot]);
        part = id;                        // the pivot value now sits at index id
    }
    quickSort(ar, st, part - 1);
    quickSort(ar, part + 1, en);
}
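A short driver of this kind can be used to check the corrected routine; it is only a sketch (the sample values are arbitrary) and assumes the quickSort function above is compiled in the same translation unit.

#include <iostream>

// Minimal demonstration harness for the quickSort routine above.
int main() {
    int data[] = {3, 5, 4, 2, 1, 5};
    int n = sizeof(data) / sizeof(data[0]);
    quickSort(data, 0, n - 1);                    // sort the whole array in place
    for (int i = 0; i < n; ++i)
        std::cout << data[i] << (i + 1 < n ? ' ' : '\n');   // prints: 1 2 3 4 5 5
    return 0;
}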
package com.github.dreamhead.moco.resource.reader;

import com.github.dreamhead.moco.Request;

public class PlainVariable implements Variable {
    private Object text;

    public PlainVariable(final Object text) {
        this.text = text;
    }

    @Override
    public Object toTemplateVariable(final Request request) {
        return text;
    }
}
/** * Model tests for V1ServiceAccountTokenProjection */ public class V1ServiceAccountTokenProjectionTest { private final V1ServiceAccountTokenProjection model = new V1ServiceAccountTokenProjection(); /** * Model tests for V1ServiceAccountTokenProjection */ @Test public void testV1ServiceAccountTokenProjection() { // TODO: test V1ServiceAccountTokenProjection } /** * Test the property 'audience' */ @Test public void audienceTest() { // TODO: test audience } /** * Test the property 'expirationSeconds' */ @Test public void expirationSecondsTest() { // TODO: test expirationSeconds } /** * Test the property 'path' */ @Test public void pathTest() { // TODO: test path } }
Gender, Family, Work and School: Demographic and Other Deterrents to Adult Participation. Using the results of a larger study utilizing Cross's three-part formulation of the 24-item barrier scale devised by Carp, Peterson, and Roelfs, this paper presents demographic differences in the perception of barriers to adult higher education. The scale items were tested with a population of prospective students at public institutions of higher learning, and the resulting six factors were compared against the demographic categories of gender, marital status, number of dependent children, income, and hours of work per week.
package io.github.prototypez.service.app.entity;

public class AppEntity {

    public String data;

    public AppEntity(String data) {
        this.data = data;
    }
}
/**
 * Create a word, store it in the database, and mark it as used.
 *
 * @param p word to create and use
 * @return the created word, or null if creation failed
 */
@POST
@Path("/insert")
public String usarPalabra(@QueryParam("p") String p) {
    Palabra palabra = new Palabra();
    try {
        palabra.setPalabra(p);
        palabra.setHaSidoUsada(0);
        PalabraBBDD.insert(p, palabra.HaSidoUsada());
    } catch (Exception e) {
        // Report the failure and return null instead of dereferencing a null reference.
        System.out.println("Error creating the word: " + e.getMessage());
        return null;
    }
    return palabra.getPalabra();
}
C5b9 complement complex in autoimmune demyelination and multiple sclerosis: dual role in neuroinflammation and neuroprotection. Complement system activation plays an important role in innate and acquired immunity. Activation of complement leads to the formation of the C5b9 terminal complex. While C5b9 can promote cell lysis, sublytic assembly of C5b9 on plasma membranes induces cell cycle activation and survival. Multiple sclerosis (MS) and its animal model, experimental allergic encephalomyelitis (EAE), are inflammatory demyelinating diseases of the central nervous system (CNS) mediated by activated lymphocytes, macrophages/microglia, and the complement system. Complement activation may contribute to the pathogenesis of these diseases through its dual role: the ability of the activated terminal complex C5b9 to promote demyelination and the capacity of sublytic C5b9 to protect oligodendrocytes (OLG) from apoptosis. By inducing EAE in C5-deficient mice, we showed that complement C5 promotes remyelination and protects oligodendrocytes from apoptotic cell death. These findings indicate that activation of complement C5b9 plays a proinflammatory role in the acute phase of the disease, but may also be neuroprotective during the chronic phase of the disease.
#ifndef DESKTOP_H
#define DESKTOP_H

#include "modex.h"
#include "types.h"

#define SHOW_MIN 0 /* hide the last six pixels of boundary */

/*
 * Define maze minimum and maximum dimensions. The description of make_maze
 * in maze.c gives details on the layout of the maze. Minimum values are
 * chosen to ensure that a maze fills the scrolling region of the screen.
 * Maximum values are somewhat arbitrary.
 */
#define MAZE_MIN_X_DIM ((SCROLL_X_DIM + (BLOCK_X_DIM - 1) + 2 * SHOW_MIN) / (2 * BLOCK_X_DIM))
#define MAZE_MAX_X_DIM 50
#define MAZE_MIN_Y_DIM ((SCROLL_Y_DIM + (BLOCK_Y_DIM - 1) + 2 * SHOW_MIN) / (2 * BLOCK_Y_DIM))
#define MAZE_MAX_Y_DIM 30

/*
 * maze array index calculation macro; maze dimensions are valid only
 * after a call to make_maze
 */
#define MAZE_INDEX(a,b) ((a) + ((b) + 1) * maze_x_dim * 2)

// #ifndef __GLOBAL__
// #define __GLOBAL__
// #endif

/* bit vector of properties for spaces in the maze */
typedef enum {
    MAZE_NONE = 0,            /* empty                                      */
    MAZE_WALL = 1,            /* wall                                       */
    MAZE_FRUIT_1 = 2,         /* fruit (3-bit field, with 0 for no fruit)   */
    MAZE_FRUIT_2 = 4,
    MAZE_FRUIT_3 = 8,
    LAST_MAZE_FRUIT_BIT = MAZE_FRUIT_3,
    MAZE_FRUIT = (MAZE_FRUIT_1 | MAZE_FRUIT_2 | MAZE_FRUIT_3),
    MAZE_EXIT = 16,           /* exit from maze                             */
    MAZE_REACH = 128          /* seen already (not shrouded in mist)        */
} maze_bit_t;

/* variables borrowed from mazegame.c */
typedef struct {
    /* dynamic values within a level -- you may want to add more... */
    unsigned int map_x, map_y;   /* current upper left display pixel */
    int is_ModX;
} game_info_t;

/* create a desktop and place UI for it */
extern int make_desktop(int x_dim, int y_dim);

/* fill a buffer with the pixels for a horizontal line of the maze */
extern void fill_horiz_buffer(int x, int y, unsigned char buf[SCROLL_X_DIM]);

/* fill a buffer with the pixels for a vertical line of the maze */
extern void fill_vert_buffer(int x, int y, unsigned char buf[SCROLL_Y_DIM]);

extern int32_t desktop_open(const uint8_t* filename);
extern int32_t desktop_close(int32_t fd);
extern int32_t desktop_read(int32_t fd, void* buf, int32_t nbytes);
extern int32_t desktop_write(int32_t fd, const void* buf, int32_t nbytes);

void init_game_info();

#endif /* MAZE_H */
Familial neurofibromatosis type 1 has diverse manifestations in skin and is associated with steatocystoma multiplex. Y. Zhang, M. Fang, X. Ding, L. Tang and X. Zhang. Department of Dermatology, The First Affiliated Hospital of USTC, Division of Life Sciences and Medicine, University of Science and Technology of China, Hefei, China; Department of Dermatology, the First Affiliated Hospital, Anhui Medical University, Institute of Dermatology, Anhui Medical University, Key Laboratory of Dermatology (Anhui Medical University), Ministry of Education, Hefei, China; and Department of Clinical Medical, the First Clinical Medical College, Anhui Medical University, Hefei, China
The present invention relates generally to 3-dimensional (3D) computerized tomography (CT) and, more specifically, to an improved cone beam CT system and method for efficiently acquiring projection data for image reconstruction of a region of interest using circular scans, wherein the detector area is optimally utilized for all source positions.

A system employing cone beam geometry has been developed for three-dimensional (3D) computed tomography (CT) imaging that comprises a cone beam x-ray source and a 2D area detector. An object to be imaged is scanned, preferably over a 360 degree angular range and along its entire length, by any one of various methods wherein the position of the area detector is fixed relative to the source, and relative rotational and translational movement between the source and object provides the scanning (irradiation of the object by radiation energy). The cone beam approach for 3D CT has the potential to achieve 3D imaging in both medical and industrial applications with improved speed, as well as improved dose utilization when compared with conventional 3D CT apparatus (i.e., a stack of slices approach obtained using parallel or fan beam x-rays). As a result of the relative movement of the cone beam source to a plurality of source positions (i.e., "views") along the scan path, the detector acquires a corresponding plurality of sequential sets of cone beam projection data (also referred to herein as cone beam data or projection data), each set of cone beam data being representative of x-ray attenuation caused by the object at a respective one of the source positions. The cone beam projection data is then processed to reconstruct a 3D image of the object using image reconstruction methods known in the art.

Various methods have been developed for 3D image reconstruction for cone beam x-ray imaging systems. For example, a back projection cone beam image reconstruction technique is described in U.S. Pat. No. 5,257,183, which issued on Oct. 26, 1993 to Kwok C. Tam, entitled "Method and Apparatus for Converting Cone Beam X-Ray Projection Data To Planar Integral and Reconstructing a Three-Dimensional Computerized Tomography (CT) Image of an Object", which is incorporated herein by reference. This patent discloses a method and apparatus for converting cone beam data to values representing planar integrals on any arbitrary set of planes in Radon space for 3D image reconstruction through an inverse Radon transformation. Back projections can be mathematically accomplished for a cone beam source by inverse Radon transforming suitable planar integrals. The planar integrals are computed from detector integrals which utilize the measured cone beam projection data, i.e., the detected attenuated intensity representative of the density distributions of the object. The use of a cone beam source expedites data acquisition. A direct Radon inversion of three dimensional cone beam data from a cone beam source is not possible. Thus, before an inverse Radon transform can be undertaken in a three dimensional cone beam data implementation, the cone beam detector integrals must be reconfigured into planar integrals suitable for inverse Radon transformation. U.S. Pat. No. 5,257,183 discloses a method for image reconstruction by calculating Radon derivative data from the acquired cone beam data. The Radon derivative data is typically determined by calculating line integrals for a plurality of line segments drawn in the acquired cone beam data. Radon space driven conversion of the derivative data is used to develop an exact image reconstruction of a region-of-interest (ROI) in the object.

FIG. 1 illustrates a typical circular scanning geometry for three dimensional CT scanning employing cone beam geometry. An object 10 to be imaged is positioned within a field of view between a cone beam point source 11 and a two dimensional detector array 12 that acquires cone beam projection data. An axis of rotation 13 passes through the field of view and the object 10. A midplane 14 is defined as the plane that (i) is normal to the axis of rotation 13 and (ii) contains the cone beam point source 11. In the exemplary embodiments described herein, the axis of rotation 26 is the v axis, having its origin (0,0,0) at its intersection with the midplane. The coordinate system is fixed relative to the source 11 and detector 12. When scanning the object 10 at a plurality of angular positions, the source 11 moves relative to the object 10 and the field of view rotates along a circular scanning trajectory 15 lying in the midplane 14, while the detector 12 remains fixed with respect to the source 11 (or alternatively the object 10 can be rotated while the source 11 and detector 12 remain stationary). Data is acquired at a plurality of source positions during the scan. Data collected at the detector 12 represent line integrals through the object 10. The approach to reconstruction then embodies calculating planar integrals on a set of planes from various line integrals through the object, then performing an inverse Radon transform on the planar integrals to reconstruct a three dimensional image of the object. It is known that data collected in such a single circular scan is incomplete and artifacts may be introduced into the reconstructed image.

One image reconstruction method that uses two circular scans and a Radon transform approach to three dimensional CT imaging is disclosed, for example, in U.S. Pat. No. 5,383,119, which issued on Jan. 17, 1995 to Kwok Tam, entitled "Method and Apparatus For Acquiring Complete Radon Data for Exactly Reconstructing a Three Dimensional Computerized Tomography Image Of a Portion of an Object Irradiated By a Cone Beam Source", which is incorporated herein by reference. This patent discloses a method for imaging a region of interest (ROI) in a long object by scanning the ROI in 2 circular scan paths and a line scan that connects the two circular scans. The method enables exact 3D reconstruction of the image of a portion of interest of an object in the field of view of a cone beam source by selectively disregarding unwanted Radon data which may corrupt the imaging process and selectively recovering Radon data that would otherwise be lost. The method enables acquisition of a complete Radon data set through proper choice of scanning configuration and selective partitioning and manipulation of the acquired data. More specifically, referring to FIG. 2, a ROI Ωo within a long object 20 (e.g., human body) is reconstructed by cone beam data collected by (i) an upper scan trajectory 21 and lower scan trajectory 22, which are taken to be spaced circular trajectories that are normal to the axis of rotation v, and (ii) a connecting line scan. A circular scan is performed for both the upper and lower bounds of the ROI and the data from the two circular scans are combined in such a manner as to appropriately reconstruct the ROI. FIG.
3 depicts the method for combining the cone beam data from two circular scan paths for reconstructing the ROI in a long object, wherein cone beam source positions A and B correspond to the top and bottom scans, respectively. In general, processing the cone beam data for an exact image reconstruction involves filtering, either implicitly or explicitly, all line segments on the detector (i.e., the integration line segments). In the data combination process, at each cone beam view, the line segments being filtered are restricted to only the cone beam data bound by the angular ranges as depicted in FIG. 3. In this manner, the totality of the cone beam data from all the contributing source positions covers every plane of integration intersecting the ROI in its entirety without any overlap. More specifically, an image reconstruction method described in the above-incorporated U.S. Pat. No. 5,383,119 comprises the steps of performing a circular scan using a cone beam source along a circular path enclosing a ROI at the upper and lower extent of the ROI and joining the upper and lower circular scan paths by a connecting scan path. The upper, lower, and connecting scan paths collectively define a complete scan path for the ROI. Then, each plane of integration corresponding to a data point in Radon space is selectively partitioned based on the intersection of the plane with the ROI. Then, based on the partitioning, the cone beam data is selectively manipulated to discard data that is not directly attributable to the ROI and to recover otherwise missing data directly attributable to the ROI. The process of selectively portioning each plane of integration comprises categorizing each plane of integration based on the manner in which the plane intersects the ROI. More specifically, to assess the adequacy of the filling of Radon space with Radon data, each integration plane that contributes to points in Radon space is categorized as follows: (i) a plane that does not intersect the ROI; (ii) a plane that intersects the ROI only; (iii) a plane that intersects the ROI and also either the region above the ROI or the region below the ROI, but not both; and (iv) a plane that intersects the ROI and also both the region above and the region below the ROI. The process of selective manipulation generally comprises selectively disregarding cone beam data contributed from regions beyond the ROI in the long object to eliminate otherwise image corrupting data and selectively combining cone beam data obtained from the upper and lower scans to recover data otherwise missing due to corruption by regions beyond the ROI. In particular, for case (i), the planar integral will be zero so the corresponding cone beam data is discarded. For case (ii), the planar integral is computed from the cone beam data based on the methods described in the above-incorporated U.S. Pat. No. 5,257,183. For case (iii), the planar integral is computed from the top scan data or the corresponding bottom scan data as follows: the unwanted contribution of that portion of the object beyond the ends of the ROI to the computation of the planar integrals is eliminated by discarding all the cone beam data whose paths traverse the region beyond the ends of the ROI. For case (iv), the data is selectively manipulated by selectively combining cone beam data from top and bottom level scans for each plane intersecting both the top and bottom levels of the ROI, as diagrammatically shown in FIG. 3. 
3. Finally, missing Radon data is then filled in by any scan connecting the upper and lower circular scans. Further details of the above method for acquiring complete Radon data for exact image reconstruction of a 3D image of the ROI can be found in the above-incorporated U.S. Pat. No. 5,383,119.

One known method for restricting the cone beam data to the appropriate angular range is accomplished by a "masking" process, which facilitates efficient 3D CT imaging when only the ROI in the object is to be imaged, as is normally the case. During a scan, the scanning trajectory is sampled at a plurality of source positions where cone beam energy is emitted toward the ROI. After passing through the ROI the residual energy at each of the source positions is acquired on the area detector as a given one of a plurality of sets of cone beam data. Each set of the cone beam data is then "masked" so as to remove a portion of the cone beam data that is outside a given sub-section of a projection of the ROI in the object and to retain cone beam projection data that is within the given sub-section. The shape of each mask for a given set of cone beam data will vary based on the scan orbit. The cone beam data that is retained (via the masking process) is then processed so as to develop reconstruction data. An exact image of the ROI is developed by combining the reconstruction data from the various source positions which intersect a common integration plane. As such, the masks are commonly referred to as "data-combination" masks.

Typically, and in a preferred embodiment of the present invention, projection data for image reconstruction is acquired by applying a "mask" to each set of the projection data so that data inside the boundaries of each mask form a corresponding plurality of masked 2D data sets. FIG. 4 is a diagram that illustrates masks used in the exemplary embodiments of FIGS. 2 and 3, wherein circular scans are taken along the upper and lower boundaries of the ROI. FIG. 4a illustrates the upper circular scan trajectory 21 and lower circular scan trajectory 22 which are separated by a distance of 2H (line scan) and each trajectory having a diameter of 2R. FIG. 4b illustrates a mask M which is applied to the cone beam data on detector D for the upper circular scan 21. As shown, the mask M boundaries comprise a top curve and a bottom curve, which are formed by projecting the upper circular scan path from a current source position. More specifically, for a flat detector D located at the rotation axis such that the line connecting the source to the detector origin is normal to the detector plane, the equation of the mask M for a given source position on the top circular scan path is defined by the following:

Top boundary: v = 0

Bottom boundary: v = -H(1 + u²/R²)

where u and v are the Cartesian coordinate axes of the detector with the v axis coinciding with the rotation axis and R is the radius of the circles. FIG. 4c illustrates a mask M for the bottom circular scan 22, which is just the inverse of the mask for the upper scan.

As discussed above, in medical x-ray scanners, the source and the area detector are always fixed relative to each other, with the source projecting onto the center of the detector. In such a framework, when the data masks illustrated in FIG. 4 are used to filter the cone beam data, only one half of the detector area is essentially utilized in capturing data at each of the circular scans, while the other half of the detector is not utilized at all. For instance, as shown in FIG. 4b, the cone beam data in the upper half of the detector, i.e., v greater than 0, is filtered out via the mask and consequently, that portion of the detector is essentially not used for capturing data during the circular scan. Accordingly, an improved image reconstruction method that would enable efficient utilization of the detector area is highly desirable.

The present invention is directed to a method for cone beam region of interest imaging of long objects with an area detector employing a series of circular scans, wherein the detector area is essentially fully utilized for all source positions. In one aspect of the invention, a method for imaging a region of interest (ROI) of an object uses cone beam computed tomography, wherein a cone beam source is fixed with respect to a two dimensional detector. The method comprises performing a first and second circular scan along a first and second extent, respectively, of a ROI within an object being imaged. The cone beam source and detector are fixed such that during the first and second circular scans, one half of the detector area is used to acquire cone beam projection data corresponding to the ROI and a second half of the detector area is used to acquire cone beam projection data corresponding to a first and a second extended portion of the object, which extend beyond the first and second extents, respectively, of the ROI. A line scan is taken along the object connecting the first and second circular scans. The ROI is reconstructed by selectively combining the cone beam projection data corresponding to the ROI acquired from the first and second circular scans, and the first and second extended portions of the object are reconstructed using the cone beam projection data corresponding to the extended portions acquired from the first and second circular scans and the cone beam projection data acquired from the line scan. An extended ROI is reconstructed by combining the reconstructed ROI and the reconstructed first and second extended portions. These and other objects, features and advantages of the present invention will be described or become apparent from the following detailed description of preferred embodiments, which is to be read in connection with the accompanying drawings.
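As a rough illustration of how the top-scan mask boundary given above can be evaluated in software, the sketch below checks whether a detector pixel (u, v) lies inside the mask. The geometry values H and R, the sample detector coordinates, and the function names are assumptions chosen only for this example; they do not come from the patents discussed here.

#include <cstdio>

// Sketch: evaluate the top-scan data-combination mask described above.
// A detector pixel (u, v) is kept when it lies between the top boundary
// v = 0 and the bottom boundary v = -H(1 + u^2/R^2).
bool insideTopScanMask(double u, double v, double H, double R) {
    double bottom = -H * (1.0 + (u * u) / (R * R));
    return v <= 0.0 && v >= bottom;
}

int main() {
    const double H = 50.0, R = 200.0;            // assumed geometry (e.g. in mm)
    const double u[] = {-100.0, 0.0, 100.0};     // sample detector columns
    for (double ui : u)
        std::printf("u = %6.1f  bottom boundary v = %8.3f\n",
                    ui, -H * (1.0 + ui * ui / (R * R)));
    std::printf("keep pixel (100, -40)? %s\n",
                insideTopScanMask(100.0, -40.0, H, R) ? "yes" : "no");
    return 0;
}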
The invention generally relates to controlling the actual fuel delivered to individual combustion chambers and, more particularly, to the individual control of combustion chamber air/fuel ratios.

Feedback control systems are known for controlling the average air/fuel ratio of the engine in response to a single oxygen sensor coupled to the engine exhaust manifold. More specifically, open loop control is first established by simultaneously varying the pulse width of all fuel injector drive signals the same amount in relation to a measurement of airflow inducted into the engine. Feedback control is then established by further adjusting all the drive signals simultaneously by the same amount in response to the exhaust gas oxygen sensor, thereby achieving a desired average air/fuel ratio. A problem with this approach is that the air/fuel ratio is an average of the individual air/fuel ratios of each combustion chamber. A variation in air/fuel ratios among the combustion chambers is most likely. For example, each fuel injector may actually deliver a different quantity of fuel when actuated by the identical drive signal due to such factors as manufacturing tolerances, component wear, and clogging. Even though known feedback control systems may achieve the desired average air/fuel ratio, the variations in air/fuel ratios among combustion chambers may result in less than optimal power, driveability, and emission control.

An approach to controlling air/fuel ratios of the individual combustion chambers is disclosed in U.S. Pat. No. 4,483,300 issued to Hosaka et al. In simplified terms, fluctuations in the exhaust gas sensor signal are examined to detect the cylinder-to-cylinder distribution of the air/fuel ratio. A disadvantage of this approach is that a very fast exhaust gas oxygen sensor is required to detect variations in the exhaust output of each cylinder. A further disadvantage is that because the exhaust output of each cylinder is mixed in an exhaust manifold, the signal-to-noise ratio with respect to each cylinder is very low, requiring complex signal processing techniques. Another disadvantage of this approach is the complexity of the computations and the microprocessor capability required. Since a typical engine microprocessor must control numerous engine functions, the memory available for storing additional program code is severely limited. Accordingly, the approach disclosed by Hosaka et al may not be suitable for a large number of automobile applications.
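The control structure criticized in this background can be summarized in a small sketch: one open-loop pulse width derived from measured airflow, corrected by a single feedback trim that is applied identically to every injector, so only the average air/fuel ratio is regulated and cylinder-to-cylinder spread is left uncorrected. All names, gains, and the four-cylinder layout below are assumptions made only for illustration; none of it is taken from the cited patents or any production engine controller.

#include <array>

// Sketch of the prior-art control scheme described above (assumed names/gains).
struct AverageFuelController {
    // Open loop: a base pulse width scheduled from inducted airflow.
    double basePulseFromAirflow(double airflow) const {
        return kOpenLoopGain * airflow;
    }
    // Closed loop: one correction from the exhaust oxygen sensor error,
    // applied by the same amount to all injector drive signals.
    std::array<double, 4> injectorPulses(double airflow, double o2Error) {
        trim += kFeedbackGain * o2Error;                  // integral-style trim
        std::array<double, 4> pw{};
        for (double &p : pw)
            p = basePulseFromAirflow(airflow) * (1.0 + trim);  // identical for every cylinder
        return pw;
    }
    double trim = 0.0;
    static constexpr double kOpenLoopGain = 0.05;   // assumed scaling
    static constexpr double kFeedbackGain = 0.01;   // assumed loop gain
};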
Superior sagittal sinus thrombus secondary to occult meningioma: a case report. A 58-year-old man presented with intermittent white flashes in both eyes during the past year. Six years earlier, the patient received a diagnosis of superior sagittal sinus (SSS) thrombosis on the basis of elevated intracranial pressure and imaging findings, for example, computed tomography, magnetic resonance imaging, magnetic resonance venography, and cerebral angiography, and was treated with urokinase and anticoagulants. Symptoms resolved and the patient remained well until March 2009, when intermittent white flashes started to occur in both eyes. The patient did not seek medical help until 1 year later (March 2010). Cerebral angiography (digital subtraction arteriography) revealed SSS thrombosis and a network of collateral venous circulation. Computed tomography and magnetic resonance imaging demonstrated a mass in the parietooccipital lobe that surrounded the SSS. Pathologic examination of the specimen removed during surgery revealed meningioma.
Metabolic evidence that deficiencies of vitamin B-12 (cobalamin), folate, and vitamin B-6 occur commonly in elderly people. Measurements of the serum concentrations of the metabolites homocysteine, cystathionine, methylmalonic acid, and 2-methylcitric acid, which accumulate when vitamin B-12-, folate-, and vitamin B-6-dependent enzymatic reactions are impaired, should provide a better indication of intracellular deficiency of these vitamins. We measured the serum concentration of these vitamins and the four metabolites in 99 healthy young people, 64 healthy elderly subjects, and 286 elderly hospitalized patients. A low serum vitamin B-12 concentration was found in 6% and 5%, low folate in 5% and 19%, and low vitamin B-6 in 9% and 51%, and one or more metabolites were elevated in 63% and 83% of healthy elderly subjects and elderly hospitalized patients, respectively. These results strongly suggest that the prevalence of tissue deficiencies of vitamin B-12, folate, and vitamin B-6, as demonstrated by the elevated metabolite concentrations, is substantially higher than that estimated by measuring concentrations of the vitamins.
package chefmod.cards.attacks;

import chefmod.ChefMod;
import chefmod.actions.FreezeAction;
import chefmod.cards.AbstractChefCard;
import com.megacrit.cardcrawl.actions.AbstractGameAction;
import com.megacrit.cardcrawl.actions.common.DiscardSpecificCardAction;
import com.megacrit.cardcrawl.actions.common.ExhaustSpecificCardAction;
import com.megacrit.cardcrawl.characters.AbstractPlayer;
import com.megacrit.cardcrawl.dungeons.AbstractDungeon;
import com.megacrit.cardcrawl.monsters.AbstractMonster;
import com.megacrit.cardcrawl.powers.EquilibriumPower;
import com.megacrit.cardcrawl.relics.RunicPyramid;

import static chefmod.ChefMod.makeID;

public class HotSoup extends AbstractChefCard {
    public static String ID = makeID(HotSoup.class.getSimpleName());

    public HotSoup() {
        super(ID,
                2,
                CardType.ATTACK,
                CardRarity.COMMON,
                CardTarget.ENEMY
        );
        baseDamage = damage = 17;
        upgradeDamageBy = 4;
        damages = true;
    }

    @Override
    public void use(AbstractPlayer p, AbstractMonster m) {
        dealDamage(m, AbstractGameAction.AttackEffect.FIRE);
    }

    @Override
    public void triggerOnEndOfPlayerTurn() {
        if (isEthereal) {
            addToTop(new ExhaustSpecificCardAction(this, AbstractDungeon.player.hand));
        } else {
            ChefMod.cardsToFreeze.add(this);
            if (AbstractDungeon.player.hasRelic(RunicPyramid.ID)
                    || AbstractDungeon.player.hasPower(EquilibriumPower.POWER_ID)
                    || retain
                    || selfRetain) {
                addToTop(new DiscardSpecificCardAction(this, AbstractDungeon.player.hand));
            }
        }
    }

    @Override
    public void onRetained() {
        addToBot(new FreezeAction(this));
    }
}
Generally, in many occasions, an injectable liquid should be continuously injected to a patient. At this time, a liquid injection apparatus is used, and the liquid injection apparatus includes a filter device for removing impurities and air contained in the injectable liquid. FIG. 12 is a perspective view showing an example of a conventional liquid injection apparatus, which may be referred to in U.S. Pat. No. 4,781,698. A liquid injection apparatus 1000 includes an injectable liquid storing means 102 such as a bottle or pack where an injectable liquid is stored, and a flexible extension tube 104 extending from the injectable liquid storing means 102 to transport the injectable liquid supplied from the injectable liquid storing means 102. A dropping unit 106 partially inserted into the injectable liquid storing means 102 to drop the injectable liquid is integrally provided at an upstream end of the extension tube 104, and a control means 108 for controlling the amount of the supplied injectable liquid by blocking the flow of the injectable liquid flowing along the extension tube 104 is provided in the middle portion of the extension tube 104. In addition, there is provided a filter device 100 for filtering the supplied injectable liquid to remove impurities and air in the supplied injectable liquid, and a distal end connector 110 to which an injection needle N inserted into the body of a patient is detachably coupled is provided at the rear end of the corresponding extension tube 104. Here, the extension tube 104 is provided to have the same inner diameter over the entire moving path of the injectable liquid, and instead of the injection needle N, a catheter well known in the art may also be coupled to the distal end connector 110 of the extension tube 104. A cap (not shown) is coupled to the distal end connector 110 so as to prevent the injectable liquid from being contaminated before the liquid injection apparatus 1000 is used. To use the liquid injection apparatus 1000, the corresponding cap is removed and wasted from the distal end connector, and the injection needle N or catheter is coupled thereto. In addition, if a user (a nurse or the like) inserts the injection needle N into the body of a patient and then completely controls the amount of injected liquid by using the control means 108, the injectable liquid stored in the injectable liquid storing means 102 is discharged and supplied by small quantity, flows along the extension tube 104, and then, is injected into the body of the patient through the injection needle N coupled to the distal end connector 110. Meanwhile, if impurities (for example, glass fragments generated in opening a glass ampoule) or air is injected together with an injectable liquid into the body of a patient, such impurities or air may damage the blood vessel or brain of the patient and cause a fatal danger. Therefore, in order to prevent this problem, the filter device 100 described above is provided on the path of the extension tube 104, and the capillary tube is provided in a distal end of the extension tube 104 connected to the filter device 100. The capillary tube installed in the distal end of the extension tube 104 prevents the medicine causing a fatal damage to the organ of the patent when injected fast, such as an anticancer medicine or antibiotics, from being excessively injected. An example of the filter device 100 of the above liquid injection apparatus 1000 is shown in FIG. 13. Referring to the sectional view of FIG. 
13, the filter device 100 includes filter receiving plates 100a-1 and 100a-2 made of synthetic resin provided at an outer side thereof to be spaced apart from each other in parallel to define a predetermined space therein, and both ends of the filter receiving plates 100a-1 and 100a-2 are converged and connected to the extension tube 104 so that the internal space communicates with the extension tube. Two thin sheet-type filters 100b-1 and 100b-2 in parallel with each other are housed in the internal space defined by both the filter receiving plates 100a-1 and 100a-2. The liquid permeable filter 100b-1 is provided at the side where an injectable liquid flows in, and the gas permeable filter 100b-2 is provided at the next, wherein the filters 100b-1 and 100b-2 are made of porous synthetic resin material with a predetermined mesh. In addition, in the filter receiving plate 100a-2 in which the gas permeable filter 100b-2 is provided, an air discharge hole 100a-2-1 is formed at a corresponding location. Therefore, in the conventional filter device 100 as shown in FIG. 13, if the injectable liquid supplied from the injectable liquid storing means 102 flows into the filter device 100, impurities such as glass fragments contained in the corresponding injectable liquid are filtered off while the injectable liquid passes through the liquid permeable filter 100b-1, and then, if the corresponding injectable liquid passes through the gas permeable filter 100b-2, the air is discharged to the outside through the air discharge hole 100a-2-1 after the injectable liquid passes through the gas permeable filter 100b-2. However, the conventional filter device has a problem in that a flow rate of the injectable liquid is lowered since two filters 100b-1 and 100b-2 blocks the flow path of the injectable liquid. In addition, the conventional filter device has a structural limit in that the air in the injectable liquid is not entirely discharged through the air discharge hole 100a-2-1 after passing through the gas permeable filter 100b-2, but partially flows into the extension tube 104. Moreover, the capillary tube installed in the distal end of the extension tube 104 should be sealably connected to the filter device 100, and at this time impurities of adhesive components flow into the filter device and/or the capillary tube. In order to overcome the above problems, the present inventors have suggested new filter devices and a liquid injection apparatus having the same. These new devices are configured to effectively remove air or solid impurities such as glass fragments in the medicine. Examples of such new devices are disclosed in Korean Patent Application Nos. 10-2006-0033027 and 10-2007-0051334. These filter devices are excellent, but they can be improved further in common with other excellent technologies. Particularly, in the technical field of the present invention, there are unceasing demands on the improvement of a filter device capable of effectively removing air and solid impurities such as glass fragments in a medicine and allowing sealable connection with a capillary in the extension tube without adhesive while minimizing a change in flow rate of the medicine.
Chemical trend of exchange couplings in diluted magnetic II-VI semiconductors We have calculated the chemical trend of magnetic exchange parameters ($J_{dd}$, $N \alpha$, and $N \beta$) of Zn-based II-VI semiconductors ZnA (A=O, S, Se, and Te) doped with Co or Mn. We show that a proper treatment of electron correlations by the LSDA+$U$ method leads to good agreement between experimental and theoretical values of the nearest-neighbor exchange coupling $J_{dd}$ between localized 3$d$ spins in contrast to the LSDA method. The exchange couplings between localized spins and doped electrons in the conduction band $N \alpha$ are in good agreement with experiment as well. But the values for $N \beta$ (coupling to doped holes in the valence band) indicate a cross-over from weak coupling (for A=Te and Se) to strong coupling (for A=O) and a localized hole state in ZnO:Mn. That hole localization explains the apparent discrepancy between photoemission and magneto-optical data for ZnO:Mn. I. INTRODUCTION After the seminal discovery of ferromagnetism in GaAs:Mn 1 with a critical temperature T c as high as 110 K there is worldwide a renewed interest in diluted magnetic semiconductors (DMS). Recently, the Curie temperature in GaAs:Mn could be pushed to values of about 180 K by a careful control of the annealing conditions during the growth process. 2 There is a great search activity to look for alternative materials, especially in the class of II-VI semiconductors (SC). Ferromagnetism (FM) in diluted II-VI SC is known for a long time with up to now low T c values, however. 3 They also serve as model materials since they allow to control the magnetic ions and the doped charge carriers independently. In such a way it was possible to demonstrate the carrier-induced mechanism of the ferromagnetic state in Pb-doped SnTe:Mn 4 or in p-doped ZnTe:Mn. 5 The DMS combine ferromagnetism with the conductivity properties of semiconductors. Therefore, they are ideal materials for applications in spintronics where not only the electron charge but also the spin of the charge carrier is used for information processing. For instance, they allow to resolve the conductivity mismatch problem which hinders a high polarizability of injected electrons in a ferromagnetic metal/semiconductor junction. 6 The ferromagnetism in the traditionally known DMS arises due to Zener's p-d exchange mechanism. 7 The 3d transition metal impurities lead to localized spins S i. Hole doping into the valence band (either by the 3d transition metals itself or by other acceptor impurities) provides charge carries whose spins interact with the 3d spins. This local p-d exchange coupling J v pd = N leads to a parallel arrangements of the magnetic moments since a ferromagnetic state allows a higher mobility of the doped holes. For a high doping level the material becomes more metallic and the mechanism changes to a RKKY-like interaction. From this argumentation follows immediately that the crucial parameter to increase T c is the J v pd coupling. Indeed, a simple theory of Zener's p-d exchange mechanism 8 gives T c ∝ (J v pd ) 2 x h where x h is the hole doping level. It can be expected from general grounds that a decreasing anion-cation distance leads to an increase of the p-d tight-binding hopping parameter t pd, and consequently to an increase of J v pd. 
That reasoning lead Dietl et al 8 to the proposal of room temperature ferromagnetism in Mn-doped ZnO or GaN, respectively, which created a tremendous activity and numerous reports on room temperature FM in II-VI DMS or similar materials. 9,10,11 However, there are serious doubts whether the reported room temperature ferromagnetism belongs really to the same class of ferromagnetism as that one observed in GaAs:Mn or ZnTe:Mn which is based on Zener's p-d exchange mechanism. For instance, in ZnO:Co ferromagnetism was reported in samples produced by laser ablation, 10,11 or by the sol-gel method, 9 whereas other samples fabricated by precursor deposition, 12 or molecular beam epitaxy (MBE) 13,14 showed no signs of ferromagnetism and antiferromagnetic couplings between nearest neighbor 3d spins. It is highly probable that the observed ferromagnetic effects in ZnO:Co are due to uncompensated spins at the surface of Co-rich antiferromagnetic nanoclusters. 15 The proposal of Dietl et al 8 was based on simple model calculations and qualitative arguments. There is a real need for a parameter free ab-initio study of the relevant exchange parameters in II-VI semiconductors to put the expected chemical tendency on a firm basis. Such a calculation of the nearest neighbor couplings of local spins J dd and the p-d exchange couplings J v pd and J c pd with valence and conduction bands, respectively, is presented here. We considered the series of Co-and Mn-doped ZnA with the anions A=Te, Se, S and O. To achieve our goal we had to solve two theoretical problems. First of all, the local spin density approximation (LSDA) is not sufficient. It leads to wrong predictions of FM in ZnO:Co even without additional hole doping, 16 to too large values of |J dd | for ZnO:Mn, and to the wrong (FM) sign of one of the two nearest neighbor exchange couplings in wurtzite ZnO:Co. 17 It was shown that this deficiency of LSDA can be repaired by taking into account the strong Coulomb correlation in the 3d shell by the LSDA+U method. To choose the U values we have to take into account that they decrease in the series from O to Te due to an increase of screening effects. The values of J dd are very well known experimentally in this series. Therefore they can be used to check the chosen U values. We will show below that for reasonable values of U we obtain J dd in good agreement with experimental results and we may explain the chemical tendency. The second theoretical problem concerns the p-d exchange coupling between the localized spins and the holes in the valence band J v pd. This coupling leads to the giant Zeemann effect 18 and it is seen in our calculations by a band-offset ∆E v between spin up and spin down of the valence band. For small values of J v pd (which means also small values of t pd ) both splittings, the experimental and the theoretical one, are proportional to the magnetic impurity concentration x. In that weak coupling regime the p-d coupling can be simply calculated by using the proportionality between splitting and x. We will show, however, that there are more and more deviations from ∆E v ∝ x if we go from ZnTe to ZnO. The exchange values obtained in that manner seem to depend on the concentration of magnetic impurities. We solve that problem by a fit to the Wigner-Seitz approach of Benoit a la Guillaume et al. 19 Our results prove that we reach the strong coupling limit for ZnO. 
As we will show below, in that case the impurity potential is so strong that it can bind a hole for ZnO:Mn, whereas ZnO:Co is close to the localized limit. Our ab-initio results strengthen the recent model calculations in Ref. 20. That work aimed to explain the tremendous difference between the experimental J_pd^v values obtained from photoemission and from magneto-optics, especially in ZnO and GaN. 21 It was argued that this difference arises due to state localization, which is confirmed by our ab-initio calculations below. But we will also show that our results for |Nβ| are much smaller than those deduced from photoemission.

The organization of our paper is as follows. After presenting the super-cell method in Sec. II, we discuss the nearest neighbor exchange coupling in Sec. III. That fixes the U values unambiguously. In Sec. IV we present our results for J_pd^v = Nβ and J_pd^c = Nα. Finally, in Sec. V we discuss the arguments in favor of a localized state in ZnO:Mn.

II. SUPER-CELL CALCULATIONS

We used super-cell calculations to determine the exchange couplings J_dd, Nα, and Nβ. Since we are mainly interested in the chemical tendency within the II-VI series, we restrict our study to the zinc-blende structure. All compounds of the series exist in that modification, even ZnO as an epitaxial layer. To calculate J_dd we used super-cells of the form T_2Zn_6A_8 with the transition metals T=Co or Mn and with the anions A=O, S, Se, and Te. In those super-cells the magnetic ions build chains. The exchange constants are then determined by comparing the total energies of ferro- and antiferromagnetic arrangements. We have checked that the influence of finite size effects is negligible (not larger than 6 per cent for J_dd) by performing some calculations with T_2Zn_14A_16 super-cells. For J_pd^v we used super-cells with three different concentrations of magnetic ions, x = 1/4, x = 1/8, and x = 1/32, i.e. TZn_3A_4, TZn_7A_8 and TZn_31A_32. As will be explained below, these numerical results have to be fitted with the Wigner-Seitz approach to obtain J_pd^v. In all calculations we used the experimental lattice constants a = 6.101, 5.668, and 5.410 Å for ZnTe, ZnSe, and ZnS, respectively. 25 The super-cell calculations were performed using the full-potential local-orbital (FPLO) band structure scheme. 28 In the FPLO method (version FPLO5) a minimum basis approach with optimized local orbitals is employed, which allows for accurate and efficient total energy calculations. For the present calculations we used the following basis set: Zn, Co, Mn: 3s3p:4s4p3d, O: 2s2p;3d, S: 3s3p3d, Se: 4s4p3d, and Te: 5s5p4d. The site-centered potentials and densities were expanded in spherical harmonic contributions up to l_max = 12. The exchange and correlation potential was treated in two different ways. First, the local spin-density approximation (LSDA) was used in the parametrization of Perdew and Wang. 29 However, as will be shown below in more detail, this approximation has severe deficiencies in the present case. The energetic positions of the Co (Mn) 3d states with respect to the valence band are given incorrectly in the LSDA calculation. They are expected to be much lower in energy, and this correlation effect was taken into account by using the FPLO implementation of the LSDA+U method in the atomic limit scheme. 30,31 The convergence of the total energies with respect to the k-space integrations was checked for each of the super-cells independently.
The calculations for each cell were first performed within the LSDA approximation using basis optimization. The LSDA+U calculations were then made starting from the LSDA optimized basis, but with no basis optimization in the self-consistency cycle in order to obtain convergence. The Slater parameters F_2 and F_4 for Mn and Co in the LSDA+U calculations were chosen close to atomic values, namely F_2 = 7.4 eV and F_4 = 4.6 eV (corresponding to the Hund exchange coupling J_H = 0.86 eV) for Mn, and F_2 = 7.9 eV and F_4 = 5.0 eV (J_H = 0.92 eV) for Co. The Slater parameter F_0 = U, however, is much more screened, and its influence has been investigated in more detail (see below).

III. D-D EXCHANGE COUPLINGS

In this Section we determine the exchange couplings between two localized magnetic ions. We consider two nearest neighbor impurities, each carrying a local spin S_i, whose interaction is described by a Heisenberg Hamiltonian for a localized pair of spins. The corresponding total energies per magnetic ion for ferromagnetic (FM) and antiferromagnetic (AFM) arrangements of the two spins, E_FM and E_AFM, lead to an energy difference between the FM and AFM states that is proportional to J_dd; the explicit expression involves S_T, the total spin of two parallel spins S, i.e. S_T = 3 or 5 for Co or Mn. That energy difference can be compared with the corresponding energy differences of isolated pairs in the large super-cells. Those super-cells in which the magnetic ions form chains are different, however: each magnetic ion then has two nearest neighbor magnetic ions, which approximately doubles the previous energy difference. The exact energy difference between FM and AFM states of a Heisenberg chain is slightly different, but that is unimportant for our present argument. The calculated exchange constants J_dd show a strong variation with U, as illustrated in Fig. 1. The experimental values of J_dd are known with great accuracy from magnetization step measurements or inelastic neutron scattering (see Tables I, II). The comparison of experimental and theoretical values shows that the LSDA method strongly overestimates the exchange couplings. In our method the Hubbard correlation U has to be chosen between 4 and 6 eV to obtain the correct exchange couplings. The precise value of U also shows a chemical tendency. That was revealed in Ref. 32 and can be explained since the compounds ZnA become less and less ionic in going from A=O to A=Te. The decrease of ionicity can be measured by a decrease of the charge transfer towards the magnetic ion in the series (Fig. 2).

IV. P-D EXCHANGE COUPLINGS

The localized magnetic moments S_i provided by the magnetic ions Co2+ or Mn2+ interact with the spin s of the doped holes. In the continuum approximation this interaction can be parametrized as a contact exchange between each localized spin S_i and the carrier spin s, with the magnetic impurities placed at R_i. A similar interaction exists with the spin of doped electrons, and its strength is usually denoted by the parameter α. If we transform the Hamiltonian into a lattice model, the interaction becomes a local exchange on the cation sites. The mean-field approach works very well for Nα, which has small values in all cases. The reason is the small coupling between the conduction band, which is built from Zn 4s-4p hybridized states, and the impurity states. The calculated values are also in excellent agreement with the available experimental data (see Tables III and IV). The situation is different for Nβ. The valence band is built from the anion p-orbitals, which generally have a large overlap with the impurity states. Therefore, Nβ is much more important than Nα.
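Because the explicit pair-energy expression did not survive extraction above, the following sketch gives one common broken-symmetry convention for reading off J_dd from the two total energies; it assumes classical (Ising-like) spins on a single nearest-neighbor bond and is not necessarily the exact S_T-dependent expression used in the paper.

\[
  \Delta E \;=\; E_{\mathrm{FM}} - E_{\mathrm{AFM}} \;=\; 2\,J_{dd}\,S^{2}
  \qquad\Longrightarrow\qquad
  J_{dd} \;\simeq\; \frac{E_{\mathrm{FM}} - E_{\mathrm{AFM}}}{2\,S^{2}},
\]

with S = 3/2 for Co and S = 5/2 for Mn; for the chain-like super-cells described above, the right-hand side would additionally be divided by the two nearest-neighbor bonds per magnetic ion, consistent with the approximate doubling of the energy difference noted in the text.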
And this tendency is increased when the lattice constant diminishes in going from Te to O. As a consequence, the mean-field description, and the proportionality between band splitting and impurity concentration on which it relies, break down; the corresponding values are collected in Tables III and IV. To resolve the deviations from the mean-field behavior, a Wigner-Seitz approach was developed. 19 We will use it to calculate Nβ more accurately (see also Ref. 43 for GaAs:Mn). In that theory, the valence band is described in the effective mass approximation with a spin-dependent impurity potential. Replacing the spin operator s by s_z in the Hamiltonian for one impurity, we obtain a spin-polarized scattering potential U_σ = W − σ S J_pd^v/2, where σ = +1 (−1) for spin ↑ (↓) and the local spin is S = 5/2 and 3/2 for Mn and Co, respectively. The muffin-tin radius b of the scattering potential was fixed such that the corresponding spheres around the cations completely fill the space of the solid. The deviation from the mean-field result is measured by the ratio |E_σ/E^MF| of the lowest eigenvalue to the mean-field value, and it is controlled by a dimensionless fitting parameter that involves U_c, the critical potential value for the creation of a bound state. The resulting couplings are given in Tables III and IV.

V. LOCALIZED STATE

The Wigner-Seitz fit for ZnO:Mn results in a dimensionless spin-down coupling parameter of −1.12, corresponding to a localized hole state. That is also directly visible in the density of states. The theoretical results for the couplings to the conduction (Nα) and valence (Nβ) bands of ZnA:Mn (A=O, S, Se, and Te) were obtained by the LSDA+U method and analyzed within the mean-field approximation (Nα^MF and Nβ^MF) and the Wigner-Seitz approach 19 (Nβ^WS); the dimensionless coupling parameters of the Wigner-Seitz approach for both spin directions are also given (Table III). Experimentally, the ferromagnetic sign of Nβ^app was recently unambiguously demonstrated for GaN:Fe, which is, however, not a II-VI SC. 21 The difference between Nβ and Nβ^app can also be calculated in the Wigner-Seitz or in other approaches. 20 In contrast to ZnO:Mn we find no localization in ZnO:Co, but a situation quite close to it. In the corresponding DOS (not shown) the split band has merged with the valence band. It was already noted that in the LSDA calculations all 3d states are much higher in energy than in LSDA+U (which, however, contradicts the photoemission measurements and is an artefact of LSDA). On the other hand, the N values are not very much changed by the U parameter.
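For reference, the mean-field values Nβ^MF quoted in Tables III and IV are conventionally extracted from the computed spin splitting of the valence-band edge of a fully polarized super-cell. The relation below is a sketch of that standard estimate, assuming full impurity-spin polarization (⟨S_z⟩ = S); the exact normalization used in the paper may differ.

\[
  \Delta E_v \;=\; x\,N\beta\,\langle S_z\rangle
  \qquad\Longrightarrow\qquad
  N\beta^{\mathrm{MF}} \;=\; \frac{\Delta E_v}{x\,S},
\]

where ΔE_v is the spin splitting (band offset) of the valence-band maximum and x the magnetic-ion concentration; the Wigner-Seitz fit then corrects this estimate in the strong-coupling regime.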
However, in the strong coupling case, we would like to argue that our calculated Nβ values correspond neither to the published ones from magneto-optics (see discussion above) nor to those from photoemission. The photoemission values of −2.7 eV for ZnO:Mn and −3.4 eV for ZnO:Co were obtained indirectly. The experimental core-level photoemission spectra 22 were fitted by the configuration interaction (CI) method to an MnA_4 cluster (with the anions A=O, S, Se, and Te), which fixes the hybridization parameter t_pd, the Hubbard correlation in the d shell U_eff, and the effective charge transfer energy between p- and d-orbitals Δ_eff (for more details see Ref. 22). The parameters obtained are repeated in Table V and allow Nβ to be determined by means of the perturbative (Larson) formula. The Nβ value of −3.4 eV for ZnO:Co was obtained by an identical procedure. 24 In the same perturbation approach we may, however, also calculate the nearest-neighbor exchange J_dd. 52 The calculated values are also given in Table V and show large discrepancies with the experimental results (see Table I above), especially in the strong coupling case of ZnO:Mn. Similar discrepancies can be observed by determining the hybridization parameter t_pd = (pdσ)/3 − 2√3(pdπ)/9 from band structure calculations. 53 These difficulties prove that the perturbation formulas have a restricted applicability and have to be treated with care, especially for strong coupling. Being close in spirit to Ref. 20, our results nevertheless deviate quite considerably from the numerical values for Nβ which were assumed there. We found a much smaller coupling, and we believe that the discrepancy with the published photoemission (PE) values (which are about two times larger than our results) results from the unjustified use of the perturbative Larson formula in analyzing the PE data. As a consequence, the magnitude of our dimensionless spin-down coupling parameter, −1.12 for ZnO:Mn, is much smaller than that estimated in Ref. 20 (between −2.0 and −3.3). It is highly probable that the reduced value of |Nβ| will also reduce the proposed ferromagnetic Curie temperature in ZnO:Co and ZnO:Mn, provided that the doping level is sufficiently high to delocalize the hole states. The large discrepancies between different experimental and theoretical approaches for Nβ in the strong coupling regime also point to the limitations of the oversimplified model Hamiltonian in that limit. The p-d hybridization t_pd can then no longer be regarded as a perturbation, and the approximation of an infinite valence band width will probably lead to wrong conclusions. It is highly questionable that the strong coupling case can still be analyzed in such a manner. We thank Anatole Stepanov, Sergei Ryabchenko, and Roman Kuzian for useful discussions. Financial support from the "Dnipro" (14182XB) program is gratefully acknowledged.
LOS ANGELES — The Heat have moved on to life without Justise Winslow, but that doesn’t mean they’ve forgotten about him. The Heat have applied to the NBA for a $1.3 million disabled player exception with Winslow expected to miss the rest of the season after undergoing shoulder surgery, general manager Andy Elisburg confirmed Friday morning after the team’s shootaround at the Windward School. But just before Elisburg confirmed that, the Heat contacted Winslow as a sign of support for the 20-year-old. “We just Facetimed with [Wnslow] after the shootaround,” coach Erik Spoelstra said. “He’s already on the rebound right now.” Winslow is expected to be in a sling for six months before he can begin rehab after undergoing successful surgery Thursday to repair a torn labrum in his right shoulder. What’s Spoelstra’s advice for Winslow? “Just continue to be patient,” Spoelstra said. “He’s a very mature young man. He understands the big picture. It doesn’t make it any less painful being out the rest of the season. Guys want to be out there and play. He put in a lot of time in the offseason to prepare for this year and there’s a lot of things in this game that you can’t control. But you can control your mindset and your approach with the hand you’ve been dealt from here on out. So right now it’s just about recovery for the next six weeks and rest, and then at that point we’ll be able to start the process of building his body back.” Dr. Harlan Selesnick, who performed Winslow’s surgery Thursday at Miami Orthopedics & Sports Medicine Institute at Doctors Hospital, flew to Los Angeles to give the team the medical report. As far as getting a disabled player exception for Winslow, Miami will be granted the exception if an NBA-designated physician determines that Winslow is “substantially more likely than not” to be unable to play through June 15. The exception is worth 50 percent of the disabled player’s salary, which in this case is $1.3 million of Winslow’s $2.6 million salary this season. The disabled player exception can be used to sign a free agent on a salary up to $1.3 million for the rest of the season, to trade for a player in the final season of his contract worth $1.3 million or less, or to claim a player on waivers who is in the final season of his contract worth $1.3 million or less. The exception expires March 10 and it can’t be combined with any other exception or cap space. The exception gives the Heat added financial flexibility, but they could end up not using it. With the Heat not eligible to add an additional roster spot under the hardship rule, they would have to open up a roster spot to use the exception. In Josh McRoberts’ case, Miami could apply for a $2.9 million disabled player exception for him. But with the possibility of McRoberts (out indefinitely with a stress fracture in his left foot) returning this season not being ruled out, it’s probably less likely that a physician would determine that he’s unable to play through June 15. The deadline to apply for a disabled player exception is Jan. 15. [Chris Bosh has reached ‘some interesting conclusions’ in time away from basketball] [Miami Heat guard Tyler Johnson coming through during crunch time] [Want more Heat news sent directly to your Facebook feed? Make sure to like our Heat Facebook page]
// main.go (github.com/iarkhanhelsky/check_diff)
package main

import (
	"github.com/iarkhanhelsky/check_diff/pkg/app"
	"github.com/iarkhanhelsky/check_diff/pkg/app/command"
	"os"
)

// These values are populated by go-releaser
var (
	version = "dev"
	commit  = "none"
	date    = "unknown"
)

func main() {
	err := app.Main(command.Env{
		Args:      os.Args,
		OutWriter: os.Stdout,
		ErrWriter: os.Stderr,
		Version:   version,
		Commit:    commit,
		Date:      date,
	})
	if err != nil {
		// TODO: Dispatch different types of errors
		panic(err)
	}
}
#include "PlasmaCarbineUpgrade.h"
#include "Templates/SubclassOf.h"

class AItem;
class AFSDPlayerState;

FUpgradeValues UPlasmaCarbineUpgrade::GetUpgradedValue(TSubclassOf<AItem> Item, AFSDPlayerState* Player, EPlasmaCarbineUpgrades NewUpgradeType) {
    return FUpgradeValues{};
}

UPlasmaCarbineUpgrade::UPlasmaCarbineUpgrade() {
    this->upgradeType = EPlasmaCarbineUpgrades::RateOfFireBoostOnFullShield;
}
#ifndef __SWM320_SPI_H__ #define __SWM320_SPI_H__ typedef struct { uint8_t FrameFormat; //帧格式:SPI_FORMAT_SPI、SPI_FORMAT_TI_SSI uint8_t SampleEdge; //在SPI帧格式下,选择数据采样边沿:SPI_FIRST_EDGE、SPI_SECOND_EDGE uint8_t IdleLevel; //在SPI帧格式下,选择空闲时(无数据传输时)时钟线的电平:SPI_LOW_LEVEL、SPI_HIGH_LEVEL uint8_t WordSize; //字长度, 有效值4-16 uint8_t Master; //1 主机模式 0 从机模式 uint8_t clkDiv; //SPI_CLK = SYS_CLK / clkDiv,有效值:SPI_CLKDIV_4、SPI_CLKDIV_8、... ... 、SPI_CLKDIV_512 uint8_t RXHFullIEn; //接收FIFO半满中断使能 uint8_t TXEmptyIEn; //发送FIFO 空中断使能 uint8_t TXCompleteIEn; //发送FIFO 空且发送移位寄存器空中断使能 } SPI_InitStructure; #define SPI_FORMAT_SPI 0 //Motorola SPI 格式 #define SPI_FORMAT_TI_SSI 1 //TI SSI 格式 #define SPI_FIRST_EDGE 0 //第一个时钟沿开始采样 #define SPI_SECOND_EDGE 1 //第二个时钟沿开始采样 #define SPI_LOW_LEVEL 0 //空闲时时钟线保持低电平 #define SPI_HIGH_LEVEL 1 //空闲时时钟线保持高电平 #define SPI_CLKDIV_4 0 #define SPI_CLKDIV_8 1 #define SPI_CLKDIV_16 2 #define SPI_CLKDIV_32 3 #define SPI_CLKDIV_64 4 #define SPI_CLKDIV_128 5 #define SPI_CLKDIV_256 6 #define SPI_CLKDIV_512 7 void SPI_Init(SPI_TypeDef *SPIx, SPI_InitStructure *initStruct); //SPI初始化 void SPI_Open(SPI_TypeDef *SPIx); //SPI打开,允许收发 void SPI_Close(SPI_TypeDef *SPIx); //SPI关闭,禁止收发 uint32_t SPI_Read(SPI_TypeDef *SPIx); void SPI_Write(SPI_TypeDef *SPIx, uint32_t data); void SPI_WriteWithWait(SPI_TypeDef *SPIx, uint32_t data); uint32_t SPI_ReadWrite(SPI_TypeDef *SPIx, uint32_t data); uint32_t SPI_IsRXEmpty(SPI_TypeDef *SPIx); //接收FIFO是否空,如果不空则可以继续SPI_Read() uint32_t SPI_IsTXFull(SPI_TypeDef *SPIx); //发送FIFO是否满,如果不满则可以继续SPI_Write() uint32_t SPI_IsTXEmpty(SPI_TypeDef *SPIx); //发送FIFO是否空 void SPI_INTRXHalfFullEn(SPI_TypeDef *SPIx); void SPI_INTRXHalfFullDis(SPI_TypeDef *SPIx); void SPI_INTRXHalfFullClr(SPI_TypeDef *SPIx); uint32_t SPI_INTRXHalfFullStat(SPI_TypeDef *SPIx); void SPI_INTRXFullEn(SPI_TypeDef *SPIx); void SPI_INTRXFullDis(SPI_TypeDef *SPIx); void SPI_INTRXFullClr(SPI_TypeDef *SPIx); uint32_t SPI_INTRXFullStat(SPI_TypeDef *SPIx); void SPI_INTRXOverflowEn(SPI_TypeDef *SPIx); void SPI_INTRXOverflowDis(SPI_TypeDef *SPIx); void SPI_INTRXOverflowClr(SPI_TypeDef *SPIx); uint32_t SPI_INTRXOverflowStat(SPI_TypeDef *SPIx); void SPI_INTTXHalfFullEn(SPI_TypeDef *SPIx); void SPI_INTTXHalfFullDis(SPI_TypeDef *SPIx); void SPI_INTTXHalfFullClr(SPI_TypeDef *SPIx); uint32_t SPI_INTTXHalfFullStat(SPI_TypeDef *SPIx); void SPI_INTTXEmptyEn(SPI_TypeDef *SPIx); void SPI_INTTXEmptyDis(SPI_TypeDef *SPIx); void SPI_INTTXEmptyClr(SPI_TypeDef *SPIx); uint32_t SPI_INTTXEmptyStat(SPI_TypeDef *SPIx); void SPI_INTTXCompleteEn(SPI_TypeDef *SPIx); void SPI_INTTXCompleteDis(SPI_TypeDef *SPIx); void SPI_INTTXCompleteClr(SPI_TypeDef *SPIx); uint32_t SPI_INTTXCompleteStat(SPI_TypeDef *SPIx); void SPI_INTTXWordCompleteEn(SPI_TypeDef *SPIx); void SPI_INTTXWordCompleteDis(SPI_TypeDef *SPIx); void SPI_INTTXWordCompleteClr(SPI_TypeDef *SPIx); uint32_t SPI_INTTXWordCompleteStat(SPI_TypeDef *SPIx); #endif //__SWM320_SPI_H__
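The header above only declares the driver API, so a short usage sketch may be helpful. It assumes a peripheral instance named SPI0 and that clock/pin routing is handled elsewhere in the vendor SDK — neither is defined in this header — and it simply configures the port as an 8-bit SPI master (mode 0) and exchanges one byte by polling.

#include <stdint.h>
#include "SWM320_spi.h"   /* assumed filename for the header shown above */

/* Minimal sketch: configure SPI0 as an 8-bit master and exchange one byte.
 * SPI0 and any pin/clock setup are assumed to be provided by the rest of
 * the SDK; they are not declared in the header above. */
void spi_demo(void)
{
    SPI_InitStructure init;

    init.FrameFormat   = SPI_FORMAT_SPI;   /* Motorola SPI framing */
    init.SampleEdge    = SPI_FIRST_EDGE;   /* sample on the first clock edge */
    init.IdleLevel     = SPI_LOW_LEVEL;    /* clock idles low (SPI mode 0) */
    init.WordSize      = 8;                /* word length, valid range 4-16 */
    init.Master        = 1;                /* 1 = master mode */
    init.clkDiv        = SPI_CLKDIV_8;     /* SPI clock = system clock / 8 */
    init.RXHFullIEn    = 0;                /* no interrupts: poll the FIFOs */
    init.TXEmptyIEn    = 0;
    init.TXCompleteIEn = 0;

    SPI_Init(SPI0, &init);
    SPI_Open(SPI0);

    uint32_t received = SPI_ReadWrite(SPI0, 0xA5);  /* full-duplex byte exchange */
    (void)received;

    SPI_Close(SPI0);
}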
The Impact of Information on Students' Beliefs and Attitudes Toward Coyotes Providing information to the public about a species can impact the public's attitudes toward that species. Overall, providing information in any of four categories of information about coyotes positively influenced attitudes toward coyotes using six attitudinal measurements (p <.01). Behavior statements most positively influenced attitudes, followed by images of coyotes, statements about humans and coyotes, and statements about coyote ecology. How well specific pieces of information were received is also discussed.
// Create: Creates a sink that exports trace spans to a destination. The
// export of newly-ingested traces begins immediately, unless the sink's
// `writer_identity` is not permitted to write to the destination. A
// sink can export traces only from the resource owning the sink (the
// 'parent').
func (r *ProjectsTraceSinksService) Create(parent string, tracesink *TraceSink) *ProjectsTraceSinksCreateCall {
	c := &ProjectsTraceSinksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.tracesink = tracesink
	return c
}
And the Heat Goes On: Police Repression and the Modalities of Power

Among security institutions, police occupy a unique position. In addition to specializing in the repression of dissent, police monitor society and enforce order. Yet within research studying state repression, how police institutions are used and deployed to control domestic threats remains under-explored, particularly as it relates to the dual functionality just described. In this study, we develop and test an explanation of police repression accounting for the bifurcation of Mann's two modalities of state power: infrastructural power and despotic power. Infrastructural power allocates police resources to surveil dissidents and preemptively limit dissent's emergence or escalation. Police deploy despotic power through repressive responses to political threats. Empirically, we employ unique data to investigate police repression and the modalities of power in Guatemala. To analyze how shifting the balance between infrastructural and despotic power affects police repression, we isolate damage occurring from an earthquake that exogenously reshaped the landscape of infrastructural power. Results affirm the role of infrastructural power in regulating the despotic power of the state. Where local infrastructure was most affected by the earthquake, the security apparatus lost the capacity to surveil nascent movements and predict their activity, thereby providing an opportunity for dissidents to mobilize and forcing police to (over-)react rather than shut down resistance preemptively. However, the intensity of state violence recedes as the state recovers from the infrastructural damage and regains its control of the local district.
<filename>packages/client-p2p/src/index.ts // Copyright 2017-2018 @polkadot/client-p2p authors & contributors // This software may be modified and distributed under the terms // of the ISC license. See the LICENSE file for details. import { Config } from '@polkadot/client/types'; import { ChainInterface } from '@polkadot/client-chains/types'; import { MessageInterface } from '@polkadot/client-p2p-messages/types'; import { Logger } from '@polkadot/util/types'; import { P2pInterface, PeerInterface, PeersInterface } from './types'; import handlers from './handler'; import EventEmitter from 'eventemitter3'; // import handshake from 'pull-handshake'; import PullPushable from 'pull-pushable'; import pull from 'pull-stream'; import logger from '@polkadot/util/logger'; import promisify from '@polkadot/util/promisify'; import createNode from './create/node'; import defaults from './defaults'; import Peers from './peers'; import Sync from './sync'; type OnMessage = { peer: PeerInterface, message: MessageInterface }; type QueuedPeer = { peer: PeerInterface, isDialled: boolean, nextDial: number }; const DIAL_BACKOFF = 60000; const DIAL_INTERVAL = 15000; const REQUEST_INTERVAL = 15000; const l = logger('p2p'); export default class P2p extends EventEmitter implements P2pInterface { readonly chain: ChainInterface; readonly config: Config; readonly l: Logger; private dialQueue: { [index: string]: QueuedPeer }; private node: LibP2p | undefined; private peers: PeersInterface | undefined; private dialTimer: NodeJS.Timer | null; readonly sync: Sync; constructor (config: Config, chain: ChainInterface) { super(); this.config = config; this.chain = chain; this.l = l; this.sync = new Sync(config, chain); this.dialQueue = {}; this.dialTimer = null; } isStarted (): boolean { return !!this.node; } getNumPeers (): number { return this.peers ? 
this.peers.count() : 0; } async start (): Promise<boolean> { await this.stop(); this.node = await createNode(this.config, this.chain, l); this.peers = new Peers(this.config, this.chain, this.node); this._handleProtocol(this.node, this.peers); this._handlePing(this.node); this._onPeerDiscovery(this.node, this.peers); this._onPeerMessage(this.node, this.peers); await promisify(this.node, this.node.start); l.log(`Started on address=${this.config.p2p.address}, port=${this.config.p2p.port}`); this.emit('started'); this.node._dht.randomWalk.start(); this._requestAny(); return true; } async stop (): Promise<boolean> { if (!this.node) { return false; } if (this.dialTimer !== null) { clearTimeout(this.dialTimer); this.dialTimer = null; } const node = this.node; delete this.node; delete this.peers; node._dht.randomWalk.stop(); await promisify(node, node.stop); l.log('Server stopped'); this.emit('stopped'); return true; } // _announceBlock (hash: Uint8Array, _header: Uint8Array, body: Uint8Array): void { // if (!this.peers) { // return; // } // const header = decodeHeader(_header); // const message = new BlockAnnounce({ // header // }); // this.peers.peers().forEach((peer) => { // if (header.number.gt(peer.bestNumber)) { // peer.send(message); // } // }); // } private _onPeerDiscovery (node: LibP2p, peers: PeersInterface): void { node.on('start', () => this._dialPeers() ); peers.on('discovered', (peer: PeerInterface): void => { this._dialPeers(peer); }); } private _onPeerMessage (node: LibP2p, peers: PeersInterface): void { peers.on('message', ({ peer, message }: OnMessage): void => { const handler = handlers.find(({ type }) => type === message.type ); if (!handler) { l.error(`Unhandled message type=${message.type}`); return; } handler(this, peer, message); }); } private _handleProtocol (node: LibP2p, peers: PeersInterface): void { node.handle( defaults.PROTOCOL_DOT, async (protocol: string, connection: LibP2pConnection): Promise<void> => { try { const peerInfo = await promisify(connection, connection.getPeerInfo); const peer = peers.add(peerInfo); peers.log('protocol', peer); peer.addConnection(connection, true); if (!peer.isWritable()) { this._dialPeers(peer); } } catch (error) { l.error('protocol handling error', error); } } // , (protocol: string, requested: string, callback: (error: null, accept: boolean) => void): void => { // l.debug(() => `matching protocol ${requested}`); // callback(null, requested.indexOf(defaults.PROTOCOL) === 0); // } ); } private _handlePing (node: LibP2p): void { node.handle( defaults.PROTOCOL_PING, async (protocol: string, connection: LibP2pConnection): Promise<void> => { try { const pushable = PullPushable((error) => { l.debug(() => ['ping error', error]); }); pull(pushable, connection); pull( connection, pull.drain( (buffer: Buffer): void => { l.debug(() => ['ping (protocol)']); pushable.push(buffer); }, () => false ) ); } catch (error) { l.error('ping handling error', error); } } ); } private async _pingPeer (peer: PeerInterface): Promise<boolean> { if (!this.node) { return false; } try { // NOTE Only dial here, however the handling of ping are done in the _handlePing function // const connection = await promisify( this.node, this.node.dialProtocol, peer.peerInfo, defaults.PROTOCOL_PING ); // const pushable = PullPushable((error) => { // console.error('pingPeer', error); // }); // pull(pushable, connection); // // FIXME Once uni-directional pings are available network-wide, properly ping, // // don't just pong. 
(However the libp2p-ping floods the network as it stands) // pull( // connection, // pull.drain( // (buffer: Buffer): void => { // l.log(() => ['ping', peer.shortId]); // pushable.push(buffer); // }, // () => false // ) // ); } catch (error) { l.error(`error opening ping with ${peer.shortId}`, error); return false; } return true; } private async _dialPeer (peer: PeerInterface, peers: PeersInterface): Promise<boolean> { if (!this.node) { return false; } l.debug(() => `dialing ${peer.shortId}`); try { const connection = await promisify( this.node, this.node.dialProtocol, peer.peerInfo, defaults.PROTOCOL_DOT ); await this._pingPeer(peer); peer.addConnection(connection, true); peers.log('dialled', peer); return true; } catch (error) { // l.error('dial error', error); } return false; } private _dialPeers (peer?: PeerInterface): void { if (!this.node || !this.node.isStarted()) { return; } if (this.dialTimer !== null) { clearTimeout(this.dialTimer); this.dialTimer = null; } const now = Date.now(); if (peer && !this.dialQueue[peer.id]) { this.dialQueue[peer.id] = { isDialled: false, nextDial: now, peer }; } Object.values(this.dialQueue).forEach( async (item: QueuedPeer): Promise<void> => { if (!this.peers) { return; } else if (item.nextDial > now && !item.peer.isActive()) { item.isDialled = false; } else if (item.isDialled || item.nextDial < now) { return; } item.isDialled = await this._dialPeer(item.peer, this.peers); item.nextDial = now + DIAL_BACKOFF; } ); this.dialTimer = setTimeout(() => { this._dialPeers(); }, DIAL_INTERVAL); } private _requestAny (): void { if (this.peers) { this.peers.peers().forEach((peer) => this.sync.requestBlocks(peer) ); } setTimeout(() => { this._requestAny(); }, REQUEST_INTERVAL); } }
// Target.java (Java-Netty-Udp-Interaction-Center)
package com.shenjinxiang.netty.entity;

import java.net.InetSocketAddress;

public class Target {
    private String udpHost;
    private int udpPort;
    private Sblx sblx;
    private InetSocketAddress address;

    @Override
    public String toString() {
        StringBuffer stringBuffer = new StringBuffer();
        stringBuffer.append("{").append("sblx: " + sblx + ", udpHost: " + udpHost + ", udpPort: " + udpPort + "}");
        return stringBuffer.toString();
    }

    public InetSocketAddress getAddress() {
        return address;
    }

    public void setAddress(InetSocketAddress address) {
        this.address = address;
    }

    public String getUdpHost() {
        return udpHost;
    }

    public void setUdpHost(String udpHost) {
        this.udpHost = udpHost;
    }

    public int getUdpPort() {
        return udpPort;
    }

    public void setUdpPort(int udpPort) {
        this.udpPort = udpPort;
    }

    public Sblx getSblx() {
        return sblx;
    }

    public void setSblx(Sblx sblx) {
        this.sblx = sblx;
    }
}
<filename>cards.py """ cards.py """ # Suits and faces of a deck of cards suitchars: list = "♠ ♣ ♥ ♦".split() suitnames: list = "Clubs Spades Hearts Diamonds".split() faceschars: list = "A 2 3 4 5 6 7 8 9 10 J Q K".split() # Card values cardsvalues: list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10] # Card outlines horizline13: str = "─"*13 horizline11: str = "─"*11 vertline: str = "│" topright: str = "┌" topleft: str = "┐" bottomleft: str = "└" bottomright: str = "┘" sideborders = f"{vertline:14}{vertline}" blankborders = f"{vertline}{vertline}ӀѺѺѺѺѺѺѺѺѺӀ{vertline}{vertline}" cardtop: str = f"{topright}{horizline13}{topleft}" cardtop2: str = f"{vertline}{topright}{horizline11}{topleft}{vertline}" cardbottom: str = f"{bottomleft}{horizline13}{bottomright}" cardbottom2: str = f"{vertline}{bottomleft}{horizline11}{bottomright}{vertline}" """ Print all 52 cards in the standard poker deck according to their suits """ def printcards(suits: list, faces: list, start: int = 0, col: int = 5) -> None: for i in range(0, len(suitchars)): # print(f"Printing the {suitschars[i]} deck") for p in range(start, len(faceschars), col): topface = "" topsuit = "" lowface = "" lowsuit = "" centersuit = "" # Check the number of printable cards at max{col} n = len(faceschars[p:p+col]) print(cardtop*n) for x in faceschars[p:p+col]: topface += f"{vertline}{x:<2}{vertline:>12}" topsuit += f"{vertline}{suitchars[i]:<2}{vertline:>12}" centersuit += f"{vertline}{suitchars[i].center(13)}{vertline}" lowsuit += f"{vertline:12}{suitchars[i]:>2}{vertline}" lowface += f"{vertline:12}{x:>2}{vertline}" print(topface) print(topsuit) for g in range(0, 2): print(sideborders*n) print(centersuit) for g in range(0, 2): print(sideborders*n) print(lowsuit) print(lowface) print(cardbottom*n) def printhands(cards: list, start: int = 0, col: int = 5) -> None: for p in range(start, len(cards), col): topface = "" topsuit = "" lowface = "" lowsuit = "" centersuit = "" # Check the number of printable cards at max{col} n = len(cards[p:p+col]) print(cardtop*n) for x in cards[p:p+col]: # Find the matching suit character for idx, val in enumerate(suitnames): if x[1] == val: topface += f"{vertline}{x[0]:<2}{vertline:>12}" topsuit += f"{vertline}{suitchars[idx]:<2}{vertline:>12}" lowface += f"{vertline:12}{x[0]:>2}{vertline}" lowsuit += f"{vertline:12}{suitchars[idx]:>2}{vertline}" centersuit += f"{vertline}{suitchars[idx].center(13)}{vertline}" print(topface) print(topsuit) for g in range(0, 2): print(sideborders*n) print(centersuit) for g in range(0, 2): print(sideborders*n) print(lowsuit) print(lowface) print(cardbottom*n) def printsinglecard(card: tuple) -> None: if not isinstance(card, tuple): return topface = "" topsuit = "" lowface = "" lowsuit = "" centersuit = "" for idx, val in enumerate(suitnames): if card[1] == val: topface += f"{vertline}{card[0]:<2}{vertline:>12}" topsuit += f"{vertline}{suitchars[idx]:<2}{vertline:>12}" lowface += f"{vertline:12}{card[0]:>2}{vertline}" lowsuit += f"{vertline:12}{suitchars[idx]:>2}{vertline}" centersuit += f"{vertline}{suitchars[idx].center(13)}{vertline}" print(cardtop) print(topface) print(topsuit) for g in range(0, 2): print(sideborders) print(centersuit) for g in range(0, 2): print(sideborders) print(lowsuit) print(lowface) print(cardbottom) def printholecard() -> None: print(cardtop) print(cardtop2) for g in range(0, 6): print(blankborders) print(cardbottom2) print(cardbottom)
#coding=utf-8
# py_base_learn: higher order function example

# higher order function
print 'Define a higher-order function add(a, b, handler)'
def add(a, b, handler):
    return handler(a) + handler(b)

print 'Define a handler that squares the original value'
def _pow(num):
    return num * num

print 'Call the higher-order function add(1, 3, handler), result: %d' % add(1, 3, _pow)
/* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #ifndef __SYSTEM_GLOBAL_H_INCLUDED__ #define __SYSTEM_GLOBAL_H_INCLUDED__ #include <hive_isp_css_defs.h> #include <type_support.h> /* * The longest allowed (uninteruptible) bus transfer, does not * take stalling into account */ #define HIVE_ISP_MAX_BURST_LENGTH 1024 /* * Maximum allowed burst length in words for the ISP DMA * This value is set to 2 to prevent the ISP DMA from blocking * the bus for too long; as the input system can only buffer * 2 lines on Moorefield and Cherrytrail, the input system buffers * may overflow if blocked for too long (BZ 2726). */ #define ISP_DMA_MAX_BURST_LENGTH 2 /* * Create a list of HAS and IS properties that defines the system * * The configuration assumes the following * - The system is hetereogeneous; Multiple cells and devices classes * - The cell and device instances are homogeneous, each device type * belongs to the same class * - Device instances supporting a subset of the class capabilities are * allowed * * We could manage different device classes through the enumerated * lists (C) or the use of classes (C++), but that is presently not * fully supported * * N.B. the 3 input formatters are of 2 different classess */ #define USE_INPUT_SYSTEM_VERSION_2401 #define IS_ISP_2400_SYSTEM /* * Since this file is visible everywhere and the system definition * macros are not, detect the separate definitions for {host, SP, ISP} * * The 2401 system has the nice property that it uses a vanilla 2400 SP * so the SP will believe it is a 2400 system rather than 2401... */ /* #if defined(SYSTEM_hive_isp_css_2401_system) || defined(__isp2401_mamoiada) || defined(__scalar_processor_2401) */ #if defined(SYSTEM_hive_isp_css_2401_system) || defined(__isp2401_mamoiada) #define IS_ISP_2401_MAMOIADA_SYSTEM #define HAS_ISP_2401_MAMOIADA #define HAS_SP_2400 /* #elif defined(SYSTEM_hive_isp_css_2400_system) || defined(__isp2400_mamoiada) || defined(__scalar_processor_2400)*/ #elif defined(SYSTEM_hive_isp_css_2400_system) || defined(__isp2400_mamoiada) #define IS_ISP_2400_MAMOIADA_SYSTEM #define HAS_ISP_2400_MAMOIADA #define HAS_SP_2400 #else #error "system_global.h: 2400_SYSTEM must be one of {2400, 2401 }" #endif #define HAS_MMU_VERSION_2 #define HAS_DMA_VERSION_2 #define HAS_GDC_VERSION_2 #define HAS_VAMEM_VERSION_2 #define HAS_HMEM_VERSION_1 #define HAS_BAMEM_VERSION_2 #define HAS_IRQ_VERSION_2 #define HAS_IRQ_MAP_VERSION_2 #define HAS_INPUT_FORMATTER_VERSION_2 /* 2401: HAS_INPUT_SYSTEM_VERSION_3 */ /* 2400: HAS_INPUT_SYSTEM_VERSION_2 */ #define HAS_INPUT_SYSTEM_VERSION_2 #define HAS_INPUT_SYSTEM_VERSION_2401 #define HAS_BUFFERED_SENSOR #define HAS_FIFO_MONITORS_VERSION_2 /* #define HAS_GP_REGS_VERSION_2 */ #define HAS_GP_DEVICE_VERSION_2 #define HAS_GPIO_VERSION_1 #define HAS_TIMED_CTRL_VERSION_1 #define HAS_RX_VERSION_2 #define HAS_NO_INPUT_FORMATTER /*#define HAS_NO_PACKED_RAW_PIXELS*/ /*#define HAS_NO_DVS_6AXIS_CONFIG_UPDATE*/ #define DMA_DDR_TO_VAMEM_WORKAROUND #define DMA_DDR_TO_HMEM_WORKAROUND /* * Semi global. 
"HRT" is accessible from SP, but * the HRT types do not fully apply */ #define HRT_VADDRESS_WIDTH 32 /* Surprise, this is a local property*/ /*#define HRT_ADDRESS_WIDTH 64 */ #define HRT_DATA_WIDTH 32 #define SIZEOF_HRT_REG (HRT_DATA_WIDTH>>3) #define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH/8) /* The main bus connecting all devices */ #define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH #define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES #define CSI2P_DISABLE_ISYS2401_ONLINE_MODE /* per-frame parameter handling support */ #define SH_CSS_ENABLE_PER_FRAME_PARAMS typedef uint32_t hrt_bus_align_t; /* * Enumerate the devices, device access through the API is by ID, * through the DLI by address. The enumerator terminators are used * to size the wiring arrays and as an exception value. */ typedef enum { DDR0_ID = 0, N_DDR_ID } ddr_ID_t; typedef enum { ISP0_ID = 0, N_ISP_ID } isp_ID_t; typedef enum { SP0_ID = 0, N_SP_ID } sp_ID_t; #if defined(IS_ISP_2401_MAMOIADA_SYSTEM) typedef enum { MMU0_ID = 0, MMU1_ID, N_MMU_ID } mmu_ID_t; #elif defined(IS_ISP_2400_MAMOIADA_SYSTEM) typedef enum { MMU0_ID = 0, MMU1_ID, N_MMU_ID } mmu_ID_t; #else #error "system_global.h: SYSTEM must be one of {2400, 2401}" #endif typedef enum { DMA0_ID = 0, N_DMA_ID } dma_ID_t; typedef enum { GDC0_ID = 0, GDC1_ID, N_GDC_ID } gdc_ID_t; /* this extra define is needed because we want to use it also in the preprocessor, and that doesn't work with enums. */ #define N_GDC_ID_CPP 2 typedef enum { VAMEM0_ID = 0, VAMEM1_ID, VAMEM2_ID, N_VAMEM_ID } vamem_ID_t; typedef enum { BAMEM0_ID = 0, N_BAMEM_ID } bamem_ID_t; typedef enum { HMEM0_ID = 0, N_HMEM_ID } hmem_ID_t; typedef enum { ISYS_IRQ0_ID = 0, /* port a */ ISYS_IRQ1_ID, /* port b */ ISYS_IRQ2_ID, /* port c */ N_ISYS_IRQ_ID } isys_irq_ID_t; typedef enum { IRQ0_ID = 0, /* GP IRQ block */ IRQ1_ID, /* Input formatter */ IRQ2_ID, /* input system */ IRQ3_ID, /* input selector */ N_IRQ_ID } irq_ID_t; typedef enum { FIFO_MONITOR0_ID = 0, N_FIFO_MONITOR_ID } fifo_monitor_ID_t; /* * Deprecated: Since all gp_reg instances are different * and put in the address maps of other devices we cannot * enumerate them as that assumes the instrances are the * same. * * We define a single GP_DEVICE containing all gp_regs * w.r.t. 
a single base address * typedef enum { GP_REGS0_ID = 0, N_GP_REGS_ID } gp_regs_ID_t; */ typedef enum { GP_DEVICE0_ID = 0, N_GP_DEVICE_ID } gp_device_ID_t; typedef enum { GP_TIMER0_ID = 0, GP_TIMER1_ID, GP_TIMER2_ID, GP_TIMER3_ID, GP_TIMER4_ID, GP_TIMER5_ID, GP_TIMER6_ID, GP_TIMER7_ID, N_GP_TIMER_ID } gp_timer_ID_t; typedef enum { GPIO0_ID = 0, N_GPIO_ID } gpio_ID_t; typedef enum { TIMED_CTRL0_ID = 0, N_TIMED_CTRL_ID } timed_ctrl_ID_t; typedef enum { INPUT_FORMATTER0_ID = 0, INPUT_FORMATTER1_ID, INPUT_FORMATTER2_ID, INPUT_FORMATTER3_ID, N_INPUT_FORMATTER_ID } input_formatter_ID_t; /* The IF RST is outside the IF */ #define INPUT_FORMATTER0_SRST_OFFSET 0x0824 #define INPUT_FORMATTER1_SRST_OFFSET 0x0624 #define INPUT_FORMATTER2_SRST_OFFSET 0x0424 #define INPUT_FORMATTER3_SRST_OFFSET 0x0224 #define INPUT_FORMATTER0_SRST_MASK 0x0001 #define INPUT_FORMATTER1_SRST_MASK 0x0002 #define INPUT_FORMATTER2_SRST_MASK 0x0004 #define INPUT_FORMATTER3_SRST_MASK 0x0008 typedef enum { INPUT_SYSTEM0_ID = 0, N_INPUT_SYSTEM_ID } input_system_ID_t; typedef enum { RX0_ID = 0, N_RX_ID } rx_ID_t; typedef enum { MIPI_PORT0_ID = 0, MIPI_PORT1_ID, MIPI_PORT2_ID, N_MIPI_PORT_ID } mipi_port_ID_t; #define N_RX_CHANNEL_ID 4 /* Generic port enumeration with an internal port type ID */ typedef enum { CSI_PORT0_ID = 0, CSI_PORT1_ID, CSI_PORT2_ID, TPG_PORT0_ID, PRBS_PORT0_ID, FIFO_PORT0_ID, MEMORY_PORT0_ID, N_INPUT_PORT_ID } input_port_ID_t; typedef enum { CAPTURE_UNIT0_ID = 0, CAPTURE_UNIT1_ID, CAPTURE_UNIT2_ID, ACQUISITION_UNIT0_ID, DMA_UNIT0_ID, CTRL_UNIT0_ID, GPREGS_UNIT0_ID, FIFO_UNIT0_ID, IRQ_UNIT0_ID, N_SUB_SYSTEM_ID } sub_system_ID_t; #define N_CAPTURE_UNIT_ID 3 #define N_ACQUISITION_UNIT_ID 1 #define N_CTRL_UNIT_ID 1 /* * Input-buffer Controller. */ typedef enum { IBUF_CTRL0_ID = 0, /* map to ISYS2401_IBUF_CNTRL_A */ IBUF_CTRL1_ID, /* map to ISYS2401_IBUF_CNTRL_B */ IBUF_CTRL2_ID, /* map ISYS2401_IBUF_CNTRL_C */ N_IBUF_CTRL_ID } ibuf_ctrl_ID_t; /** end of Input-buffer Controller */ /* * Stream2MMIO. */ typedef enum { STREAM2MMIO0_ID = 0, /* map to ISYS2401_S2M_A */ STREAM2MMIO1_ID, /* map to ISYS2401_S2M_B */ STREAM2MMIO2_ID, /* map to ISYS2401_S2M_C */ N_STREAM2MMIO_ID } stream2mmio_ID_t; typedef enum { /* * Stream2MMIO 0 has 8 SIDs that are indexed by * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID]. * * Stream2MMIO 1 has 4 SIDs that are indexed by * [STREAM2MMIO_SID0_ID...TREAM2MMIO_SID3_ID]. * * Stream2MMIO 2 has 4 SIDs that are indexed by * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID]. */ STREAM2MMIO_SID0_ID = 0, STREAM2MMIO_SID1_ID, STREAM2MMIO_SID2_ID, STREAM2MMIO_SID3_ID, STREAM2MMIO_SID4_ID, STREAM2MMIO_SID5_ID, STREAM2MMIO_SID6_ID, STREAM2MMIO_SID7_ID, N_STREAM2MMIO_SID_ID } stream2mmio_sid_ID_t; /** end of Stream2MMIO */ /** * Input System 2401: CSI-MIPI recevier. 
*/ typedef enum { CSI_RX_BACKEND0_ID = 0, /* map to ISYS2401_MIPI_BE_A */ CSI_RX_BACKEND1_ID, /* map to ISYS2401_MIPI_BE_B */ CSI_RX_BACKEND2_ID, /* map to ISYS2401_MIPI_BE_C */ N_CSI_RX_BACKEND_ID } csi_rx_backend_ID_t; typedef enum { CSI_RX_FRONTEND0_ID = 0, /* map to ISYS2401_CSI_RX_A */ CSI_RX_FRONTEND1_ID, /* map to ISYS2401_CSI_RX_B */ CSI_RX_FRONTEND2_ID, /* map to ISYS2401_CSI_RX_C */ #define N_CSI_RX_FRONTEND_ID (CSI_RX_FRONTEND2_ID+1) } csi_rx_frontend_ID_t; typedef enum { CSI_RX_DLANE0_ID = 0, /* map to DLANE0 in CSI RX */ CSI_RX_DLANE1_ID, /* map to DLANE1 in CSI RX */ CSI_RX_DLANE2_ID, /* map to DLANE2 in CSI RX */ CSI_RX_DLANE3_ID, /* map to DLANE3 in CSI RX */ N_CSI_RX_DLANE_ID } csi_rx_fe_dlane_ID_t; /** end of CSI-MIPI receiver */ typedef enum { ISYS2401_DMA0_ID = 0, N_ISYS2401_DMA_ID } isys2401_dma_ID_t; /** * Pixel-generator. ("system_global.h") */ typedef enum { PIXELGEN0_ID = 0, PIXELGEN1_ID, PIXELGEN2_ID, N_PIXELGEN_ID } pixelgen_ID_t; /** end of pixel-generator. ("system_global.h") */ typedef enum { INPUT_SYSTEM_CSI_PORT0_ID = 0, INPUT_SYSTEM_CSI_PORT1_ID, INPUT_SYSTEM_CSI_PORT2_ID, INPUT_SYSTEM_PIXELGEN_PORT0_ID, INPUT_SYSTEM_PIXELGEN_PORT1_ID, INPUT_SYSTEM_PIXELGEN_PORT2_ID, N_INPUT_SYSTEM_INPUT_PORT_ID } input_system_input_port_ID_t; #define N_INPUT_SYSTEM_CSI_PORT 3 typedef enum { ISYS2401_DMA_CHANNEL_0 = 0, ISYS2401_DMA_CHANNEL_1, ISYS2401_DMA_CHANNEL_2, ISYS2401_DMA_CHANNEL_3, ISYS2401_DMA_CHANNEL_4, ISYS2401_DMA_CHANNEL_5, ISYS2401_DMA_CHANNEL_6, ISYS2401_DMA_CHANNEL_7, ISYS2401_DMA_CHANNEL_8, ISYS2401_DMA_CHANNEL_9, ISYS2401_DMA_CHANNEL_10, ISYS2401_DMA_CHANNEL_11, N_ISYS2401_DMA_CHANNEL } isys2401_dma_channel; enum ia_css_isp_memories { IA_CSS_ISP_PMEM0 = 0, IA_CSS_ISP_DMEM0, IA_CSS_ISP_VMEM0, IA_CSS_ISP_VAMEM0, IA_CSS_ISP_VAMEM1, IA_CSS_ISP_VAMEM2, IA_CSS_ISP_HMEM0, IA_CSS_SP_DMEM0, IA_CSS_DDR, N_IA_CSS_MEMORIES }; #define IA_CSS_NUM_MEMORIES 9 /* For driver compatability */ #define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES #define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES #endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */
/** dimensionality must be larger than one */
class DiffPow extends AbstractObjectiveFunction {
    @Override
    public double valueOf(double[] x) {
        double res = 0;
        for (int i = 0; i < x.length; ++i)
            res += Math.pow(Math.abs(x[i]), 2. + 10 * (double) i / (x.length - 1.));
        return res;
    }
}
<reponame>solderneer/SimpleSBC //****************************************************************************** #define _IN_LCD_LTPS //------------------------------------------------------------------------------ #include "Memap.h" #include "Macro.h" #include "Lcd.h" #include "Byte2Word.h" #define DATA_BUS_WIDTH 8 #define LCD_INIT_NORMAL 0 #define LCD_MP4_INIT 1 #define LCD_JPEG_INIT 2 #define LCD_JPEG_X_INIT 3 //------------------------------------------------------------------------------ void LCD_Command(unsigned int cmd) { #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_COMMAND,(cmd>>8)); #endif write_XDATA(LCD_COMMAND,cmd); } void LCD_Data(unsigned int data) { #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_DATA,(data>>8)); #endif write_XDATA(LCD_DATA,data); } void LCD_Reg_Set(unsigned int cmd,unsigned int data) { #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_COMMAND,(cmd>>8)); #endif write_XDATA(LCD_COMMAND,cmd); #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_DATA,(data>>8)); #endif write_XDATA(LCD_DATA,data); } unsigned int LCD_StatusRead(void) { unsigned int i,j; i = read_XDATA(LCD_COMMAND); return i; } void LCD_PowerOff(void) { } #if 1 void LCDDEV_Init(unsigned int InitType) { unsigned int x,y; LCD_Reg_Set(0x0007, 0x0233); LCD_Reg_Set(0x0000, 0x0001); LCD_Reg_Set(0x0010, 0x0012); LCD_Reg_Set(0x0011, 0x0014); LCD_Reg_Set(0x0012, 0x0509); LCD_Reg_Set(0x0013, 0x2B00); LCD_Reg_Set(0x001E, 0x0131); LCD_Reg_Set(0x0001, 0x2A9F); LCD_Reg_Set(0x0002, 0x0380); if(InitType == LCD_INIT_NORMAL) LCD_Reg_Set(0x0003, 0x6030); else if((InitType == LCD_JPEG_X_INIT)||(InitType == LCD_MP4_INIT)) LCD_Reg_Set(0x0003, 0x6038); LCD_Reg_Set(0x0004, 0x0000); LCD_Reg_Set(0x0005, 0x0000); LCD_Reg_Set(0x0016, 0x7F00); LCD_Reg_Set(0x0017, 0x0101); LCD_Reg_Set(0x0007, 0x0233); LCD_Reg_Set(0x0040, 0x0001); LCD_Reg_Set(0x0041, 0x0000); LCD_Reg_Set(0x0042, 0x9F00); LCD_Reg_Set(0x0043, 0x9F00); LCD_Reg_Set(0x0044, 0x7F00); LCD_Reg_Set(0x0045, 0x9F00); LCD_Reg_Set(0x002C, 0x3000); LCD_Reg_Set(0x0030, 0x0000); LCD_Reg_Set(0x0031, 0x0000); LCD_Reg_Set(0x0032, 0x0000); LCD_Reg_Set(0x0033, 0x0000); LCD_Reg_Set(0x0034, 0x0000); LCD_Reg_Set(0x0035, 0x0000); LCD_Reg_Set(0x0036, 0x0000); LCD_Reg_Set(0x0037, 0x0000); LCD_Reg_Set(0x003A, 0x0000); LCD_Reg_Set(0x003B, 0x0000); LCD_Reg_Set(0x0021, 0x0000); LCD_Command(0x0022); for(y=0; y<LCD_MAX_YSIZE; y++){ for(x=0; x<LCD_MAX_XSIZE; x++){ #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_DATA,0x0000); #endif write_XDATA(LCD_DATA,0x0000); } } } #endif #if 0 void LCDDEV_Init(unsigned int InitType) { unsigned int x,y; LCD_Reg_Set(0x0007, 0x0233); LCD_Reg_Set(0x0000, 0x0001); LCD_Reg_Set(0x0010, 0x0012); LCD_Reg_Set(0x0011, 0x0014); LCD_Reg_Set(0x0012, 0x0509); LCD_Reg_Set(0x0013, 0x2B00); LCD_Reg_Set(0x001E, 0x0131); LCD_Reg_Set(0x0001, 0x2AA0); //LCD_Reg_Set(0x0001, 0x03AF); LCD_Reg_Set(0x0002, 0x0380); //LCD_Reg_Set(0x0002, 0x0000); //LCD_Reg_Set(0x0003, 0x6030); LCD_Reg_Set(0x0003, 0x6830); LCD_Reg_Set(0x0004, 0x0000); LCD_Reg_Set(0x0005, 0x0000); LCD_Reg_Set(0x0016, 0x7F00); LCD_Reg_Set(0x0017, 0x0101); LCD_Reg_Set(0x0007, 0x0233); LCD_Reg_Set(0x0040, 0x0001); LCD_Reg_Set(0x0041, 0x0000); LCD_Reg_Set(0x0042, 0x9F00); LCD_Reg_Set(0x0043, 0x9F00); LCD_Reg_Set(0x0044, 0x7F00); LCD_Reg_Set(0x0045, 0x9F00); LCD_Reg_Set(0x002C, 0x3000); LCD_Reg_Set(0x0030, 0x0000); LCD_Reg_Set(0x0031, 0x0000); LCD_Reg_Set(0x0032, 0x0000); LCD_Reg_Set(0x0033, 0x0000); LCD_Reg_Set(0x0034, 0x0000); LCD_Reg_Set(0x0035, 0x0000); LCD_Reg_Set(0x0036, 0x0000); LCD_Reg_Set(0x0037, 0x0000); LCD_Reg_Set(0x003A, 0x0000); LCD_Reg_Set(0x003B, 
0x0000); LCD_Reg_Set(0x0021, 0x0000); LCD_Command(0x0022); for(x=0;x<128;x++){ for(y=0;y<160;y++){ #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_DATA,0x00F8); #endif write_XDATA(LCD_DATA,0xF800); } } } #endif void LCD_Init(void) { LCDDEV_Init(LCD_INIT_NORMAL); } void LCD_SetDispAddr(int x, int y) { unsigned int gddram; gddram = ((y&0xFF)<<8) | (x&0x7F); LCD_Reg_Set(0x21, gddram); LCD_Command(0x22); } void Display_Sub(unsigned int RGB_Data) { unsigned int i,j; LCD_SetDispAddr(0,0); for(i = LCD_MAX_YSIZE; i>0; i--){ for(j = LCD_MAX_XSIZE; j>0; j--) #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_DATA,RGB_Data>>8); #endif write_XDATA(LCD_DATA,RGB_Data); } } void Display_Image(unsigned int *pBuffer) { unsigned int i,j,temp,temp_data; LCD_Reg_Set(0X14, 0x00); LCD_Reg_Set(0X15, 0x00); for(i = 0x800; i>0; i--){ temp_data = *pBuffer++;//pBuffer[i]; temp = 0x0080; for(j=8; j>0; j--){ if(temp & temp_data){ write_XDATA(LCD_DATA,0x001f); }else{ write_XDATA(LCD_DATA,0x07e0); } temp = temp >> 1; } } } void Display_Picture(unsigned int *pBuffer) { unsigned int i,j; LCD_SetDispAddr(0,0); for(i = LCD_MAX_YSIZE; i>0; i--){ for(j = LCD_MAX_XSIZE; j>0; j--){ #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_DATA,*pBuffer>>8); #endif write_XDATA(LCD_DATA,*pBuffer++); } } } void LCDDEV_SetWindow(int x0, int y0, int x1, int y1) { unsigned int waddr; waddr = ((x1&0x7F)<<8) | (x0&0x7F); LCD_Reg_Set(0x44, waddr); waddr = ((y1&0xFF)<<8) | (y0&0xFF); LCD_Reg_Set(0x45, waddr); waddr = ((y0&0xFF)<<8) | (x0&0x7F); LCD_Reg_Set(0x21, waddr); LCD_Command(0x22); } void LCDDEV_RevertWindow(int x0, int y0, int x1, int y1) { unsigned int waddr; waddr = ((x1&0x7F)<<8) | (x0&0x7F); LCD_Reg_Set(0x44, waddr); waddr = ((y1&0xFF)<<8) | (y0&0xFF); LCD_Reg_Set(0x45, waddr); } void LCD_ReadBitmap(int x0, int y0, int xsize, int ysize, unsigned int *pData) { int x1, y1; x1 = x0+xsize-1; y1 = y0+ysize-1; LCDDEV_SetWindow(x0, y0, x1, y1); #if (DATA_BUS_WIDTH==8) read_XDATA(LCD_DATA); #endif read_XDATA(LCD_DATA); for(y1=0; y1<ysize; y1++){ for(x1=0; x1<xsize; x1++){ #if (DATA_BUS_WIDTH==8) *pData = (read_XDATA(LCD_DATA)&0xFF)<<8; *pData |= read_XDATA(LCD_DATA)&0xFF; #else *pData++ = read_XDATA(LCD_DATA); #endif } } LCDDEV_RevertWindow(0, 0, LCD_MAX_XSIZE-1, LCD_MAX_YSIZE-1); } unsigned int LCD_GetPixel(int x, int y) { unsigned int color; LCD_SetDispAddr(x,y); #if (DATA_BUS_WIDTH==8) color = read_XDATA(LCD_DATA); color = read_XDATA(LCD_DATA); color = (read_XDATA(LCD_DATA)&0xFF)<<8; color |= (read_XDATA(LCD_DATA)&0xFF); #else color = read_XDATA(LCD_DATA); color = read_XDATA(LCD_DATA); #endif return(color); } void LCD_SetPixel(int x, int y, unsigned int color) { LCD_SetDispAddr(x,y); #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_DATA,(color>>8)); #endif write_XDATA(LCD_DATA,color); //write_XDATA(LCD_DATA,color); } void LCD_XorPixel(int x, int y) { unsigned int color; color = LCD_GetPixel(x,y); LCD_SetPixel(x,y,0xFFFF-color); } unsigned int LCDDEV_GetPixel(int x, int y) { unsigned int color; LCD_SetDispAddr(x, y); #if (DATA_BUS_WIDTH==8) color = read_XDATA(LCD_DATA); color = read_XDATA(LCD_DATA); color = (read_XDATA(LCD_DATA)&0xFF)<<8; color |= (read_XDATA(LCD_DATA)&0xFF); #else color = read_XDATA(LCD_DATA); color = read_XDATA(LCD_DATA); #endif return(color); } void LCDDEV_SetPixel(int x, int y, unsigned int color) { LCD_SetDispAddr(x, y); #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_DATA,(color>>8)); #endif write_XDATA(LCD_DATA,color); //write_XDATA(LCD_DATA,color); } void LCDDEV_XorPixel(int x, int y) { unsigned int color; color = LCDDEV_GetPixel(x,y); 
LCDDEV_SetPixel(x,y,0xFFFF-color); } void LCDDEV_DrawHLine(int x0, int y, int x1) { if (LCD_Context.DrawMode & LCD_DRAWMODE_XOR){ for(; x0<=x1; x0++){ LCDDEV_XorPixel(x0, y); } }else{ LCDDEV_SetWindow(x0, y, x1, y); for(; x0<=x1; x0++){ #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_DATA, (LCD_COLOR>>8)); #endif write_XDATA(LCD_DATA, LCD_COLOR); } LCDDEV_RevertWindow(0, 0, LCD_MAX_XSIZE-1, LCD_MAX_YSIZE-1); } } void LCDDEV_DrawVLine(int x, int y0, int y1) { unsigned int color; if (LCD_Context.DrawMode & LCD_DRAWMODE_XOR){ for(; y0<=y1; y0++){ LCDDEV_XorPixel(x, y0); } }else{ if(LCD_Context.DrawMode == LCD_DRAWMODE_REV) color = LCD_BKCOLOR; else color = LCD_COLOR; LCDDEV_SetWindow(x, y0, x, y1); for(; y0<=y1; y0++){ #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_DATA, (color>>8)); #endif write_XDATA(LCD_DATA, color); } LCDDEV_RevertWindow(0, 0, LCD_MAX_XSIZE-1, LCD_MAX_YSIZE-1); } } void LCDDEV_FillRect(int x0, int y0, int x1, int y1) { unsigned int color; int i; if (LCD_Context.DrawMode & LCD_DRAWMODE_XOR){ for(; y0<=y1; y0++){ LCDDEV_DrawHLine(x0, y0, x1); } }else{ if(LCD_Context.DrawMode == LCD_DRAWMODE_REV) color = LCD_BKCOLOR; else color = LCD_COLOR; LCDDEV_SetWindow(x0, y0, x1, y1); for(; y0<=y1 ; y0++){ i = x0; for(; i<=x1; i++){ #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_DATA, (color>>8)); #endif write_XDATA(LCD_DATA, color); } } LCDDEV_RevertWindow(0, 0, LCD_MAX_XSIZE-1, LCD_MAX_YSIZE-1); } } void LCDDEV_DrawBitLine1BPP(int x, int y, int xsize, int Diff, unsigned int *pData) { x += Diff; switch(LCD_Context.DrawMode){ case LCD_DRAWMODE_REV: while(xsize){ if(*pData & (0x8000 >> Diff)) LCDDEV_SetPixel(x, y, LCD_BKCOLOR); else LCDDEV_SetPixel(x, y, LCD_COLOR); x++; xsize--; if(++Diff == 16){ Diff = 0; pData++; } } break; case LCD_DRAWMODE_TRANS: while(xsize){ if(*pData & (0x8000 >> Diff)) LCDDEV_SetPixel(x, y, LCD_COLOR); x++; xsize--; if(++Diff == 16){ Diff = 0; pData++; } } break; case LCD_DRAWMODE_XOR: while(xsize){ if(*pData & (0x8000 >> Diff)) LCDDEV_XorPixel(x,y); x++; xsize--; if(++Diff == 16){ Diff = 0; pData++; } } break; default: while(xsize){ if(*pData & (0x8000 >> Diff)) LCDDEV_SetPixel(x, y, LCD_COLOR); else LCDDEV_SetPixel(x, y, LCD_BKCOLOR); x++; xsize--; if(++Diff == 16){ Diff = 0; pData++; } } break; } } void LCDDEV_DrawBitLine16BPP(int x0, int y0, int xsize, int ysize, unsigned int *pData) { int x1,y1; unsigned int i; x1 = x0+xsize-1; y1 = y0+ysize-1; LCDDEV_SetWindow(x0, y0, x1, y1); for(; ysize>0; ysize--){ i = xsize; for(; i>0; i--){ #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_DATA, (*pData>>8)); #endif write_XDATA(LCD_DATA, *pData++); } } LCDDEV_RevertWindow(0, 0, LCD_MAX_XSIZE-1, LCD_MAX_YSIZE-1); } void LCDDEV_DrawBitmap(int x0, int y0, int xsize, int ysize, int BitsPerPixel, int BytesPerLine, unsigned int *pData, int Diff) { int i; switch (BitsPerPixel) { case 1: for(i=0; i<ysize; i++){ LCDDEV_DrawBitLine1BPP(x0, i+y0, xsize, Diff, pData); pData += BytesPerLine; } break; case 16: LCDDEV_DrawBitLine16BPP(x0, y0, xsize, ysize, pData); break; } } void MP4_LCD_Init(void) { unsigned int x, y; LCD_SetDispAddr(0, 0); for(y=0; y<LCD_MAX_YSIZE; y++){ for(x=0; x<LCD_MAX_XSIZE; x++){ #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_DATA, 0x0000); #endif write_XDATA(LCD_DATA, 0x0000); } } LCDDEV_Init(LCD_MP4_INIT); } void DMA_LcdJpegInit(void) { unsigned int x, y; LCD_SetDispAddr(0, 0); for(y=0; y<LCD_MAX_YSIZE; y++){ for(x=0; x<LCD_MAX_XSIZE; x++){ #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_DATA, 0x0000); #endif write_XDATA(LCD_DATA, 0x0000); } } LCDDEV_Init(LCD_INIT_NORMAL); } void 
DMA_LcdJpegInitX(void) { unsigned int x, y; LCD_SetDispAddr(0, 0); for(y=0; y<LCD_MAX_YSIZE; y++){ for(x=0; x<LCD_MAX_XSIZE; x++){ #if (DATA_BUS_WIDTH==8) write_XDATA(LCD_DATA, 0x0000); #endif write_XDATA(LCD_DATA, 0x0000); } } LCDDEV_Init(LCD_JPEG_X_INIT); } //******************************************************************************
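/*
 * Illustrative usage sketch (an assumption, not part of the original driver):
 * the calls below exercise only functions defined above, and the underlying
 * bus plumbing (write_XDATA/read_XDATA) is presumed to be set up elsewhere.
 */
void LCD_DemoFill(void)
{
    LCD_Init();                     /* program the controller registers           */
    Display_Sub(0xF800);            /* flood the panel with red (RGB565)          */
    LCD_SetPixel(10, 20, 0x07E0);   /* put a single green pixel at column 10, row 20 */
}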
Two Elk Grove police officers in one patrol car approached Gonzalez-Warren, Elk Grove police spokesman Officer Jason Jimenez said. The officers believed the suspect had an item in his hand, and one of the officers — a 19-year law enforcement veteran with the Elk Grove department — fired a single shot, Hampton said. “The suspect immediately surrendered and was taken into custody without further incident,” Hampton said. Hampton said the police and sheriff’s departments were still investigating what led to Thursday’s encounter outside Firestone. Hampton said Thursday that law enforcement authorities “were aware of a few” previous encounters with the suspect. Online records show Gonzalez-Warren had an outstanding warrant with the Concord Police Department for a misdemeanor. Sacramento Superior Court records show a short history of misdemeanor drug possession charges, but no felonies on Gonzalez-Warren’s record. Resisting an executive officer is a separate offense from resisting arrest. According to the California penal code, resisting an executive officer involves deterring an officer “by means of any threat or violence,” while the entry for resisting arrest does not explicitly reference violence. The latter is usually a misdemeanor. Because the incident took place outside of Elk Grove city limits, the Sheriff’s Department will be the lead agency investigating the shooting, Hampton said. Elk Grove police will also conduct an internal investigation. The officer has been placed on administrative leave. Gonzalez-Warren remains in Sacramento County jail in lieu of $20,000 bail.
Scripps Institution of Oceanography (est. 1903) in La Jolla, CA is the perfect location for meeting a world-famous climate scientist. It is one of the most beautifully sculpted campuses on the face of the planet, overlooking the Pacific Ocean; it is an inviting scene for serious surfers, but it also beckons top-notch scientists from around the world. Every view from the architecturally rich campus opens onto an endless panorama of gorgeous blue ocean waters and lush white surf for as far as the eye can see. However, that outward serenity belies a collapsing climate system that's out of public view, one of the great illusions of all time. At Scripps I was privileged to meet the esteemed climate scientist Peter Wadhams (professor emeritus, Cambridge), recipient of several prestigious science awards, and his lovely, brilliantly energetic and accomplished wife, Maria Pia Casarini (Council 2017-2018, Polar Educators International). My mission was to drill down into what's happening with the climate crisis. I got the answers I was looking for. Beyond the interview itself, additional answers are readily available to the general public via the paperback edition of Professor Wadhams's A Farewell to Ice (Penguin, UK; Oxford University Press, USA), a superb tome widely praised as a consummate must-read for a thorough understanding of our increasingly dangerous climate crisis. Still, at the end of the day, the colossal question overhanging all of society vis-à-vis the climate juggernaut remains: Will society be able to look into the eyes of its children's children without wincing? My first question: What is the single most serious threat to the planet? A sudden and huge pulse of methane out of the East Siberian Arctic Shelf, originating from its extraordinarily shallow waters (<50 meters), or a similar burst out of the Laptev Sea, where 53% of the seawater rests on a continental shelf averaging less than 50 meters in depth. Those extraordinarily shallow waters leave miles upon miles of concentrated methane, hydrates as well as free gas, believed to be the world's largest such deposit, exposed to global warming. The vulnerability relates to methane in sediments capped by layers of permafrost left over from the last Ice Age. The dilemma is that the permafrost cap is rapidly thawing as a result of the anomalous retreat of summer sea ice. My follow-up question: What will be the impact of a 50Gt pulse? Seriously, though, drilling down deeper yet, it became apparent that methane embedded in frozen deposits in the shallow waters north of Siberia is the risk most underrated and overlooked by the scientific community, which prompts many, many hard questions. For starters, how is it possible that so few climate scientists and/or developed nations care about or follow the inordinate risks of a deadly methane breakout in the Arctic? The Arctic coastal seas contain 800Gt of methane in sediments, which is prevented from venting to the surface by underwater permafrost that is itself rapidly thawing because of sea ice loss. Conservatively, the topmost 6%-8%, or approximately 50Gt, is vulnerable to sudden venting within a few years as the protective layer of permafrost thaws, resulting in a rapid increase of 0.6C in planetary temperature. After considering the implications of her findings, Dr. Shakhova throttled back her own original, larger estimate of a potential methane (CH4) pulse down to 50Gt, even though the reality may be much larger.
As it happens, her discovery that a pulse could occur "out of the blue" has received the cold shoulder from mainstream science. According to Dr. Wadhams, more in situ work is desperately needed to determine the stability of the sediments; that is, whether the threat is smaller than thought, or whether additional thaw will give rise to a pulse far greater than 8% of the 800Gt, which would amount to terminal disaster for the planet. Yes, there are only 5Gt of CH4 in the atmosphere today, so a 50Gt burp would be enormously disruptive; moreover, molecule for molecule, the immediate impact of CH4 on global warming is well over 20 times that of CO2, and depending upon the timeframe considered, up to 100 times. Such a pulse would have an immediate upward impact on global temperatures, cranking them up by +0.6C within only two to three years, on top of the +0.8C increase accumulated since industrialization began over 200 years ago. In comparative numbers, that is a 75% extra temperature boost within a handful of years (0.6C added to 0.8C, taking warming to roughly 1.4C), delivered by a gas at least 20 times more potent at influencing global warming than the CO2 that took 200-plus years to accomplish the first increment. Upon release into the atmosphere, such methane bursts would drive excessive heat, damaging ecosystems all across the planet and burning off agriculture at latitudes above and below the equator over indeterminate but widespread distances. Grain crop failures would fall like dominoes. In point of fact, the world is 100% dependent upon grains, whether for grain-based foodstuffs or for meat consumption. All of which brings to mind the planetary heat wave of summer 2018, which set new benchmarks for global warming. Just imagine the impact of a relatively speedy 75% increase, from 0.8C up to 1.4C, within the geological equivalent of a snap of the fingers. Along those lines, contemplate the following headline in The Guardian, July 20, 2018: "Crop Failure and Bankruptcy Threaten Farmers as Drought Grips Europe." In view of that, consider the ramifications of a 75% increase in temperatures. But beware: notwithstanding the risk of a massive methane burp, another global warming danger haunts the planet and goes deeper than that sudden-pulse scenario, which, incidentally, may or may not happen. Nobody knows for sure. That bigger climate monster overshadows all else: a significant but obscure climate sensitivity analysis shows that an "unrealized warming" or latency effect exists within the climate system, which implies the following: if all CO2 emissions stopped cold turkey today, global temperatures would still rise by up to 5C over the coming decades. Interestingly, even though mainstream science supports the concept of "unrealized warming," it is not emphasized, and, of more significance, its magnitude (for example, +5C) is a subject of intense debate. It is not part of the Intergovernmental Panel on Climate Change (IPCC) analysis, which looks only at the immediate, fast climate response to increasing CO2 and thereby calls for a lid of 2C of global warming by 2100, a target Dr. Wadhams claims is impossible to achieve under the current IPCC approach. This bigger climate monster, or doomsday forecast, can only be averted by full-scale deployment of carbon removal from the atmosphere. But first, something about the derivation of this ultra-gloomy forecast, or the dark side of climate science. That is a "climate sensitivity" issue: (a) if the planetary system is very sensitive, then we are in deep trouble; or (b) if it is not very sensitive at all, then there really isn't a problem. After ten years of research, the answer was found to be (a).
Wasdell's study of climate sensitivity indicates that warming will run far beyond anything suggested by the IPCC even if CO2 emissions came to a halt today. In other words, we're cooked! The only way out of the jam is via geoengineering as well as removal of CO2 from the atmosphere. Still, geoengineering is mostly a black-and-white issue among the scientific and engineering communities, with a sizeable group opposed to tinkering that risks creating a Frankenstein climate or something even worse, since unintended consequences oftentimes derive from the best of intentions. Additionally, there is presently no assurance that any geoengineering model will work at scale; the same goes for carbon removal, which would likely need to be nearly as large an undertaking as the source of the CO2 in the first place, the fossil fuel industry in toto, an enormous infrastructure that took decades to build. Thus, with overwhelming odds working against any easy pathway back to a semblance of "Mother Earth as normal," what can concerned individuals do to help overcome those odds, which unfortunately lean in favor of mainstream thought that ignores the above-mentioned serious aspects of an increasingly wacky climate? As for Dr. Wadhams, aside from speeches around the world (Korea and Japan are on the docket) and thought-provoking books and articles, he's an enthusiastic member of ScientistsWarning.org and encourages the public to join its ranks now. As of December 2017, over 20,000 scientists in 184 nations had signed a second Scientists Warning to Humanity. ScientistsWarning.org is an ideal outlet for people who want to get seriously involved, on a direct personal basis, in helping the worldwide effort to combat global warming and the debasement of the biosphere. It is especially important to generate as much public support as possible for this effort, directed by Stuart Scott of ClimateMatters.TV fame, and to show cohesion via strength in numbers. Numbers are meaningful. The planet is counting on you! There is no question that a very large number of people will have to move; you cannot live where the water comes over you. I have not heard one suggestion on how we are going to move one hundred million (100,000,000) people out of low-lying areas, or on which countries would be willing to accept them.
"""Kudos GET method tests.""" import json from tests import conftest async def test_get(client): """Test Kudos GET method.""" resp = await client.get('/kudos', headers={'Accept': 'application/json'}) assert resp.status == 200 assert 'application/json' in resp.headers['Content-Type'] text = await resp.text() body = json.loads(text) assert isinstance(body, list) for kudo in body: conftest.kudo_asserts(kudo)
    # Assumes module-level imports: numpy as np, pytz, and astropy.time.Time.
    def label_time_image(self, mjd, surveystart=None):
        """Build a human-readable time label for an image taken at the given MJD."""
        if surveystart is None:
            return 'MJD {:.5f}'.format(mjd)
        # Whole nights elapsed since the start of the survey.
        diffs = mjd - surveystart
        night = int(np.floor(diffs))
        # Convert the MJD to a timezone-aware datetime, then express it as a
        # US/Pacific wall-clock time for the label.
        t = Time(mjd, format='mjd')
        d = t.to_datetime(timezone=pytz.timezone('US/Eastern'))
        local = d.astimezone(pytz.timezone('US/Pacific'))
        label = local.strftime('%H:%M:%S')
        return 'Night: {:05d} Time:'.format(night) + label
package uk.co.gresearch.siembol.response.common;

import org.springframework.plugin.core.Plugin;

/**
 * Plugin interface for exposing the responding evaluator factories provided by a plugin.
 */
public interface ResponsePlugin extends Plugin<String> {
    /**
     * Gets the responding evaluator factories provided by the plugin.
     *
     * @return RespondingResult with an OK status code and respondingEvaluatorFactories in its attributes, or
     *         an ERROR status code with a message otherwise
     */
    RespondingResult getRespondingEvaluatorFactories();

    @Override
    default boolean supports(String str) {
        return true;
    }
}
<reponame>pgrabas/emu6502 #pragma once #include "emu_core/clock.hpp" #include <chrono> #include <cstdint> #include <fmt/format.h> #include <iostream> #include <thread> namespace emu { struct ClockSteadyException : public std::runtime_error { ClockSteadyException(const std::string &msg) : runtime_error(msg) {} }; //Single thread only struct ClockSteady final : public Clock { static constexpr uint64_t kMaxFrequency = 100'000'000llu; static constexpr uint64_t kNanosecondsPerSecond = 1'000'000'000llu; using steady_clock = std::chrono::steady_clock; ClockSteady(uint64_t frequency = k1MhzFrequency, std::ostream *verbose_stream = nullptr) : frequency{frequency}, tick{kNanosecondsPerSecond / frequency} // ,verbose_stream(verbose_stream) { if (frequency > kMaxFrequency) { throw ClockSteadyException("ClockSteady: frequency > kMaxFrequency"); } ClockSteady::Reset(); (void)verbose_stream; } [[nodiscard]] uint64_t CurrentCycle() const override { return current_cycle; } void WaitForNextCycle() override { if (steady_clock::now() > next_cycle) { // if (verbose_stream != nullptr) { // (*verbose_stream) << fmt::format("Lost cycle at {}\n", current_cycle); // } next_cycle += tick; ++lost_cycles; ++current_cycle; return; } ++current_cycle; while (next_cycle > steady_clock::now()) { // busy loop // putting thread to sleep is not precise enough } next_cycle += tick; } void Reset() override { current_cycle = 0; start_time = steady_clock::now(); next_cycle = start_time + tick; } [[nodiscard]] uint64_t LostCycles() const override { return lost_cycles; } [[nodiscard]] uint64_t Frequency() const override { return frequency; } [[nodiscard]] double Time() const override { std::chrono::duration<double> dt = steady_clock::now() - start_time; return dt.count(); }; private: uint64_t current_cycle = 0; const uint64_t frequency; std::chrono::nanoseconds const tick; steady_clock::time_point next_cycle{}; steady_clock::time_point start_time{}; uint64_t lost_cycles = 0; // std::ostream *const verbose_stream; }; } // namespace emu
ELISA study of oocyst-sporozoite transition in malaria vectors. Intrinsic vector characteristics and environmental factors affect the sporogonic development of P. falciparum in Anopheles mosquitoes. We tested for the presence of the circumsporozoite protein, as a marker of the oocyst to sporozoite transition in naturally infected Anopheles gambiae s.l. and Anopheles funestus. Malaria vectors were collected in a village in the Sahel of Niger during the rainy and dry seasons. ELISA-CSP was carried out on abdomen and head/thorax portions from more than 2000 samples. No significant difference was found in the overall rates of infection of An. gambiae s.l. (4.13%) and An. funestus (3.58%). Given the differences in duration of the two parasite stages, P. falciparum CSP antigen prevalence was nearly as high in the abdomen as in the head/thorax, and did not differ significantly between An. gambiae s.l. and An. funestus. These preliminary results suggest that development from oocysts to salivary gland sporozoites is similar in the two vectors. However, these developmental indices varied as a function of the season in which samples were collected, particularly for An. gambiae s.l. This simple method may be useful for field studies assessing the effect of environmental and genetic factors on parasite survival.
#ifndef __AW_LOCAL_NOTIFICATIONS_NOTIFICATION_H__
#define __AW_LOCAL_NOTIFICATIONS_NOTIFICATION_H__

#include <type/aw_string.h>
#include <type/aw_buffer.h>
#include <rendering/aw_color.h>

namespace LocalNotifications
{
    class CNotification
    {
    public:
        explicit CNotification(int id);

        CNotification& setTitle(const char* title);
        CNotification& setText(const char* text);
        CNotification& setSmallIcon(const char* filename);
        CNotification& setLargeIcon(const char* filename);
        CNotification& setPriority(int priority);
        CNotification& setColor(const Rendering::CColor& color);
        CNotification& setAutoCancel(bool autoCancel);
        CNotification& setVibration(const Type::CBuffer<long>& pattern);
        CNotification& setPayload(const char* payload);

        // Accessors: no parameters needed (the originals took arguments they ignored).
        const char* getTitle() const { return mTitle.get(); }
        const char* getText() const { return mText.get(); }
        const char* getSmallIcon() const { return mSmallIcon.get(); }
        const char* getLargeIcon() const { return mLargeIcon.get(); }
        int getPriority() const { return mPriority; }
        int getColor() const { return mColor.toInt(); }
        bool getAutoCancel() const { return mAutoCancel; }
        const Type::CBuffer<long>& getVibration() const { return mVibrationPattern; }
        const char* getPayload() const { return mPayload.get(); }

    private:
        int mId;
        Type::CString mTitle;
        Type::CString mText;
        Type::CString mSmallIcon;
        Type::CString mLargeIcon;
        int mPriority;
        Rendering::CColor mColor;
        Type::CBuffer<long> mVibrationPattern;
        Type::CString mPayload;
        bool mAutoCancel;
    };
}

#endif
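// Minimal usage sketch for the builder-style setters above; an illustration,
// not part of the original header. It assumes the header has been included and
// calls only methods declared in CNotification; the scheduling side of the API
// is left out because it is not shown here.
void scheduleDailyReminder()
{
    LocalNotifications::CNotification note(42);   // 42 is an arbitrary notification id
    note.setTitle("Daily reward")
        .setText("Your chest is ready to open!")
        .setAutoCancel(true)
        .setPayload("reward_chest");
    // Hand `note` to whatever scheduler the engine exposes (not part of this header).
}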
Effects of wear speeds on friction-wear behaviours of cathode arc ion plated TiAlSiN coatings at high temperatures Abstract A TiAlSiN coating was deposited on AISI H13 hot work mould steel using cathodic arc ion plating (CAIP). The microstructure, chemical composition and phases of the obtained coatings were analysed using a field emission scanning electron microscope, an energy dispersive spectrometer (EDS) and an X-ray diffractometer, respectively. The high-temperature friction-wear properties of the TiAlSiN coating at different wear speeds were investigated, and the wear mechanism is also discussed. The results show that the nitrogen in the TiAlSiN coating is not completely released at 800 °C, so the TiN diffraction peak is still present in the coating. In addition, the oxidation products SiO2 and Al2O3 provide self-lubrication and wear resistance. The average coefficient of friction (COF) of the coatings at wear speeds of 400, 600 and 800 r/min is 0.15, 0.22 and 0.17, respectively. The wear mechanism of the TiAlSiN coating at 800 °C is primarily adhesive wear, accompanied by oxidation wear and abrasive wear.
/**
 * CIS 120 Game HW
 * (c) University of Pennsylvania
 * @version 2.1, Apr 2017
 */

import java.awt.*;

/**
 * A basic object representing a snakeunit starting in the upper left corner of the game court.
 * It is displayed as a square of a specified color.
 */
public class Snakeunit extends GameObj {
    public static final int SIZE = 7;
    public static final int INIT_VEL_X = 0;
    public static final int INIT_VEL_Y = 0;

    private Color color;

    public Snakeunit(int posx, int posy, int courtWidth, int courtHeight, Color color) {
        super(INIT_VEL_X, INIT_VEL_Y, posx, posy, SIZE, SIZE, courtWidth, courtHeight);
        this.color = color;
    }

    @Override
    public void draw(Graphics g) {
        g.setColor(this.color);
        g.fillRect(this.getPx(), this.getPy(), this.getWidth(), this.getHeight());
    }
}
#include <stdio.h>

int main() {
    double n;
    int n100, n50, n20, n10, n5, n2, n1, n050, n025, n010, n005, n001, p;

    scanf("%lf", &n);
    /* Work in integer cents; round before truncating so amounts that are not
       exactly representable as doubles do not lose a cent. */
    p = (int)(n * 100 + 0.5);

    n100 = p / 10000; p = p % 10000;
    n50  = p / 5000;  p = p % 5000;
    n20  = p / 2000;  p = p % 2000;
    n10  = p / 1000;  p = p % 1000;
    n5   = p / 500;   p = p % 500;
    n2   = p / 200;   p = p % 200;
    n1   = p / 100;   p = p % 100;
    n050 = p / 50;    p = p % 50;
    n025 = p / 25;    p = p % 25;
    n010 = p / 10;    p = p % 10;
    n005 = p / 5;     p = p % 5;
    n001 = p;

    printf("NOTAS:\n");
    printf("%d nota(s) de R$ 100.00\n", n100);
    printf("%d nota(s) de R$ 50.00\n", n50);
    printf("%d nota(s) de R$ 20.00\n", n20);
    printf("%d nota(s) de R$ 10.00\n", n10);
    printf("%d nota(s) de R$ 5.00\n", n5);
    printf("%d nota(s) de R$ 2.00\n", n2);
    printf("MOEDAS:\n");
    printf("%d moeda(s) de R$ 1.00\n", n1);
    printf("%d moeda(s) de R$ 0.50\n", n050);
    printf("%d moeda(s) de R$ 0.25\n", n025);
    printf("%d moeda(s) de R$ 0.10\n", n010);
    printf("%d moeda(s) de R$ 0.05\n", n005);
    printf("%d moeda(s) de R$ 0.01\n", n001);
    return 0;
}
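/*
 * Worked example (illustrative, using the rounding fix above): for the input
 * 576.73 the program works with p = 57673 cents and prints
 *   NOTAS:  5 x R$ 100.00, 1 x R$ 50.00, 1 x R$ 20.00, 0 x R$ 10.00,
 *           1 x R$ 5.00, 0 x R$ 2.00
 *   MOEDAS: 1 x R$ 1.00, 1 x R$ 0.50, 0 x R$ 0.25, 2 x R$ 0.10,
 *           0 x R$ 0.05, 3 x R$ 0.01
 * Without rounding, the product n * 100 can land just below the intended whole
 * number of cents (fractions like .73 are not exactly representable in binary
 * floating point), and plain truncation would then drop one cent.
 */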
#!/usr/bin/env python # -*- coding: utf-8 -*- # Common Python library imports import copy # Pip package imports import pytest from marshmallow.exceptions import ValidationError # Internal package imports from backend.farm.serializers import FarmSerializer, FarmListSerializer from .. import get_input_data VALID_INPUT_DATA = [ ({'title': 'Farm 1', 'regionId': lambda c: c.id}), ({'title': 'Farm 12313', 'regionId': lambda c: c.id}), ({'title': 'Farm #$!"1', 'regionId': lambda c: c.id}), ] INVALID_INPUT_DATA = [ ({'title': 'Farm 11', 'regionId': None}, 'Field may not be null.', 'regionId'), ({'title': 'Farm 11', 'regionId': 999}, 'ID 999 does not exist.', 'regionId'), ({'title': None, 'regionId': lambda c: c.id}, 'Field may not be null.', 'title'), ] VALID_INPUT_DATA_LIST = [ [({'title': 'Farm 1', 'regionId': lambda c: c.id}), ({'title': 'Farm 12313', 'regionId': lambda c: c.id})], [({'title': 'Farm #$!"1', 'regionId': lambda c: c.id})], ] INVALID_INPUT_DATA_LIST = [ ([{'title': 'Farm 11', 'regionId': None}], 'Field may not be null.', 'regionId'), ([{'title': 'Farm 11', 'regionId': 999}], 'ID 999 does not exist.', 'regionId'), ([{'title': None, 'regionId': lambda c: c.id}], 'Field may not be null.', 'title'), ] class TestFarmSerializer: @pytest.mark.parametrize("input", VALID_INPUT_DATA) def test_valid_inputs(self, input, region_1): serializer = FarmSerializer() serializer.load(copy.deepcopy(get_input_data(input, region_1))) @pytest.mark.parametrize("input,msg,field", INVALID_INPUT_DATA) def test_invalid_inputs(self, input, msg, field, region_1): serializer = FarmSerializer() with pytest.raises(ValidationError) as v: serializer.load(copy.deepcopy(get_input_data(input, region_1))) assert msg in v.value.args[0][field] @pytest.mark.parametrize("input", VALID_INPUT_DATA) def test_valid_serialize_deserialize(self, input, region_1): serializer = FarmSerializer() result = serializer.load(copy.deepcopy(get_input_data(input, region_1))) result = serializer.dump(result) assert result['title'] == input['title'] class TestFarmListSerializer: @pytest.mark.parametrize("input", VALID_INPUT_DATA_LIST) def test_valid_inputs(self, input, region_1): serializer = FarmListSerializer() serializer.load(copy.deepcopy(get_input_data(input, region_1)), many=True) @pytest.mark.parametrize("input,msg,field", INVALID_INPUT_DATA_LIST) def test_invalid_inputs(self, input, msg, field, region_1): serializer = FarmListSerializer() with pytest.raises(ValidationError) as v: serializer.load(copy.deepcopy(get_input_data(input, region_1)), many=True) @pytest.mark.skip(reason="Updateting the ID of country is not working here.") @pytest.mark.parametrize("input", VALID_INPUT_DATA_LIST) def test_valid_serialize_deserialize(self, input, region_1): serializer = FarmListSerializer() result = serializer.load(copy.deepcopy(get_input_data(input, region_1)), many=True) result = serializer.dump(result) for r, i in zip(result, input): assert r['title'] == i['title']
Women's health issues and nuclear medicine, Part I: Women and heart disease. This is the first article of a four-part series on women's health issues and nuclear medicine. This article will review women and heart disease. After reading this article the technologist will be able to: (a) compare and contrast the differences in diagnosing coronary artery disease between men and women; (b) explain the importance of radionuclide myocardial perfusion imaging in diagnosing and stratifying risk of coronary artery disease in women; and (c) list and explain the technical challenges of imaging women's hearts.
import java.util.List;

/**
 * All Tinbo plugins must extend this base class, which predefines the id used
 * for all plugins (the simple class name by default).
 * <p>
 * Declaring your implementations of this class in a
 * META-INF/services/io.gitlab.arturbosch.tinbo.plugins.TiNBoPlugin
 * file is needed to get your plugins loaded at startup.
 *
 * @author Artur Bosch
 */
public abstract class TinboPlugin {

	static final String UNSPECIFIED = "unspecified";

	public String name() {
		return getClass().getSimpleName();
	}

	public String version() {
		return UNSPECIFIED;
	}

	// TinboMode, TinboContext, Command and @Nullable come from the surrounding Tinbo project.
	@Nullable
	public TinboMode providesMode() {
		return null;
	}

	public abstract List<Command> registerCommands(TinboContext tinbo);

	@Override
	public String toString() {
		return name();
	}
}
// RequireAdminRole checks that the request comes from a user with the admin role.
func RequireAdminRole(ctx *gin.Context) {
	if ctx.GetHeader(textproto.CanonicalMIMEHeaderKey(headers.UserRoleXHeader)) != "admin" {
		gonic.Gonic(mterrors.ErrAdminRequired(), ctx)
		return
	}
}
console.log('fs', window.fs)
console.log('ipcRenderer', window.ipcRenderer)

// Usage of ipcRenderer.on
window.ipcRenderer.on('main-process-message', (_event, ...args) => {
  console.log('[Receive Main-process message]:', ...args)
})

export default {}
<gh_stars>1-10 #include <assert.h> #include <iostream> #include <fstream> #include <Misc.hpp> #include <RTM.hpp> #include <RTMGrid.hpp> #include <RTMGPU.hpp> #include <RTMGPUPlatform.hpp> #ifdef RTM_ACC_GPU #include <cuda.h> #include <cuda_runtime.h> #include <rtmgpu.hpp> #endif void RTMGPUPlatform::destroyRTMPlatform() { #ifdef RTM_ACC_GPU CUDACHECK(cudaDeviceReset()); #endif } void RTMGPUPlatform::initRTMPlatform() { #ifdef RTM_ACC_GPU RTM_PRINT("Initializing CUDA GPU Platform...", rtmParam->verbose); CUDACHECK(cudaGetDeviceCount(&nDevices)); deviceID=pLimits->lRank%nDevices; // each process uses a different GPU when // multiple devices are available CUDACHECK(cudaSetDevice(deviceID)); CUDACHECK(cudaDeviceReset()); CUDACHECK(cudaGetDeviceProperties(&deviceProperties,deviceID)); // printf(">>> [P%d] GPU Properties: \n", pLimits->pRank); // printf(">>> [P%d] nDevices= %d \n", pLimits->pRank, nDevices); // printf(">>> [P%d] devId = %d (lRank=%d) \n", pLimits->pRank, deviceID, pLimits->pRank); //printf(">>> \t\t + name : %s \n", deviceProperties.name); //printf(">>> \t\t + totalGlobalMem : %d GB \n",deviceProperties.totalGlobalMem/1000000000); //printf(">>> \t\t + sharedMemPerBlock : %d \n", deviceProperties.sharedMemPerBlock); //printf(">>> \t\t + regsPerBlock : %d \n", deviceProperties.regsPerBlock); //printf(">>> \t\t + warpSize : %d \n", deviceProperties.warpSize); //printf(">>> \t\t + memPitch : %d \n", deviceProperties.memPitch); //printf(">>> \t\t + maxThreadsPerBlock : %d \n", deviceProperties.maxThreadsPerBlock); //printf(">>> \t\t + maxThreadsDim : (%d,%d,%d) \n", deviceProperties.maxThreadsDim[0], deviceProperties.maxThreadsDim[1], deviceProperties.maxThreadsDim[2]); //printf(">>> \t\t + maxGridSize : (%d,%d,%d) \n", deviceProperties.maxGridSize[0], deviceProperties.maxGridSize[1], deviceProperties.maxGridSize[2]); //printf(">>> \t\t + totalConstMem : %d \n", deviceProperties.totalConstMem); //printf(">>> \t\t + major : %d \n", deviceProperties.major); //printf(">>> \t\t + minor : %d \n", deviceProperties.minor); //printf(">>> \t\t + clockRate : %d \n", deviceProperties.clockRate); //printf(">>> \t\t + deviceOverlap : %d \n", deviceProperties.deviceOverlap); //printf(">>> \t\t + multiProcessorCount : %d \n", deviceProperties.multiProcessorCount); //printf(">>> \t\t + kernelExecTimeoutEnabled: %d \n", deviceProperties.kernelExecTimeoutEnabled); //printf(">>> \t\t + integrated : %d \n", deviceProperties.integrated); //printf(">>> \t\t + canMapHostMemory : %d \n", deviceProperties.canMapHostMemory); //printf(">>> \t\t + computeMode : %d \n", deviceProperties.computeMode); #endif } void RTMGPUPlatform::rtmStepMultipleWave(RTMCube<RTMData_t, RTMDevPtr_t> *P0Grid, RTMCube<RTMData_t, RTMDevPtr_t> *PP0Grid, RTMCube<RTMData_t, RTMDevPtr_t> *P1Grid, RTMCube<RTMData_t, RTMDevPtr_t> *PP1Grid, RTMStencil<RTMData_t,RTMDevPtr_t> *stencil, const RTMVelocityModel<RTMData_t,RTMDevPtr_t> &v2dt2Grid) { grtmStepMultiWave(P0Grid->getDevPtr(), PP0Grid->getDevPtr(),P1Grid->getDevPtr(), PP1Grid->getDevPtr(), stencil->getDevPtr(), v2dt2Grid.getDevPtr()); } void RTMGPUPlatform::rtmUpdateFreqContributions(int it, int iw, int lw, RTMCube<RTMData_t, RTMDevPtr_t> *PSGrid, RTMCube<RTMData_t, RTMDevPtr_t> *PRGrid, RTMGridCollection<RTMData_t,RTMDevPtr_t> *PSReGrid, RTMGridCollection<RTMData_t,RTMDevPtr_t> *PSImGrid, RTMGridCollection<RTMData_t,RTMDevPtr_t> *PRReGrid, RTMGridCollection<RTMData_t,RTMDevPtr_t> *PRImGrid, RTMPlane<RTMData_t,RTMDevPtr_t> * kernelRe, RTMPlane<RTMData_t,RTMDevPtr_t> * kernelIm) { 
grtmUpdateFreqContribution((uint64_t) it, (uint64_t) iw, (uint64_t)lw, PSReGrid->getStartX(), PSReGrid->getEndX(), PSReGrid->getStartY(), PSReGrid->getEndY(), PSReGrid->getStartZ(), PSReGrid->getEndZ(), kernelRe->getDevPtr(), kernelIm->getDevPtr(), PSGrid->getDevPtr(), PRGrid->getDevPtr(), PSReGrid->getDevPtr(), PSImGrid->getDevPtr(), PRReGrid->getDevPtr(), PRImGrid->getDevPtr()); } void RTMGPUPlatform::rtmFreqDomainImageCondition(int iw, int lw, RTMVector<RTMData_t,RTMDevPtr_t> * w2List, RTMCube<RTMData_t, RTMDevPtr_t> *imgGrid, RTMGridCollection<RTMData_t,RTMDevPtr_t> *PSReGrid, RTMGridCollection<RTMData_t,RTMDevPtr_t> *PSImGrid, RTMGridCollection<RTMData_t,RTMDevPtr_t> *PRReGrid, RTMGridCollection<RTMData_t,RTMDevPtr_t> *PRImGrid) { grtmFreqImgCondition((uint64_t)iw, (uint64_t)lw, PSReGrid->getStartX(), PSReGrid->getEndX(), PSReGrid->getStartY(), PSReGrid->getEndY(), PSReGrid->getStartZ(), PSReGrid->getEndZ(), w2List->getDevPtr(),imgGrid->getDevPtr(), PSReGrid->getDevPtr(), PSImGrid->getDevPtr(), PRReGrid->getDevPtr(), PRImGrid->getDevPtr()); } void RTMGPUPlatform::rtmApplySource(RTMCube<RTMData_t, RTMDevPtr_t> * PGrid, RTMSeismicSource<RTMData_t,RTMDevPtr_t> *srcGrid, uint32_t it) { /* source position does not consider the extended grid */ int sxe = srcGrid->getX() + rtmParam->blen; int sye = srcGrid->getY() + rtmParam->blen; int sze = srcGrid->getZ() + rtmParam->blen; RTMData_t srcVal = (*srcGrid)[it]; grtmSource(sxe, sye, sze, srcVal, PGrid->getDevPtr()); } void RTMGPUPlatform::rtmRestoreReceiverData(RTMCube<RTMData_t, RTMDevPtr_t> * PPGrid, RTMReceiverGrid<RTMData_t,RTMDevPtr_t> *rcvGrid, uint32_t it) { grtmSeism(PPGrid->getDevPtr(), rcvGrid->getDevPtr(), rcvGrid->getNZ(), it, false); } void RTMGPUPlatform::rtmSaveReceiverData(RTMCube<RTMData_t, RTMDevPtr_t> * PPGrid, RTMReceiverGrid<RTMData_t,RTMDevPtr_t> *rcvGrid, uint32_t it) { grtmSeism(PPGrid->getDevPtr(), rcvGrid->getDevPtr(), rcvGrid->getNZ(), it, true); } void RTMGPUPlatform::rtmSaveUpperBorder(RTMCube<RTMData_t, RTMDevPtr_t> *PGrid, RTMGridCollection<RTMData_t,RTMDevPtr_t> *upbGrid,uint32_t it) { grtmUPB(it, PGrid->getDevPtr(), upbGrid->getDevPtr(), true); } void RTMGPUPlatform::rtmRestoreUpperBorder(RTMCube<RTMData_t, RTMDevPtr_t> *PGrid, RTMGridCollection<RTMData_t,RTMDevPtr_t> *upbGrid,uint32_t it) { grtmUPB(it, PGrid->getDevPtr(), upbGrid->getDevPtr(), false); } void RTMGPUPlatform::rtmStep(RTMCube<RTMData_t, RTMDevPtr_t> *PGrid, RTMCube<RTMData_t, RTMDevPtr_t> *PPGrid, RTMStencil<RTMData_t,RTMDevPtr_t> *stencil, const RTMVelocityModel<RTMData_t,RTMDevPtr_t> &v2dt2Grid) { grtmStep(PGrid->getDevPtr(), PPGrid->getDevPtr(), stencil->getDevPtr(), v2dt2Grid.getDevPtr()); } /** * Cross-correlation image condition * OBS: IMG, PS and PR grids must have the same dimensions * */ void RTMGPUPlatform::rtmImageCondition(RTMCube<RTMData_t, RTMDevPtr_t> *imgGrid, RTMCube<RTMData_t, RTMDevPtr_t> *PSGrid, RTMCube<RTMData_t, RTMDevPtr_t> *PRGrid) { int ix, iy, iz; int nx = imgGrid->getNX(); int ny = imgGrid->getNY(); int nz = imgGrid->getNZ(); assert((imgGrid->getNX() == PSGrid->getNX()) && (imgGrid->getNX() == PRGrid->getNX())); assert((imgGrid->getNY() == PSGrid->getNY()) && (imgGrid->getNY() == PRGrid->getNY())); assert((imgGrid->getNZ() == PSGrid->getNZ()) && (imgGrid->getNZ() == PRGrid->getNZ())); grtmImgCondition(imgGrid->getDevPtr(), PSGrid->getDevPtr(), PRGrid->getDevPtr()); } void RTMGPUPlatform::rtmTaperAllBorders(RTMCube<RTMData_t, RTMDevPtr_t> *rtmGrid, RTMTaperFunction<RTMData_t,RTMDevPtr_t> *rtmTaper) { 
grtmTaperBorders(rtmGrid->getDevPtr(), rtmTaper->getDevPtr(), false); } void RTMGPUPlatform::rtmTaperUpperBorders(RTMCube<RTMData_t, RTMDevPtr_t> *rtmGrid, RTMTaperFunction<RTMData_t,RTMDevPtr_t> *rtmTaper) { grtmTaperBorders(rtmGrid->getDevPtr(), rtmTaper->getDevPtr(), true); } void RTMGPUPlatform::grtmStep(RTMDevPtr_t * devP, RTMDevPtr_t * devPP, RTMDevPtr_t * devCoefs, RTMDevPtr_t * devV2DT2) { #ifdef RTM_ACC_GPU uint32_t gridx; uint32_t gridy; uint32_t gridz; bool rtm2D = rtmParam->nx==1; if (rtm2D){ gridx = CUDANGRIDS(plen_y, SMALL_BLOCK_SIZE); gridy = CUDANGRIDS(plen_z, SMALL_BLOCK_SIZE); gridz = 1; }else{ gridx = CUDANGRIDS(plen_x, SMALL_BLOCK_SIZE); gridy = CUDANGRIDS(plen_y, SMALL_BLOCK_SIZE); gridz = CUDANGRIDS(plen_z, SMALL_BLOCK_SIZE); } dim3 dimGrid(gridx, gridy, gridz); dim3 dimBlock(SMALL_BLOCK_SIZE, SMALL_BLOCK_SIZE, SMALL_BLOCK_SIZE); // printf(">> PLEN_X=%d; GridNXE = %d \n", plen_x, gridx); // printf(">> PLEN_Y=%d; GridNYE = %d \n", plen_y, gridy); // printf(">> PLEN_Z=%d; GridNZE = %d \n", plen_z, gridz); RTMSTEP_WRAPPER(dimGrid, dimBlock, st_order, plen_x, plen_y, plen_z, rtmParam->blen, devP, devPP, devV2DT2, devCoefs, rtm2D); CUDACHECK( cudaPeekAtLastError() ); CUDACHECK( cudaDeviceSynchronize() ); #endif } void RTMGPUPlatform::grtmStepMultiWave( RTMDevPtr_t * devP0, RTMDevPtr_t * devPP0, RTMDevPtr_t * devP1, RTMDevPtr_t * devPP1, RTMDevPtr_t * devCoefs, RTMDevPtr_t * devV2DT2) { #ifdef RTM_ACC_GPU uint32_t gridx = CUDANGRIDS(plen_x, SMALL_BLOCK_SIZE); uint32_t gridy = CUDANGRIDS(plen_y, SMALL_BLOCK_SIZE); uint32_t gridz = CUDANGRIDS(plen_z, SMALL_BLOCK_SIZE); bool rtm2D = rtmParam->nx==1; dim3 dimGrid(gridx, gridy, gridz); dim3 dimBlock(SMALL_BLOCK_SIZE, SMALL_BLOCK_SIZE, SMALL_BLOCK_SIZE); // printf(">> PLEN_X=%d; GridNXE = %d \n", plen_x, gridx); // printf(">> PLEN_Y=%d; GridNYE = %d \n", plen_y, gridy); // printf(">> PLEN_Z=%d; GridNZE = %d \n", plen_z, gridz); RTMSTEP_MULTIWAVE_WRAPPER(dimGrid, dimBlock, st_order, plen_x, plen_y, plen_z, devP0, devPP0, devP1, devPP1, devV2DT2, devCoefs, rtm2D); CUDACHECK( cudaPeekAtLastError() ); CUDACHECK( cudaDeviceSynchronize() ); #endif } void RTMGPUPlatform::grtmSeism(RTMDevPtr_t * devPPR, RTMDevPtr_t * devSeism, int _nt, int _it, bool modeling){ #ifdef RTM_ACC_GPU uint32_t rcvOffsetX = rtmParam->receiver_start_x; uint32_t rcvDistX = rtmParam->receiver_distance_x; uint32_t rcvCountX = rtmParam->receiver_count_x; uint32_t rcvOffsetY = rtmParam->receiver_start_y; uint32_t rcvDistY = rtmParam->receiver_distance_y; uint32_t rcvCountY = rtmParam->receiver_count_y; uint32_t rcvDepthZ = rtmParam->receiver_depth_z + rtmParam->blen; uint32_t pStartX = pLimits->processArea.xStart; uint32_t pEndX = pLimits->processArea.xEnd; uint32_t pStartY = pLimits->processArea.yStart; uint32_t pEndY = pLimits->processArea.yEnd; uint32_t blen = rtmParam->blen; uint32_t nze = rtmParam->nz + 2*rtmParam->blen; uint32_t nt = _nt; uint32_t it = _it; uint32_t gNXE = CUDANGRIDS(rtmParam->receiver_count_x, DEFAULT_BLOCK_SIZE); uint32_t gNYE = CUDANGRIDS(rtmParam->receiver_count_y, DEFAULT_BLOCK_SIZE); dim3 dimGrid(gNXE, gNYE, 1); dim3 dimBlock(DEFAULT_BLOCK_SIZE, DEFAULT_BLOCK_SIZE, 1); RTMSEISM_WRAPPER(dimGrid, dimBlock, rcvOffsetX, rcvDistX, rcvCountX, rcvOffsetY, rcvDistY, rcvCountY, rcvDepthZ, pStartX, pEndX, pStartY, pEndY, blen, nze, nt, it, modeling, devSeism, devPPR); CUDACHECK( cudaPeekAtLastError() ); CUDACHECK( cudaDeviceSynchronize() ); #endif } void RTMGPUPlatform::grtmSource(uint32_t sx, uint32_t sy, uint32_t sz, RTMData_t eval, RTMDevPtr_t * 
devPP) { #ifdef RTM_ACC_GPU dim3 dimGrid(1, 1, 1); dim3 dimBlock(1, 1, 1); uint32_t pStartX = pLimits->processArea.xStart; uint32_t pEndX = pLimits->processArea.xEnd; uint32_t pStartY = pLimits->processArea.yStart; uint32_t pEndY = pLimits->processArea.yEnd; uint32_t nze = rtmParam->nz + 2*rtmParam->blen; if (sx >= pStartX && sx < pEndX){ if (sy >= pStartY && sy < pEndY){ RTMSOURCE_WRAPPER(dimGrid, dimBlock, sx, sy, sz, eval, pStartX, pEndX, pStartY, pEndY, nze,devPP); CUDACHECK( cudaPeekAtLastError() ); CUDACHECK( cudaDeviceSynchronize() ); } } #endif } void RTMGPUPlatform::grtmTaperBorders( RTMDevPtr_t * P, RTMDevPtr_t * TAPER, bool upperBorderOnly) { #ifdef RTM_ACC_GPU uint32_t gridx = CUDANGRIDS(plen_x, SMALL_BLOCK_SIZE); uint32_t gridy = CUDANGRIDS(plen_y, SMALL_BLOCK_SIZE); uint32_t gridz; if(upperBorderOnly){ gridz = CUDANGRIDS(rtmParam->blen, SMALL_BLOCK_SIZE); }else{ gridz = CUDANGRIDS(plen_z, SMALL_BLOCK_SIZE); } dim3 dimGrid(gridx, gridy, gridz); dim3 dimBlock(SMALL_BLOCK_SIZE, SMALL_BLOCK_SIZE, SMALL_BLOCK_SIZE); uint32_t pStartX = pLimits->processArea.xStart; uint32_t pEndX = pLimits->processArea.xEnd; uint32_t pStartY = pLimits->processArea.yStart; uint32_t pEndY = pLimits->processArea.yEnd; uint32_t nxe = rtmParam->nx + 2*rtmParam->blen; uint32_t nye = rtmParam->ny + 2*rtmParam->blen; uint32_t nze = rtmParam->nz + 2*rtmParam->blen; uint32_t blen = rtmParam->blen; RTMTAPER_WRAPPER(dimGrid, dimBlock, pStartX, pEndX,pStartY, pEndY, nxe, nye, nze, blen, TAPER, P, upperBorderOnly); CUDACHECK( cudaPeekAtLastError() ); CUDACHECK( cudaDeviceSynchronize() ); #endif } void RTMGPUPlatform::grtmSwapPtr(RTMDevPtr_t ** devA, RTMDevPtr_t ** devB) { #ifdef RTM_ACC_GPU // RTMDevPtr_t * DEV_SWAP_PTR = *devA; // *devA = *devB; // *devB = DEV_SWAP_PTR; #endif } void RTMGPUPlatform::grtmImgCondition(RTMDevPtr_t * IMG, RTMDevPtr_t * PS, RTMDevPtr_t * PR) { #ifdef RTM_ACC_GPU uint32_t gridx = CUDANGRIDS(plen_x, SMALL_BLOCK_SIZE); uint32_t gridy = CUDANGRIDS(plen_y, SMALL_BLOCK_SIZE); uint32_t gridz = CUDANGRIDS(plen_z, SMALL_BLOCK_SIZE); dim3 dimGrid(gridx, gridy, gridz); dim3 dimBlock(SMALL_BLOCK_SIZE, SMALL_BLOCK_SIZE, SMALL_BLOCK_SIZE); RTMIMG_WRAPPER(dimGrid, dimBlock, plen_x, plen_y, plen_z, IMG, PS, PR); CUDACHECK( cudaPeekAtLastError() ); CUDACHECK( cudaDeviceSynchronize() ); #endif } void RTMGPUPlatform::grtmUPB(uint32_t it, RTMDevPtr_t * devPP, RTMDevPtr_t * devUPB, bool rw){ #ifdef RTM_ACC_GPU uint32_t nze = rtmParam->nz + 2*rtmParam->blen; uint32_t gridx = CUDANGRIDS(plen_x, DEFAULT_BLOCK_SIZE); uint32_t gridy = CUDANGRIDS(plen_y, DEFAULT_BLOCK_SIZE); dim3 dimGrid(gridx, gridy, 1); dim3 dimBlock(DEFAULT_BLOCK_SIZE, DEFAULT_BLOCK_SIZE, 1); RTMWUPB_WRAPPER(dimGrid, dimBlock, rtmParam->stencil_order, plen_x, plen_y, nze, rtmParam->blen, it, rw, devPP,devUPB); CUDACHECK( cudaPeekAtLastError() ); CUDACHECK( cudaDeviceSynchronize() ); #endif } void RTMGPUPlatform::grtmFreqImgCondition(uint64_t iw, uint64_t lw, uint64_t iStartX, uint64_t iEndX, uint64_t iStartY, uint64_t iEndY, uint64_t iStartZ, uint64_t iEndZ, RTMDevPtr_t * w2List,RTMDevPtr_t * IMG, RTMDevPtr_t * PSRe, RTMDevPtr_t * PSIm, RTMDevPtr_t * PRRe, RTMDevPtr_t * PRIm){ #ifdef RTM_ACC_GPU uint64_t nxe = rtmParam->nx + 2*rtmParam->blen; uint64_t nye = rtmParam->ny + 2*rtmParam->blen; uint64_t nze = rtmParam->nz + 2*rtmParam->blen; uint64_t blen = rtmParam->blen; if(iStartX >= rtmParam->nx || iStartY >= rtmParam->ny || iStartZ>=rtmParam->nz){ return; // invalid area } // this is necessary to avoid border regions uint32_t iNX = iEndX - 
iStartX; uint32_t iNY = iEndY - iStartY; uint32_t iNZ = iEndZ - iStartZ; uint32_t gridx = CUDANGRIDS(iNX, SMALL_BLOCK_SIZE); uint32_t gridy = CUDANGRIDS(iNY, SMALL_BLOCK_SIZE); uint32_t gridz = CUDANGRIDS(iNZ, SMALL_BLOCK_SIZE); // printf("P[%d]: gridx = %d gridy=%d gridz=%d inY=%d\n", pLimits->pRank, gridx, gridy, gridz, iNY); dim3 dimGrid(gridx, gridy, gridz); dim3 dimBlock(SMALL_BLOCK_SIZE, SMALL_BLOCK_SIZE, SMALL_BLOCK_SIZE); RTM_FREQIMG_WRAPPER(dimGrid, dimBlock, iw,lw, iStartX, iEndX, iStartY, iEndY, iStartZ, iEndZ, nxe, nye, nze, blen, w2List, IMG, PSRe, PSIm, PRRe, PRIm); CUDACHECK( cudaPeekAtLastError() ); CUDACHECK( cudaDeviceSynchronize() ); #endif } void RTMGPUPlatform::grtmUpdateFreqContribution(uint64_t it, uint64_t iw, uint64_t lw, uint64_t iStartX, uint64_t iEndX, uint64_t iStartY, uint64_t iEndY, uint64_t iStartZ, uint64_t iEndZ, RTMDevPtr_t * kernelRe, RTMDevPtr_t * kernelIm, RTMDevPtr_t * PS, RTMDevPtr_t * PR, RTMDevPtr_t * PSRe, RTMDevPtr_t * PSIm, RTMDevPtr_t * PRRe, RTMDevPtr_t * PRIm){ #ifdef RTM_ACC_GPU uint64_t nxe = rtmParam->nx + 2*rtmParam->blen; uint64_t nye = rtmParam->ny + 2*rtmParam->blen; uint64_t nze = rtmParam->nz + 2*rtmParam->blen; uint64_t blen = rtmParam->blen; if(iStartX >= rtmParam->nx || iStartY >= rtmParam->ny || iStartZ>=rtmParam->nz){ return; // invalid area } // this is necessary to avoid border regions uint32_t iNX = iEndX - iStartX; uint32_t iNY = iEndY - iStartY; uint32_t iNZ = iEndZ - iStartZ; uint32_t gridx = CUDANGRIDS(iNX, SMALL_BLOCK_SIZE); uint32_t gridy = CUDANGRIDS(iNY, SMALL_BLOCK_SIZE); uint32_t gridz = CUDANGRIDS(iNZ, SMALL_BLOCK_SIZE); dim3 dimGrid(gridx, gridy, gridz); dim3 dimBlock(SMALL_BLOCK_SIZE, SMALL_BLOCK_SIZE, SMALL_BLOCK_SIZE); // printf(" pNX = %d pNY = %d pNZ=%d \n pStartX = %d pEndX = %d \n pStartY = %d pEndY = %d \n", // iNX, iNY, iNZ, iStartX, iEndX, iStartY, iEndY); RTM_UPDATEFREQ_WRAPPER(dimGrid, dimBlock, it, iw,lw, rtmParam->nt, iStartX, iEndX, iStartY, iEndY, iStartZ, iEndZ, nxe, nye, nze, blen, kernelRe, kernelIm, PS, PR, PSRe, PSIm, PRRe, PRIm); CUDACHECK( cudaPeekAtLastError() ); CUDACHECK( cudaDeviceSynchronize() ); #endif }
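// Note on the launch-geometry helper used throughout this file: CUDANGRIDS is
// not defined in this translation unit, and the definition below is an
// assumption based on how it is used (number of blocks needed to cover n
// elements with the given block size), i.e. the usual ceiling division:
//
//   #define CUDANGRIDS(n, block) (((n) + (block) - 1) / (block))
//
// With SMALL_BLOCK_SIZE threads per dimension, dimGrid * dimBlock then covers
// at least plen_x * plen_y * plen_z cells; the kernels presumably bounds-check
// the overshoot (their bodies are not shown here).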
Learning-by-doing as an approach to teaching social entrepreneurship Many studies have explored the use of learning-by-doing in higher education, but few have applied this to social entrepreneurship contexts and applications: this paper addresses this gap in the literature. Our programme involved students working with different stakeholders in an interactive learning environment to generate real revenue for social enterprises. Our results show that learning-by-doing enables students to develop their entrepreneurial skills and enhance their knowledge of social businesses. The findings also show that students became more effective at working in teams and in formulating and applying appropriate business strategies for the social enterprises. Overall, the learning-by-doing approach discussed in this paper is capable of developing the entrepreneurial skills of students, but there are challenges that need to be addressed if such an approach is to be effective.