| file_name | prefix | suffix | middle | fim_type |
|---|---|---|---|---|
| large_string, lengths 4–140 | large_string, lengths 0–12.1k | large_string, lengths 0–12k | large_string, lengths 0–7.51k | large_string, 4 classes |

Each row below is one fill-in-the-middle (FIM) example: the name of the source file, the code before the masked span (prefix), the code after it (suffix), the masked span itself (middle), and the kind of span that was masked (the fim_type values seen in this dump are identifier_body, identifier_name, random_line_split, and conditional_block).
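A minimal sketch of reassembling one example programmatically, assuming the dump can be loaded as a Parquet table with the columns above (the file path below is a placeholder, not something stated in this dump):

```python
# Sketch only: load the table and rebuild the original source for one row.
# "path/to/fim_dump.parquet" is a placeholder path.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="path/to/fim_dump.parquet", split="train")

row = ds[0]
# prefix + middle + suffix reconstructs the file content this example was cut from.
original_source = row["prefix"] + row["middle"] + row["suffix"]
print(row["file_name"], row["fim_type"], len(original_source))
```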
main.rs |
fn get_file_path() -> Result<OsString, Box<dyn Error>> {
match env::args_os().nth(1) {
None => Err(From::from("expected 1 argument, but got none")),
Some(file_path) => Ok(file_path),
}
}
fn read_csv_file3() {
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.delimiter(b';')
.double_quote(false)
.flexible(true)
.comment(Some(b'#'))
.from_reader(io::stdin());
// All of these settings are configurable -- the builder is quite flexible.
}
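// Sketch (not part of the original file): one way a flexible reader like the one
// built above might be consumed. Because .flexible(true) is set, records may have
// differing numbers of fields, so the field count is printed alongside each record.
// The function name and signature are assumptions for illustration only.
fn print_flexible_records<R: std::io::Read>(mut rdr: csv::Reader<R>) -> Result<(), Box<dyn Error>> {
    for result in rdr.records() {
        let record = result?;
        println!("{} fields: {:?}", record.len(), record);
    }
    Ok(())
}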
type Record = (String, String, Option<u64>, f64, f64);
fn read_csv_file4() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.deserialize() {
let record: Record3 = result?;
println!("{:?}", record);
}
Ok(())
}
type Record2 = HashMap<String, String>;
fn read_csv_file5() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.deserialize() {
let record: Record2 = result?;
println!("{:?}", record);
}
Ok(())
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct Record3 {
latitude: f64,
longitude: f64,
// With this option, a value that fails to parse automatically becomes None.
#[serde(deserialize_with = "csv::invalid_option")]
population: Option<f64>,
city: String,
state: String,
}
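// Illustration (not from the original file): with csv::invalid_option above, a row
// whose Population column holds something that does not parse as a number (for
// example "abc") deserializes with population == None instead of failing the read.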
fn write_csv() -> Result<(), Box<dyn Error>> {
// let mut wtr = csv::Writer::from_writer(io::stdout());
let mut wtr = csv::WriterBuilder::new()
.delimiter(b'\t')
.quote_style(csv::QuoteStyle::NonNumeric)
.from_writer(io::stdout());
// The AsRef<[u8]> bound is useful because types such as String, &str, and Vec<u8> all satisfy it.
wtr.write_record(&["City", "State", "Population", "Latitude", "Longitude"])?;
wtr.write_record(&["Davidsons Landing", "AK", "", "65.2419444", "-165.2716667"])?;
wtr.write_record(&["Kenai", "AK", "7610", "60.5544444", "-151.2583333"])?;
wtr.write_record(&["Oakman", "AL", "", "33.7133333", "-87.3886111"])?;
wtr.flush()?;
Ok(())
}
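// Expected shape of write_csv's output -- a sketch based on the builder settings
// above (tab delimiter, QuoteStyle::NonNumeric), not captured from an actual run:
//
//   "City"<TAB>"State"<TAB>"Population"<TAB>"Latitude"<TAB>"Longitude"
//   "Davidsons Landing"<TAB>"AK"<TAB>""<TAB>65.2419444<TAB>-165.2716667
//
// Fields that parse as numbers stay unquoted; everything else, including the
// empty population field, is quoted.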
// Replacing the borrowed &str fields with owned String types would mean
// allocating a fresh String for both city and state every time a record is written.
// Writing would still work, but it wastes a bit of memory and performance.
#[derive(Debug, Serialize)]
#[serde(rename_all = "PascalCase")]
struct WriteRecord<'a> {
city: &'a str,
state: &'a str,
population: Option<u64>,
latitude: f64,
longitude: f64,
}
fn write_csv2() -> Result<(), Box<dyn Error>> {
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.serialize(WriteRecord {
city: "Davidsons Landing",
state: "AK",
population: None,
latitude: 65.2419444,
longitude: -165.2716667,
})?;
wtr.serialize(WriteRecord {
city: "Kenai",
state: "AK",
population: Some(7610),
latitude: 60.5544444,
longitude: -151.2583333,
})?;
wtr.serialize(WriteRecord {
city: "Oakman",
state: "AL",
population: None,
latitude: 33.7133333,
longitude: -87.3886111,
})?;
wtr.flush()?;
Ok(())
}
fn read_and_write_csv() -> Result<(), Box<dyn Error>> {
let argss = match env::args_os().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(argument) => argument,
};
// Build a CSV reader (stdin) and a CSV writer (stdout).
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.write_record(rdr.headers()?)?;
for result in rdr.records() {
let record = result?;
if record.iter().any(|r| r == &argss) {
wtr.write_record(&record)?;
}
}
wtr.flush()?;
Ok(())
}
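// Usage sketch (the binary and file names are hypothetical):
//   ./csv_example MA < uspop.csv > matched.csv
// writes the header row plus every record that contains the query string "MA".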
// How to handle input that cannot be converted to UTF-8:
// read it as raw bytes!
fn read_and_write_byte_csv() -> Result<(), Box<dyn Error>> {
let argss = match env::args().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(argument) => argument,
};
// Build a CSV reader (stdin) and a CSV writer (stdout).
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.write_record(rdr.byte_headers()?)?;
for result in rdr.byte_records() {
let record = result?;
// Note that argss.as_bytes() returns a reference.
if record.iter().any(|r| r == argss.as_bytes()) {
wtr.write_record(&record)?;
}
}
wtr.flush()?;
Ok(())
}
// Unlike the previous example, derive both Deserialize and Serialize.
// This means the type can be deserialized from and serialized to CSV automatically.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "PascalCase")]
struct RecordMulti {
city: String,
state: String,
population: Option<u64>,
latitude: f64,
}
fn read_and_write_csv_model() -> Result<(), Box<dyn Error>> {
// Accept the query as a positional argument.
// Return an error if no argument is given or it is not an integer.
let minimum_pop: u64 = match env::args().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(arg) => arg.parse::<u64>()?,
};
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
for result in rdr.deserialize() {
let record: RecordMulti = result?;
if record.population.map_or(false, |f| f >= minimum_pop) {
wtr.serialize(&record)?;
}
}
wtr.flush()?;
Ok(())
}
// ./csv_example < worldcitiespop.csv 2.12s user 0.09s system 70% cpu 3.125 total
fn performance_read_csv() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
let mut count = 0;
for result in reader.records() {
let record = result?;
if &record[0] == "us" && &record[3] == "MA" {
count += 1;
}
}
Ok(count)
}
// ./csv_example < worldcitiespop.csv 1.69s user 0.05s system 34% | cpu 5.094 total
// Switched from String-based processing to byte-based processing.
fn performance2_read_csv() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
let mut count = 0;
for result in reader.byte_records() {
let record = result?;
if &record[0] == b"us" && &record[3] == b"MA" {
count += 1;
}
}
Ok(count)
}
// ./csv_example < worldcitiespop.csv 0.44s user 0.04s system 22% cpu 2.142 total
// reader.records() yields a freshly allocated record on every iteration,
// so allocate a single record once and reuse it to cut down on allocations.
fn performance3_read_csv() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
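// Sketch of how this function likely continues, following the comment above about
// reusing a single record (an assumption -- the rest of the function is not shown
// in this row):
//
//     let mut record = csv::ByteRecord::new();
//     while reader.read_byte_record(&mut record)? {
//         if &record[0] == b"us" && &record[3] == b"MA" {
//             count += 1;
//         }
//     }
//     Ok(count)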
| identifier_body |
main.rs |
Err(e) => return Err(From::from(e)),
Ok(r) => println!("{:?}", r),
}
}
Ok(())
}
fn run_question() -> Result<(), Box<dyn Error>> {
let mut rds = csv::Reader::from_reader(io::stdin());
for result in rds.records() {
// Using ? here improves readability!
let a = result?;
println!("{:?}", a);
}
Ok(())
}
fn read_csv_file() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let file = File::open(file_path)?;
let mut rdr = csv::Reader::from_reader(file);
// Suppose we want to read the headers at this point.
// Option 1: clone().
// The downside is that it costs an extra copy in memory.
// let headers = rdr.headers()?.clone();
{
// Because of the borrow's lifetime, this call is nested in its own scope.
// Option 2: nest a scope.
// Otherwise the borrow is held and the records can no longer be iterated afterwards.
// <- I see, so this technique is an option as well.
let headers = rdr.headers()?;
println!("{:?}", headers);
}
for result in rdr.records() {
let record = result?;
println!("{:?}", record);
}
Ok(())
}
fn read_csv_file2() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.records() {
let record = result?;
println!("{:?}", record);
}
Ok(())
}
fn get_file_path() -> Result<OsString, Box<dyn Error>> {
match env::args_os().nth(1) {
None => Err(From::from("expected 1 argument, but got none")),
Some(file_path) => Ok(file_path),
}
}
fn read_csv_file3() {
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.delimiter(b';')
.double_quote(false)
.flexible(true)
.comment(Some(b'#'))
.from_reader(io::stdin());
// All of these settings are configurable -- the builder is quite flexible.
}
type Record = (String, String, Option<u64>, f64, f64);
fn read_csv_file4() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.deserialize() {
let record: Record3 = result?;
println!("{:?}", record);
}
Ok(())
}
type Record2 = HashMap<String, String>;
fn read_csv_file5() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.deserialize() {
let record: Record2 = result?;
println!("{:?}", record);
}
Ok(())
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct Record3 {
latitude: f64,
longitude: f64,
// With this option, a value that fails to parse automatically becomes None.
#[serde(deserialize_with = "csv::invalid_option")]
population: Option<f64>,
city: String,
state: String,
}
fn write_csv() -> Result<(), Box<dyn Error>> {
// let mut wtr = csv::Writer::from_writer(io::stdout());
let mut wtr = csv::WriterBuilder::new()
.delimiter(b'\t')
.quote_style(csv::QuoteStyle::NonNumeric)
.from_writer(io::stdout());
// The AsRef<[u8]> bound is useful because types such as String, &str, and Vec<u8> all satisfy it.
wtr.write_record(&["City", "State", "Population", "Latitude", "Longitude"])?;
wtr.write_record(&["Davidsons Landing", "AK", "", "65.2419444", "-165.2716667"])?;
wtr.write_record(&["Kenai", "AK", "7610", "60.5544444", "-151.2583333"])?;
wtr.write_record(&["Oakman", "AL", "", "33.7133333", "-87.3886111"])?;
wtr.flush()?;
Ok(())
}
// Replacing the borrowed &str fields with owned String types would mean
// allocating a fresh String for both city and state every time a record is written.
// Writing would still work, but it wastes a bit of memory and performance.
#[derive(Debug, Serialize)]
#[serde(rename_all = "PascalCase")]
struct WriteRecord<'a> {
city: &'a str,
state: &'a str,
population: Option<u64>,
latitude: f64,
longitude: f64,
}
fn write_csv2() -> Result<(), Box<dyn Error>> {
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.serialize(WriteRecord {
city: "Davidsons Landing",
state: "AK",
population: None,
latitude: 65.2419444,
longitude: -165.2716667,
})?;
wtr.serialize(WriteRecord {
city: "Kenai",
state: "AK",
population: Some(7610),
latitude: 60.5544444,
longitude: -151.2583333,
})?;
wtr.serialize(WriteRecord {
city: "Oakman",
state: "AL",
population: None,
latitude: 33.7133333,
longitude: -87.3886111,
})?;
wtr.flush()?;
Ok(())
}
fn read_and_write_csv() -> Result<(), Box<dyn Error>> {
let argss = match env::args_os().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(argument) => argument,
};
// Build a CSV reader (stdin) and a CSV writer (stdout).
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.write_record(rdr.headers()?)?;
for result in rdr.records() {
let record = result?;
if record.iter().any(|r| r == &argss) {
wtr.write_record(&record)?;
}
|
}
// How to handle input that cannot be converted to UTF-8:
// read it as raw bytes!
fn read_and_write_byte_csv() -> Result<(), Box<dyn Error>> {
let argss = match env::args().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(argument) => argument,
};
// Build a CSV reader (stdin) and a CSV writer (stdout).
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.write_record(rdr.byte_headers()?)?;
for result in rdr.byte_records() {
let record = result?;
// Note that argss.as_bytes() returns a reference.
if record.iter().any(|r| r == argss.as_bytes()) {
wtr.write_record(&record)?;
}
}
wtr.flush()?;
Ok(())
}
// Unlike the previous example, derive both Deserialize and Serialize.
// This means the type can be deserialized from and serialized to CSV automatically.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "PascalCase")]
struct RecordMulti {
city: String,
state: String,
population: Option<u64>,
latitude: f64,
}
fn read_and_write_csv_model() -> Result<(), Box<dyn Error>> {
// Accept the query as a positional argument.
// Return an error if no argument is given or it is not an integer.
let minimum_pop: u64 = match env::args().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(arg) => arg.parse::<u64>()?,
};
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
for result in rdr.deserialize() {
let record: RecordMulti = result?;
if record.population.map_or
|
}
wtr.flush()?;
Ok(())
| random_line_split |
main.rs |
let file = File::open(file_path)?;
let mut rdr = csv::Reader::from_reader(file);
// Suppose we want to read the headers at this point.
// Option 1: clone().
// The downside is that it costs an extra copy in memory.
// let headers = rdr.headers()?.clone();
{
// Because of the borrow's lifetime, this call is nested in its own scope.
// Option 2: nest a scope.
// Otherwise the borrow is held and the records can no longer be iterated afterwards.
// <- I see, so this technique is an option as well.
let headers = rdr.headers()?;
println!("{:?}", headers);
}
for result in rdr.records() {
let record = result?;
println!("{:?}", record);
}
Ok(())
}
fn read_csv_file2() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.records() {
let record = result?;
println!("{:?}", record);
}
Ok(())
}
fn get_file_path() -> Result<OsString, Box<dyn Error>> {
match env::args_os().nth(1) {
None => Err(From::from("expected 1 argument, but got none")),
Some(file_path) => Ok(file_path),
}
}
fn read_csv_file3() {
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.delimiter(b';')
.double_quote(false)
.flexible(true)
.comment(Some(b'#'))
.from_reader(io::stdin());
// All of these settings are configurable -- the builder is quite flexible.
}
type Record = (String, String, Option<u64>, f64, f64);
fn read_csv_file4() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.deserialize() {
let record: Record3 = result?;
println!("{:?}", record);
}
Ok(())
}
type Record2 = HashMap<String, String>;
fn read_csv_file5() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.deserialize() {
let record: Record2 = result?;
println!("{:?}", record);
}
Ok(())
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct Record3 {
latitude: f64,
longitude: f64,
// With this option, a value that fails to parse automatically becomes None.
#[serde(deserialize_with = "csv::invalid_option")]
population: Option<f64>,
city: String,
state: String,
}
fn write_csv() -> Result<(), Box<dyn Error>> {
// let mut wtr = csv::Writer::from_writer(io::stdout());
let mut wtr = csv::WriterBuilder::new()
.delimiter(b'\t')
.quote_style(csv::QuoteStyle::NonNumeric)
.from_writer(io::stdout());
// The AsRef<[u8]> bound is useful because types such as String, &str, and Vec<u8> all satisfy it.
wtr.write_record(&["City", "State", "Population", "Latitude", "Longitude"])?;
wtr.write_record(&["Davidsons Landing", "AK", "", "65.2419444", "-165.2716667"])?;
wtr.write_record(&["Kenai", "AK", "7610", "60.5544444", "-151.2583333"])?;
wtr.write_record(&["Oakman", "AL", "", "33.7133333", "-87.3886111"])?;
wtr.flush()?;
Ok(())
}
// Replacing the borrowed &str fields with owned String types would mean
// allocating a fresh String for both city and state every time a record is written.
// Writing would still work, but it wastes a bit of memory and performance.
#[derive(Debug, Serialize)]
#[serde(rename_all = "PascalCase")]
struct WriteRecord<'a> {
city: &'a str,
state: &'a str,
population: Option<u64>,
latitude: f64,
longitude: f64,
}
fn write_csv2() -> Result<(), Box<dyn Error>> {
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.serialize(WriteRecord {
city: "Davidsons Landing",
state: "AK",
population: None,
latitude: 65.2419444,
longitude: -165.2716667,
})?;
wtr.serialize(WriteRecord {
city: "Kenai",
state: "AK",
population: Some(7610),
latitude: 60.5544444,
longitude: -151.2583333,
})?;
wtr.serialize(WriteRecord {
city: "Oakman",
state: "AL",
population: None,
latitude: 33.7133333,
longitude: -87.3886111,
})?;
wtr.flush()?;
Ok(())
}
fn read_and_write_csv() -> Result<(), Box<dyn Error>> {
let argss = match env::args_os().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(argument) => argument,
};
// Build a CSV reader (stdin) and a CSV writer (stdout).
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.write_record(rdr.headers()?)?;
for result in rdr.records() {
let record = result?;
if record.iter().any(|r| r == &argss) {
wtr.write_record(&record)?;
}
}
wtr.flush()?;
Ok(())
}
// How to handle input that cannot be converted to UTF-8:
// read it as raw bytes!
fn read_and_write_byte_csv() -> Result<(), Box<dyn Error>> {
let argss = match env::args().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(argument) => argument,
};
// Build a CSV reader (stdin) and a CSV writer (stdout).
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.write_record(rdr.byte_headers()?)?;
for result in rdr.byte_records() {
let record = result?;
// Note that argss.as_bytes() returns a reference.
if record.iter().any(|r| r == argss.as_bytes()) {
wtr.write_record(&record)?;
}
}
wtr.flush()?;
Ok(())
}
// Unlike the previous example, derive both Deserialize and Serialize.
// This means the type can be deserialized from and serialized to CSV automatically.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "PascalCase")]
struct RecordMulti {
city: String,
state: String,
population: Option<u64>,
latitude: f64,
}
fn read_and_write_csv_model() -> Result<(), Box<dyn Error>> {
// Accept the query as a positional argument.
// Return an error if no argument is given or it is not an integer.
let minimum_pop: u64 = match env::args().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(arg) => arg.parse::<u64>()?,
};
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
for result in rdr.deserialize() {
let record: RecordMulti = result?;
if record.population.map_or(false, |f| f >= minimum_pop) {
wtr.serialize(&record)?;
}
}
wtr.flush()?;
Ok(())
}
// ./csv_example < worldcitiespop.csv 2.12s user 0.09s system 70% cpu 3.125 total
fn performance_read_csv() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
let mut count = 0;
for result in reader.records() {
let record = result?;
if &rec | ord[0] == " | identifier_name |
|
__init__.py |
print(f'pyatlas_config={config}')
return config
def get_digest(self):
return HTTPDigestAuth(self.public_key,self.private_key)
# API
def organizations(self):
""" Return a list of organzations available to current api user.
"""
return self.get('{}/orgs'.format(ApiVersion.A1.value))
def projects(self):
""" Alias for groups()
"""
return self.groups()
def groups(self):
""" Return list of groups for this public key.
"""
#return self.get('{}/groups'.format(ApiVersion.A1.value))
return self.get('{}/groups'.format(ApiVersion.CM1.value))
def users(self,org_id=None):
""" Return list of users for this organization.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/users'.format(ApiVersion.A1.value,org_id))
## Database Users ##
def database_users(self,project_id=''):
""" GET /api/atlas/v1.0/groups/{GROUP-ID}/databaseUsers
"""
project_id = project_id if project_id != '' else self.__project_id
return self.get(f'{ApiVersion.A1.value}/groups/{project_id}/databaseUsers')
def create_project(self
,name
,org_id=None):
if org_id is None:
org_id = self.org_id
project_data = { "name" : name }
if org_id:
project_data["orgId"] = org_id
pprint.pprint(project_data)
project = self.post(f'{ApiVersion.CM1.value}/groups'
,body=project_data)
pprint.pprint(project)
print(f'>>>>> {project.keys()}')
self.project_id = project['content']['id']
self.org_id = project['content']['orgId']
return project
def delete_project(self,name):
project = self.project_by_name(project_name=name)
group_id = project['content']['id']
logger.info(f'>>>>>>> delete_project name={name} group_id={group_id}')
response = self.delete(f'{ApiVersion.CM1.value}/groups/{group_id}')
return response
## TODO - need to expose this in the AutomaticKeyMachine api --
def new_project_apikey(self,ip_address=None,project_id=None):
if ip_address is None:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
print(f'external_ip={external_ip}')
ip_address=external_ip
if project_id is None:
if self.project_id is None:
raise "No project_id found. Pass or set when creating client"
project_id=self.project_id
apikey = self.create_project_apikey(project_id=project_id)
whitelist = self.add_project_apikey_whitelist(apikey_id=apikey.id
,ip_address=ip_address
,project_id=project_id)
return { 'apikey' : apikey, 'whitelist' : whitelist }
def create_project_apikey(self
,project_id
,description='pyatlas generated project apikey'
,roles='PROJECT_OWNER'):
""" Create a new programatic apikey in <project_name>
with the given or default (GROUP_OWNER)
permissions.
"""
print('pyatlas - create_apikey')
roles = roles.split(',')
pprint.pprint(roles)
data = { 'desc' : description, 'roles' : roles }
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/groups/{project_id}/apiKeys'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
| data = [ { 'ipAddress' : ip_address, 'comment' : 'pyatlas generated whitelist entry' } ]
pprint.pprint(data)
target = f'{ApiVersion.A1.value}/groups/{self.project_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_project_apikey_whitelist(self
,apikey_id
,ip_address
,project_id):
""" Add ip address to whitelist for a given
programatic apikey in <project_name>.
"""
print('pyatlas - add_whitelist')
data = [ { "ipAddress" : f"\"{ip_address}\"" } ]
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/groups/{project_id}/apiKeys/{apikey_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def new_org_apikey(self,ip_address):
apikey = self.create_org_apikey()
whitelist = self.add_org_apikey_whitelist(apikey.id
,ip_address)
return { 'apikey' : apikey, 'whitelist' : whitelist }
def create_org_apikey(self
,description='pyatlas generated org apikey'
,roles='ORG_GROUP_CREATOR'):
""" Create a new programatic apikey in <project_name>
with the given or default (GROUP_OWNER)
permissions.
"""
print('pyatlas - create_apikey')
roles = roles.split(',')
pprint.pprint(roles)
data = { 'desc' : description, 'roles' : roles }
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/orgs/{self.org_id}/apiKeys'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_org_apikey_whitelist(self
,apikey_id
,ip_address):
""" Add ip address to whitelist for a given
programatic apikey in <project_name>.
"""
print('pyatlas - add_whitelist')
data = [ { "ipAddress" : f"\"{ip_address}\"" } ]
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/orgs/{self.org_id}/apiKeys/{apikey_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def create_database_user(self,db_user={},project_id=''):
""" Create a new db user
"""
project_id = project_id if project_id != '' else self.__project_id
logger.info(f'create_database_user {db_user} {project_id}')
res = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/databaseUsers',body=db_user)
def bind(self,cluster_name,ip_address='',bind_details={},project_id=''):
""" Returns a MongoDB connection string along with
a programmatic apikey.
1. Need programmatic api key to add ip to whitelist
2. Add ip to whitelist
3. Generate DB user with prog api key
4. get cluster info
5. assemble connection string and return
"""
project_id = project_id if project_id != '' else self.__project_id
if ip_address == '':
headers = { 'User-Agent': 'curl/7.61.0'} # spoof for simple response
ip = requests.get('http://ifconfig.co', headers)
ip_address = ip.text.rstrip()
logger.info(f'bind: looked up ip address: {ip_address}')
#key = self.create_programatic_apikey(description=description,project_id=project_id)
db_user = { 'username' : 'foo'
,'password' : 'changeme'
,'databaseName' : 'admin'
,'roles' : [ {'databaseName' : 'admin', 'roleName' : 'dbAdminAnyDatabase'} ]
}
user = self.create_database_user(db_user,project_id=project_id)
cluster = self.get_cluster(cluster_name)
# Split on '//' so the scheme and the host part can be rejoined around the credentials.
cs = cluster['mongoURIWithOptions'].split('//',1)
conn_str = f"{cs[0]}//{db_user['username']}:{db_user['password']}@{cs[1]}"
return conn_str
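# Usage sketch (illustrative only): the class name AtlasClient appears elsewhere in
# this file, but the constructor arguments shown here are assumptions.
#
#   client = AtlasClient(public_key="...", private_key="...", project_id="...")
#   conn_str = client.bind("Cluster0")
#   print(conn_str)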
## Cluster APIs ##
def clusters(self,project_id=os.environ.get("ATLAS_PROJECT")):
""" Return list of clusters for this organization.
"""
project_id = project_id if project_id != '' else self.__project_id
return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))
def get_cluster(self,cluster_name,project_id=''):
""" Return cluster by name for this organization.
"""
print( f'>>>>>>{self.project_id}')
if project_id == '':
project_id = self.project_id
return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))
def cluster_ready(self,cluster_name,project_id=os.environ.get("ATLAS_PROJECT")):
""" | def add_whitelist_atlas_project(self, ip_address=None):
if ip_address is None:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
print(f'external_ip={external_ip}')
ip_address=external_ip | random_line_split |
__init__.py |
def get_digest(self):
return HTTPDigestAuth(self.public_key,self.private_key)
# API
def organizations(self):
""" Return a list of organzations available to current api user.
"""
return self.get('{}/orgs'.format(ApiVersion.A1.value))
def projects(self):
""" Alias for groups()
"""
return self.groups()
def groups(self):
""" Return list of groups for this public key.
"""
#return self.get('{}/groups'.format(ApiVersion.A1.value))
return self.get('{}/groups'.format(ApiVersion.CM1.value))
def users(self,org_id=None):
""" Return list of users for this organization.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/users'.format(ApiVersion.A1.value,org_id))
## Database Users ##
def database_users(self,project_id=''):
""" GET /api/atlas/v1.0/groups/{GROUP-ID}/databaseUsers
"""
project_id = project_id if project_id != '' else self.__project_id
return self.get(f'{ApiVersion.A1.value}/groups/{project_id}/databaseUsers')
def create_project(self
,name
,org_id=None):
if org_id is None:
org_id = self.org_id
project_data = { "name" : name }
if org_id:
project_data["orgId"] = org_id
pprint.pprint(project_data)
project = self.post(f'{ApiVersion.CM1.value}/groups'
,body=project_data)
pprint.pprint(project)
print(f'>>>>> {project.keys()}')
self.project_id = project['content']['id']
self.org_id = project['content']['orgId']
return project
def delete_project(self,name):
project = self.project_by_name(project_name=name)
group_id = project['content']['id']
logger.info(f'>>>>>>> delete_project name={name} group_id={group_id}')
response = self.delete(f'{ApiVersion.CM1.value}/groups/{group_id}')
return response
## TODO - need to expose this in the AutomaticKeyMachine api --
def new_project_apikey(self,ip_address=None,project_id=None):
if ip_address is None:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
print(f'external_ip={external_ip}')
ip_address=external_ip
if project_id is None:
if self.project_id is None:
raise "No project_id found. Pass or set when creating client"
project_id=self.project_id
apikey = self.create_project_apikey(project_id=project_id)
whitelist = self.add_project_apikey_whitelist(apikey_id=apikey.id
,ip_address=ip_address
,project_id=project_id)
return { 'apikey' : apikey, 'whitelist' : whitelist }
def create_project_apikey(self
,project_id
,description='pyatlas generated project apikey'
,roles='PROJECT_OWNER'):
""" Create a new programatic apikey in <project_name>
with the given or default (GROUP_OWNER)
permissions.
"""
print('pyatlas - create_apikey')
roles = roles.split(',')
pprint.pprint(roles)
data = { 'desc' : description, 'roles' : roles }
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/groups/{project_id}/apiKeys'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_whitelist_atlas_project(self, ip_address=None):
if ip_address is None:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
print(f'external_ip={external_ip}')
ip_address=external_ip
data = [ { 'ipAddress' : ip_address, 'comment' : 'pyatlas generated whitelist entry' } ]
pprint.pprint(data)
target = f'{ApiVersion.A1.value}/groups/{self.project_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_project_apikey_whitelist(self
,apikey_id
,ip_address
,project_id):
""" Add ip address to whitelist for a given
programatic apikey in <project_name>.
"""
print('pyatlas - add_whitelist')
data = [ { "ipAddress" : f"\"{ip_address}\"" } ]
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/groups/{project_id}/apiKeys/{apikey_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def new_org_apikey(self,ip_address):
apikey = self.create_org_apikey()
whitelist = self.add_org_apikey_whitelist(apikey.id
,ip_address)
return { 'apikey' : apikey, 'whitelist' : whitelist }
def create_org_apikey(self
,description='pyatlas generated org apikey'
,roles='ORG_GROUP_CREATOR'):
""" Create a new programatic apikey in <project_name>
with the given or default (GROUP_OWNER)
permissions.
"""
print('pyatlas - create_apikey')
roles = roles.split(',')
pprint.pprint(roles)
data = { 'desc' : description, 'roles' : roles }
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/orgs/{self.org_id}/apiKeys'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_org_apikey_whitelist(self
,apikey_id
,ip_address):
""" Add ip address to whitelist for a given
programatic apikey in <project_name>.
"""
print('pyatlas - add_whitelist')
data = [ { "ipAddress" : f"\"{ip_address}\"" } ]
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/orgs/{self.org_id}/apiKeys/{apikey_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def create_database_user(self,db_user={},project_id=''):
""" Create a new db user
"""
project_id = project_id if project_id != '' else self.__project_id
logger.info(f'create_database_user {db_user} {project_id}')
res = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/databaseUsers',body=db_user)
def bind(self,cluster_name,ip_address='',bind_details={},project_id=''):
""" Returns a MongoDB connection string along with
a programmatic apikey.
1. Need programmatic api key to add ip to whitelist
2. Add ip to whitelist
3. Generate DB user with prog api key
4. get cluster info
5. assemble connection string and return
"""
project_id = project_id if project_id != '' else self.__project_id
if ip_address == '':
headers = { 'User-Agent': 'curl/7.61.0'} # spoof for simple response
ip = requests.get('http://ifconfig.co', headers)
ip_address = ip.text.rstrip()
logger.info(f'bind: looked up ip address: {ip_address}')
#key = self.create_programatic_apikey(description=description,project_id=project_id)
db_user = { 'username' : 'foo'
,'password' : 'changeme'
,'databaseName' : 'admin'
,'roles' : [ {'databaseName' : 'admin', 'roleName' : 'dbAdminAnyDatabase'} ]
}
user = self.create_database_user(db_user,project_id=project_id)
cluster = self.get_cluster(cluster_name)
# Split on '//' so the scheme and the host part can be rejoined around the credentials.
cs = cluster['mongoURIWithOptions'].split('//',1)
conn_str = f"{cs[0]}//{db_user['username']}:{db_user['password']}@{cs[1]}"
return conn_str
## Cluster APIs ##
def clusters(self,project_id=os.environ.get("ATLAS_PROJECT")):
""" Return list of clusters for this organization.
"""
project_id = project_id if project_id != '' else self.__project_id
return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))
def get_cluster(self,cluster_name,project_id=''):
""" Return cluster by name for this organization.
"""
print( f'>>>>>>{self.project_id}')
if project_id == '':
project_id = self.project_id
| config = {
Env.PUBLIC_KEY : self.public_key,
Env.PRIVATE_KEY : self.private_key,
Env.ORG_ID : self.org_id,
Env.PROJECT_ID : self.project_id
}
print(f'pyatlas_config={config}')
return config | identifier_body |
|
__init__.py |
project_id = project_id if project_id != '' else self.__project_id
return self.get(f'{ApiVersion.A1.value}/groups/{project_id}/databaseUsers')
def create_project(self
,name
,org_id=None):
if org_id is None:
org_id = self.org_id
project_data = { "name" : name }
if org_id:
project_data["orgId"] = org_id
pprint.pprint(project_data)
project = self.post(f'{ApiVersion.CM1.value}/groups'
,body=project_data)
pprint.pprint(project)
print(f'>>>>> {project.keys()}')
self.project_id = project['content']['id']
self.org_id = project['content']['orgId']
return project
def delete_project(self,name):
project = self.project_by_name(project_name=name)
group_id = project['content']['id']
logger.info(f'>>>>>>> delete_project name={name} group_id={group_id}')
response = self.delete(f'{ApiVersion.CM1.value}/groups/{group_id}')
return response
## TODO - need to expose this in the AutomaticKeyMachine api --
def new_project_apikey(self,ip_address=None,project_id=None):
if ip_address is None:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
print(f'external_ip={external_ip}')
ip_address=external_ip
if project_id is None:
if self.project_id is None:
raise "No project_id found. Pass or set when creating client"
project_id=self.project_id
apikey = self.create_project_apikey(project_id=project_id)
whitelist = self.add_project_apikey_whitelist(apikey_id=apikey.id
,ip_address=ip_address
,project_id=project_id)
return { 'apikey' : apikey, 'whitelist' : whitelist }
def create_project_apikey(self
,project_id
,description='pyatlas generated project apikey'
,roles='PROJECT_OWNER'):
""" Create a new programatic apikey in <project_name>
with the given or default (GROUP_OWNER)
permissions.
"""
print('pyatlas - create_apikey')
roles = roles.split(',')
pprint.pprint(roles)
data = { 'desc' : description, 'roles' : roles }
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/groups/{project_id}/apiKeys'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_whitelist_atlas_project(self, ip_address=None):
if ip_address is None:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
print(f'external_ip={external_ip}')
ip_address=external_ip
data = [ { 'ipAddress' : ip_address, 'comment' : 'pyatlas generated whitelist entry' } ]
pprint.pprint(data)
target = f'{ApiVersion.A1.value}/groups/{self.project_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_project_apikey_whitelist(self
,apikey_id
,ip_address
,project_id):
""" Add ip address to whitelist for a given
programatic apikey in <project_name>.
"""
print('pyatlas - add_whitelist')
data = [ { "ipAddress" : f"\"{ip_address}\"" } ]
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/groups/{project_id}/apiKeys/{apikey_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def new_org_apikey(self,ip_address):
apikey = self.create_org_apikey()
whitelist = self.add_org_apikey_whitelist(apikey.id
,ip_address)
return { 'apikey' : apikey, 'whitelist' : whitelist }
def create_org_apikey(self
,description='pyatlas generated org apikey'
,roles='ORG_GROUP_CREATOR'):
""" Create a new programatic apikey in <project_name>
with the given or default (GROUP_OWNER)
permissions.
"""
print('pyatlas - create_apikey')
roles = roles.split(',')
pprint.pprint(roles)
data = { 'desc' : description, 'roles' : roles }
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/orgs/{self.org_id}/apiKeys'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_org_apikey_whitelist(self
,apikey_id
,ip_address):
""" Add ip address to whitelist for a given
programatic apikey in <project_name>.
"""
print('pyatlas - add_whitelist')
data = [ { "ipAddress" : f"\"{ip_address}\"" } ]
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/orgs/{self.org_id}/apiKeys/{apikey_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def create_database_user(self,db_user={},project_id=''):
""" Create a new db user
"""
project_id = project_id if project_id != '' else self.__project_id
logger.info(f'create_database_user {db_user} {project_id}')
res = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/databaseUsers',body=db_user)
def bind(self,cluster_name,ip_address='',bind_details={},project_id=''):
""" Returns a MongoDB connection string along with
a programmatic apikey.
1. Need programmatic api key to add ip to whitelist
2. Add ip to whitelist
3. Generate DB user with prog api key
4. get cluster info
5. assemble connection string and return
"""
project_id = project_id if project_id != '' else self.__project_id
if ip_address == '':
headers = { 'User-Agent': 'curl/7.61.0'} # spoof for simple response
ip = requests.get('http://ifconfig.co', headers)
ip_address = ip.text.rstrip()
logger.info(f'bind: looked up ip address: {ip_address}')
#key = self.create_programatic_apikey(description=description,project_id=project_id)
db_user = { 'username' : 'foo'
,'password' : 'changeme'
,'databaseName' : 'admin'
,'roles' : [ {'databaseName' : 'admin', 'roleName' : 'dbAdminAnyDatabase'} ]
}
user = self.create_database_user(db_user,project_id=project_id)
cluster = self.get_cluster(cluster_name)
# Split on '//' so the scheme and the host part can be rejoined around the credentials.
cs = cluster['mongoURIWithOptions'].split('//',1)
conn_str = f"{cs[0]}//{db_user['username']}:{db_user['password']}@{cs[1]}"
return conn_str
## Cluster APIs ##
def clusters(self,project_id=os.environ.get("ATLAS_PROJECT")):
""" Return list of clusters for this organization.
"""
project_id = project_id if project_id != '' else self.__project_id
return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))
def get_cluster(self,cluster_name,project_id=''):
""" Return cluster by name for this organization.
"""
print( f'>>>>>>{self.project_id}')
if project_id == '':
project_id = self.project_id
return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))
def cluster_ready(self,cluster_name,project_id=os.environ.get("ATLAS_PROJECT")):
""" Return True if and only if cluster stateName is `IDLE`.
"""
cluster = self.cluster(project_id,cluster_name)
pprint.pprint(cluster)
return cluster['stateName'] == 'IDLE'
def create_cluster(self, provision_details, project_id=""):
""" Create a cluster.
The provision_details should be an instance of the
AtlasCluster CRD
"""
response = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/clusters'
,body=provision_details)
return response
def delete_cluster(self, cluster_name, project_id ="" ):
""" Delete the cluster.
"""
response = self.delete(f'{ApiVersion.A1.value}/groups/{project_id}/clusters/{cluster_name}')
return response
## Fidicuary APIs ##
def invoices(self,org_id=None,invoice_id=''):
""" Return all invoices or a particular invoice.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/invoices/{}'.format(ApiVersion.A1.value,org_id,invoice_id))
def | pending_invoice | identifier_name |
|
__init__.py |
pprint.pprint(data)
target = f'{ApiVersion.A1.value}/groups/{self.project_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_project_apikey_whitelist(self
,apikey_id
,ip_address
,project_id):
""" Add ip address to whitelist for a given
programatic apikey in <project_name>.
"""
print('pyatlas - add_whitelist')
data = [ { "ipAddress" : f"\"{ip_address}\"" } ]
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/groups/{project_id}/apiKeys/{apikey_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def new_org_apikey(self,ip_address):
apikey = self.create_org_apikey()
whitelist = self.add_org_apikey_whitelist(apikey.id
,ip_address)
return { 'apikey' : apikey, 'whitelist' : whitelist }
def create_org_apikey(self
,description='pyatlas generated org apikey'
,roles='ORG_GROUP_CREATOR'):
""" Create a new programatic apikey in <project_name>
with the given or default (GROUP_OWNER)
permissions.
"""
print('pyatlas - create_apikey')
roles = roles.split(',')
pprint.pprint(roles)
data = { 'desc' : description, 'roles' : roles }
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/orgs/{self.org_id}/apiKeys'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_org_apikey_whitelist(self
,apikey_id
,ip_address):
""" Add ip address to whitelist for a given
programatic apikey in <project_name>.
"""
print('pyatlas - add_whitelist')
data = [ { "ipAddress" : f"\"{ip_address}\"" } ]
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/orgs/{self.org_id}/apiKeys/{apikey_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def create_database_user(self,db_user={},project_id=''):
""" Create a new db user
"""
project_id = project_id if project_id != '' else self.__project_id
logger.info(f'create_database_user {db_user} {project_id}')
res = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/databaseUsers',body=db_user)
def bind(self,cluster_name,ip_address='',bind_details={},project_id=''):
""" Returns a MongoDB connection string along with
a programmatic apikey.
1. Need programmatic api key to add ip to whitelist
2. Add ip to whitelist
3. Generate DB user with prog api key
4. get cluster info
5. assemble connection string and return
"""
project_id = project_id if project_id != '' else self.__project_id
if ip_address == '':
headers = { 'User-Agent': 'curl/7.61.0'} # spoof for simple response
ip = requests.get('http://ifconfig.co', headers)
ip_address = ip.text.rstrip()
logger.info(f'bind: looked up ip address: {ip_address}')
#key = self.create_programatic_apikey(description=description,project_id=project_id)
db_user = { 'username' : 'foo'
,'password' : 'changeme'
,'databaseName' : 'admin'
,'roles' : [ {'databaseName' : 'admin', 'roleName' : 'dbAdminAnyDatabase'} ]
}
user = self.create_database_user(db_user,project_id=project_id)
cluster = self.get_cluster(cluster_name)
# Split on '//' so the scheme and the host part can be rejoined around the credentials.
cs = cluster['mongoURIWithOptions'].split('//',1)
conn_str = f"{cs[0]}//{db_user['username']}:{db_user['password']}@{cs[1]}"
return conn_str
## Cluster APIs ##
def clusters(self,project_id=os.environ.get("ATLAS_PROJECT")):
""" Return list of clusters for this organization.
"""
project_id = project_id if project_id != '' else self.__project_id
return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))
def get_cluster(self,cluster_name,project_id=''):
""" Return cluster by name for this organization.
"""
print( f'>>>>>>{self.project_id}')
if project_id == '':
project_id = self.project_id
return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))
def cluster_ready(self,cluster_name,project_id=os.environ.get("ATLAS_PROJECT")):
""" Return True if and only if cluster stateName is `IDLE`.
"""
cluster = self.cluster(project_id,cluster_name)
pprint.pprint(cluster)
return cluster['stateName'] == 'IDLE'
def create_cluster(self, provision_details, project_id=""):
""" Create a cluster.
The provision_details should be an instance of the
AtlasCluster CRD
"""
response = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/clusters'
,body=provision_details)
return response
def delete_cluster(self, cluster_name, project_id ="" ):
""" Delete the cluster.
"""
response = self.delete(f'{ApiVersion.A1.value}/groups/{project_id}/clusters/{cluster_name}')
return response
## Fidicuary APIs ##
def invoices(self,org_id=None,invoice_id=''):
""" Return all invoices or a particular invoice.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/invoices/{}'.format(ApiVersion.A1.value,org_id,invoice_id))
def pending_invoice(self,org_id=None):
""" Return the pending invoice for this organization id.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/invoices/pending'.format(ApiVersion.A1.value,org_id))
def invoice_items(self,org_id=None,query={}):
""" Return the line items posted for the
given _date from the appropriate invoice.
"""
if org_id is None:
org_id = self.org_id
query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')
# Given a 'query_end_date' to find the invoice containing the
# line items for that date we need to find the invoice which
# has 'endDate' equal to the end of the month of the `query_end_date`
query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))
target_invoices = []
invoices = self.invoices(org_id)
if self.verbose:
print('Searching invoices org_id={}'.format(org_id))
print('query={} query_end_date={}'.format(query,query_end_date))
print('Result keys: {}'.format( invoices['content'].keys() ))
print('Total result count: {}'.format( invoices['content']['totalCount'] ))
for invoice in invoices['content']['results']:
#pprint.pprint(invoice)
end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')
if self.verbose:
print('invoice({})[\'endDate\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))
if end_date == query_first_next_month:
target_invoices.append(invoice)
if self.verbose:
print('Target invoices: {}'.format(target_invoices))
target_line_items = []
for invoice in target_invoices:
invoice_details = self.invoices(org_id,invoice['id'])
print('invoice_details: {}'.format(invoice_details))
for item in invoice_details['content']['lineItems']:
end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')
if end_date == query_end_date:
target_line_items.append(item)
if self.verbose:
print('target_line_items: {}'.format(target_line_items))
return target_line_items
def summarize_invoice(line_items):
""" Return the sum total for a given list of invoice items.
"""
sku_summary = AtlasClient.summarize_invoice_items_by_sku(line_items)
total = 0
for item in sku_summary:
total += sku_summary[item]['totalPriceCents']
return total
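# Worked example with made-up numbers: if summarize_invoice_items_by_sku returns
# {'SKU_A': {'totalPriceCents': 1250}, 'SKU_B': {'totalPriceCents': 750}}, the loop
# above sums them to 2000 cents, i.e. $20.00 for the invoice.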
def summarize_invoice_items_by_sku(line_items):
""" Return a dict summary of line items by SKU.
"""
sku_summary = {}
for item in line_items:
if item['sku'] not in sku_summary:
| sku_summary[item['sku']]= { 'totalPriceCents' : 0 } | conditional_block |
|
planning.rs | Target;
use context::CrateContext;
use context::WorkspaceContext;
use settings::RazeSettings;
use settings::GenMode;
use std::collections::HashSet;
use std::env;
use std::fs;
use std::ops::Deref;
use std::path::Path;
use std::str;
use util;
pub struct PlannedBuild {
pub workspace_context: WorkspaceContext,
pub crate_contexts: Vec<CrateContext>,
}
pub struct BuildPlanner<'a> {
settings: RazeSettings,
cargo_config: &'a Config,
platform_attrs: Vec<Cfg>,
registry: Option<SourceId>,
}
impl <'a> BuildPlanner<'a> {
pub fn new(settings: RazeSettings,
cargo_config: &'a Config) -> CargoResult<BuildPlanner<'a>> {
Ok(BuildPlanner {
platform_attrs: try!(util::fetch_attrs(&settings.target)),
cargo_config: cargo_config,
registry: None,
settings: settings,
})
}
pub fn | (&mut self, host: String) -> CargoResult<()> {
match host.to_url().map(|url| SourceId::for_registry(&url)) {
Ok(registry_id) => {
self.registry = Some(registry_id);
Ok(())
},
Err(value) => Err(CargoError::from(value))
}
}
pub fn plan_build(&self) -> CargoResult<PlannedBuild> {
let ResolvedPlan {root_name, packages, resolve} =
try!(ResolvedPlan::resolve_from_files(&self.cargo_config));
let root_package_id = try!(resolve.iter()
.filter(|dep| dep.name() == root_name)
.next()
.ok_or(CargoError::from("root crate should be in cargo resolve")));
let root_direct_deps = resolve.deps(&root_package_id).cloned().collect::<HashSet<_>>();
let mut crate_contexts = Vec::new();
let source_id = match self.registry.clone() {
Some(v) => v,
None => try!(SourceId::crates_io(&self.cargo_config)),
};
for id in try!(find_all_package_ids(source_id, &resolve)) {
let package = packages.get(&id).unwrap().clone();
let mut features = resolve.features(&id).clone().into_iter().collect::<Vec<_>>();
features.sort();
let full_name = format!("{}-{}", id.name(), id.version());
let path = format!("./vendor/{}-{}/", id.name(), id.version());
// Verify that package is really vendored
if self.settings.genmode == GenMode::Vendored {
try!(fs::metadata(&path).map_err(|_| {
CargoError::from(format!("failed to find {}. Either switch to \"Remote\" genmode, or run `cargo vendor -x` first.", &path))
}));
}
// Identify all possible dependencies
let PlannedDeps { mut build_deps, mut dev_deps, mut normal_deps } =
PlannedDeps::find_all_deps(&id, &package, &resolve, &self.settings.target, &self.platform_attrs);
build_deps.sort();
dev_deps.sort();
normal_deps.sort();
let mut targets = try!(identify_targets(&full_name, &package));
targets.sort();
let possible_crate_settings =
self.settings.crates
.get(id.name())
.and_then(|c| c.get(&id.version().to_string()));
let should_gen_buildrs =
possible_crate_settings.map(|s| s.gen_buildrs.clone()).unwrap_or(false);
let build_script_target = if should_gen_buildrs {
targets.iter().find(|t| t.kind.deref() == "custom-build").cloned()
} else {
None
};
let targets_sans_build_script =
targets.into_iter().filter(|t| t.kind.deref() != "custom-build").collect::<Vec<_>>();
let additional_deps =
possible_crate_settings.map(|s| s.additional_deps.clone()).unwrap_or(Vec::new());
let additional_flags =
possible_crate_settings.map(|s| s.additional_flags.clone()).unwrap_or(Vec::new());
let extra_aliased_targets =
possible_crate_settings.map(|s| s.extra_aliased_targets.clone()).unwrap_or(Vec::new());
// Skip generated dependencies explicitly designated to be skipped (potentially due to
// being replaced or customized as part of additional_deps)
let non_skipped_normal_deps = if let Some(s) = possible_crate_settings {
normal_deps.into_iter()
.filter(|d| !s.skipped_deps.contains(&format!("{}-{}", d.name, d.version)))
.collect::<Vec<_>>()
} else {
normal_deps
};
crate_contexts.push(CrateContext {
pkg_name: id.name().to_owned(),
pkg_version: id.version().to_string(),
features: features,
is_root_dependency: root_direct_deps.contains(&id),
metadeps: Vec::new() /* TODO(acmcarther) */,
dependencies: non_skipped_normal_deps,
build_dependencies: build_deps,
dev_dependencies: dev_deps,
path: path,
build_script_target: build_script_target,
targets: targets_sans_build_script,
platform_triple: self.settings.target.to_owned(),
additional_deps: additional_deps,
additional_flags: additional_flags,
extra_aliased_targets: extra_aliased_targets,
})
}
let workspace_context = WorkspaceContext {
workspace_path: self.settings.workspace_path.clone(),
platform_triple: self.settings.target.clone(),
gen_workspace_prefix: self.settings.gen_workspace_prefix.clone(),
};
crate_contexts.sort_by_key(|context| format!("{}-{}", context.pkg_name, context.pkg_version));
Ok(PlannedBuild{
workspace_context: workspace_context,
crate_contexts: crate_contexts
})
}
}
/** The set of all included dependencies for Cargo's dependency categories. */
pub struct PlannedDeps {
pub build_deps: Vec<BuildDependency>,
pub dev_deps: Vec<BuildDependency>,
pub normal_deps: Vec<BuildDependency>,
}
impl PlannedDeps {
/**
* Identifies the full set of cargo dependencies for the provided package id using cargo's
* resolution details.
*/
pub fn find_all_deps(id: &PackageId,
package: &CargoPackage,
resolve: &Resolve,
platform_triple: &str,
platform_attrs: &Vec<Cfg>) -> PlannedDeps {
let platform_deps = package
.dependencies()
.iter()
.filter(|dep| {
dep.platform()
.map(|p| p.matches(&platform_triple, Some(&platform_attrs)))
.unwrap_or(true)
})
.cloned()
.collect::<Vec<Dependency>>();
let build_deps = util::take_kinded_dep_names(&platform_deps, Kind::Build);
let dev_deps = util::take_kinded_dep_names(&platform_deps, Kind::Development);
let normal_deps = util::take_kinded_dep_names(&platform_deps, Kind::Normal);
let resolved_deps = resolve.deps(&id).into_iter()
.map(|dep| BuildDependency {
name: dep.name().to_owned(),
version: dep.version().to_string(),
})
.collect::<Vec<BuildDependency>>();
PlannedDeps {
normal_deps:
resolved_deps.iter().filter(|d| normal_deps.contains(&d.name)).cloned().collect(),
build_deps:
resolved_deps.iter().filter(|d| build_deps.contains(&d.name)).cloned().collect(),
dev_deps:
resolved_deps.into_iter().filter(|d| dev_deps.contains(&d.name)).collect(),
}
}
}
/** A synthesized Cargo dependency resolution. */
pub struct ResolvedPlan<'a> {
pub root_name: String,
pub packages: PackageSet<'a>,
pub resolve: Resolve,
}
impl<'a> ResolvedPlan<'a> {
/**
* Performs Cargo's own build plan resolution, yielding the root crate, the set of packages, and
* the resolution graph.
*/
pub fn resolve_from_files(cargo_config: &Config) -> CargoResult<ResolvedPlan> {
let lockfile = Path::new("Cargo.lock");
let manifest_path = lockfile.parent().unwrap().join("Cargo.toml");
let manifest = env::current_dir().unwrap().join(&manifest_path);
let ws = try!(Workspace::new(&manifest, cargo_config));
let specs = Packages::All.into_package_id_specs(&ws)?;
let root_name = specs.iter().next().unwrap().name().to_owned();
let (packages, resolve) = ops::resolve_ws_precisely(
&ws,
None,
&[],
false,
false,
&specs)?;
Ok(ResolvedPlan {
root_name: root_name,
packages: packages,
resolve: resolve,
})
}
}
/** Enumerates the set of all possibly relevant packages for the Cargo dependencies */
fn find_all_package_ids(registry_id: SourceId, resolve: &Resolve) -> CargoResult<Vec<PackageId>> {
try!(fs::metadata("Cargo.lock").map_err(|_| {
CargoError::from("failed to find Cargo.lock. Please run `cargo generate-lockfile` first.")
}));
let mut package_ids = resolve.iter()
.filter(|id| *id.source_id() == registry_id)
.cloned()
.collect::<Vec<_>>();
package_ids.sort_by_key(|id| id.name().to_owned());
| set_registry_from_url | identifier_name |
planning.rs | Target;
use context::CrateContext;
use context::WorkspaceContext;
use settings::RazeSettings;
use settings::GenMode;
use std::collections::HashSet;
use std::env;
use std::fs;
use std::ops::Deref;
use std::path::Path;
use std::str;
use util;
pub struct PlannedBuild {
pub workspace_context: WorkspaceContext,
pub crate_contexts: Vec<CrateContext>,
}
pub struct BuildPlanner<'a> {
settings: RazeSettings,
cargo_config: &'a Config,
platform_attrs: Vec<Cfg>,
registry: Option<SourceId>,
}
impl <'a> BuildPlanner<'a> {
pub fn new(settings: RazeSettings,
cargo_config: &'a Config) -> CargoResult<BuildPlanner<'a>> {
Ok(BuildPlanner {
platform_attrs: try!(util::fetch_attrs(&settings.target)),
cargo_config: cargo_config,
registry: None,
settings: settings,
})
}
pub fn set_registry_from_url(&mut self, host: String) -> CargoResult<()> {
match host.to_url().map(|url| SourceId::for_registry(&url)) {
Ok(registry_id) => {
self.registry = Some(registry_id);
Ok(())
},
Err(value) => Err(CargoError::from(value))
}
}
pub fn plan_build(&self) -> CargoResult<PlannedBuild> {
let ResolvedPlan {root_name, packages, resolve} =
try!(ResolvedPlan::resolve_from_files(&self.cargo_config));
let root_package_id = try!(resolve.iter()
.filter(|dep| dep.name() == root_name)
.next()
.ok_or(CargoError::from("root crate should be in cargo resolve")));
let root_direct_deps = resolve.deps(&root_package_id).cloned().collect::<HashSet<_>>();
let mut crate_contexts = Vec::new();
let source_id = match self.registry.clone() {
Some(v) => v,
None => try!(SourceId::crates_io(&self.cargo_config)),
};
for id in try!(find_all_package_ids(source_id, &resolve)) {
let package = packages.get(&id).unwrap().clone();
let mut features = resolve.features(&id).clone().into_iter().collect::<Vec<_>>();
features.sort();
let full_name = format!("{}-{}", id.name(), id.version());
let path = format!("./vendor/{}-{}/", id.name(), id.version());
// Verify that package is really vendored
if self.settings.genmode == GenMode::Vendored {
try!(fs::metadata(&path).map_err(|_| {
CargoError::from(format!("failed to find {}. Either switch to \"Remote\" genmode, or run `cargo vendor -x` first.", &path))
}));
}
// Identify all possible dependencies
let PlannedDeps { mut build_deps, mut dev_deps, mut normal_deps } =
PlannedDeps::find_all_deps(&id, &package, &resolve, &self.settings.target, &self.platform_attrs);
build_deps.sort();
dev_deps.sort();
normal_deps.sort();
let mut targets = try!(identify_targets(&full_name, &package));
targets.sort();
let possible_crate_settings =
self.settings.crates
.get(id.name())
.and_then(|c| c.get(&id.version().to_string()));
let should_gen_buildrs =
possible_crate_settings.map(|s| s.gen_buildrs.clone()).unwrap_or(false);
let build_script_target = if should_gen_buildrs {
targets.iter().find(|t| t.kind.deref() == "custom-build").cloned()
} else {
None
};
let targets_sans_build_script =
targets.into_iter().filter(|t| t.kind.deref() != "custom-build").collect::<Vec<_>>();
let additional_deps =
possible_crate_settings.map(|s| s.additional_deps.clone()).unwrap_or(Vec::new());
let additional_flags =
possible_crate_settings.map(|s| s.additional_flags.clone()).unwrap_or(Vec::new());
let extra_aliased_targets =
possible_crate_settings.map(|s| s.extra_aliased_targets.clone()).unwrap_or(Vec::new());
// Skip generated dependencies explicitly designated to be skipped (potentially due to
// being replaced or customized as part of additional_deps)
let non_skipped_normal_deps = if let Some(s) = possible_crate_settings | else {
normal_deps
};
crate_contexts.push(CrateContext {
pkg_name: id.name().to_owned(),
pkg_version: id.version().to_string(),
features: features,
is_root_dependency: root_direct_deps.contains(&id),
metadeps: Vec::new() /* TODO(acmcarther) */,
dependencies: non_skipped_normal_deps,
build_dependencies: build_deps,
dev_dependencies: dev_deps,
path: path,
build_script_target: build_script_target,
targets: targets_sans_build_script,
platform_triple: self.settings.target.to_owned(),
additional_deps: additional_deps,
additional_flags: additional_flags,
extra_aliased_targets: extra_aliased_targets,
})
}
let workspace_context = WorkspaceContext {
workspace_path: self.settings.workspace_path.clone(),
platform_triple: self.settings.target.clone(),
gen_workspace_prefix: self.settings.gen_workspace_prefix.clone(),
};
crate_contexts.sort_by_key(|context| format!("{}-{}", context.pkg_name, context.pkg_version));
Ok(PlannedBuild{
workspace_context: workspace_context,
crate_contexts: crate_contexts
})
}
}
/** The set of all included dependencies for Cargo's dependency categories. */
pub struct PlannedDeps {
pub build_deps: Vec<BuildDependency>,
pub dev_deps: Vec<BuildDependency>,
pub normal_deps: Vec<BuildDependency>,
}
impl PlannedDeps {
/**
* Identifies the full set of cargo dependencies for the provided package id using cargo's
* resolution details.
*/
pub fn find_all_deps(id: &PackageId,
package: &CargoPackage,
resolve: &Resolve,
platform_triple: &str,
platform_attrs: &Vec<Cfg>) -> PlannedDeps {
let platform_deps = package
.dependencies()
.iter()
.filter(|dep| {
dep.platform()
.map(|p| p.matches(&platform_triple, Some(&platform_attrs)))
.unwrap_or(true)
})
.cloned()
.collect::<Vec<Dependency>>();
let build_deps = util::take_kinded_dep_names(&platform_deps, Kind::Build);
let dev_deps = util::take_kinded_dep_names(&platform_deps, Kind::Development);
let normal_deps = util::take_kinded_dep_names(&platform_deps, Kind::Normal);
let resolved_deps = resolve.deps(&id).into_iter()
.map(|dep| BuildDependency {
name: dep.name().to_owned(),
version: dep.version().to_string(),
})
.collect::<Vec<BuildDependency>>();
PlannedDeps {
normal_deps:
resolved_deps.iter().filter(|d| normal_deps.contains(&d.name)).cloned().collect(),
build_deps:
resolved_deps.iter().filter(|d| build_deps.contains(&d.name)).cloned().collect(),
dev_deps:
resolved_deps.into_iter().filter(|d| dev_deps.contains(&d.name)).collect(),
}
}
}
/** A synthesized Cargo dependency resolution. */
pub struct ResolvedPlan<'a> {
pub root_name: String,
pub packages: PackageSet<'a>,
pub resolve: Resolve,
}
impl<'a> ResolvedPlan<'a> {
/**
* Performs Cargo's own build plan resolution, yielding the root crate, the set of packages, and
* the resolution graph.
*/
pub fn resolve_from_files(cargo_config: &Config) -> CargoResult<ResolvedPlan> {
let lockfile = Path::new("Cargo.lock");
let manifest_path = lockfile.parent().unwrap().join("Cargo.toml");
let manifest = env::current_dir().unwrap().join(&manifest_path);
let ws = try!(Workspace::new(&manifest, cargo_config));
let specs = Packages::All.into_package_id_specs(&ws)?;
let root_name = specs.iter().next().unwrap().name().to_owned();
let (packages, resolve) = ops::resolve_ws_precisely(
&ws,
None,
&[],
false,
false,
&specs)?;
Ok(ResolvedPlan {
root_name: root_name,
packages: packages,
resolve: resolve,
})
}
}
/** Enumerates the set of all possibly relevant packages for the Cargo dependencies */
fn find_all_package_ids(registry_id: SourceId, resolve: &Resolve) -> CargoResult<Vec<PackageId>> {
try!(fs::metadata("Cargo.lock").map_err(|_| {
CargoError::from("failed to find Cargo.lock. Please run `cargo generate-lockfile` first.")
}));
let mut package_ids = resolve.iter()
.filter(|id| *id.source_id() == registry_id)
.cloned()
.collect::<Vec<_>>();
package_ids.sort_by_key(|id| id.name().to_owned());
| {
normal_deps.into_iter()
.filter(|d| !s.skipped_deps.contains(&format!("{}-{}", d.name, d.version)))
.collect::<Vec<_>>()
} | conditional_block |
planning.rs | use cargo::ops::Packages;
use cargo::ops;
use cargo::util::CargoResult;
use cargo::util::Cfg;
use cargo::util::Config;
use cargo::util::ToUrl;
use context::BuildDependency;
use context::BuildTarget;
use context::CrateContext;
use context::WorkspaceContext;
use settings::RazeSettings;
use settings::GenMode;
use std::collections::HashSet;
use std::env;
use std::fs;
use std::ops::Deref;
use std::path::Path;
use std::str;
use util;
pub struct PlannedBuild {
pub workspace_context: WorkspaceContext,
pub crate_contexts: Vec<CrateContext>,
}
pub struct BuildPlanner<'a> {
settings: RazeSettings,
cargo_config: &'a Config,
platform_attrs: Vec<Cfg>,
registry: Option<SourceId>,
}
impl <'a> BuildPlanner<'a> {
pub fn new(settings: RazeSettings,
cargo_config: &'a Config) -> CargoResult<BuildPlanner<'a>> {
Ok(BuildPlanner {
platform_attrs: try!(util::fetch_attrs(&settings.target)),
cargo_config: cargo_config,
registry: None,
settings: settings,
})
}
pub fn set_registry_from_url(&mut self, host: String) -> CargoResult<()> {
match host.to_url().map(|url| SourceId::for_registry(&url)) {
Ok(registry_id) => {
self.registry = Some(registry_id);
Ok(())
},
Err(value) => Err(CargoError::from(value))
}
}
pub fn plan_build(&self) -> CargoResult<PlannedBuild> {
let ResolvedPlan {root_name, packages, resolve} =
try!(ResolvedPlan::resolve_from_files(&self.cargo_config));
let root_package_id = try!(resolve.iter()
.filter(|dep| dep.name() == root_name)
.next()
.ok_or(CargoError::from("root crate should be in cargo resolve")));
let root_direct_deps = resolve.deps(&root_package_id).cloned().collect::<HashSet<_>>();
let mut crate_contexts = Vec::new();
let source_id = match self.registry.clone() {
Some(v) => v,
None => try!(SourceId::crates_io(&self.cargo_config)),
};
for id in try!(find_all_package_ids(source_id, &resolve)) {
let package = packages.get(&id).unwrap().clone();
let mut features = resolve.features(&id).clone().into_iter().collect::<Vec<_>>();
features.sort();
let full_name = format!("{}-{}", id.name(), id.version());
let path = format!("./vendor/{}-{}/", id.name(), id.version());
// Verify that package is really vendored
if self.settings.genmode == GenMode::Vendored {
try!(fs::metadata(&path).map_err(|_| {
CargoError::from(format!("failed to find {}. Either switch to \"Remote\" genmode, or run `cargo vendor -x` first.", &path))
}));
}
// Identify all possible dependencies
let PlannedDeps { mut build_deps, mut dev_deps, mut normal_deps } =
PlannedDeps::find_all_deps(&id, &package, &resolve, &self.settings.target, &self.platform_attrs);
build_deps.sort();
dev_deps.sort();
normal_deps.sort();
let mut targets = try!(identify_targets(&full_name, &package));
targets.sort();
let possible_crate_settings =
self.settings.crates
.get(id.name())
.and_then(|c| c.get(&id.version().to_string()));
let should_gen_buildrs =
possible_crate_settings.map(|s| s.gen_buildrs.clone()).unwrap_or(false);
let build_script_target = if should_gen_buildrs {
targets.iter().find(|t| t.kind.deref() == "custom-build").cloned()
} else {
None
};
let targets_sans_build_script =
targets.into_iter().filter(|t| t.kind.deref() != "custom-build").collect::<Vec<_>>();
let additional_deps =
possible_crate_settings.map(|s| s.additional_deps.clone()).unwrap_or(Vec::new());
let additional_flags =
possible_crate_settings.map(|s| s.additional_flags.clone()).unwrap_or(Vec::new());
let extra_aliased_targets =
possible_crate_settings.map(|s| s.extra_aliased_targets.clone()).unwrap_or(Vec::new());
// Skip generated dependencies explicitly designated to be skipped (potentially due to
// being replaced or customized as part of additional_deps)
let non_skipped_normal_deps = if let Some(s) = possible_crate_settings {
normal_deps.into_iter()
.filter(|d| !s.skipped_deps.contains(&format!("{}-{}", d.name, d.version)))
.collect::<Vec<_>>()
} else {
normal_deps
};
crate_contexts.push(CrateContext {
pkg_name: id.name().to_owned(),
pkg_version: id.version().to_string(),
features: features,
is_root_dependency: root_direct_deps.contains(&id),
metadeps: Vec::new() /* TODO(acmcarther) */,
dependencies: non_skipped_normal_deps,
build_dependencies: build_deps,
dev_dependencies: dev_deps,
path: path,
build_script_target: build_script_target,
targets: targets_sans_build_script,
platform_triple: self.settings.target.to_owned(),
additional_deps: additional_deps,
additional_flags: additional_flags,
extra_aliased_targets: extra_aliased_targets,
})
}
let workspace_context = WorkspaceContext {
workspace_path: self.settings.workspace_path.clone(),
platform_triple: self.settings.target.clone(),
gen_workspace_prefix: self.settings.gen_workspace_prefix.clone(),
};
crate_contexts.sort_by_key(|context| format!("{}-{}", context.pkg_name, context.pkg_version));
Ok(PlannedBuild{
workspace_context: workspace_context,
crate_contexts: crate_contexts
})
}
}
/** The set of all included dependencies for Cargo's dependency categories. */
pub struct PlannedDeps {
pub build_deps: Vec<BuildDependency>,
pub dev_deps: Vec<BuildDependency>,
pub normal_deps: Vec<BuildDependency>,
}
impl PlannedDeps {
/**
* Identifies the full set of cargo dependencies for the provided package id using cargo's
* resolution details.
*/
pub fn find_all_deps(id: &PackageId,
package: &CargoPackage,
resolve: &Resolve,
platform_triple: &str,
platform_attrs: &Vec<Cfg>) -> PlannedDeps {
let platform_deps = package
.dependencies()
.iter()
.filter(|dep| {
dep.platform()
.map(|p| p.matches(&platform_triple, Some(&platform_attrs)))
.unwrap_or(true)
})
.cloned()
.collect::<Vec<Dependency>>();
let build_deps = util::take_kinded_dep_names(&platform_deps, Kind::Build);
let dev_deps = util::take_kinded_dep_names(&platform_deps, Kind::Development);
let normal_deps = util::take_kinded_dep_names(&platform_deps, Kind::Normal);
let resolved_deps = resolve.deps(&id).into_iter()
.map(|dep| BuildDependency {
name: dep.name().to_owned(),
version: dep.version().to_string(),
})
.collect::<Vec<BuildDependency>>();
PlannedDeps {
normal_deps:
resolved_deps.iter().filter(|d| normal_deps.contains(&d.name)).cloned().collect(),
build_deps:
resolved_deps.iter().filter(|d| build_deps.contains(&d.name)).cloned().collect(),
dev_deps:
resolved_deps.into_iter().filter(|d| dev_deps.contains(&d.name)).collect(),
}
}
}
/** A synthesized Cargo dependency resolution. */
pub struct ResolvedPlan<'a> {
pub root_name: String,
pub packages: PackageSet<'a>,
pub resolve: Resolve,
}
impl<'a> ResolvedPlan<'a> {
/**
* Performs Cargo's own build plan resolution, yielding the root crate, the set of packages, and
* the resolution graph.
*/
pub fn resolve_from_files(cargo_config: &Config) -> CargoResult<ResolvedPlan> {
let lockfile = Path::new("Cargo.lock");
let manifest_path = lockfile.parent().unwrap().join("Cargo.toml");
let manifest = env::current_dir().unwrap().join(&manifest_path);
let ws = try!(Workspace::new(&manifest, cargo_config));
let specs = Packages::All.into_package_id_specs(&ws)?;
let root_name = specs.iter().next().unwrap().name().to_owned();
let (packages, resolve) = ops::resolve_ws_precisely(
&ws,
None,
&[],
false,
false,
&specs)?;
Ok(ResolvedPlan {
root_name: root_name,
packages: packages,
resolve: resolve,
})
}
}
/** Enumerates the set of all possibly relevant packages for the Cargo dependencies */
fn find_all_package_ids(registry_id: SourceId, resolve: &Resolve) -> CargoResult<Vec<PackageId>> {
try!(fs::metadata("Cargo.lock | use cargo::core::Resolve;
use cargo::core::SourceId;
use cargo::core::Workspace;
use cargo::core::dependency::Kind; | random_line_split |
|
main.rs | continue;
}
println!("The value of n is {}", n);
if n > 100 {
break;
}
}
// for loop
println!("-------for loop");
for i in 1..10 {
println!("The number is {}", i);
}
let range = 10..20;
for i in range {
println!("element in range {}", i);
}
let family_name = vec!["Amir", "Hooman", "Aref", "Shahnaz", "Vihan", "Shima"];
for name in family_name.iter() {
println!("Family person is {}", name);
}
for (index, name) in family_name.iter().enumerate() {
println!("Family people {} is {}", index+1, name);
}
for name in family_name { // in this way we cannot use family_name next time
println!("name is {}", name);
}
// enum
println!("-------enum");
enum Direction {
Up,
Down,
Left,
Right
}
let player_direction1:Direction = Direction::Up;
let player_direction2:Direction = Direction::Down;
let player_direction3:Direction = Direction::Left;
let player_direction4:Direction = Direction::Right;
match player_direction1 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction2 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction3 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction4 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
// constants
println!("-------constants");
const MAXIMUM_NUMBER: u8 = 7; // must be uppercase
for n in 1..MAXIMUM_NUMBER {
println!("{}", n);
}
// tuples
println!("-------tuples");
let tup1 = ("A", ("Hooman", "Hesamyan"), "C", 734, true);
println!("{}", (tup1.1).1); // referencing a tuple inside the tuple
println!("{}", tup1.0);
println!("{}", tup1.2);
println!("{}", tup1.3);
println!("{}", tup1.4);
let (x, y, z, u, v) = tup1; // destructuring the tuple
println!("{}", x);
println!("{}", y.0);
// function
println!("-------functions");
fn count_to(num: u32) {
for i in 1..num {
if is_even(i) {
println!("{} is even", i);
} else {
println!("{} is odd", i);
}
}
}
count_to(7);
fn is_even(num: u32) -> bool {
return num % 2 == 0;
}
let number = 12;
println!("is {} even? {}", number, is_even(number));
// reference
println!("-------references");
let mut x = 7;
println!("x is {}", x);
{
let x_ref_mut = &mut x; // mutable reference should enclosed inside a block
*x_ref_mut += 7;
println!("x reference is {}", x_ref_mut);
}
let x_ref = &x;
println!("x is {}", x);
println!("x reference is {}", x_ref);
// structs
println!("-------structs");
struct Color {
red: u8, // u8: 0-255
green: u8,
blue: u8
}
let bg = Color {red: 255, green: 70, blue: 15};
println!("{}, {}, {}", bg.red, bg.green, bg.blue);
struct Color2(u8, u8, u8);
let mut bg2 = Color2(30, 70, 255);
println!("{}, {}, {}", bg2.0, bg2.1, bg2.2);
bg2.2 = 40;
println!("{}, {}, {}", bg2.0, bg2.1, bg2.2);
// pass by reference
println!("-------pass by reference");
fn print_color(c: Color) {
println!("Color - R:{} G:{} B:{}", c.red, c.green, c.blue);
}
fn | (c: &Color2) {
println!("Color - R:{} G:{} B:{}", c.0, c.1, c.2);
}
print_color(bg);
/* print_color(bg); *impossible */
print_color2(&bg2);
print_color2(&bg2);
print_color2(&bg2); // it is possible to have multile function invocation due to it is called by reference
// arrays
println!("-------arrays");
let sample_array = [1, 3, 5, 7]; // either ways are valid
let sample_array2: [i32; 4] = [6, 8, 15, 20];
println!("{}", sample_array[1]);
for (i, el) in sample_array.iter().enumerate() {
println!("{}-th element is {}", i, el);
}
for i in 0..sample_array2.len() {
println!("{}", sample_array2[i]);
}
let array_of_2 = [2; 10]; // generating an array of 2's with length 10
for el in array_of_2.iter() {
println!("{}", el);
}
// impl
println!("-------impl");
struct Rectangle {
width: u32,
height: u32
}
impl Rectangle {
fn print_description(&self) {
println!("Rectangle: {} x {}", self.width, self.height);
}
fn is_square(&self) -> bool{
return self.width == self.height;
}
fn area(&self) -> u32 {
return self.width * self.height;
}
fn perimeter(&self) -> u32 {
return (self.width + self.height) * 2;
}
}
let rectangle: Rectangle = Rectangle {height: 30, width: 10, };
rectangle.print_description();
println!("The given rectangle is square? {}", rectangle.is_square());
println!("Area is {} and perimeter is {}", rectangle.area(), rectangle.perimeter());
// Strings
println!("-------Strings");
let new_string = "Hello World"; // primitive string
println!("{}", new_string);
let mut my_string = String::from("How is it going today?");
println!("{}", my_string);
println!("{}", my_string.len());
println!("{}", my_string.is_empty());
for token in my_string.split_whitespace() { // there is not in primitive string
println!("{}-", token)
}
println!("Does contain {} 'today' in it? {}", my_string, my_string.contains("today"));
my_string.push_str(new_string);
println!("{}", my_string);
/* println!("{}", my_string.push_str(new_string)) *impossible */
// Traits (like interface)
println!("-------Traits");
struct Person {
name: String,
age: u32,
}
// impl Person {
// fn to_string(&self) -> String {
// return format!("My name is {} and my age is {}", self.name, self.age);
// }
// }
impl ToString for Person { // trait "ToString" is implemented for "Person"
fn to_string(&self) -> String {
return format!("My name is {} and my age is {}", self.name, self.age);
}
}
let hooman: Person = Person {age: 39, name: String::from("Hesamyan Hooman")};
println!("{}", hooman.to_string());
// Custom Traits (like interface)
println!("-------Custom Traits");
trait HasVoiceBox {
// speak
fn speak(&self);
// check if can speak
fn can_speak(&self) -> bool;
}
impl HasVoiceBox for Person {
fn speak(&self) {
println!("Hello, my name is {} ", self.name);
}
fn can_speak(&self) -> bool {
if self.age > 3 {
return true;
} return false;
}
}
println!("I am {} and I can speak? {}", hooman.name, hooman.can_speak());
hooman.speak();
// Match Operator (like Switch)
println!("-------Match Operator | print_color2 | identifier_name |
main.rs | continue;
}
println!("The value of n is {}", n);
if n > 100 {
break;
}
}
// for loop
println!("-------for loop");
for i in 1..10 {
println!("The number is {}", i);
}
let range = 10..20;
for i in range {
println!("element in range {}", i);
}
let family_name = vec!["Amir", "Hooman", "Aref", "Shahnaz", "Vihan", "Shima"];
for name in family_name.iter() {
println!("Family person is {}", name);
}
for (index, name) in family_name.iter().enumerate() {
println!("Family people {} is {}", index+1, name);
}
for name in family_name { // in this way we cannot use family_name next time
println!("name is {}", name);
}
// enum
println!("-------enum");
enum Direction {
Up,
Down,
Left,
Right
}
let player_direction1:Direction = Direction::Up;
let player_direction2:Direction = Direction::Down;
let player_direction3:Direction = Direction::Left;
let player_direction4:Direction = Direction::Right;
match player_direction1 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction2 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction3 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction4 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
// constants
println!("-------constants");
const MAXIMUM_NUMBER: u8 = 7; // must be uppercase
for n in 1..MAXIMUM_NUMBER {
println!("{}", n);
}
// tuples
println!("-------tuples");
let tup1 = ("A", ("Hooman", "Hesamyan"), "C", 734, true);
println!("{}", (tup1.1).1); // referencing a tuple inside the tuple
println!("{}", tup1.0);
println!("{}", tup1.2);
println!("{}", tup1.3);
println!("{}", tup1.4);
let (x, y, z, u, v) = tup1; // destructuring the tuple
println!("{}", x);
println!("{}", y.0);
// function
println!("-------functions");
fn count_to(num: u32) {
for i in 1..num {
if is_even(i) | else {
println!("{} is odd", i);
}
}
}
count_to(7);
fn is_even(num: u32) -> bool {
return num % 2 == 0;
}
let number = 12;
println!("is {} even? {}", number, is_even(number));
// reference
println!("-------references");
let mut x = 7;
println!("x is {}", x);
{
let x_ref_mut = &mut x; // mutable reference should enclosed inside a block
*x_ref_mut += 7;
println!("x reference is {}", x_ref_mut);
}
let x_ref = &x;
println!("x is {}", x);
println!("x reference is {}", x_ref);
// structs
println!("-------structs");
struct Color {
red: u8, // u8: 0-255
green: u8,
blue: u8
}
let bg = Color {red: 255, green: 70, blue: 15};
println!("{}, {}, {}", bg.red, bg.green, bg.blue);
struct Color2(u8, u8, u8);
let mut bg2 = Color2(30, 70, 255);
println!("{}, {}, {}", bg2.0, bg2.1, bg2.2);
bg2.2 = 40;
println!("{}, {}, {}", bg2.0, bg2.1, bg2.2);
// pass by reference
println!("-------pass by reference");
fn print_color(c: Color) {
println!("Color - R:{} G:{} B:{}", c.red, c.green, c.blue);
}
fn print_color2(c: &Color2) {
println!("Color - R:{} G:{} B:{}", c.0, c.1, c.2);
}
print_color(bg);
/* print_color(bg); *impossible */
print_color2(&bg2);
print_color2(&bg2);
print_color2(&bg2); // it is possible to have multile function invocation due to it is called by reference
// arrays
println!("-------arrays");
let sample_array = [1, 3, 5, 7]; // either ways are valid
let sample_array2: [i32; 4] = [6, 8, 15, 20];
println!("{}", sample_array[1]);
for (i, el) in sample_array.iter().enumerate() {
println!("{}-th element is {}", i, el);
}
for i in 0..sample_array2.len() {
println!("{}", sample_array2[i]);
}
let array_of_2 = [2; 10]; // generating an array of 2's with length 10
for el in array_of_2.iter() {
println!("{}", el);
}
// impl
println!("-------impl");
struct Rectangle {
width: u32,
height: u32
}
impl Rectangle {
fn print_description(&self) {
println!("Rectangle: {} x {}", self.width, self.height);
}
fn is_square(&self) -> bool{
return self.width == self.height;
}
fn area(&self) -> u32 {
return self.width * self.height;
}
fn perimeter(&self) -> u32 {
return (self.width + self.height) * 2;
}
}
let rectangle: Rectangle = Rectangle {height: 30, width: 10, };
rectangle.print_description();
println!("The given rectangle is square? {}", rectangle.is_square());
println!("Area is {} and perimeter is {}", rectangle.area(), rectangle.perimeter());
// Strings
println!("-------Strings");
let new_string = "Hello World"; // primitive string
println!("{}", new_string);
let mut my_string = String::from("How is it going today?");
println!("{}", my_string);
println!("{}", my_string.len());
println!("{}", my_string.is_empty());
for token in my_string.split_whitespace() { // there is not in primitive string
println!("{}-", token)
}
println!("Does contain {} 'today' in it? {}", my_string, my_string.contains("today"));
my_string.push_str(new_string);
println!("{}", my_string);
/* println!("{}", my_string.push_str(new_string)) *impossible */
// Traits (like interface)
println!("-------Traits");
struct Person {
name: String,
age: u32,
}
// impl Person {
// fn to_string(&self) -> String {
// return format!("My name is {} and my age is {}", self.name, self.age);
// }
// }
impl ToString for Person { // trait "ToString" is implemented for "Person"
fn to_string(&self) -> String {
return format!("My name is {} and my age is {}", self.name, self.age);
}
}
let hooman: Person = Person {age: 39, name: String::from("Hesamyan Hooman")};
println!("{}", hooman.to_string());
// Custom Traits (like interface)
println!("-------Custom Traits");
trait HasVoiceBox {
// speak
fn speak(&self);
// check if can speak
fn can_speak(&self) -> bool;
}
impl HasVoiceBox for Person {
fn speak(&self) {
println!("Hello, my name is {} ", self.name);
}
fn can_speak(&self) -> bool {
if self.age > 3 {
return true;
} return false;
}
}
println!("I am {} and I can speak? {}", hooman.name, hooman.can_speak());
hooman.speak();
// Match Operator (like Switch)
println!("-------Match Operator | {
println!("{} is even", i);
} | conditional_block |
main.rs | continue;
}
println!("The value of n is {}", n);
if n > 100 {
break;
}
}
// for loop
println!("-------for loop");
for i in 1..10 {
println!("The number is {}", i);
}
let range = 10..20;
for i in range {
println!("element in range {}", i);
}
let family_name = vec!["Amir", "Hooman", "Aref", "Shahnaz", "Vihan", "Shima"];
for name in family_name.iter() {
println!("Family person is {}", name);
}
for (index, name) in family_name.iter().enumerate() {
println!("Family people {} is {}", index+1, name);
}
for name in family_name { // in this way we cannot use family_name next time
println!("name is {}", name);
}
// enum
println!("-------enum");
enum Direction {
Up,
Down,
Left,
Right
}
let player_direction1:Direction = Direction::Up;
let player_direction2:Direction = Direction::Down;
let player_direction3:Direction = Direction::Left;
let player_direction4:Direction = Direction::Right;
match player_direction1 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction2 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction3 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction4 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
// constants
println!("-------constants");
const MAXIMUM_NUMBER: u8 = 7; // must be uppercase
for n in 1..MAXIMUM_NUMBER {
println!("{}", n);
}
// tuples
println!("-------tuples");
let tup1 = ("A", ("Hooman", "Hesamyan"), "C", 734, true);
println!("{}", (tup1.1).1); // referencing a tuple inside the tuple
println!("{}", tup1.0);
println!("{}", tup1.2);
println!("{}", tup1.3);
println!("{}", tup1.4);
let (x, y, z, u, v) = tup1; // destructuring the tuple
println!("{}", x);
println!("{}", y.0);
// function
println!("-------functions");
fn count_to(num: u32) {
for i in 1..num {
if is_even(i) {
println!("{} is even", i);
} else {
println!("{} is odd", i);
}
}
}
count_to(7);
fn is_even(num: u32) -> bool {
return num % 2 == 0;
}
let number = 12;
println!("is {} even? {}", number, is_even(number));
// reference
println!("-------references");
let mut x = 7;
println!("x is {}", x);
{
let x_ref_mut = &mut x; // mutable reference should enclosed inside a block
*x_ref_mut += 7;
println!("x reference is {}", x_ref_mut);
}
let x_ref = &x;
println!("x is {}", x);
println!("x reference is {}", x_ref);
// structs
println!("-------structs");
struct Color {
red: u8, // u8: 0-255
green: u8,
blue: u8
}
let bg = Color {red: 255, green: 70, blue: 15};
println!("{}, {}, {}", bg.red, bg.green, bg.blue);
struct Color2(u8, u8, u8);
let mut bg2 = Color2(30, 70, 255);
println!("{}, {}, {}", bg2.0, bg2.1, bg2.2);
bg2.2 = 40;
println!("{}, {}, {}", bg2.0, bg2.1, bg2.2);
// pass by reference
println!("-------pass by reference");
fn print_color(c: Color) {
println!("Color - R:{} G:{} B:{}", c.red, c.green, c.blue);
}
fn print_color2(c: &Color2) {
println!("Color - R:{} G:{} B:{}", c.0, c.1, c.2);
}
print_color(bg);
/* print_color(bg); *impossible */
print_color2(&bg2);
print_color2(&bg2);
print_color2(&bg2); // it is possible to have multile function invocation due to it is called by reference
// arrays
println!("-------arrays");
let sample_array = [1, 3, 5, 7]; // either ways are valid
let sample_array2: [i32; 4] = [6, 8, 15, 20];
println!("{}", sample_array[1]);
for (i, el) in sample_array.iter().enumerate() {
println!("{}-th element is {}", i, el);
}
for i in 0..sample_array2.len() {
println!("{}", sample_array2[i]);
}
let array_of_2 = [2; 10]; // generating an array of 2's with length 10
for el in array_of_2.iter() {
println!("{}", el);
}
// impl
println!("-------impl");
struct Rectangle {
width: u32,
height: u32
}
impl Rectangle {
fn print_description(&self) {
println!("Rectangle: {} x {}", self.width, self.height);
}
fn is_square(&self) -> bool{
return self.width == self.height;
}
fn area(&self) -> u32 {
return self.width * self.height;
}
fn perimeter(&self) -> u32 |
}
let rectangle: Rectangle = Rectangle {height: 30, width: 10, };
rectangle.print_description();
println!("The given rectangle is square? {}", rectangle.is_square());
println!("Area is {} and perimeter is {}", rectangle.area(), rectangle.perimeter());
// Strings
println!("-------Strings");
let new_string = "Hello World"; // primitive string
println!("{}", new_string);
let mut my_string = String::from("How is it going today?");
println!("{}", my_string);
println!("{}", my_string.len());
println!("{}", my_string.is_empty());
for token in my_string.split_whitespace() { // there is not in primitive string
println!("{}-", token)
}
println!("Does contain {} 'today' in it? {}", my_string, my_string.contains("today"));
my_string.push_str(new_string);
println!("{}", my_string);
/* println!("{}", my_string.push_str(new_string)) *impossible */
// Traits (like interface)
println!("-------Traits");
struct Person {
name: String,
age: u32,
}
// impl Person {
// fn to_string(&self) -> String {
// return format!("My name is {} and my age is {}", self.name, self.age);
// }
// }
impl ToString for Person { // trait "ToString" is implemented for "Person"
fn to_string(&self) -> String {
return format!("My name is {} and my age is {}", self.name, self.age);
}
}
let hooman: Person = Person {age: 39, name: String::from("Hesamyan Hooman")};
println!("{}", hooman.to_string());
// Custom Traits (like interface)
println!("-------Custom Traits");
trait HasVoiceBox {
// speak
fn speak(&self);
// check if can speak
fn can_speak(&self) -> bool;
}
impl HasVoiceBox for Person {
fn speak(&self) {
println!("Hello, my name is {} ", self.name);
}
fn can_speak(&self) -> bool {
if self.age > 3 {
return true;
} return false;
}
}
println!("I am {} and I can speak? {}", hooman.name, hooman.can_speak());
hooman.speak();
// Match Operator (like Switch)
println!("-------Match | {
return (self.width + self.height) * 2;
} | identifier_body |
main.rs | We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction4 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
// constants
println!("-------constants");
const MAXIMUM_NUMBER: u8 = 7; // must be uppercase
for n in 1..MAXIMUM_NUMBER {
println!("{}", n);
}
// tuples
println!("-------tuples");
let tup1 = ("A", ("Hooman", "Hesamyan"), "C", 734, true);
println!("{}", (tup1.1).1); // referencing a tuple inside the tuple
println!("{}", tup1.0);
println!("{}", tup1.2);
println!("{}", tup1.3);
println!("{}", tup1.4);
let (x, y, z, u, v) = tup1; // destructuring the tuple
println!("{}", x);
println!("{}", y.0);
// function
println!("-------functions");
fn count_to(num: u32) {
for i in 1..num {
if is_even(i) {
println!("{} is even", i);
} else {
println!("{} is odd", i);
}
}
}
count_to(7);
fn is_even(num: u32) -> bool {
return num % 2 == 0;
}
let number = 12;
println!("is {} even? {}", number, is_even(number));
// reference
println!("-------references");
let mut x = 7;
println!("x is {}", x);
{
let x_ref_mut = &mut x; // mutable reference should enclosed inside a block
*x_ref_mut += 7;
println!("x reference is {}", x_ref_mut);
}
let x_ref = &x;
println!("x is {}", x);
println!("x reference is {}", x_ref);
// structs
println!("-------structs");
struct Color {
red: u8, // u8: 0-255
green: u8,
blue: u8
}
let bg = Color {red: 255, green: 70, blue: 15};
println!("{}, {}, {}", bg.red, bg.green, bg.blue);
struct Color2(u8, u8, u8);
let mut bg2 = Color2(30, 70, 255);
println!("{}, {}, {}", bg2.0, bg2.1, bg2.2);
bg2.2 = 40;
println!("{}, {}, {}", bg2.0, bg2.1, bg2.2);
// pass by reference
println!("-------pass by reference");
fn print_color(c: Color) {
println!("Color - R:{} G:{} B:{}", c.red, c.green, c.blue);
}
fn print_color2(c: &Color2) {
println!("Color - R:{} G:{} B:{}", c.0, c.1, c.2);
}
print_color(bg);
/* print_color(bg); *impossible */
print_color2(&bg2);
print_color2(&bg2);
print_color2(&bg2); // it is possible to have multile function invocation due to it is called by reference
// arrays
println!("-------arrays");
let sample_array = [1, 3, 5, 7]; // either ways are valid
let sample_array2: [i32; 4] = [6, 8, 15, 20];
println!("{}", sample_array[1]);
for (i, el) in sample_array.iter().enumerate() {
println!("{}-th element is {}", i, el);
}
for i in 0..sample_array2.len() {
println!("{}", sample_array2[i]);
}
let array_of_2 = [2; 10]; // generating an array of 2's with length 10
for el in array_of_2.iter() {
println!("{}", el);
}
// impl
println!("-------impl");
struct Rectangle {
width: u32,
height: u32
}
impl Rectangle {
fn print_description(&self) {
println!("Rectangle: {} x {}", self.width, self.height);
}
fn is_square(&self) -> bool{
return self.width == self.height;
}
fn area(&self) -> u32 {
return self.width * self.height;
}
fn perimeter(&self) -> u32 {
return (self.width + self.height) * 2;
}
}
let rectangle: Rectangle = Rectangle {height: 30, width: 10, };
rectangle.print_description();
println!("The given rectangle is square? {}", rectangle.is_square());
println!("Area is {} and perimeter is {}", rectangle.area(), rectangle.perimeter());
// Strings
println!("-------Strings");
let new_string = "Hello World"; // primitive string
println!("{}", new_string);
let mut my_string = String::from("How is it going today?");
println!("{}", my_string);
println!("{}", my_string.len());
println!("{}", my_string.is_empty());
for token in my_string.split_whitespace() { // there is not in primitive string
println!("{}-", token)
}
println!("Does contain {} 'today' in it? {}", my_string, my_string.contains("today"));
my_string.push_str(new_string);
println!("{}", my_string);
/* println!("{}", my_string.push_str(new_string)) *impossible */
// Traits (like interface)
println!("-------Traits");
struct Person {
name: String,
age: u32,
}
// impl Person {
// fn to_string(&self) -> String {
// return format!("My name is {} and my age is {}", self.name, self.age);
// }
// }
impl ToString for Person { // trait "ToString" is implemented for "Person"
fn to_string(&self) -> String {
return format!("My name is {} and my age is {}", self.name, self.age);
}
}
let hooman: Person = Person {age: 39, name: String::from("Hesamyan Hooman")};
println!("{}", hooman.to_string());
// Custom Traits (like interface)
println!("-------Custom Traits");
trait HasVoiceBox {
// speak
fn speak(&self);
// check if can speak
fn can_speak(&self) -> bool;
}
impl HasVoiceBox for Person {
fn speak(&self) {
println!("Hello, my name is {} ", self.name);
}
fn can_speak(&self) -> bool {
if self.age > 3 {
return true;
} return false;
}
}
println!("I am {} and I can speak? {}", hooman.name, hooman.can_speak());
hooman.speak();
// Match Operator (like Switch)
println!("-------Match Operator");
let number = 11;
match number {
1 => println!("It is one!"), // case 1
2 => println!("it is two!"), // case 2
3 | 4 => println!("it is three or four!"), // case 3 | 4
5..=10 => println!("it is between 5 to 10"), // case 5 to 10
_ => println!("it is out of the range!"), // default
}
// read input from console
println!("-------read input from console");
use std::io;
let mut input = String::new();
println!("Hey mate! Say something:");
match io::stdin().read_line(&mut input) {
Ok(_) => {
println!("Success! You said: {}", input.to_ascii_uppercase());
},
Err(e) => println!("Oops! SOmething went wrong: {}", e)
}
// Hashmap
println!("-------Hashmap");
use std::collections::HashMap;
// define HashMap
let mut marks = HashMap::new();
// add values
marks.insert("Rust Programming", 96);
marks.insert("Lua Programming", 100);
marks.insert("C++ Programming", 90);
marks.insert("Java Programming", 94);
// prompt length of the HashMap
println!("How many subjects are collected there? {}", marks.len());
// find a subject
match marks.get("Rust Programming") {
Some(mark) => println!("You have got {} for that.", mark),
None => println!("You did not study this subject!"),
}
// remove an item
marks.remove("Java Programming");
// loop through HashMap
for (subject, mark) in &marks {
println!("For {} you have got {}.", subject, mark);
}
// check for value
println!("Did you study C#? {} ", marks.contains_key("C# Programming"));
| } | random_line_split |
|
main.py | self.pre_parse_page(second_get_res.text), second_get_res.text,args)
def pre_parse_page(self, page_source):
'''
用户选择需要检索的页数
'''
reference_num_pattern_compile = re.compile(r'.*?找到 (.*?) ')
reference_num = re.search(reference_num_pattern_compile,
page_source).group(1)
reference_num_int = int(reference_num.replace(',', ''))
print('检索到' + reference_num + '条结果,全部下载大约需要' +
self.s2h(reference_num_int * 5) + '。')
# is_all_download = input('是否要 | is_all_download = 'n'
# 将所有数量根据每页20计算多少页
if is_all_download == 'y':
page, i = divmod(reference_num_int, 20)
if i != 0:
page += 1
return page
else:
count = self.count
self.select_download_num = int(count)
while True:
if self.select_download_num > reference_num_int:
print('输入数量大于检索结果,请重新输入!')
self.select_download_num = int(input('请输入需要下载的数量(不满一页将下载整页):'))
else:
page, i = divmod(self.select_download_num, 20)
# 不满一页的下载一整页
if i != 0:
page += 1
print("开始下载前%d页所有文件,预计用时%s" % (page, self.s2h(page * 20 * 5)))
print('--------------------------')
return page
def parse_page(self, download_page_left, page_source,args):
'''
保存页面信息
解析每一页的下载地址
'''
# gettype = args['type']
gettype = "pdf"
soup = BeautifulSoup(page_source, 'lxml')
# 定位到内容表区域
tr_table = soup.find(name='table', attrs={'class': 'GridTableContent'})
# 处理验证码
try:
# 去除第一个tr标签(表头)
tr_table.tr.extract()
except Exception as e:
logging.error('出现验证码')
return self.parse_page(
download_page_left,
crack.get_image(self.get_result_url, self.session,
page_source),args)
# 遍历每一行
for index, tr_info in enumerate(tr_table.find_all(name='tr')):
tr_text = ''
download_url = ''
detail_url = ''
# 遍历每一列
for index, td_info in enumerate(tr_info.find_all(name='td')):
# 因为一列中的信息非常杂乱,此处进行二次拼接
td_text = ''
for string in td_info.stripped_strings:
td_text += string
tr_text += td_text + ' '
# with open(
# 'data/ReferenceList.txt', 'a',
# encoding='utf-8') as file:
# file.write(td_text + ' ')
# 寻找下载链接
dl_url = td_info.find('a', attrs={'class': 'briefDl_D'})
# 寻找详情链接
dt_url = td_info.find('a', attrs={'class': 'fz14'})
# 排除不是所需要的列
if dt_url:
detail_url = dt_url.attrs['href']
if dl_url:
# download_url = dl_url.attrs['href']+"&dflag=pdfdown"
# download_url = dl_url.attrs['href']+"&dflag=cajdown"
download_url = dl_url.attrs['href']+"&dflag="+ gettype +"down"
# download_url = dl_url.attrs['href']
try:
# 将每一篇文献的信息分组
single_refence_list = tr_text.split(' ')
if args["flag"] == True:
self.index += 1
self.docid = self.sheet_name + str(self.index).zfill(4)
self.download_refence(download_url, single_refence_list,args)
# 是否开启详情页数据抓取
if config.crawl_isdetail ==1:
time.sleep(config.crawl_stepWaitTime)
if len(self.download_url)>40:
page_detail.get_detail_page(self.session, self.get_result_url,
detail_url, single_refence_list,
self.download_url,self.docid, gettype)
# with open('data/ReferenceList.txt', 'a', encoding='utf-8') as file:
# file.write('\n')
else:
logging.error("无下载链接")
# time.sleep(0.5)
else:
args["CrawProcess"].emit('爬取结束')
print("结束爬取,退出")
break
# exit()
except OSError:
pass
# download_page_left为剩余等待遍历页面
if download_page_left > 1:
self.cur_page_num += 1
self.get_another_page(download_page_left,args)
def get_another_page(self, download_page_left,args):
'''
请求其他页面和请求第一个页面形式不同
重新构造请求
'''
time.sleep(config.crawl_stepWaitTime)
curpage_pattern_compile = re.compile(r'.*?curpage=(\d+).*?')
self.get_result_url = CHANGE_PAGE_URL + re.sub(
curpage_pattern_compile, '?curpage=' + str(self.cur_page_num),
self.change_page_url)
get_res = self.session.get(self.get_result_url, headers=HEADER)
download_page_left -= 1
self.parse_page(download_page_left, get_res.text,args)
def download_refence(self,url, single_refence_list,args):
'''
拼接下载地址
进行文献下载
'''
gettype = "pdf"
# gettype = args['type']
# 拼接下载地址
self.download_url = DOWNLOAD_URL + re.sub(r'../', '', url)
# print("url---------------", self.download_url)
if len(self.download_url) > 40:
args['count']+=1
self.pg="正在下载第%s/%s篇文献"%(args['count'],str(self.select_download_num))
self.info='节点1_正在下载: ' + single_refence_list[1] + '.' + gettype
args["CrawProcess"].emit(str(self.pg+"\n"+self.info))
# print(type(args["CrawProcess"]))
name = single_refence_list[1]
# name = single_refence_list[1] + '_' + single_refence_list[2]
'''检查文件命名,防止网站资源有特殊字符本地无法保存'''
file_pattern_compile = re.compile(r'[\\/:\*\?"<>\|]')
name = re.sub(file_pattern_compile, '', name)
# with open('data/Links.txt', 'a', encoding='utf-8') as file:
# file.write(self.download_url + '\n')
# if config.crawl_isdownload ==1:
if not os.path.isdir('data/PDFs'):
os.mkdir(r'data/PDFs')
# filename = self.docid+name+".pdf"
filename = self.docid+name+"." + gettype
try:
if not os.path.isfile(os.path.join("data/PDFs/", filename)):
sess = requests.Session()
HEADER['Referer'] = self.download_url
# HEADER['Cookie'] = 'LID=WEEvREcwSlJHSldSdmVqelcxVTNETUwxSkpTdzNSelZPMGtUTTR3djg1QT0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!;'
# HEADER['Cookie'] = 'CurrSortFieldType=desc;CurrSortField=%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2f(%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2c%27TIME%27);c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqelcxVTNETUwwTExCbEZsQXRxTzRsVnpSSVpvTT0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/15/2020 15:04:56;cnkiUserKey=80843df4-4597-8109-17a3-f4f7642134c4;Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E | 全部下载(y/n)?')
| identifier_name |
main.py | \\chromedriver.exe"
self.webdriver_path = "D:\\chromedriver.exe"
# self.webdriver_path = "D:\\安装包\\phantomjs-2.1.1-windows\\bin\\phantomjs.exe"
# options = webdriver.ChromeOptions()
chrome_options = Options()
# options1 = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
# options1.add_experimental_option('excludeSwitches', ['enable-logging'])
# driver = webdriver.Chrome(executable_path=self.webdriver_path, chrome_options=chrome_options, options=options1)
# driver = webdriver.PhantomJS(executable_path=self.webdriver_path)
driver = webdriver.Chrome(executable_path=self.webdriver_path, chrome_options=chrome_options)
# driver = webdriver.Chrome(self.webdriver_path)
driver.get("https://www.cnki.net/")
driver.find_element_by_id("txt_SearchText").click()
sleep(2)
driver.find_element_by_id("txt_SearchText").send_keys("机器学习")
sleep(1)
element = driver.find_element_by_class_name("search-btn")
webdriver.ActionChains(driver).move_to_element(element).click(element).perform()
driver.find_element_by_class_name("search-btn").click()
sleep(1)
coo = driver.get_cookies()
cookies = {}
self.ck = str()
# 获取cookie中的name和value,转化成requests可以使用的形式
for cookie in coo:
cookies[cookie['name']] = cookie['value']
self.ck = self.ck + cookie['name'] + '=' + cookie['value'] + ';'
# print(cookie['name'] + '=' + cookie['value'] + ';')
return self.ck
def search_reference(self, ueser_input,args):
'''
第一次发送post请求
再一次发送get请求,这次请求没有写文献等东西
两次请求来获得文献列表
'''
if os.path.isdir('data'):
# 递归删除文件
shutil.rmtree('data')
# 创建一个空的
os.mkdir('data')
'''DbPrefix 为CFLS时 仅下载中文,SCDB 下载中英文(英文无下载链接)'''
static_post_data = {
'action': '',
'NaviCode': '*',
'ua': '1.21',
'isinEn': '1',
'PageName': 'ASP.brief_result_aspx',
'DbPrefix': 'CJFQ',
'DbCatalog': '中国学术期刊网络出版总库',
# 'ConfigFile': 'SCDB.xml',
'ConfigFile': 'CJFQ.xml',
'db_opt': 'CJFQ,CDFD,CMFD,CPFD,IPFD,CCND,CCJD', # 搜索类别(CNKI右侧的)
'his': '0',
'__': time.asctime(time.localtime()) + ' GMT+0800 (中国标准时间)'
}
# 将固定字段与自定义字段组合
post_data = {**static_post_data, **ueser_input}
try:
self.get_cookies()
except Exception as e:
print(e)
print("cookie获取失败")
# 必须有第一次请求,否则会提示服务器没有用户
first_post_res = self.session.post(
SEARCH_HANDLE_URL, data=post_data, headers=HEADER)
# get请求中需要传入第一个检索条件的值
key_value = quote(ueser_input.get('txt_1_value1'))
# print("first_post_res:",first_post_res.text)
# print("key_value:",key_value)
self.get_result_url = GET_PAGE_URL + first_post_res.text + '&t=1544249384932&keyValue=' + key_value + '&S=1&sorttype='
# 检索结果的第一个页面
second_get_res = self.session.get(self.get_result_url,headers=HEADER)
# cookies = second_get_res.cookies
# cookie = requests.utils.dict_from_cookiejar(cookies)
# print(cookie)
# print(second_get_res.text)
# second_get_res = self.session.get(SEARCH_HANDLE_URL, data=post_data,headers=HEADER)
change_page_pattern_compile = re.compile(
r'.*?pagerTitleCell.*?<a href="(.*?)".*')
try:
self.change_page_url = re.search(change_page_pattern_compile,
second_get_res.text).group(1)
print(self.change_page_url)
try:
self.parse_page(
self.pre_parse_page(second_get_res.text), second_get_res.text,args)
except Exception as e:
print(e)
except Exception as e:
print(e)
# pass
# self.parse_page(
# self.pre_parse_page(second_get_res.text), second_get_res.text,args)
def pre_parse_page(self, page_source):
'''
用户选择需要检索的页数
'''
reference_num_pattern_compile = re.compile(r'.*?找到 (.*?) ')
reference_num = re.search(reference_num_pattern_compile,
page_source).group(1)
reference_num_int = int(reference_num.replace(',', ''))
print('检索到' + reference_num + '条结果,全部下载大约需要' +
self.s2h(reference_num_int * 5) + '。')
# is_all_download = input('是否要全部下载(y/n)?')
is_all_download = 'n'
# 将所有数量根据每页20计算多少页
if is_all_download == 'y':
page, i = divmod(reference_num_int, 20)
if i != 0:
page += 1
return page
else:
count = self.count
self.select_download_num = int(count)
while True:
if self.select_download_num > reference_num_int:
print('输入数量大于检索结果,请重新输入!')
self.select_download_num = int(input('请输入需要下载的数量(不满一页将下载整页):'))
else:
page, i = divmod(self.select_download_num, 20)
# 不满一页的下载一整页
if i != 0:
page += 1
print("开始下载前%d页所有文件,预计用时%s" % (page, self.s2h(page * 20 * 5)))
print('--------------------------')
return page
def parse_page(self, download_page_left, page_source,args):
'''
保存页面信息
解析每一页的下载地址
'''
# gettype = args['type']
gettype = "pdf"
soup = BeautifulSoup(page_source, 'lxml')
# 定位到内容表区域
tr_table = soup.find(name='table', attrs={'class': 'GridTableContent'})
# 处理验证码
try:
# 去除第一个tr标签(表头)
tr_table.tr.extract()
except Exception as e:
logging.error('出现验证码')
return self.parse_page(
download_page_left,
crack.get_image(self.get_result_url, self.session,
page_source),args)
# 遍历每一行
for index, tr_info in enumerate(tr_table.find_all(name='tr')):
tr_text = ''
download_url = ''
detail_url = ''
# 遍历每一列
for index, td_info in enumerate(tr_info.find_all(name='td')):
# 因为一列中的信息非常杂乱,此处进行二次拼接
td_text = ''
for string in td_info.stripped_strings:
td_text += string
tr_text += td_text + ' '
# with open(
# 'data/ReferenceList.txt', 'a',
# encoding='utf-8') as file:
# file.write(td_text + ' ')
# 寻找下载链接
dl_url = td_info.find('a', attrs={'class': 'briefDl_D'})
# 寻找详情链接
dt_url = td_info.find('a', attrs={'class': 'fz14'})
# 排除不是所需要的列
if dt_url:
detail_url = dt_url.attrs['href']
if dl_url:
# download_url = dl_url.attrs['href']+"&dflag=pdfdown"
# download_url = dl_url.attrs['href']+"&dflag=cajdown"
download_url = dl_url.attrs['href']+"&dflag="+ gettype +"down"
# download_url = dl_url.attrs['href']
try:
# 将每一篇文献的信息分组
single_refence_list = tr_text.split(' ')
if args["flag"] == True:
self.index += 1
self.docid = self.sheet_name + str(self.index).zfill(4)
self.download_refence(download_url, single_refence_list,args)
# 是否开启详情 | (time.time()))
self.index = 0
self.cur_page_num = 1
# 保持会话
self.session.get(BASIC_URL, headers=HEADER)
self.count=count
def get_cookies(self):
# self.webdriver_path = "D:\\workspaces\\pythonworks\\webdriver\\chromedriver_win32 | identifier_body |
|
main.py | self.ck = self.ck + cookie['name'] + '=' + cookie['value'] + ';'
# print(cookie['name'] + '=' + cookie['value'] + ';')
return self.ck
def search_reference(self, ueser_input,args):
'''
第一次发送post请求
再一次发送get请求,这次请求没有写文献等东西
两次请求来获得文献列表
'''
if os.path.isdir('data'):
# 递归删除文件
shutil.rmtree('data')
# 创建一个空的
os.mkdir('data')
'''DbPrefix 为CFLS时 仅下载中文,SCDB 下载中英文(英文无下载链接)'''
static_post_data = {
'action': '',
'NaviCode': '*',
'ua': '1.21',
'isinEn': '1',
'PageName': 'ASP.brief_result_aspx',
'DbPrefix': 'CJFQ',
'DbCatalog': '中国学术期刊网络出版总库',
# 'ConfigFile': 'SCDB.xml',
'ConfigFile': 'CJFQ.xml',
'db_opt': 'CJFQ,CDFD,CMFD,CPFD,IPFD,CCND,CCJD', # 搜索类别(CNKI右侧的)
'his': '0',
'__': time.asctime(time.localtime()) + ' GMT+0800 (中国标准时间)'
}
# 将固定字段与自定义字段组合
post_data = {**static_post_data, **ueser_input}
try:
self.get_cookies()
except Exception as e:
print(e)
print("cookie获取失败")
# 必须有第一次请求,否则会提示服务器没有用户
first_post_res = self.session.post(
SEARCH_HANDLE_URL, data=post_data, headers=HEADER)
# get请求中需要传入第一个检索条件的值
key_value = quote(ueser_input.get('txt_1_value1'))
# print("first_post_res:",first_post_res.text)
# print("key_value:",key_value)
self.get_result_url = GET_PAGE_URL + first_post_res.text + '&t=1544249384932&keyValue=' + key_value + '&S=1&sorttype='
# 检索结果的第一个页面
second_get_res = self.session.get(self.get_result_url,headers=HEADER)
# cookies = second_get_res.cookies
# cookie = requests.utils.dict_from_cookiejar(cookies)
# print(cookie)
# print(second_get_res.text)
# second_get_res = self.session.get(SEARCH_HANDLE_URL, data=post_data,headers=HEADER)
change_page_pattern_compile = re.compile(
r'.*?pagerTitleCell.*?<a href="(.*?)".*')
try:
self.change_page_url = re.search(change_page_pattern_compile,
second_get_res.text).group(1)
print(self.change_page_url)
try:
self.parse_page(
self.pre_parse_page(second_get_res.text), second_get_res.text,args)
except Exception as e:
print(e)
except Exception as e:
print(e)
# pass
# self.parse_page(
# self.pre_parse_page(second_get_res.text), second_get_res.text,args)
def pre_parse_page(self, page_source):
'''
用户选择需要检索的页数
'''
reference_num_pattern_compile = re.compile(r'.*?找到 (.*?) ')
reference_num = re.search(reference_num_pattern_compile,
page_source).group(1)
reference_num_int = int(reference_num.replace(',', ''))
print('检索到' + reference_num + '条结果,全部下载大约需要' +
self.s2h(reference_num_int * 5) + '。')
# is_all_download = input('是否要全部下载(y/n)?')
is_all_download = 'n'
# 将所有数量根据每页20计算多少页
if is_all_download == 'y':
page, i = divmod(reference_num_int, 20)
if i != 0:
page += 1
return page
else:
count = self.count
self.select_download_num = int(count)
while True:
if self.select_download_num > reference_num_int:
print('输入数量大于检索结果,请重新输入!')
self.select_download_num = int(input('请输入需要下载的数量(不满一页将下载整页):'))
else:
page, i = divmod(self.select_download_num, 20)
# 不满一页的下载一整页
if i != 0:
page += 1
print("开始下载前%d页所有文件,预计用时%s" % (page, self.s2h(page * 20 * 5)))
print('--------------------------')
return page
def parse_page(self, download_page_left, page_source,args):
'''
保存页面信息
解析每一页的下载地址
'''
# gettype = args['type']
gettype = "pdf"
soup = BeautifulSoup(page_source, 'lxml')
# 定位到内容表区域
tr_table = soup.find(name='table', attrs={'class': 'GridTableContent'})
# 处理验证码
try:
# 去除第一个tr标签(表头)
tr_table.tr.extract()
except Exception as e:
logging.error('出现验证码')
return self.parse_page(
download_page_left,
crack.get_image(self.get_result_url, self.session,
page_source),args)
# 遍历每一行
for index, tr_info in enumerate(tr_table.find_all(name='tr')):
tr_text = ''
download_url = ''
detail_url = ''
# 遍历每一列
for index, td_info in enumerate(tr_info.find_all(name='td')):
# 因为一列中的信息非常杂乱,此处进行二次拼接
td_text = ''
for string in td_info.stripped_strings:
td_text += string
tr_text += td_text + ' '
# with open(
# 'data/ReferenceList.txt', 'a',
# encoding='utf-8') as file:
# file.write(td_text + ' ')
# 寻找下载链接
dl_url = td_info.find('a', attrs={'class': 'briefDl_D'})
# 寻找详情链接
dt_url = td_info.find('a', attrs={'class': 'fz14'})
# 排除不是所需要的列
if dt_url:
detail_url = dt_url.attrs['href']
if dl_url:
# download_url = dl_url.attrs['href']+"&dflag=pdfdown"
# download_url = dl_url.attrs['href']+"&dflag=cajdown"
download_url = dl_url.attrs['href']+"&dflag="+ gettype +"down"
# download_url = dl_url.attrs['href']
try:
# 将每一篇文献的信息分组
single_refence_list = tr_text.split(' ')
if args["flag"] == True:
self.index += 1
self.docid = self.sheet_name + str(self.index).zfill(4)
self.download_refence(download_url, single_refence_list,args)
# 是否开启详情页数据抓取
if config.crawl_isdetail ==1:
time.sleep(config.crawl_stepWaitTime)
if len(self.download_url)>40:
page_detail.get_detail_page(self.session, self.get_result_url,
detail_url, single_refence_list,
self.download_url,self.docid, gettype)
# with open('data/ReferenceList.txt', 'a', encoding='utf-8') as file:
# file.write('\n')
else:
logging.error("无下载链接")
# time.sleep(0.5)
else:
args["CrawProcess"].emit('爬取结束')
print("结束爬取,退出")
break
# exit()
except OSError:
pass
# download_page_left为剩余等待遍历页面
if download_page_left > 1:
self.cur_page_num += 1
self.get_another_page(download_page_left,args)
def get_another_page(self, download_page_left,args):
'''
请求其他页面和请求第一个页面形式不同
重新构造请求
'''
time.sleep(config.crawl_stepWaitTime)
curpage_pattern_compile = re.compile(r'.*?curpage=(\d+).*?')
self.get_result_url = CHANGE_PAGE_URL + re.sub(
curpage_pattern_compile, '?curpage=' + str(self.cur_page_num),
self.change_page_url)
get_res = self.session.get(self.get_result_url, headers=HEADER)
download_page_left -= 1
self.parse_page(download_page_left, get_res.text,args)
def download_refence(self,url, single_refence_list,args):
'''
拼接下载地址
进行文献下载
'''
gettype = "pdf"
# gettype = args[' | cookies[cookie['name']] = cookie['value'] | random_line_split |
|
main.py | ensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/15/2020 16:25:29;cnkiUserKey=700c6580-66f0-d89f-414c-c84f72dc52fa;c_m_expire=2020-09-15 16:25:29;SID_kns8=123106;ASP.NET_SessionId=qag4isl11jbdrt0mjunnyvjr;SID_kns_new=kns123117;Ecp_ClientId=1200915160502413634;Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"rptZbY"};Ecp_notFirstLogin=rptZbY;LID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVRTFY0MHlhNld6cXdxem9kRXpzcz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!;Ecp_session=1;'
HEADER['Cookie'] = self.ck
# HEADER['Cookie'] = 'Ecp_ClientId=1200824163400713266; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; SID=zhuye006; Ecp_session=1; _pk_ref=%5B%22%22%2C%22%22%2C1600247285%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3D1QNB3ozqZFmKQrJunLFuJn3iSEv6k-AZeBA3xHZ-8Wa%26wd%3D%26eqid%3Ded55ec7e00044464000000035f61627d%22%5D; _pk_ses=*; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVUaFVEOGJ4TldxYkF6bEU4anQzZz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/16/2020 17:27:44; LID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVUaFVEOGJ4TldxYkF6bEU4anQzZz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; c_m_expire=2020-09-16 17:27:44; Ecp_notFirstLogin=Gr0r31; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"Gr0r31"}'
refence_file = sess.get(self.download_url, headers=HEADER)
with open('data/PDFs/' + filename, 'wb') as file:
file.write(refence_file.content)
# refence_file = requests.get(self.download_url,headers=HEADER)
# with open('data/CAJs/' + filename , 'wb') as file:
# file.write(refence_file.content)
# print(self.download_url)
# refence_file =sess.get(self.download_url,headers=HEADER)
# htmls = refence_file.text
# soup = BeautifulSoup(htmls, 'lxml')
# print(soup.find_all(('img')))
# if len(soup.find_all('img'))>0:
#
# validCodeSubSrc = soup.find_all('img')[0]['src']
#
# code=crack.get_image2(validCodeSubSrc, self.session)
#
# HEADER['Referer'] = self.download_url
#
# payload = "vcode=" + code
# ret = sess.post(self.download_url, data=payload)
# print(ret)
except Exception as e:
logging.error(e)
logging.error('下载出错')
time.sleep(config.crawl_stepWaitTime)
'''移动文件到指定路径'''
def move_file(self,src_dir, target_dir,args):
args["CrawProcess"].emit("正在移动文件")
if not os.path.exists(target_dir):
os.mkdir(target_dir)
for item in os.listdir(src_dir):
src_name = os.path.join(src_dir, item)
target_name = os.path.join(target_dir, item)
shutil.move(src_name, target_name)
args["CrawProcess"].emit("文件移动完成,爬取完成")
def s2h(self,seconds):
'''
Convert a number of seconds into an hours/minutes/seconds string.
'''
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return ("%02d小时%02d分钟%02d秒" % (h, m, s))
# Ecp_ClientId=1200824163400713266; RsPerPage=20; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; _pk_ref=%5B%22%22%2C%22%22%2C1599961800%2C%22https%3A%2F%2Fwww.cnki.net%2F%22%5D; LID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; Ecp_session=1; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"5BEo2M"}; ASP.NET_SessionId=xer0y025pdahbeg1pdbooazq; SID_kns8=123110; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/14/2020 10:08:51; c_m_expire=2020-09-14 10:08:51
# Ecp_ClientId=1200824163400713266; RsPerPage=20; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; _pk_ref=%5B%22%22%2C%22%22%2C15 | 99961800%2C%22https%3A%2F%2Fwww.cnki.net%2F%22%5D; LID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEj | conditional_block |
|
createKB.py |
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Build classifiers and create the PGmine knowledge base')
parser.add_argument('--trainingFiles',required=True,type=str,help='2 BioC files (comma-separated) for star_rs and other')
parser.add_argument('--selectedChemicals',required=True,type=str,help='Which chemicals to filter for')
parser.add_argument('--dbsnp',required=True,type=str,help='File containing mappings from dbSNP IDs to genes')
parser.add_argument('--genes',required=True,type=str,help='File containing gene data')
parser.add_argument('--variantStopwords',required=True,type=str,help='Variants to remove')
parser.add_argument('--relevantMeSH',required=True,type=str,help='File with MeSH term mappings')
parser.add_argument('--inBioC',required=True,type=str,help='BioC file to predict things on')
parser.add_argument('--outKB',required=True,type=str,help='TSV file for KB')
args = parser.parse_args()
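# Illustrative invocation (all file names below are placeholders, not part of the project):
# python createKB.py --trainingFiles star_rs.bioc.xml,other.bioc.xml \
# --selectedChemicals chemicals.json --dbsnp dbsnp_to_genes.tsv --genes genes.tsv \
# --variantStopwords variant_stopwords.txt --relevantMeSH relevant_mesh.json.gz \
# --inBioC sentences.bioc.xml --outKB mini_kb.tsv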
chemMeshIDs = set()
meshIDsToChemName,meshIDsToPharmGKB,meshIDsToDrugBank = {},{},{}
cancerDrugMeshIDs = set()
with open(args.selectedChemicals) as f:
chemData = json.load(f)
for chem in chemData:
for meshID in chem['MeSH']:
meshID = 'MESH:'+meshID
chemMeshIDs.add(meshID)
if chem['isCancerDrug']:
cancerDrugMeshIDs.add(meshID)
meshIDsToChemName[meshID] = chem['name']
meshIDsToDrugBank[meshID] = chem['DrugBank']
if 'PharmGKB' in chem:
meshIDsToPharmGKB[meshID] = chem['PharmGKB']
dbsnp = {}
with open(args.dbsnp) as f:
for line in f:
rsid,geneInfos = line.strip('\n').split('\t')
geneInfos = [ tuple(geneInfo.split(':')) for geneInfo in geneInfos.split('|') ]
geneNames = [ geneName for geneName,geneID in geneInfos ]
geneIDs = [ geneID for geneName,geneID in geneInfos ]
dbsnp[rsid] = (geneNames,geneIDs)
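# Illustrative input line (assumed tab-separated format): 'rs4244285\tCYP2C19:1557'
# parses to dbsnp['rs4244285'] = (['CYP2C19'], ['1557']).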
with open(args.variantStopwords) as f:
variantStopwords = set( line.strip().lower() for line in f )
geneID2Name = {}
proteinCodingGenes = set()
with open(args.genes) as f:
for line in f:
entrezID,name,geneType = line.strip('\n').split('\t')
geneID2Name[entrezID] = name
if geneType == 'protein-coding':
proteinCodingGenes.add(entrezID)
print("Loaded chemical, gene and variant data")
pediatricTerms = set(['Pediatrics','Infant','Infant, Newborn','Child','Child, Preschool','Adolescent','Birth Cohort'])
adultTerms = set(['Adult','Aged','Middle Aged','Young Adult'])
with gzip.open(args.relevantMeSH,'rt') as f:
relevantMeSH = json.load(f)
pediatricPMIDs = set( int(pmid) for pmid,terms in relevantMeSH.items() if any( t in pediatricTerms for t in terms ) )
adultPMIDs = set( int(pmid) for pmid,terms in relevantMeSH.items() if any( t in adultTerms for t in terms ) )
pmidToRelevantMeSH = { int(pmid):[t for t in terms if t in pediatricTerms or t in adultTerms] for pmid,terms in relevantMeSH.items() }
print("Loaded mesh PMIDs for pediatric/adult terms")
# Fix mapping of some popular variants to the correct SNP
variantFixes = {
'rs121913377':'rs113488022' # BRAF V600E
}
modes = ["star_rs","other"]
trainingFiles = args.trainingFiles.split(',')
assert len(trainingFiles) == 2, "Must provide 2 files (comma-separated) for star_rs and other"
hlaGeneIDs = {"3105","3106","3107","3108","3109","3111","3112","3113","3115","3117","3118","3119","3120","3121","3122","3123","3125","3126","3127","3133","3134","3135"}
obviousMistakes = {('Abacavir','HLA-B*15:02'),('Allopurinol','HLA-B*15:02'),('Carbamazepine','HLA-B*57:01'),('Allopurinol','HLA-B*57:01'),('Carbamazepine','HLA-B*58:01'),('Abacavir','HLA-B*58:01')}
chemicalExclusions = {'cc and tc', 'cc+tc', 'cc + tc','whitehall ii','rvoto','lev-pae','oxaipn','luminal b','oxc-mpe','rapid stemi','vp40e'}
headers = ['pmid','title','journal','journal_short','year','month','day','is_pediatric_paper','is_adult_paper','age_mesh_tags','section','subsection','chemical_mesh_id','chemical_pharmgkb_id','chemical_drugbank_id','chemical_text','chemical_normalized','chemical_position','variant_id','variant_type','variant_text','variant_normalized','variant_position','gene_ids','gene_names','score','sentence','formatted_sentence']
with open(args.outKB,'w') as outF:
outF.write("\t".join(headers) + "\n")
for mode,trainingData in zip(modes,trainingFiles):
print("Creating classifier for %s" % mode)
predictedCount = 0
trainCorpus = kindred.load('biocxml',trainingData)
corpus = kindred.load('biocxml',args.inBioC)
for doc in trainCorpus.documents:
for relation in doc.relations:
relation.relationType = 'ChemicalMutation'
for doc in corpus.documents:
if mode == 'star_allele':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not e.text.strip().startswith('*')) ]
elif mode == 'rs':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not e.text.strip().startswith('rs')) ]
elif mode == 'star_rs':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not (e.text.strip().startswith('rs') or e.text.strip().startswith('*'))) ]
else:
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and (e.text.strip().startswith('*') or e.text.strip().startswith('rs'))) ]
doc.entities = [ e for e in doc.entities if e.position[0][0] >= 0 ]
doc.entities = [ e for e in doc.entities if e.position[0][1] <= len(doc.text) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and not e.metadata['conceptid'] in chemMeshIDs) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and len(e.text) <= 4) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and utils.normalizeMutation(e.text) is None) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and e.text.lower() in variantStopwords) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and re.match("^[A-Z][\[\]\-\(\)\d]*[A-Z]$",e.text) ) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and e.text.lower() in chemicalExclusions) ]
parser = kindred.Parser(model='en_core_sci_sm')
parser.parse(trainCorpus)
parser.parse(corpus)
chemicalVariantClassifier = kindred.RelationClassifier(classifierType='LogisticRegression',threshold=0.5,acceptedEntityTypes=[('Chemical','Mutation')])
chemicalVariantClassifier.train(trainCorpus)
chemicalVariantClassifier.predict(corpus)
for doc in corpus.documents:
pmid = doc.metadata['pmid']
title = doc.metadata['title']
journal = doc.metadata['journal']
year = doc.metadata['year']
month = doc.metadata['month']
day = doc.metadata['day']
section = doc.metadata['section']
is_pediatric_paper = pmid and int(pmid) in pediatricPMIDs
is_adult_paper = pmid and int(pmid) in adultPMIDs
age | return len(s) == len(s.encode()) | identifier_body |
|
createKB.py | terms ) )
pmidToRelevantMeSH = { int(pmid):[t for t in terms if t in pediatricTerms or t in adultTerms] for pmid,terms in relevantMeSH.items() }
print("Loaded mesh PMIDs for pediatric/adult terms")
# Fix mapping of some popular variants to the correct SNP
variantFixes = {
'rs121913377':'rs113488022' # BRAF V600E
}
modes = ["star_rs","other"]
trainingFiles = args.trainingFiles.split(',')
assert len(trainingFiles) == 2, "Must provide 2 files (comma-separated) for star_rs and other"
hlaGeneIDs = {"3105","3106","3107","3108","3109","3111","3112","3113","3115","3117","3118","3119","3120","3121","3122","3123","3125","3126","3127","3133","3134","3135"}
obviousMistakes = {('Abacavir','HLA-B*15:02'),('Allopurinol','HLA-B*15:02'),('Carbamazepine','HLA-B*57:01'),('Allopurinol','HLA-B*57:01'),('Carbamazepine','HLA-B*58:01'),('Abacavir','HLA-B*58:01')}
chemicalExclusions = {'cc and tc', 'cc+tc', 'cc + tc','whitehall ii','rvoto','lev-pae','oxaipn','luminal b','oxc-mpe','rapid stemi','vp40e'}
headers = ['pmid','title','journal','journal_short','year','month','day','is_pediatric_paper','is_adult_paper','age_mesh_tags','section','subsection','chemical_mesh_id','chemical_pharmgkb_id','chemical_drugbank_id','chemical_text','chemical_normalized','chemical_position','variant_id','variant_type','variant_text','variant_normalized','variant_position','gene_ids','gene_names','score','sentence','formatted_sentence']
with open(args.outKB,'w') as outF:
outF.write("\t".join(headers) + "\n")
for mode,trainingData in zip(modes,trainingFiles):
print("Creating classifier for %s" % mode)
predictedCount = 0
trainCorpus = kindred.load('biocxml',trainingData)
corpus = kindred.load('biocxml',args.inBioC)
for doc in trainCorpus.documents:
for relation in doc.relations:
relation.relationType = 'ChemicalMutation'
for doc in corpus.documents:
if mode == 'star_allele':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not e.text.strip().startswith('*')) ]
elif mode == 'rs':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not e.text.strip().startswith('rs')) ]
elif mode == 'star_rs':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not (e.text.strip().startswith('rs') or e.text.strip().startswith('*'))) ]
else:
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and (e.text.strip().startswith('*') or e.text.strip().startswith('rs'))) ]
doc.entities = [ e for e in doc.entities if e.position[0][0] >= 0 ]
doc.entities = [ e for e in doc.entities if e.position[0][1] <= len(doc.text) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and not e.metadata['conceptid'] in chemMeshIDs) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and len(e.text) <= 4) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and utils.normalizeMutation(e.text) is None) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and e.text.lower() in variantStopwords) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and re.match("^[A-Z][\[\]\-\(\)\d]*[A-Z]$",e.text) ) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and e.text.lower() in chemicalExclusions) ]
parser = kindred.Parser(model='en_core_sci_sm')
parser.parse(trainCorpus)
parser.parse(corpus)
chemicalVariantClassifier = kindred.RelationClassifier(classifierType='LogisticRegression',threshold=0.5,acceptedEntityTypes=[('Chemical','Mutation')])
chemicalVariantClassifier.train(trainCorpus)
chemicalVariantClassifier.predict(corpus)
for doc in corpus.documents:
pmid = doc.metadata['pmid']
title = doc.metadata['title']
journal = doc.metadata['journal']
year = doc.metadata['year']
month = doc.metadata['month']
day = doc.metadata['day']
section = doc.metadata['section']
is_pediatric_paper = pmid and int(pmid) in pediatricPMIDs
is_adult_paper = pmid and int(pmid) in adultPMIDs
age_mesh_tags = "|".join(pmidToRelevantMeSH[int(pmid)]) if int(pmid) in pmidToRelevantMeSH else ""
journal_short = journal
if len(journal_short) > 50:
journal_short = journal_short[:50] + '...'
if 'subsection' in doc.metadata:
subsection = doc.metadata['subsection'].replace('’',"'")
elif doc.metadata['section'] == 'abstract':
subsection = 'abstract'
elif doc.metadata['section'] == 'title':
subsection = 'title'
if subsection == 'None':
subsection = 'unknown'
groups = defaultdict(set)
scores = defaultdict(lambda : 0)
# Group chemical/variant pairs together so that we don't create redundant relations describing the same pair when the same chemical or variant appears multiple times in a sentence
chemicalVariantRelations = [ r for r in doc.relations if r.relationType == 'ChemicalMutation' ]
for chemicalVariant in chemicalVariantRelations:
chemical,variant = chemicalVariant.entities
chemical_mesh_id = chemical.metadata['conceptid']
variant_concept_id = variant.metadata['conceptid']
if ';' in chemical_mesh_id:
continue
elif ';' in variant_concept_id:
continue
key = (chemical_mesh_id,variant_concept_id)
groups[key].add(chemical)
groups[key].add(variant)
scores[key] = max(scores[key],chemicalVariant.probability)
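# e.g. two mentions of the same chemical MeSH ID and variant in one sentence collapse into a
# single group keyed by (chemical_mesh_id, variant_concept_id), keeping the highest probability.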
for key,chemicalVariants in groups.items():
score = scores[key]
# Sort by location in sentence
chemicalVariants = sorted(chemicalVariants, key = lambda x: x.position[0] )
chemicals = [ e for e in chemicalVariants if e.entityType == 'Chemical' ]
variants = [ e for e in chemicalVariants if e.entityType == 'Mutation' ]
chemical,variant = chemicals[0],variants[0]
chemical_text = chemical.text
chemical_mesh_id = chemical.metadata['conceptid']
chemical_pharmgkb_id = meshIDsToPharmGKB[chemical_mesh_id] if chemical_mesh_id in meshIDsToPharmGKB else 'NA'
chemical_normalized = meshIDsToChemName[chemical_mesh_id]
chemical_drugbank_id = meshIDsToDrugBank[chemical_mesh_id]
# Remap statins
chemical_text_lower = chemical_text.lower()
if chemical_text_lower in ['statin','statins']:
chemical_pharmgkb_id = 'PA133950441'
chemical_normalized = 'HMG-CoA reductase inhibitors'
chemical_drugbank_id = ''
elif chemical_text_lower == 'citalopram':
chemical_pharmgkb_id = 'PA449015'
chemical_normalized = 'Citalopram'
chemical_drugbank_id = 'DB00215'
elif chemical_text_lower == 'levomilnacipran':
chemical_pharmgkb_id = 'PA166182150'
chemical_normalized = 'Levomilnacipran'
chemical_drugbank_id = 'DB08918'
variant_text = variant.text
variant_normalized = utils.normalizeMutation(variant_text)
if variant_normalized is None:
continue
variant_metadata = variant.metadata['conceptid'].split(';') | random_line_split |
||
createKB.py | (s):
#try:
# s.decode('ascii')
# return True
#except UnicodeDecodeError:
# return False
return len(s) == len(s.encode())
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Build classifiers and create the PGmine knowledge base')
parser.add_argument('--trainingFiles',required=True,type=str,help='2 BioC files (comma-separated) for star_rs and other')
parser.add_argument('--selectedChemicals',required=True,type=str,help='Which chemicals to filter for')
parser.add_argument('--dbsnp',required=True,type=str,help='File containing mappings from dbSNP IDs to genes')
parser.add_argument('--genes',required=True,type=str,help='File containing gene data')
parser.add_argument('--variantStopwords',required=True,type=str,help='Variants to remove')
parser.add_argument('--relevantMeSH',required=True,type=str,help='File with MeSH term mappings')
parser.add_argument('--inBioC',required=True,type=str,help='BioC file to predict things on')
parser.add_argument('--outKB',required=True,type=str,help='TSV file for KB')
args = parser.parse_args()
chemMeshIDs = set()
meshIDsToChemName,meshIDsToPharmGKB,meshIDsToDrugBank = {},{},{}
cancerDrugMeshIDs = set()
with open(args.selectedChemicals) as f:
chemData = json.load(f)
for chem in chemData:
for meshID in chem['MeSH']:
meshID = 'MESH:'+meshID
chemMeshIDs.add(meshID)
if chem['isCancerDrug']:
cancerDrugMeshIDs.add(meshID)
meshIDsToChemName[meshID] = chem['name']
meshIDsToDrugBank[meshID] = chem['DrugBank']
if 'PharmGKB' in chem:
meshIDsToPharmGKB[meshID] = chem['PharmGKB']
dbsnp = {}
with open(args.dbsnp) as f:
for line in f:
rsid,geneInfos = line.strip('\n').split('\t')
geneInfos = [ tuple(geneInfo.split(':')) for geneInfo in geneInfos.split('|') ]
geneNames = [ geneName for geneName,geneID in geneInfos ]
geneIDs = [ geneID for geneName,geneID in geneInfos ]
dbsnp[rsid] = (geneNames,geneIDs)
with open(args.variantStopwords) as f:
variantStopwords = set( line.strip().lower() for line in f )
geneID2Name = {}
proteinCodingGenes = set()
with open(args.genes) as f:
for line in f:
entrezID,name,geneType = line.strip('\n').split('\t')
geneID2Name[entrezID] = name
if geneType == 'protein-coding':
proteinCodingGenes.add(entrezID)
print("Loaded chemical, gene and variant data")
pediatricTerms = set(['Pediatrics','Infant','Infant, Newborn','Child','Child, Preschool','Adolescent','Birth Cohort'])
adultTerms = set(['Adult','Aged','Middle Aged','Young Adult'])
with gzip.open(args.relevantMeSH,'rt') as f:
relevantMeSH = json.load(f)
pediatricPMIDs = set( int(pmid) for pmid,terms in relevantMeSH.items() if any( t in pediatricTerms for t in terms ) )
adultPMIDs = set( int(pmid) for pmid,terms in relevantMeSH.items() if any( t in adultTerms for t in terms ) )
pmidToRelevantMeSH = { int(pmid):[t for t in terms if t in pediatricTerms or t in adultTerms] for pmid,terms in relevantMeSH.items() }
print("Loaded mesh PMIDs for pediatric/adult terms")
# Fix mapping of some popular variants to the correct SNP
variantFixes = {
'rs121913377':'rs113488022' # BRAF V600E
}
modes = ["star_rs","other"]
trainingFiles = args.trainingFiles.split(',')
assert len(trainingFiles) == 2, "Must provide 2 files (comma-separated) for star_rs and other"
hlaGeneIDs = {"3105","3106","3107","3108","3109","3111","3112","3113","3115","3117","3118","3119","3120","3121","3122","3123","3125","3126","3127","3133","3134","3135"}
obviousMistakes = {('Abacavir','HLA-B*15:02'),('Allopurinol','HLA-B*15:02'),('Carbamazepine','HLA-B*57:01'),('Allopurinol','HLA-B*57:01'),('Carbamazepine','HLA-B*58:01'),('Abacavir','HLA-B*58:01')}
chemicalExclusions = {'cc and tc', 'cc+tc', 'cc + tc','whitehall ii','rvoto','lev-pae','oxaipn','luminal b','oxc-mpe','rapid stemi','vp40e'}
headers = ['pmid','title','journal','journal_short','year','month','day','is_pediatric_paper','is_adult_paper','age_mesh_tags','section','subsection','chemical_mesh_id','chemical_pharmgkb_id','chemical_drugbank_id','chemical_text','chemical_normalized','chemical_position','variant_id','variant_type','variant_text','variant_normalized','variant_position','gene_ids','gene_names','score','sentence','formatted_sentence']
with open(args.outKB,'w') as outF:
outF.write("\t".join(headers) + "\n")
for mode,trainingData in zip(modes,trainingFiles):
print("Creating classifier for %s" % mode)
predictedCount = 0
trainCorpus = kindred.load('biocxml',trainingData)
corpus = kindred.load('biocxml',args.inBioC)
for doc in trainCorpus.documents:
for relation in doc.relations:
relation.relationType = 'ChemicalMutation'
for doc in corpus.documents:
if mode == 'star_allele':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not e.text.strip().startswith('*')) ]
elif mode == 'rs':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not e.text.strip().startswith('rs')) ]
elif mode == 'star_rs':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not (e.text.strip().startswith('rs') or e.text.strip().startswith('*'))) ]
else:
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and (e.text.strip().startswith('*') or e.text.strip().startswith('rs'))) ]
doc.entities = [ e for e in doc.entities if e.position[0][0] >= 0 ]
doc.entities = [ e for e in doc.entities if e.position[0][1] <= len(doc.text) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and not e.metadata['conceptid'] in chemMeshIDs) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and len(e.text) <= 4) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and utils.normalizeMutation(e.text) is None) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and e.text.lower() in variantStopwords) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and re.match("^[A-Z][\[\]\-\(\)\d]*[A-Z]$",e.text) ) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and e.text.lower() in chemicalExclusions) ]
parser = kindred.Parser(model='en_core_sci_sm')
parser.parse(trainCorpus)
parser.parse(corpus)
chemicalVariantClassifier = kindred.RelationClassifier(classifierType='LogisticRegression',threshold=0.5,acceptedEntityTypes=[('Chemical','Mutation')])
chemicalVariantClassifier.train(trainCorpus)
chemicalVariantClassifier.predict(corpus)
for doc in corpus.documents:
pmid = doc.metadata['pmid']
title = doc.metadata['title']
journal = doc.metadata['journal']
year = doc.metadata['year']
month = doc.metadata['month']
day = doc.metadata['day']
section = doc.metadata['section']
is_pediatric_paper = pmid | isASCII | identifier_name |
|
createKB.py |
dbsnp = {}
with open(args.dbsnp) as f:
for line in f:
rsid,geneInfos = line.strip('\n').split('\t')
geneInfos = [ tuple(geneInfo.split(':')) for geneInfo in geneInfos.split('|') ]
geneNames = [ geneName for geneName,geneID in geneInfos ]
geneIDs = [ geneID for geneName,geneID in geneInfos ]
dbsnp[rsid] = (geneNames,geneIDs)
with open(args.variantStopwords) as f:
variantStopwords = set( line.strip().lower() for line in f )
geneID2Name = {}
proteinCodingGenes = set()
with open(args.genes) as f:
for line in f:
entrezID,name,geneType = line.strip('\n').split('\t')
geneID2Name[entrezID] = name
if geneType == 'protein-coding':
proteinCodingGenes.add(entrezID)
print("Loaded chemical, gene and variant data")
pediatricTerms = set(['Pediatrics','Infant','Infant, Newborn','Child','Child, Preschool','Adolescent','Birth Cohort'])
adultTerms = set(['Adult','Aged','Middle Aged','Young Adult'])
with gzip.open(args.relevantMeSH,'rt') as f:
relevantMeSH = json.load(f)
pediatricPMIDs = set( int(pmid) for pmid,terms in relevantMeSH.items() if any( t in pediatricTerms for t in terms ) )
adultPMIDs = set( int(pmid) for pmid,terms in relevantMeSH.items() if any( t in adultTerms for t in terms ) )
pmidToRelevantMeSH = { int(pmid):[t for t in terms if t in pediatricTerms or t in adultTerms] for pmid,terms in relevantMeSH.items() }
print("Loaded mesh PMIDs for pediatric/adult terms")
# Fix mapping of some popular variants to the correct SNP
variantFixes = {
'rs121913377':'rs113488022' # BRAF V600E
}
modes = ["star_rs","other"]
trainingFiles = args.trainingFiles.split(',')
assert len(trainingFiles) == 2, "Must provide 2 files (comma-separated) for star_rs and other"
hlaGeneIDs = {"3105","3106","3107","3108","3109","3111","3112","3113","3115","3117","3118","3119","3120","3121","3122","3123","3125","3126","3127","3133","3134","3135"}
obviousMistakes = {('Abacavir','HLA-B*15:02'),('Allopurinol','HLA-B*15:02'),('Carbamazepine','HLA-B*57:01'),('Allopurinol','HLA-B*57:01'),('Carbamazepine','HLA-B*58:01'),('Abacavir','HLA-B*58:01')}
chemicalExclusions = {'cc and tc', 'cc+tc', 'cc + tc','whitehall ii','rvoto','lev-pae','oxaipn','luminal b','oxc-mpe','rapid stemi','vp40e'}
headers = ['pmid','title','journal','journal_short','year','month','day','is_pediatric_paper','is_adult_paper','age_mesh_tags','section','subsection','chemical_mesh_id','chemical_pharmgkb_id','chemical_drugbank_id','chemical_text','chemical_normalized','chemical_position','variant_id','variant_type','variant_text','variant_normalized','variant_position','gene_ids','gene_names','score','sentence','formatted_sentence']
with open(args.outKB,'w') as outF:
outF.write("\t".join(headers) + "\n")
for mode,trainingData in zip(modes,trainingFiles):
print("Creating classifier for %s" % mode)
predictedCount = 0
trainCorpus = kindred.load('biocxml',trainingData)
corpus = kindred.load('biocxml',args.inBioC)
for doc in trainCorpus.documents:
for relation in doc.relations:
relation.relationType = 'ChemicalMutation'
for doc in corpus.documents:
if mode == 'star_allele':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not e.text.strip().startswith('*')) ]
elif mode == 'rs':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not e.text.strip().startswith('rs')) ]
elif mode == 'star_rs':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not (e.text.strip().startswith('rs') or e.text.strip().startswith('*'))) ]
else:
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and (e.text.strip().startswith('*') or e.text.strip().startswith('rs'))) ]
doc.entities = [ e for e in doc.entities if e.position[0][0] >= 0 ]
doc.entities = [ e for e in doc.entities if e.position[0][1] <= len(doc.text) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and not e.metadata['conceptid'] in chemMeshIDs) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and len(e.text) <= 4) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and utils.normalizeMutation(e.text) is None) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and e.text.lower() in variantStopwords) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and re.match("^[A-Z][\[\]\-\(\)\d]*[A-Z]$",e.text) ) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and e.text.lower() in chemicalExclusions) ]
parser = kindred.Parser(model='en_core_sci_sm')
parser.parse(trainCorpus)
parser.parse(corpus)
chemicalVariantClassifier = kindred.RelationClassifier(classifierType='LogisticRegression',threshold=0.5,acceptedEntityTypes=[('Chemical','Mutation')])
chemicalVariantClassifier.train(trainCorpus)
chemicalVariantClassifier.predict(corpus)
for doc in corpus.documents:
pmid = doc.metadata['pmid']
title = doc.metadata['title']
journal = doc.metadata['journal']
year = doc.metadata['year']
month = doc.metadata['month']
day = doc.metadata['day']
section = doc.metadata['section']
is_pediatric_paper = pmid and int(pmid) in pediatricPMIDs
is_adult_paper = pmid and int(pmid) in adultPMIDs
age_mesh_tags = "|".join(pmidToRelevantMeSH[int(pmid)]) if int(pmid) in pmidToRelevantMeSH else ""
journal_short = journal
if len(journal_short) > 50:
journal_short = journal_short[:50] + '...'
if 'subsection' in doc.metadata:
subsection = doc.metadata['subsection'].replace('’',"'")
elif doc.metadata['section'] == 'abstract':
subsection = 'abstract'
elif doc.metadata['section'] == 'title':
subsection = 'title'
if subsection == 'None':
subsection = 'unknown'
groups = defaultdict(set)
scores = defaultdict(lambda : 0)
# Group chemical/variant pairs together so that we don't create redundant relations describing the same pair when the same chemical or variant appears multiple times in a sentence
chemicalVariantRelations = [ r for r in doc.relations if r.relationType == 'ChemicalMutation' ]
for chemicalVariant in chemicalVariantRelations:
chemical,variant = chemicalVariant.entities
chemical_mesh_id = chemical.metadata['conceptid']
variant_concept_id = variant.metadata['conceptid']
if ';' in chemical_mesh_id:
continue
elif ';' in variant_concept_id:
continue
key = (chemical_mesh_id | meshID = 'MESH:'+meshID
chemMeshIDs.add(meshID)
if chem['isCancerDrug']:
cancerDrugMeshIDs.add(meshID)
meshIDsToChemName[meshID] = chem['name']
meshIDsToDrugBank[meshID] = chem['DrugBank']
if 'PharmGKB' in chem:
meshIDsToPharmGKB[meshID] = chem['PharmGKB'] | conditional_block |
|
weather_stn_data.py | 924','state':'TX','weather_station':'CORPUS CHRISTI INTL AP'},
{'icao_code':'KCRW','row':413,'col':676,'stn_id_cdo':'GHCND:USW00013866','state':'WV','weather_station':'CHARLESTON YEAGER AP'},
{'icao_code':'KDCA','row':452,'col':735,'stn_id_cdo':'GHCND:USW00013743','state':'VA','weather_station':'WASHINGTON REAGAN AP'},
{'icao_code':'KDFW','row':240,'col':463,'stn_id_cdo':'GHCND:USW00003927','state':'TX','weather_station':'Dallas-Fort Worth WSCMO AP'},
{'icao_code':'KDSM','row':418,'col':487,'stn_id_cdo':'GHCND:USW00014933','state':'IA','weather_station':'DES MOINES INTL AP'},
{'icao_code':'KEYW','row':146,'col':788,'stn_id_cdo':'GHCND:USW00012836','state':'FL','weather_station':'KEY WEST INTL AP'},
{'icao_code':'KFAR','row':510,'col':429,'stn_id_cdo':'GHCND:USW00014914','state':'ND','weather_station':'Fargo, Hector International Airport'}, #WFO=FGF, Grand Forks, ND
{'icao_code':'KFAT','row':339,'col': 94,'stn_id_cdo':'GHCND:USW00093193','state':'CA','weather_station':'Fresno Air Terminal'}, #wfo=hnx
{'icao_code':'KFLG','row':283,'col':216,'stn_id_cdo':'GHCND:USW00003103','state':'AZ','weather_station':'FLAGSTAFF PULLIAM AP'},
{'icao_code':'KFMY','row':187,'col':768,'stn_id_cdo':'GHCND:USW00012835','state':'FL','weather_station':'FT MYERS PAGE FLD AP'},
{'icao_code':'KFSD','row':449,'col':438,'stn_id_cdo':'GHCND:USW00014944','state':'SD','weather_station':'SIOUX FALLS FOSS FLD'},
{'icao_code':'KFST','row':190,'col':364,'stn_id_cdo':'GHCND:USW00023091','state':'TX','weather_station':'FT STOCKTON PECOS AP'},
{'icao_code':'KGEG','row':533,'col':173,'stn_id_cdo':'GHCND:USW00024157','state':'WA','weather_station':'SPOKANE INTL AP'}, #wfo=otx
{'icao_code':'KGGW','row':527,'col':306,'stn_id_cdo':'GHCND:USW00094008','state':'MT','weather_station':'GLASGOW INTL AP'},
{'icao_code':'KGRB','row':491,'col':548,'stn_id_cdo':'GHCND:USW00014898','state':'WI','weather_station':'GREEN BAY A S INTL AP'},
{'icao_code':'KGRR','row':473,'col':591,'stn_id_cdo':'GHCND:USW00094860','state':'MI','weather_station':'GRAND RAPIDS INTL AP'},
{'icao_code':'KGSO','row':383,'col':718,'stn_id_cdo':'GHCND:USW00013723','state':'NC','weather_station':'PIEDMONT TRIAD INTL AP'},
{'icao_code':'KHLN','row':502,'col':238,'stn_id_cdo':'GHCND:USW00024144','state':'MT','weather_station':'HELENA RGNL AP'}, #wfo=tfx
{'icao_code':'KIAH','row':185,'col':502,'stn_id_cdo':'GHCND:USW00012960','state':'TX','weather_station':'HOUSTON INTERCONT AP'},
{'icao_code':'KICT','row':334,'col':444,'stn_id_cdo':'GHCND:USW00003928','state':'KS','weather_station':'WICHITA DWIGHT D EISENHOWER NA'},
{'icao_code':'KINL','row':547,'col':465,'stn_id_cdo':'GHCND:USW00014918','state':'MN','weather_station':'International Falls Airport'},
{'icao_code':'KJAN','row':253,'col':584,'stn_id_cdo':'GHCND:USW00003940','state':'MS','weather_station':'JACKSON INTL AP'},
{'icao_code':'KJAX','row':266,'col':738,'stn_id_cdo':'GHCND:USW00013889','state':'FL','weather_station':'JACKSONVILLE INTL AP'},
{'icao_code':'KLAS','row':310,'col':163,'stn_id_cdo':'GHCND:USW00023169','state':'NV','weather_station':'LAS VEGAS MCCARRAN AP'},
{'icao_code':'KLAX','row':278,'col':101,'stn_id_cdo':'GHCND:USW00023174','state':'CA','weather_station':'Los Angeles International Airport'}, #wfo=lox
{'icao_code':'KLBB','row':248,'col':380,'stn_id_cdo':'GHCND:USW00023042','state':'TX','weather_station':'LUBBOCK INTL AP'},
{'icao_code':'KLBF','row':397,'col':388,'stn_id_cdo':'GHCND:USW00024023','state':'NE','weather_station':'N PLATTE RGNL AP'},
{'icao_code':'KLEX','row':390,'col':636,'stn_id_cdo':'GHCND:USW00093820','state':'KY','weather_station':'LEXINGTON BLUEGRASS AP'},
{'icao_code':'KLIT','row':292,'col':537,'stn_id_cdo':'GHCND:USW00013963','state':'AR','weather_station':'LITTLE ROCK AP ADAMS FLD'},
{'icao_code':'KMCI','row':373,'col':480,'stn_id_cdo':'GHCND:USW00003947','state':'MO','weather_station':'KANSAS CITY INTL AP'},
{'icao_code':'KMCO','row':228,'col':762,'stn_id_cdo':'GHCND:USW00012815','state':'FL','weather_station':'ORLANDO INTL AP'},
{'icao_code':'KMEM','row':307,'col':571,'stn_id_cdo':'GHCND:USW00013893','state':'TN','weather_station':'MEMPHIS INTL AP'}, | {'icao_code':'KMIA','row':184,'col':804,'stn_id_cdo':'GHCND:USW00012839','state':'FL','weather_station':'MIAMI INTL AP'},
{'icao_code':'KMIE','row':426,'col':610,'stn_id_cdo':'GHCND:USW00094895','state':'IN','weather_station':'MUNCIE DELAWARE CO AP'},
{'icao_code':'KMLI','row':427,'col':531,'stn_id_cdo':'GHCND:USW00014923','state':'IL','weather_station':'MOLINE QUAD CITY INTL AP'},
{'icao_code':'KMOB','row':229,'col':625,'stn_id_cdo':'GHCND:USW00013894','state':'AL','weather_station':'MOBILE RGNL AP'},
{'icao_code':'KMSY','row':205,'col':594,'stn_id_cdo':'GHCND:USW00012916','state':'LA','weather_station':'NEW ORLEANS INTL AP'}, | random_line_split |
|
coverage.py | etc. This function works with chembl_15 upwards. Outputs a list of tuples [(tid, target_type, domain_count, assay_count, act_count),...]
"""
data = queryDevice.queryDevice("""
SELECT DISTINCT dc.tid, dc.target_type, dc.dc, COUNT(DISTINCT act.assay_id), COUNT(DISTINCT activity_id)
FROM assays ass
JOIN(
SELECT td.tid, td.target_type, COUNT(cd.domain_id) as dc
FROM target_dictionary td
JOIN target_components tc
ON tc.tid = td.tid
JOIN component_sequences cs
ON cs.component_id = tc.component_id
JOIN component_domains cd
ON cd.component_id = cs.component_id
WHERE td.target_type IN('SINGLE PROTEIN', 'PROTEIN COMPLEX')
GROUP BY td.tid
) as dc
ON dc.tid = ass.tid
JOIN activities act
ON act.assay_id = ass.assay_id
WHERE act.standard_type IN('Ki','Kd','IC50','EC50', 'AC50')
AND ass.relationship_type = 'D'
AND assay_type IN('B')
AND act.standard_relation IN('=')
AND standard_units = 'nM'
AND standard_value <= %s
GROUP BY dc.tid ORDER BY COUNT(activity_id)""" % (int(params['threshold']) * 1000) , params)
print "retrieved data for ", len(data), "tids."
return data
#-----------------------------------------------------------------------------------------------------------------------
def readfile(path, key_name, val_name):
|
#-----------------------------------------------------------------------------------------------------------------------
def get_archs(el_targets, pfam_lkp):
"""Find multi-domain architectures.
Inputs:
el_targets -- list of eligible targets
"""
act_lkp = {}
arch_lkp = {}
dom_lkp = {}
for ent in el_targets:
try:
doms = pfam_lkp[ent[0]]
except KeyError:
print "no doms in ", ent[0]
arch = ', '.join(sorted(doms))
try:
arch_lkp[arch] += 1
act_lkp[arch] += ent[4]
except KeyError:
arch_lkp[arch] = 1
act_lkp[arch] = ent[4]
if len(doms) <= 1:
continue
for dom in set(doms):
try:
dom_lkp[dom] += 1
except KeyError:
dom_lkp[dom] = 1
return(arch_lkp, dom_lkp, act_lkp)
#-----------------------------------------------------------------------------------------------------------------------
def get_doms(tids, params):
"""Get domains for a list of tids.
Inputs:
el_targets -- list of eligible targets
"""
pfam_lkp = {}
tidstr = "', '".join(str(t) for t in tids)
data = queryDevice.queryDevice("""
SELECT tid, domain_name
FROM target_components tc
JOIN component_domains cd
ON cd.component_id = tc.component_id
JOIN domains d
ON d.domain_id = cd.domain_id
WHERE tc.tid IN('%s') and domain_type = 'Pfam-A'""" %tidstr, params)
for ent in data:
tid = ent[0]
dom = ent[1]
try:
pfam_lkp[tid].append(dom)
except KeyError:
pfam_lkp[tid] = [dom]
return pfam_lkp
#-----------------------------------------------------------------------------------------------------------------------
def count_valid(lkp, valid_doms):
"""Get count of architectures and activities covered by the mapping.
"""
valz = []
for arch in lkp.keys():
valid = False
doms = arch.split(', ')
for dom in doms:
if dom in valid_doms:
valid = True
break
valz.append((lkp[arch], valid))
valid = sum([x[0] for x in valz if x[1]])
allz = sum([x[0] for x in valz])
valid_archs = len([x[0] for x in valz if x[1]])
all_archs = len(valz) # total number of architectures; len(sum(...)) would raise a TypeError
out = open('data/log.tab', 'a')
timestamp = time.strftime('%d %B %Y %T', time.gmtime())
comment = "only binding assays"
release = params['release']
threshold = params['threshold']
out.write("%(valid)s\t%(allz)s\t%(release)s\t%(threshold)s\t%(comment)s\t%(timestamp)s\t%(valid_archs)s\t%(all_archs)s\n"
% locals())
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_archs(arch_lkp, valid_doms, path):
'''Write out multi-domain architectures in markdown tables.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
sorted_archs = sorted(arch_lkp.iteritems(), key=operator.itemgetter(1), reverse = True)
out = open('%s.md' % path ,'w')
out.write('|architecture|count|mapped|\n')
out.write('|:-----------|:---------|-----:|\n')
for arch in sorted_archs:
doms = str(arch[0]).split(', ')
if len(doms) <= 1:
continue
mapped = ', '.join([x for x in doms if x in valid_doms])
if len(mapped) == 0:
mapped = False
out.write("|%s|%s|%s|\n"%(arch[0], arch[1], mapped))
#-----------------------------------------------------------------------------------------------------------------------
def export_network(arch_lkp, valid_doms, path):
'''Write out network file.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
lkp = {}
for arch in arch_lkp.keys():
doms = arch.split(', ')
if len(doms) <= 1:
continue
count = arch_lkp[arch]
if type(doms) is str:
continue
for i in range(len(doms)-1):
for j in range(i+1, len(doms)):
dom_key = ', '.join(sorted([doms[i],doms[j]]))
try:
lkp[dom_key] += count
except KeyError:
lkp[dom_key] = count
out = open('%s.tab' % path ,'w')
out.write('dom_1\tdom_2\tcount\n')
for link in lkp.keys():
doms = str(link).split(', ')
out.write("%s\t%s\t%s\n"%(doms[0], doms[1], lkp[link]))
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_attribs(arch_lkp, valid_doms, path):
'''Write out network file.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
out = open('%s.tab' % path ,'w')
out.write('dom\tvalid\n')
lkp = {}
for arch in arch_lkp.keys():
doms = arch.split(', ')
if len(doms) <= 1:
continue
for dom in doms:
valid = False
if dom in valid_doms:
valid = True
lkp[dom] = valid
for it in lkp.items():
out.write("%s\t%s\n"%(it[0], it[1]))
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_doms(dom_lkp, valid_doms, path):
'''Write out identified architectures in markdown tables.
Inputs:
dom_lkp -- dictionary of domains occurring in multi-domain architectures.
'''
sorted_doms = sorted(dom_lkp.iteritems(), key=operator.itemgetter(1), reverse= True)
out = open('%s.md' % path ,'w')
out.write('|domain |count| validated|\n')
out.write('|:-----------|:-----|-------:|\n')
for dom in sorted_doms:
mapped = False
count = dom[1]
dom = str(dom[0])
if dom in valid_doms:
mapped = True
out.write("|%s|%s|%s|\n"%(dom, count, mapped))
#-----------------------------------------------------------------------------------------------------------------------
def master(version):
"""
Function: master
Run through all steps to identify mandatory muli-domain architectures.
"""
# Load the list of validated domains.
valid_dom_d = readfile('data/valid_pfam_v_%(version)s.tab' % locals(), 'pfam_a', 'pfam_a')
valid_doms = valid_dom_d.keys()
## | """Read two columns from a tab-separated file into a dictionary.
Inputs:
path -- filepath
key_name -- name of the column holding the key
val_name -- name of the column holding the value
"""
infile = open(path, 'r')
lines = infile.readlines()
infile.close()
lkp = {}
els = lines[0].rstrip().split('\t')
for i, el in enumerate(els):
if el == key_name:
key_idx = i
if el == val_name:
val_idx = i
for line in lines[1:]:
elements = line.rstrip().split('\t')
lkp[elements[key_idx]] = elements[val_idx]
return lkp | identifier_body |
coverage.py | etc. This function works with chembl_15 upwards. Outputs a list of tuples [(tid, target_type, domain_count, assay_count, act_count),...]
"""
data = queryDevice.queryDevice("""
SELECT DISTINCT dc.tid, dc.target_type, dc.dc, COUNT(DISTINCT act.assay_id), COUNT(DISTINCT activity_id)
FROM assays ass
JOIN(
SELECT td.tid, td.target_type, COUNT(cd.domain_id) as dc
FROM target_dictionary td
JOIN target_components tc
ON tc.tid = td.tid
JOIN component_sequences cs
ON cs.component_id = tc.component_id
JOIN component_domains cd
ON cd.component_id = cs.component_id
WHERE td.target_type IN('SINGLE PROTEIN', 'PROTEIN COMPLEX')
GROUP BY td.tid
) as dc
ON dc.tid = ass.tid
JOIN activities act
ON act.assay_id = ass.assay_id
WHERE act.standard_type IN('Ki','Kd','IC50','EC50', 'AC50')
AND ass.relationship_type = 'D'
AND assay_type IN('B')
AND act.standard_relation IN('=')
AND standard_units = 'nM'
AND standard_value <= %s
GROUP BY dc.tid ORDER BY COUNT(activity_id)""" % (int(params['threshold']) * 1000) , params)
print "retrieved data for ", len(data), "tids."
return data
#-----------------------------------------------------------------------------------------------------------------------
def readfile(path, key_name, val_name):
"""Read two columns from a tab-separated file into a dictionary.
Inputs:
path -- filepath
key_name -- name of the column holding the key
val_name -- name of the column holding the value
"""
infile = open(path, 'r')
lines = infile.readlines()
infile.close()
lkp = {}
els = lines[0].rstrip().split('\t')
for i, el in enumerate(els):
if el == key_name:
key_idx = i
if el == val_name:
val_idx = i
for line in lines[1:]:
elements = line.rstrip().split('\t')
lkp[elements[key_idx]] = elements[val_idx]
return lkp
#-----------------------------------------------------------------------------------------------------------------------
def get_archs(el_targets, pfam_lkp):
"""Find multi-domain architectures.
Inputs:
el_targets -- list of eligible targets
"""
act_lkp = {}
arch_lkp = {}
dom_lkp = {}
for ent in el_targets:
try:
doms = pfam_lkp[ent[0]]
except KeyError:
print "no doms in ", ent[0]
arch = ', '.join(sorted(doms))
try:
arch_lkp[arch] += 1
act_lkp[arch] += ent[4]
except KeyError:
arch_lkp[arch] = 1
act_lkp[arch] = ent[4]
if len(doms) <= 1:
continue
for dom in set(doms):
|
return(arch_lkp, dom_lkp, act_lkp)
#-----------------------------------------------------------------------------------------------------------------------
def get_doms(tids, params):
"""Get domains for a list of tids.
Inputs:
el_targets -- list of eligible targets
"""
pfam_lkp = {}
tidstr = "', '".join(str(t) for t in tids)
data = queryDevice.queryDevice("""
SELECT tid, domain_name
FROM target_components tc
JOIN component_domains cd
ON cd.component_id = tc.component_id
JOIN domains d
ON d.domain_id = cd.domain_id
WHERE tc.tid IN('%s') and domain_type = 'Pfam-A'""" %tidstr, params)
for ent in data:
tid = ent[0]
dom = ent[1]
try:
pfam_lkp[tid].append(dom)
except KeyError:
pfam_lkp[tid] = [dom]
return pfam_lkp
#-----------------------------------------------------------------------------------------------------------------------
def count_valid(lkp, valid_doms):
"""Get count of architectures and activities covered by the mapping.
"""
valz = []
for arch in lkp.keys():
valid = False
doms = arch.split(', ')
for dom in doms:
if dom in valid_doms:
valid = True
break
valz.append((lkp[arch], valid))
valid = sum([x[0] for x in valz if x[1]])
allz = sum([x[0] for x in valz])
valid_archs = len([x[0] for x in valz if x[1]])
all_archs = len(valz) # total number of architectures; len(sum(...)) would raise a TypeError
out = open('data/log.tab', 'a')
timestamp = time.strftime('%d %B %Y %T', time.gmtime())
comment = "only binding assays"
release = params['release']
threshold = params['threshold']
out.write("%(valid)s\t%(allz)s\t%(release)s\t%(threshold)s\t%(comment)s\t%(timestamp)s\t%(valid_archs)s\t%(all_archs)s\n"
% locals())
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_archs(arch_lkp, valid_doms, path):
'''Write out multi-domain architectures in markdown tables.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
sorted_archs = sorted(arch_lkp.iteritems(), key=operator.itemgetter(1), reverse = True)
out = open('%s.md' % path ,'w')
out.write('|architecture|count|mapped|\n')
out.write('|:-----------|:---------|-----:|\n')
for arch in sorted_archs:
doms = str(arch[0]).split(', ')
if len(doms) <= 1:
continue
mapped = ', '.join([x for x in doms if x in valid_doms])
if len(mapped) == 0:
mapped = False
out.write("|%s|%s|%s|\n"%(arch[0], arch[1], mapped))
#-----------------------------------------------------------------------------------------------------------------------
def export_network(arch_lkp, valid_doms, path):
'''Write out network file.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
lkp = {}
for arch in arch_lkp.keys():
doms = arch.split(', ')
if len(doms) <= 1:
continue
count = arch_lkp[arch]
if type(doms) is str:
continue
for i in range(len(doms)-1):
for j in range(i+1, len(doms)):
dom_key = ', '.join(sorted([doms[i],doms[j]]))
try:
lkp[dom_key] += count
except KeyError:
lkp[dom_key] = count
out = open('%s.tab' % path ,'w')
out.write('dom_1\tdom_2\tcount\n')
for link in lkp.keys():
doms = str(link).split(', ')
out.write("%s\t%s\t%s\n"%(doms[0], doms[1], lkp[link]))
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_attribs(arch_lkp, valid_doms, path):
'''Write out network file.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
out = open('%s.tab' % path ,'w')
out.write('dom\tvalid\n')
lkp = {}
for arch in arch_lkp.keys():
doms = arch.split(', ')
if len(doms) <= 1:
continue
for dom in doms:
valid = False
if dom in valid_doms:
valid = True
lkp[dom] = valid
for it in lkp.items():
out.write("%s\t%s\n"%(it[0], it[1]))
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_doms(dom_lkp, valid_doms, path):
'''Write out identified architectures in markdown tables.
Inputs:
dom_lkp -- dictionary of domains occurring in multi-domain architectures.
'''
sorted_doms = sorted(dom_lkp.iteritems(), key=operator.itemgetter(1), reverse= True)
out = open('%s.md' % path ,'w')
out.write('|domain |count| validated|\n')
out.write('|:-----------|:-----|-------:|\n')
for dom in sorted_doms:
mapped = False
count = dom[1]
dom = str(dom[0])
if dom in valid_doms:
mapped = True
out.write("|%s|%s|%s|\n"%(dom, count, mapped))
#-----------------------------------------------------------------------------------------------------------------------
def master(version):
"""
Function: master
Run through all steps to identify mandatory muli-domain architectures.
"""
# Load the list of validated domains.
valid_dom_d = readfile('data/valid_pfam_v_%(version)s.tab' % locals(), 'pfam_a', 'pfam_a')
valid_doms = valid_dom_d.keys()
## | try:
dom_lkp[dom] += 1
except KeyError:
dom_lkp[dom] = 1 | conditional_block |
coverage.py | etc. This function works with chembl_15 upwards. Outputs a list of tuples [(tid, target_type, domain_count, assay_count, act_count),...]
"""
data = queryDevice.queryDevice("""
SELECT DISTINCT dc.tid, dc.target_type, dc.dc, COUNT(DISTINCT act.assay_id), COUNT(DISTINCT activity_id)
FROM assays ass
JOIN(
SELECT td.tid, td.target_type, COUNT(cd.domain_id) as dc
FROM target_dictionary td
JOIN target_components tc
ON tc.tid = td.tid
JOIN component_sequences cs
ON cs.component_id = tc.component_id
JOIN component_domains cd
ON cd.component_id = cs.component_id
WHERE td.target_type IN('SINGLE PROTEIN', 'PROTEIN COMPLEX')
GROUP BY td.tid
) as dc
ON dc.tid = ass.tid
JOIN activities act
ON act.assay_id = ass.assay_id
WHERE act.standard_type IN('Ki','Kd','IC50','EC50', 'AC50')
AND ass.relationship_type = 'D'
AND assay_type IN('B')
AND act.standard_relation IN('=')
AND standard_units = 'nM'
AND standard_value <= %s
GROUP BY dc.tid ORDER BY COUNT(activity_id)""" % (int(params['threshold']) * 1000) , params)
print "retrieved data for ", len(data), "tids."
return data
#-----------------------------------------------------------------------------------------------------------------------
def readfile(path, key_name, val_name):
"""Read two columns from a tab-separated file into a dictionary.
Inputs:
path -- filepath
key_name -- name of the column holding the key
val_name -- name of the column holding the value
"""
infile = open(path, 'r')
lines = infile.readlines()
infile.close()
lkp = {}
els = lines[0].rstrip().split('\t')
for i, el in enumerate(els):
if el == key_name:
key_idx = i
if el == val_name:
val_idx = i
for line in lines[1:]:
elements = line.rstrip().split('\t')
lkp[elements[key_idx]] = elements[val_idx]
return lkp
#-----------------------------------------------------------------------------------------------------------------------
def get_archs(el_targets, pfam_lkp):
"""Find multi-domain architectures.
Inputs:
el_targets -- list of eligible targets
"""
act_lkp = {}
arch_lkp = {}
dom_lkp = {}
for ent in el_targets:
try:
doms = pfam_lkp[ent[0]]
except KeyError:
print "no doms in ", ent[0]
arch = ', '.join(sorted(doms))
try:
arch_lkp[arch] += 1
act_lkp[arch] += ent[4]
except KeyError:
arch_lkp[arch] = 1
act_lkp[arch] = ent[4]
if len(doms) <= 1:
continue
for dom in set(doms):
try:
dom_lkp[dom] += 1
except KeyError:
dom_lkp[dom] = 1
return(arch_lkp, dom_lkp, act_lkp)
#-----------------------------------------------------------------------------------------------------------------------
def get_doms(tids, params):
"""Get domains for a list of tids.
Inputs:
el_targets -- list of eligible targets
"""
pfam_lkp = {}
tidstr = "', '".join(str(t) for t in tids)
data = queryDevice.queryDevice("""
SELECT tid, domain_name
FROM target_components tc
JOIN component_domains cd
ON cd.component_id = tc.component_id
JOIN domains d
ON d.domain_id = cd.domain_id
WHERE tc.tid IN('%s') and domain_type = 'Pfam-A'""" %tidstr, params)
for ent in data:
tid = ent[0]
dom = ent[1]
try:
pfam_lkp[tid].append(dom)
except KeyError:
pfam_lkp[tid] = [dom]
return pfam_lkp
#-----------------------------------------------------------------------------------------------------------------------
def | (lkp, valid_doms):
"""Get count of architectures and activities covered by the mapping.
"""
valz = []
for arch in lkp.keys():
valid = False
doms = arch.split(', ')
for dom in doms:
if dom in valid_doms:
valid = True
break
valz.append((lkp[arch], valid))
valid = sum([x[0] for x in valz if x[1]])
allz = sum([x[0] for x in valz])
valid_archs = len([x[0] for x in valz if x[1]])
all_archs = len(valz) # total number of architectures; len(sum(...)) would raise a TypeError
out = open('data/log.tab', 'a')
timestamp = time.strftime('%d %B %Y %T', time.gmtime())
comment = "only binding assays"
release = params['release']
threshold = params['threshold']
out.write("%(valid)s\t%(allz)s\t%(release)s\t%(threshold)s\t%(comment)s\t%(timestamp)s\t%(valid_archs)s\t%(all_archs)s\n"
% locals())
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_archs(arch_lkp, valid_doms, path):
'''Write out multi-domain architectures in markdown tables.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
sorted_archs = sorted(arch_lkp.iteritems(), key=operator.itemgetter(1), reverse = True)
out = open('%s.md' % path ,'w')
out.write('|architecture|count|mapped|\n')
out.write('|:-----------|:---------|-----:|\n')
for arch in sorted_archs:
doms = str(arch[0]).split(', ')
if len(doms) <= 1:
continue
mapped = ', '.join([x for x in doms if x in valid_doms])
if len(mapped) == 0:
mapped = False
out.write("|%s|%s|%s|\n"%(arch[0], arch[1], mapped))
#-----------------------------------------------------------------------------------------------------------------------
def export_network(arch_lkp, valid_doms, path):
'''Write out network file.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
lkp = {}
for arch in arch_lkp.keys():
doms = arch.split(', ')
if len(doms) <= 1:
continue
count = arch_lkp[arch]
if type(doms) is str:
continue
for i in range(len(doms)-1):
for j in range(i+1, len(doms)):
dom_key = ', '.join(sorted([doms[i],doms[j]]))
try:
lkp[dom_key] += count
except KeyError:
lkp[dom_key] = count
out = open('%s.tab' % path ,'w')
out.write('dom_1\tdom_2\tcount\n')
for link in lkp.keys():
doms = str(link).split(', ')
out.write("%s\t%s\t%s\n"%(doms[0], doms[1], lkp[link]))
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_attribs(arch_lkp, valid_doms, path):
'''Write out network file.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
out = open('%s.tab' % path ,'w')
out.write('dom\tvalid\n')
lkp = {}
for arch in arch_lkp.keys():
doms = arch.split(', ')
if len(doms) <= 1:
continue
for dom in doms:
valid = False
if dom in valid_doms:
valid = True
lkp[dom] = valid
for it in lkp.items():
out.write("%s\t%s\n"%(it[0], it[1]))
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_doms(dom_lkp, valid_doms, path):
'''Write out identified architectures in markdown tables.
Inputs:
dom_lkp -- dictionary of domains occurring in multi-domain architectures.
'''
sorted_doms = sorted(dom_lkp.iteritems(), key=operator.itemgetter(1), reverse= True)
out = open('%s.md' % path ,'w')
out.write('|domain |count| validated|\n')
out.write('|:-----------|:-----|-------:|\n')
for dom in sorted_doms:
mapped = False
count = dom[1]
dom = str(dom[0])
if dom in valid_doms:
mapped = True
out.write("|%s|%s|%s|\n"%(dom, count, mapped))
#-----------------------------------------------------------------------------------------------------------------------
def master(version):
"""
Function: master
Run through all steps to identify mandatory muli-domain architectures.
"""
# Load the list of validated domains.
valid_dom_d = readfile('data/valid_pfam_v_%(version)s.tab' % locals(), 'pfam_a', 'pfam_a')
valid_doms = valid_dom_d.keys()
| count_valid | identifier_name |
coverage.py | etc. This function works with chembl_15 upwards. Outputs a list of tuples [(tid, target_type, domain_count, assay_count, act_count),...]
"""
data = queryDevice.queryDevice("""
SELECT DISTINCT dc.tid, dc.target_type, dc.dc, COUNT(DISTINCT act.assay_id), COUNT(DISTINCT activity_id)
FROM assays ass
JOIN(
SELECT td.tid, td.target_type, COUNT(cd.domain_id) as dc
FROM target_dictionary td
JOIN target_components tc
ON tc.tid = td.tid
JOIN component_sequences cs
ON cs.component_id = tc.component_id
JOIN component_domains cd
ON cd.component_id = cs.component_id
WHERE td.target_type IN('SINGLE PROTEIN', 'PROTEIN COMPLEX')
GROUP BY td.tid
) as dc
ON dc.tid = ass.tid
JOIN activities act
ON act.assay_id = ass.assay_id
WHERE act.standard_type IN('Ki','Kd','IC50','EC50', 'AC50')
AND ass.relationship_type = 'D'
AND assay_type IN('B')
AND act.standard_relation IN('=')
AND standard_units = 'nM'
AND standard_value <= %s
GROUP BY dc.tid ORDER BY COUNT(activity_id)""" % (int(params['threshold']) * 1000) , params)
print "retrieved data for ", len(data), "tids."
return data
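# Illustrative shape of the rows returned above (values invented for documentation):
# example_row = (11, 'SINGLE PROTEIN', 2, 35, 412)
# i.e. (tid, target_type, pfam_domain_count, distinct_assay_count, distinct_activity_count)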
#-----------------------------------------------------------------------------------------------------------------------
def readfile(path, key_name, val_name):
"""Read two columns from a tab-separated file into a dictionary.
Inputs:
path -- filepath
key_name -- name of the column holding the key
val_name -- name of the column holding the value
"""
infile = open(path, 'r')
lines = infile.readlines()
infile.close()
lkp = {}
els = lines[0].rstrip().split('\t')
for i, el in enumerate(els):
if el == key_name:
key_idx = i
if el == val_name:
val_idx = i
for line in lines[1:]:
elements = line.rstrip().split('\t')
lkp[elements[key_idx]] = elements[val_idx]
return lkp
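# Minimal usage sketch (mirrors the call in master(); the file is assumed to have a header
# line containing both named columns):
# valid_dom_d = readfile('data/valid_pfam_v_1.tab', 'pfam_a', 'pfam_a')
# valid_dom_d['Pkinase'] -> 'Pkinase'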
#-----------------------------------------------------------------------------------------------------------------------
def get_archs(el_targets, pfam_lkp):
"""Find multi-domain architectures.
Inputs:
el_targets -- list of eligible targets
"""
act_lkp = {}
arch_lkp = {}
dom_lkp = {}
for ent in el_targets:
try:
doms = pfam_lkp[ent[0]]
except KeyError:
print "no doms in ", ent[0]
arch = ', '.join(sorted(doms))
try:
arch_lkp[arch] += 1
act_lkp[arch] += ent[4]
except KeyError:
arch_lkp[arch] = 1
act_lkp[arch] = ent[4]
if len(doms) <= 1:
continue
for dom in set(doms):
try:
dom_lkp[dom] += 1
except KeyError:
dom_lkp[dom] = 1
return(arch_lkp, dom_lkp, act_lkp)
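# Illustrative contents of the returned lookups (numbers invented):
# arch_lkp = {'Pkinase': 12, 'Pkinase, SH2': 3} # architecture -> number of targets
# dom_lkp = {'Pkinase': 3, 'SH2': 3} # domain -> count of multi-domain targets it occurs in
# act_lkp = {'Pkinase': 5210, 'Pkinase, SH2': 480} # architecture -> summed activity count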
#-----------------------------------------------------------------------------------------------------------------------
def get_doms(tids, params):
"""Get domains for a list of tids.
Inputs:
el_targets -- list of eligible targets
"""
pfam_lkp = {}
tidstr = "', '".join(str(t) for t in tids)
data = queryDevice.queryDevice("""
SELECT tid, domain_name
FROM target_components tc
JOIN component_domains cd
ON cd.component_id = tc.component_id
JOIN domains d
ON d.domain_id = cd.domain_id
WHERE tc.tid IN('%s') and domain_type = 'Pfam-A'""" %tidstr, params)
for ent in data:
tid = ent[0]
dom = ent[1]
try:
pfam_lkp[tid].append(dom)
except KeyError:
pfam_lkp[tid] = [dom]
return pfam_lkp
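# pfam_lkp maps each tid to its list of Pfam-A domain names,
# e.g. {11: ['Pkinase', 'SH2'], 42: ['7tm_1']} (hypothetical tids and domain names).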
#-----------------------------------------------------------------------------------------------------------------------
def count_valid(lkp, valid_doms):
"""Get count of architectures and activities covered by the mapping.
"""
valz = []
for arch in lkp.keys():
valid = False
doms = arch.split(', ')
for dom in doms:
if dom in valid_doms:
valid = True
break
valz.append((lkp[arch], valid))
valid = sum([x[0] for x in valz if x[1]])
allz = sum([x[0] for x in valz])
valid_archs = len([x[0] for x in valz if x[1]])
all_archs = len(valz)
out = open('data/log.tab', 'a')
timestamp = time.strftime('%d %B %Y %T', time.gmtime())
comment = "only binding assays"
release = params['release']
threshold = params['threshold']
out.write("%(valid)s\t%(allz)s\t%(release)s\t%(threshold)s\t%(comment)s\t%(timestamp)s\t%(valid_archs)s\t%(all_archs)s\n"
% locals())
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_archs(arch_lkp, valid_doms, path):
'''Write out multi-domain architectures in markdown tables.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
sorted_archs = sorted(arch_lkp.iteritems(), key=operator.itemgetter(1), reverse = True)
out = open('%s.md' % path ,'w')
out.write('|architecture|count|mapped|\n')
out.write('|:-----------|:---------|-----:|\n')
for arch in sorted_archs:
doms = str(arch[0]).split(', ')
if len(doms) <= 1:
continue
mapped = ', '.join([x for x in doms if x in valid_doms])
if len(mapped) == 0:
mapped = False
out.write("|%s|%s|%s|\n"%(arch[0], arch[1], mapped))
#-----------------------------------------------------------------------------------------------------------------------
def export_network(arch_lkp, valid_doms, path):
'''Write out network file.
Inputs:
arch_lkp -- dictionary of multi-domain architectures. | doms = arch.split(', ')
if len(doms) <= 1:
continue
count = arch_lkp[arch]
if type(doms) is str:
continue
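# Count every unordered pair of domains that co-occur in this architecture.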
for i in range(len(doms)-1):
for j in range(i+1, len(doms)):
dom_key = ', '.join(sorted([doms[i],doms[j]]))
try:
lkp[dom_key] += count
except KeyError:
lkp[dom_key] = count
out = open('%s.tab' % path ,'w')
out.write('dom_1\tdom_2\tcount\n')
for link in lkp.keys():
doms = str(link).split(', ')
out.write("%s\t%s\t%s\n"%(doms[0], doms[1], lkp[link]))
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_attribs(arch_lkp, valid_doms, path):
'''Write out network file.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
out = open('%s.tab' % path ,'w')
out.write('dom\tvalid\n')
lkp = {}
for arch in arch_lkp.keys():
doms = arch.split(', ')
if len(doms) <= 1:
continue
for dom in doms:
valid = False
if dom in valid_doms:
valid = True
lkp[dom] = valid
for it in lkp.items():
out.write("%s\t%s\n"%(it[0], it[1]))
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_doms(dom_lkp, valid_doms, path):
'''Write out identified architectures in markdown tables.
Inputs:
dom_lkp -- dictionary of domains occurring in multi-domain architectures.
'''
sorted_doms = sorted(dom_lkp.iteritems(), key=operator.itemgetter(1), reverse= True)
out = open('%s.md' % path ,'w')
out.write('|domain |count| validated|\n')
out.write('|:-----------|:-----|-------:|\n')
for dom in sorted_doms:
mapped = False
count = dom[1]
dom = str(dom[0])
if dom in valid_doms:
mapped = True
out.write("|%s|%s|%s|\n"%(dom, count, mapped))
#-----------------------------------------------------------------------------------------------------------------------
def master(version):
"""
Function: master
Run through all steps to identify mandatory multi-domain architectures.
"""
# Load the list of validated domains.
valid_dom_d = readfile('data/valid_pfam_v_%(version)s.tab' % locals(), 'pfam_a', 'pfam_a')
valid_doms = valid_dom_d.keys()
## | '''
lkp = {}
for arch in arch_lkp.keys(): | random_line_split |
fapui-accordion.js | div>" ].join ( "" );
juicer.set ( "cache", true );
juicer.set ( "errorhandling", false );
juicer.set ( "strip", true );
juicer.set ( "detection", false );
},
/**
*
* @param {*} el
*/
render : function ( el ) {
if ( ! FAPUI.isString ( el ) ) {
el.addClass ( "panel-noscroll" );
}
this.callParent ( [ el ] );
},
/**
*
*/
createDom : function () {
var me = this;
me._items = me.items.slice ( 0 );
me.id = me.id || FAPUI.getId ();
var html = [];
var divClass = "accordion-container";
if ( ! me.border ) {
divClass = divClass + " accordion-container-noborder";
}
html.push ( "<div id=\"" + me.id + "\" class=\"" + divClass + "\">" );
var cfg = {};
$ ( me._items ).each ( function ( i ) {
this.index = i;
cfg.it = this;
html.push ( juicer ( me.tpl, cfg ) );
} );
html.push ( "</div>" );
me.items = this._jsonToWidget ();
return html.join ( "" );
},
/**
*
*/
bindEvent : function () {
var me = this;
me.el = me.el || $ ( "#" + me.id );
me.el.click ( function ( event ) {
var target = $ ( event.target );
if ( target.parent ().is ( "div.accordion-title" ) ) {
target = target.parent ();
}
if ( target.is ( "div.accordion-title" ) ) {
var index = parseInt ( target.parent ().attr ( "index" ), 10 );
me.fireEvent ( "itemclick", me, index, target.parent ().attr ( "itemId" ), event );
me.expand ( index );
}
} );
me.afterRender ();
},
/**
*
*/
afterRender : function () {
var me = this;
var i = me.activeIndex || 0;
me.activeIndex = - 1;
me.expand ( i );
},
/**
*
*/
updateRender : function () {
this.callParent ();
},
/**
* Recalculate the width and height of the child components inside the container; this also runs after setHeight and setWidth are called
* @method doLayout
*/
doLayout : function () {
var me = this;
me.setHeight ( me.height );
me.setWidth ( me.width );
},
/**
*
* @param {*} h
*/
doLayoutH : function ( h ) {
var me = this;
//Compute the height of the content area
var items = this.el.children ();
var heightSum = 0;
$ ( items ).each ( function () {
heightSum = heightSum + $ ( "div:first", this ).outerHeight ();
} );
h = h - heightSum;
var _itemHeight = h;
$ ( items ).each ( function () {
$ ( this ).children ( "div.accordion-content" ).height ( h );
} );
$ ( me.items ).each ( function () {
var that = this;
if ( that.isRender ) {
that.setHeight ( _itemHeight );
}
} );
},
/**
*
* @param {*} w
*/
doLayoutW : function ( w ) {
var me = this;
var items = this.el.children ();
$ ( items ).each ( function () {
$ ( $ ( this ).children ( "div.accordion-title" ) ).width ( w - 5 );
$ ( $ ( this ).children ( "div.accordion-content" ) ).width ( w );
} );
$ ( me.items ).each ( function () {
var that = this;
if ( that.isRender ) {
that.setWidth ( me.width );
}
} );
},
/**
* Set the height
* @method setHeight
* @param {num} h
*/
setHeight : function ( h ) {
this.height = h;
this.el.height ( h );
this.doLayoutH ( h );
},
/**
* Set the width
* @method setWidth
* @param {num} w
*/
setWidth : function ( w ) {
this.width = w;
this.el.width ( w );
this.doLayoutW ( w );
},
/**
* Get the height of the AccordionLayout
* @method getHeight
* @return {Num}
*/
getHeight : function () {
return this.height;
},
/**
* Get the width of the AccordionLayout
* @method getWidth
* @return {Num}
*/
getWidth : function () {
return this.width;
},
/**
* Expand a child component. If index is a number, expand the index-th component in items; if index is a String, expand the child component whose itemId equals index
* @method expand
* @param {string} index
*/
expand : function ( index ) {
var me = this;
if ( ! FAPUI.isNumber ( index ) ) {
index = $ ( me.el.children ( "div[itemId='" + index + "']" )[ 0 ] ).attr ( "index" );
index = parseInt ( index , 10 );
}
if ( index !== null && this.activeIndex !== index && me.items.length > 0 ) {
me.collapse ( this.activeIndex );
var contentArea = $ ( $ ( this.el ).
children ( "div[index=" + index + "]" )[ 0 ] ).
children ( "div.accordion-content" );
me._renderWidget ( index, contentArea );
var contentEl = me.items[ index ].el.parent ();
var headEl = contentEl.prev ();
headEl.addClass ( "accordion-title-active" );
$ ( "span.accordion-tool", headEl ).addClass ( "accordion-expand" );
if ( me.animate === true ) {
contentEl.slideDown ( "normal" );
} else {
contentEl.show ();
}
this.items[ index ].setWidth ( contentArea.innerWidth () );
this.items[ index ].setHeight ( contentArea.innerHeight () );
this.fireEvent ( "expand", this, index, this._items[ index ].itemId, this._items[ index ].title );
this.activeIndex = index;
}
},
/**
* Collapse a child component. If index is a number, collapse the index-th component in items; if index is a String, collapse the child component whose itemId equals index
* @method collapse
* @param {string} index
*/
collapse : function ( index ) {
var me = this;
if ( index == - 1 ) {
return;
}//if index is -1, every panel is already collapsed
if ( ! FAPUI.isNumber ( index ) ) {
index = $ ( me.el.children ( "div[itemId=\"" + index + "\"]" )[ 0 ] ).attr ( "index" );
index = parseInt ( index , 10 );
}
if ( index !== null && this.activeIndex == index && me.items.length > 0 ) {
var contentEl = me.items[ index ].el.parent ();
var headEl = contentEl.prev ();
if ( me.animate === true ) {
contentEl.slideUp ( "normal" );
} else {
contentEl.hide ();
}
contentEl.removeClass ( "accordion-content-first-default" );
headEl.removeClass ( "accordion-title-active" );
$ ( "span.accordion-tool", headEl ).removeClass ( "accordion-expand" );
this.fireEvent ( "collapse", this, index, this._items[ index ].itemId, this._items[ index ].title );
this.activeIndex = - 1;
}
},
/**
* Add child components
* @method addItems
* @param {Array} items The collection of components to add
*/
addItems : function ( items ) {
var me = this;
if ( ! FAPUI.isArray ( items ) ) {
items = [ items ];
}
var cfg = {};
var html = [];
$ ( items ).each ( function ( i ) {
this.index = me.items.length + i;
cfg.it = this;
html.push ( juicer ( me.tpl, cfg ) );
} );
me.el.append ( html.join ( "" ) );
me._items = me._items.concat ( items );
me.items = me.items.concat ( me._jsonToWidget ( items ) );
me.doLayout ();
},
/**
* Remove a child component. If index is a number, remove the index-th component in items; if index is a String, remove the child component whose itemId equals index
* @method removeItem
* @param {string} index | random_line_split |
||
fapui-accordion.js | the height of the content area
var items = this.el.children ();
var heightSum = 0;
$ ( items ).each ( function () {
heightSum = heightSum + $ ( "div:first", this ).outerHeight ();
} );
h = h - heightSum;
var _itemHeight = h;
$ ( items ).each ( function () {
$ ( this ).children ( "div.accordion-content" ).height ( h );
} );
$ ( me.items ).each ( function () {
var that = this;
if ( that.isRender ) {
that.setHeight ( _itemHeight );
}
} );
},
/**
*
* @param {*} w
*/
doLayoutW : function ( w ) {
var me = this;
var items = this.el.children ();
$ ( items ).each ( function () {
$ ( $ ( this ).children ( "div.accordion-title" ) ).width ( w - 5 );
$ ( $ ( this ).children ( "div.accordion-content" ) ).width ( w );
} );
$ ( me.items ).each ( function () {
var that = this;
if ( that.isRender ) {
that.setWidth ( me.width );
}
} );
},
/**
* Set the height
* @method setHeight
* @param {num} h
*/
setHeight : function ( h ) {
this.height = h;
this.el.height ( h );
this.doLayoutH ( h );
},
/**
* Set the width
* @method setWidth
* @param {num} w
*/
setWidth : function ( w ) {
this.width = w;
this.el.width ( w );
this.doLayoutW ( w );
},
/**
* Get the height of the AccordionLayout
* @method getHeight
* @return {Num}
*/
getHeight : function () {
return this.height;
},
/**
* Get the width of the AccordionLayout
* @method getWidth
* @return {Num}
*/
getWidth : function () {
return this.width;
},
/**
* Expand a child component. If index is a number, expand the index-th component in items; if index is a String, expand the child component whose itemId equals index
* @method expand
* @param {string} index
*/
expand : function ( index ) {
var me = this;
if ( ! FAPUI.isNumber ( index ) ) {
index = $ ( me.el.children ( "div[itemId='" + index + "']" )[ 0 ] ).attr ( "index" );
index = parseInt ( index , 10 );
}
if ( index !== null && this.activeIndex !== index && me.items.length > 0 ) {
me.collapse ( this.activeIndex );
var contentArea = $ ( $ ( this.el ).
children ( "div[index=" + index + "]" )[ 0 ] ).
children ( "div.accordion-content" );
me._renderWidget ( index, contentArea );
var contentEl = me.items[ index ].el.parent ();
var headEl = contentEl.prev ();
headEl.addClass ( "accordion-title-active" );
$ ( "span.accordion-tool", headEl ).addClass ( "accordion-expand" );
if ( me.animate === true ) {
contentEl.slideDown ( "normal" );
} else {
contentEl.show ();
}
this.items[ index ].setWidth ( contentArea.innerWidth () );
this.items[ index ].setHeight ( contentArea.innerHeight () );
this.fireEvent ( "expand", this, index, this._items[ index ].itemId, this._items[ index ].title );
this.activeIndex = index;
}
},
/**
* Collapse a child component. If index is a number, collapse the index-th component in items; if index is a String, collapse the child component whose itemId equals index
* @method collapse
* @param {string} index
*/
collapse : function ( index ) {
var me = this;
if ( index == - 1 ) {
return;
}//if index is -1, every panel is already collapsed
if ( ! FAPUI.isNumber ( index ) ) {
index = $ ( me.el.children ( "div[itemId=\"" + index + "\"]" )[ 0 ] ).attr ( "index" );
index = parseInt ( index , 10 );
}
if ( index !== null && this.activeIndex == index && me.items.length > 0 ) {
var contentEl = me.items[ index ].el.parent ();
var headEl = contentEl.prev ();
if ( me.animate === true ) {
contentEl.slideUp ( "normal" );
} else {
contentEl.hide ();
}
contentEl.removeClass ( "accordion-content-first-default" );
headEl.removeClass ( "accordion-title-active" );
$ ( "span.accordion-tool", headEl ).removeClass ( "accordion-expand" );
this.fireEvent ( "collapse", this, index, this._items[ index ].itemId, this._items[ index ].title );
this.activeIndex = - 1;
}
},
/**
* Add child components
* @method addItems
* @param {Array} items The collection of components to add
*/
addItems : function ( items ) {
var me = this;
if ( ! FAPUI.isArray ( items ) ) {
items = [ items ];
}
var cfg = {};
var html = [];
$ ( items ).each ( function ( i ) {
this.index = me.items.length + i;
cfg.it = this;
html.push ( juicer ( me.tpl, cfg ) );
} );
me.el.append ( html.join ( "" ) );
me._items = me._items.concat ( items );
me.items = me.items.concat ( me._jsonToWidget ( items ) );
me.doLayout ();
},
/**
* Remove a child component. If index is a number, remove the index-th component in items; if index is a String, remove the child component whose itemId equals index
* @method removeItem
* @param {string} index
*/
removeItem : function ( index ) {
var me = this;
var comp;
if ( FAPUI.isNumber ( index ) ) {
comp = $ ( me.el.children ( "div[index=\"" + index + "\"]" )[ 0 ] );
} else {
comp = $ ( me.el.children ( "div[itemId=\"" + index + "\"]" )[ 0 ] );
index = parseInt ( comp.attr ( "index" ) , 10 );
}
if ( comp[ 0 ] == null ) { // loose check also catches undefined when no matching element exists
return;
}
var siblings = comp.siblings ();
siblings.each ( function () {
var i = parseInt ( $ ( this ).attr ( "index" ) , 10 );
if ( i > index ) {
$ ( this ).attr ( "index", i - 1 );
}
} );
if ( me.activeIndex > index ) {
me.activeIndex = me.activeIndex - 1;
} else if ( me.activeIndex == index ) {
me.activeIndex = - 1;
}
comp.unbind ();
comp.remove ();
me.items.splice ( index, 1 );
me._items.splice ( index, 1 );
//Reset the content height after removal
me.setHeight ( me.height );
},
/**
* @access private
* @param {*} items Items to convert into widget components
*/
_jsonToWidget : function ( items ) {
items = items || this.items;
var newItems = [];
if ( items !== null && items.length > 0 ) {
$ ( items ).each ( function ( index ) {
var me = this;
var o = {};
FAPUI.apply ( o, me );
delete o.title;
o.isRender = false;
if ( me.isUI && me.isUI () ) {
newItems.push ( o );
} else {
var cmp = FAPUI.create ( me );
newItems.push ( cmp );
}
} );
}
return newItems;
},
/**
* Render the component
* @private
*/
_renderWidget : function ( index, contentArea ) {
if ( this.items && this.items[ index ] && ! this.items[ index ].isRender ) {
this.items[ index ].render ( contentArea );
this.items[ index ].isRender = true;
}
},
/**
*
*/
onDestroy : function () {
var me = this;
if ( me.items ) {
$ ( me.items ).each ( function () {
var that = this;
that.destroy ();
} );
}
me.callParent ();
}
}
} );
FAPUI.register ( "accordionLayout", FAPUI.Layout.AccordionLayout );
return FAPUI.Layout.AccordionLayout;
} ); | conditional_block |
||
stoopid.py | 4, 4])
self.conv2 = torch.nn.Conv2d(16, 32, [4, 4], [2, 2])
self.dense1 = torch.nn.Linear(self.final_flat, encoding_size)
self.dense2 = torch.nn.Linear(encoding_size, output_size)
def forward(self, visual_obs: torch.tensor):
visual_obs = visual_obs.permute(0, 3, 1, 2)
conv_1 = torch.relu(self.conv1(visual_obs))
conv_2 = torch.relu(self.conv2(conv_1))
hidden = self.dense1(conv_2.reshape([-1, self.final_flat]))
hidden = torch.relu(hidden)
hidden = self.dense2(hidden)
return hidden
@staticmethod
def conv_output_shape(
h_w: Tuple[int, int],
kernel_size: int = 1,
stride: int = 1,
pad: int = 0,
dilation: int = 1,
):
"""
Computes the height and width of the output of a convolution layer.
"""
h = floor(
((h_w[0] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1
)
w = floor(
((h_w[1] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1
)
return h, w
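# Worked example (kernel and stride values assumed for illustration):
# conv_output_shape((64, 84), kernel_size=8, stride=4) -> (15, 20)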
"""We will now create a few classes to help us store the data we will use to train the Q-Learning algorithm."""
import numpy as np
from typing import NamedTuple, List
class Experience(NamedTuple):
"""
An experience contains the data of one Agent transition.
- Observation
- Action
- Reward
- Done flag
- Next Observation
"""
obs: np.ndarray
action: np.ndarray
reward: float
done: bool
next_obs: np.ndarray
# A Trajectory is an ordered sequence of Experiences
Trajectory = List[Experience]
# A Buffer is an unordered list of Experiences from multiple Trajectories
Buffer = List[Experience]
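# A minimal sketch of one stored transition (observation shape assumed for illustration):
# Experience(obs=np.zeros((64, 84, 3), np.float32), action=np.array([0]), reward=0.0,
#            done=False, next_obs=np.zeros((64, 84, 3), np.float32))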
"""Now, we can create our trainer class. The role of this trainer is to collect data from the Environment according to a Policy, and then train the Q-Network with that data."""
from mlagents_envs.environment import ActionTuple, BaseEnv
from typing import Dict
import random
class Trainer:
@staticmethod
def generate_trajectories(
env: BaseEnv, q_net: VisualQNetwork, buffer_size: int, epsilon: float
):
"""
Given a Unity Environment and a Q-Network, this method will generate a
buffer of Experiences obtained by running the Environment with the Policy
derived from the Q-Network.
:param env: The UnityEnvironment used.
:param q_net: The Q-Network used to collect the data.
:param buffer_size: The minimum size of the buffer this method will return.
:param epsilon: Will add a random normal variable with standard deviation
epsilon to the value heads of the Q-Network to encourage exploration.
:returns: a Tuple containing the created buffer and the average cumulative reward
the Agents obtained.
"""
# Create an empty Buffer
buffer: Buffer = []
# Reset the environment
env.reset()
# Read and store the Behavior Name of the Environment
behavior_name = list(env.behavior_specs)[0]
# Read and store the Behavior Specs of the Environment
spec = env.behavior_specs[behavior_name]
# Create a Mapping from AgentId to Trajectories. This will help us create
# trajectories for each Agents
dict_trajectories_from_agent: Dict[int, Trajectory] = {}
# Create a Mapping from AgentId to the last observation of the Agent
dict_last_obs_from_agent: Dict[int, np.ndarray] = {}
# Create a Mapping from AgentId to the last action of the Agent
dict_last_action_from_agent: Dict[int, np.ndarray] = {}
# Create a Mapping from AgentId to cumulative reward (Only for reporting)
dict_cumulative_reward_from_agent: Dict[int, float] = {}
# Create a list to store the cumulative rewards obtained so far
cumulative_rewards: List[float] = []
while len(buffer) < buffer_size: # While not enough data in the buffer
# Get the Decision Steps and Terminal Steps of the Agents
decision_steps, terminal_steps = env.get_steps(behavior_name)
# For all Agents with a Terminal Step:
for agent_id_terminated in terminal_steps:
# Create its last experience (is last because the Agent terminated)
last_experience = Experience(
obs=dict_last_obs_from_agent[agent_id_terminated].copy(),
reward=terminal_steps[agent_id_terminated].reward,
done=not terminal_steps[agent_id_terminated].interrupted,
action=dict_last_action_from_agent[agent_id_terminated].copy(),
next_obs=terminal_steps[agent_id_terminated].obs[0],
)
# Clear its last observation and action (Since the trajectory is over)
dict_last_obs_from_agent.pop(agent_id_terminated)
dict_last_action_from_agent.pop(agent_id_terminated)
# Report the cumulative reward
cumulative_reward = (
dict_cumulative_reward_from_agent.pop(agent_id_terminated)
+ terminal_steps[agent_id_terminated].reward
)
cumulative_rewards.append(cumulative_reward)
# Add the Trajectory and the last experience to the buffer
buffer.extend(dict_trajectories_from_agent.pop(agent_id_terminated))
buffer.append(last_experience)
# For all Agents with a Decision Step:
for agent_id_decisions in decision_steps:
# If the Agent does not have a Trajectory, create an empty one
if agent_id_decisions not in dict_trajectories_from_agent:
dict_trajectories_from_agent[agent_id_decisions] = []
dict_cumulative_reward_from_agent[agent_id_decisions] = 0
# If the Agent requesting a decision has a "last observation"
if agent_id_decisions in dict_last_obs_from_agent:
# Create an Experience from the last observation and the Decision Step
exp = Experience(
obs=dict_last_obs_from_agent[agent_id_decisions].copy(),
reward=decision_steps[agent_id_decisions].reward,
done=False,
action=dict_last_action_from_agent[agent_id_decisions].copy(),
next_obs=decision_steps[agent_id_decisions].obs[0],
)
# Update the Trajectory of the Agent and its cumulative reward
dict_trajectories_from_agent[agent_id_decisions].append(exp)
dict_cumulative_reward_from_agent[agent_id_decisions] += (
decision_steps[agent_id_decisions].reward
)
# Store the observation as the new "last observation"
dict_last_obs_from_agent[agent_id_decisions] = (
decision_steps[agent_id_decisions].obs[0]
)
# Generate an action for all the Agents that requested a decision
# Compute the values for each action given the observation
actions_values = (
q_net(torch.from_numpy(decision_steps.obs[0])).detach().numpy()
)
# Pick the best action using argmax
print("ACTION VALS", actions_values)
actions_values += epsilon * (
np.random.randn(actions_values.shape[0], actions_values.shape[1])
).astype(np.float32)
actions = np.argmax(actions_values, axis=1)
actions.resize((len(decision_steps), 1))
# Store the action that was picked, it will be put in the trajectory later
for agent_index, agent_id in enumerate(decision_steps.agent_id):
dict_last_action_from_agent[agent_id] = actions[agent_index]
# Set the actions in the environment
# Unity Environments expect ActionTuple instances.
action_tuple = ActionTuple()
action_tuple.add_discrete(actions)
env.set_actions(behavior_name, action_tuple)
# Perform a step in the simulation
env.step()
return buffer, np.mean(cumulative_rewards)
@staticmethod
def update_q_net(
q_net: VisualQNetwork,
optimizer: torch.optim,
buffer: Buffer,
action_size: int
):
| done = torch.from_numpy(
np.array([ex.done for ex in batch], dtype=np.float32).reshape(-1, 1)
)
action = torch.from_numpy(np.stack([ex.action for ex in batch]))
next_obs = torch.from_numpy(np.stack([ex.next_obs for ex in batch]))
# Use the Bell | """
Performs an update of the Q-Network using the provided optimizer and buffer
"""
BATCH_SIZE = 1000
NUM_EPOCH = 3
GAMMA = 0.9
batch_size = min(len(buffer), BATCH_SIZE)
random.shuffle(buffer)
# Split the buffer into batches
batches = [
buffer[batch_size * start : batch_size * (start + 1)]
for start in range(int(len(buffer) / batch_size))
]
for _ in range(NUM_EPOCH):
for batch in batches:
# Create the Tensors that will be fed in the network
obs = torch.from_numpy(np.stack([ex.obs for ex in batch]))
reward = torch.from_numpy(
np.array([ex.reward for ex in batch], dtype=np.float32).reshape(-1, 1)
) | identifier_body |
stoopid.py | to a Policy, and then train the Q-Network with that data."""
from mlagents_envs.environment import ActionTuple, BaseEnv
from typing import Dict
import random
class Trainer:
@staticmethod
def generate_trajectories(
env: BaseEnv, q_net: VisualQNetwork, buffer_size: int, epsilon: float
):
"""
Given a Unity Environment and a Q-Network, this method will generate a
buffer of Experiences obtained by running the Environment with the Policy
derived from the Q-Network.
:param env: The UnityEnvironment used.
:param q_net: The Q-Network used to collect the data.
:param buffer_size: The minimum size of the buffer this method will return.
:param epsilon: Will add a random normal variable with standard deviation
epsilon to the value heads of the Q-Network to encourage exploration.
:returns: a Tuple containing the created buffer and the average cumulative reward
the Agents obtained.
"""
# Create an empty Buffer
buffer: Buffer = []
# Reset the environment
env.reset()
# Read and store the Behavior Name of the Environment
behavior_name = list(env.behavior_specs)[0]
# Read and store the Behavior Specs of the Environment
spec = env.behavior_specs[behavior_name]
# Create a Mapping from AgentId to Trajectories. This will help us create
# trajectories for each Agents
dict_trajectories_from_agent: Dict[int, Trajectory] = {}
# Create a Mapping from AgentId to the last observation of the Agent
dict_last_obs_from_agent: Dict[int, np.ndarray] = {}
# Create a Mapping from AgentId to the last action of the Agent
dict_last_action_from_agent: Dict[int, np.ndarray] = {}
# Create a Mapping from AgentId to cumulative reward (Only for reporting)
dict_cumulative_reward_from_agent: Dict[int, float] = {}
# Create a list to store the cumulative rewards obtained so far
cumulative_rewards: List[float] = []
while len(buffer) < buffer_size: # While not enough data in the buffer
# Get the Decision Steps and Terminal Steps of the Agents
decision_steps, terminal_steps = env.get_steps(behavior_name)
# For all Agents with a Terminal Step:
for agent_id_terminated in terminal_steps:
# Create its last experience (is last because the Agent terminated)
last_experience = Experience(
obs=dict_last_obs_from_agent[agent_id_terminated].copy(),
reward=terminal_steps[agent_id_terminated].reward,
done=not terminal_steps[agent_id_terminated].interrupted,
action=dict_last_action_from_agent[agent_id_terminated].copy(),
next_obs=terminal_steps[agent_id_terminated].obs[0],
)
# Clear its last observation and action (Since the trajectory is over)
dict_last_obs_from_agent.pop(agent_id_terminated)
dict_last_action_from_agent.pop(agent_id_terminated)
# Report the cumulative reward
cumulative_reward = (
dict_cumulative_reward_from_agent.pop(agent_id_terminated)
+ terminal_steps[agent_id_terminated].reward
)
cumulative_rewards.append(cumulative_reward)
# Add the Trajectory and the last experience to the buffer
buffer.extend(dict_trajectories_from_agent.pop(agent_id_terminated))
buffer.append(last_experience)
# For all Agents with a Decision Step:
for agent_id_decisions in decision_steps:
# If the Agent does not have a Trajectory, create an empty one
if agent_id_decisions not in dict_trajectories_from_agent:
dict_trajectories_from_agent[agent_id_decisions] = []
dict_cumulative_reward_from_agent[agent_id_decisions] = 0
# If the Agent requesting a decision has a "last observation"
if agent_id_decisions in dict_last_obs_from_agent:
# Create an Experience from the last observation and the Decision Step
exp = Experience(
obs=dict_last_obs_from_agent[agent_id_decisions].copy(),
reward=decision_steps[agent_id_decisions].reward,
done=False,
action=dict_last_action_from_agent[agent_id_decisions].copy(),
next_obs=decision_steps[agent_id_decisions].obs[0],
)
# Update the Trajectory of the Agent and its cumulative reward
dict_trajectories_from_agent[agent_id_decisions].append(exp)
dict_cumulative_reward_from_agent[agent_id_decisions] += (
decision_steps[agent_id_decisions].reward
)
# Store the observation as the new "last observation"
dict_last_obs_from_agent[agent_id_decisions] = (
decision_steps[agent_id_decisions].obs[0]
)
# Generate an action for all the Agents that requested a decision
# Compute the values for each action given the observation
actions_values = (
q_net(torch.from_numpy(decision_steps.obs[0])).detach().numpy()
)
# Pick the best action using argmax
print("ACTION VALS", actions_values)
actions_values += epsilon * (
np.random.randn(actions_values.shape[0], actions_values.shape[1])
).astype(np.float32)
actions = np.argmax(actions_values, axis=1)
actions.resize((len(decision_steps), 1))
# Store the action that was picked, it will be put in the trajectory later
for agent_index, agent_id in enumerate(decision_steps.agent_id):
dict_last_action_from_agent[agent_id] = actions[agent_index]
# Set the actions in the environment
# Unity Environments expect ActionTuple instances.
action_tuple = ActionTuple()
action_tuple.add_discrete(actions)
env.set_actions(behavior_name, action_tuple)
# Perform a step in the simulation
env.step()
return buffer, np.mean(cumulative_rewards)
@staticmethod
def update_q_net(
q_net: VisualQNetwork,
optimizer: torch.optim,
buffer: Buffer,
action_size: int
):
"""
Performs an update of the Q-Network using the provided optimizer and buffer
"""
BATCH_SIZE = 1000
NUM_EPOCH = 3
GAMMA = 0.9
batch_size = min(len(buffer), BATCH_SIZE)
random.shuffle(buffer)
# Split the buffer into batches
batches = [
buffer[batch_size * start : batch_size * (start + 1)]
for start in range(int(len(buffer) / batch_size))
]
for _ in range(NUM_EPOCH):
for batch in batches:
# Create the Tensors that will be fed in the network
obs = torch.from_numpy(np.stack([ex.obs for ex in batch]))
reward = torch.from_numpy(
np.array([ex.reward for ex in batch], dtype=np.float32).reshape(-1, 1)
)
done = torch.from_numpy(
np.array([ex.done for ex in batch], dtype=np.float32).reshape(-1, 1)
)
action = torch.from_numpy(np.stack([ex.action for ex in batch]))
next_obs = torch.from_numpy(np.stack([ex.next_obs for ex in batch]))
# Use the Bellman equation to update the Q-Network
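# One-step Q-learning target: target = reward + (1 - done) * GAMMA * max_a' Q(next_obs, a')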
target = (
reward
+ (1.0 - done)
* GAMMA
* torch.max(q_net(next_obs).detach(), dim=1, keepdim=True).values
)
mask = torch.zeros((len(batch), action_size))
mask.scatter_(1, action, 1)
prediction = torch.sum(q_net(obs) * mask, dim=1, keepdim=True)
criterion = torch.nn.MSELoss()
loss = criterion(prediction, target)
# Perform the backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
"""### Run Training"""
# Commented out IPython magic to ensure Python compatibility.
# -----------------
# This code is used to close an env that might not have been closed before
try:
env.close()
except:
pass
# -----------------
from mlagents_envs.registry import default_registry
from mlagents_envs.environment import UnityEnvironment
import matplotlib.pyplot as plt
# %matplotlib inline
# Create the GridWorld Environment from the registry
env = default_registry["GridWorld"].make()
print("GridWorld environment created.")
# Create a new Q-Network.
qnet = VisualQNetwork((64, 84, 3), 126, 5)
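# Constructor arguments as used here: a (64, 84, 3) visual observation shape, an
# encoding size of 126, and 5 output Q-values (one per discrete GridWorld action).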
experiences: Buffer = []
optim = torch.optim.Adam(qnet.parameters(), lr= 0.001)
cumulative_rewards: List[float] = []
# The number of training steps that will be performed
NUM_TRAINING_STEPS = 70
# The number of experiences to collect per training step
NUM_NEW_EXP = 1000
# The maximum size of the Buffer
BUFFER_SIZE = 10000
for n in range(NUM_TRAINING_STEPS):
| new_exp,_ = Trainer.generate_trajectories(env, qnet, NUM_NEW_EXP, epsilon=0.1)
random.shuffle(experiences)
if len(experiences) > BUFFER_SIZE:
experiences = experiences[:BUFFER_SIZE]
experiences.extend(new_exp)
Trainer.update_q_net(qnet, optim, experiences, 5)
_, rewards = Trainer.generate_trajectories(env, qnet, 100, epsilon=0)
cumulative_rewards.append(rewards)
print("Training step ", n+1, "\treward ", rewards) | conditional_block |
|
stoopid.py | 4, 4])
self.conv2 = torch.nn.Conv2d(16, 32, [4, 4], [2, 2])
self.dense1 = torch.nn.Linear(self.final_flat, encoding_size)
self.dense2 = torch.nn.Linear(encoding_size, output_size)
def forward(self, visual_obs: torch.tensor):
visual_obs = visual_obs.permute(0, 3, 1, 2)
conv_1 = torch.relu(self.conv1(visual_obs))
conv_2 = torch.relu(self.conv2(conv_1))
hidden = self.dense1(conv_2.reshape([-1, self.final_flat]))
hidden = torch.relu(hidden)
hidden = self.dense2(hidden)
return hidden
@staticmethod
def conv_output_shape(
h_w: Tuple[int, int],
kernel_size: int = 1,
stride: int = 1,
pad: int = 0,
dilation: int = 1,
):
"""
Computes the height and width of the output of a convolution layer.
"""
h = floor(
((h_w[0] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1
)
w = floor(
((h_w[1] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1
)
return h, w
"""We will now create a few classes to help us store the data we will use to train the Q-Learning algorithm."""
import numpy as np
from typing import NamedTuple, List
class Experience(NamedTuple):
"""
An experience contains the data of one Agent transition.
- Observation
- Action
- Reward
- Done flag
- Next Observation
"""
obs: np.ndarray
action: np.ndarray
reward: float
done: bool
next_obs: np.ndarray
# A Trajectory is an ordered sequence of Experiences
Trajectory = List[Experience]
# A Buffer is an unordered list of Experiences from multiple Trajectories
Buffer = List[Experience]
"""Now, we can create our trainer class. The role of this trainer is to collect data from the Environment according to a Policy, and then train the Q-Network with that data."""
from mlagents_envs.environment import ActionTuple, BaseEnv
from typing import Dict
import random
class Trainer:
@staticmethod
def | (
env: BaseEnv, q_net: VisualQNetwork, buffer_size: int, epsilon: float
):
"""
Given a Unity Environment and a Q-Network, this method will generate a
buffer of Experiences obtained by running the Environment with the Policy
derived from the Q-Network.
:param env: The UnityEnvironment used.
:param q_net: The Q-Network used to collect the data.
:param buffer_size: The minimum size of the buffer this method will return.
:param epsilon: Will add a random normal variable with standard deviation
epsilon to the value heads of the Q-Network to encourage exploration.
:returns: a Tuple containing the created buffer and the average cumulative reward
the Agents obtained.
"""
# Create an empty Buffer
buffer: Buffer = []
# Reset the environment
env.reset()
# Read and store the Behavior Name of the Environment
behavior_name = list(env.behavior_specs)[0]
# Read and store the Behavior Specs of the Environment
spec = env.behavior_specs[behavior_name]
# Create a Mapping from AgentId to Trajectories. This will help us create
# trajectories for each Agents
dict_trajectories_from_agent: Dict[int, Trajectory] = {}
# Create a Mapping from AgentId to the last observation of the Agent
dict_last_obs_from_agent: Dict[int, np.ndarray] = {}
# Create a Mapping from AgentId to the last action of the Agent
dict_last_action_from_agent: Dict[int, np.ndarray] = {}
# Create a Mapping from AgentId to cumulative reward (Only for reporting)
dict_cumulative_reward_from_agent: Dict[int, float] = {}
# Create a list to store the cumulative rewards obtained so far
cumulative_rewards: List[float] = []
while len(buffer) < buffer_size: # While not enough data in the buffer
# Get the Decision Steps and Terminal Steps of the Agents
decision_steps, terminal_steps = env.get_steps(behavior_name)
# For all Agents with a Terminal Step:
for agent_id_terminated in terminal_steps:
# Create its last experience (is last because the Agent terminated)
last_experience = Experience(
obs=dict_last_obs_from_agent[agent_id_terminated].copy(),
reward=terminal_steps[agent_id_terminated].reward,
done=not terminal_steps[agent_id_terminated].interrupted,
action=dict_last_action_from_agent[agent_id_terminated].copy(),
next_obs=terminal_steps[agent_id_terminated].obs[0],
)
# Clear its last observation and action (Since the trajectory is over)
dict_last_obs_from_agent.pop(agent_id_terminated)
dict_last_action_from_agent.pop(agent_id_terminated)
# Report the cumulative reward
cumulative_reward = (
dict_cumulative_reward_from_agent.pop(agent_id_terminated)
+ terminal_steps[agent_id_terminated].reward
)
cumulative_rewards.append(cumulative_reward)
# Add the Trajectory and the last experience to the buffer
buffer.extend(dict_trajectories_from_agent.pop(agent_id_terminated))
buffer.append(last_experience)
# For all Agents with a Decision Step:
for agent_id_decisions in decision_steps:
# If the Agent does not have a Trajectory, create an empty one
if agent_id_decisions not in dict_trajectories_from_agent:
dict_trajectories_from_agent[agent_id_decisions] = []
dict_cumulative_reward_from_agent[agent_id_decisions] = 0
# If the Agent requesting a decision has a "last observation"
if agent_id_decisions in dict_last_obs_from_agent:
# Create an Experience from the last observation and the Decision Step
exp = Experience(
obs=dict_last_obs_from_agent[agent_id_decisions].copy(),
reward=decision_steps[agent_id_decisions].reward,
done=False,
action=dict_last_action_from_agent[agent_id_decisions].copy(),
next_obs=decision_steps[agent_id_decisions].obs[0],
)
# Update the Trajectory of the Agent and its cumulative reward
dict_trajectories_from_agent[agent_id_decisions].append(exp)
dict_cumulative_reward_from_agent[agent_id_decisions] += (
decision_steps[agent_id_decisions].reward
)
# Store the observation as the new "last observation"
dict_last_obs_from_agent[agent_id_decisions] = (
decision_steps[agent_id_decisions].obs[0]
)
# Generate an action for all the Agents that requested a decision
# Compute the values for each action given the observation
actions_values = (
q_net(torch.from_numpy(decision_steps.obs[0])).detach().numpy()
)
# Pick the best action using argmax
print("ACTION VALS", actions_values)
actions_values += epsilon * (
np.random.randn(actions_values.shape[0], actions_values.shape[1])
).astype(np.float32)
actions = np.argmax(actions_values, axis=1)
actions.resize((len(decision_steps), 1))
# Store the action that was picked, it will be put in the trajectory later
for agent_index, agent_id in enumerate(decision_steps.agent_id):
dict_last_action_from_agent[agent_id] = actions[agent_index]
# Set the actions in the environment
# Unity Environments expect ActionTuple instances.
action_tuple = ActionTuple()
action_tuple.add_discrete(actions)
env.set_actions(behavior_name, action_tuple)
# Perform a step in the simulation
env.step()
return buffer, np.mean(cumulative_rewards)
@staticmethod
def update_q_net(
q_net: VisualQNetwork,
optimizer: torch.optim,
buffer: Buffer,
action_size: int
):
"""
Performs an update of the Q-Network using the provided optimizer and buffer
"""
BATCH_SIZE = 1000
NUM_EPOCH = 3
GAMMA = 0.9
batch_size = min(len(buffer), BATCH_SIZE)
random.shuffle(buffer)
# Split the buffer into batches
batches = [
buffer[batch_size * start : batch_size * (start + 1)]
for start in range(int(len(buffer) / batch_size))
]
for _ in range(NUM_EPOCH):
for batch in batches:
# Create the Tensors that will be fed in the network
obs = torch.from_numpy(np.stack([ex.obs for ex in batch]))
reward = torch.from_numpy(
np.array([ex.reward for ex in batch], dtype=np.float32).reshape(-1, 1)
)
done = torch.from_numpy(
np.array([ex.done for ex in batch], dtype=np.float32).reshape(-1, 1)
)
action = torch.from_numpy(np.stack([ex.action for ex in batch]))
next_obs = torch.from_numpy(np.stack([ex.next_obs for ex in batch]))
# Use the | generate_trajectories | identifier_name |
stoopid.py | , 4])
self.conv2 = torch.nn.Conv2d(16, 32, [4, 4], [2, 2])
self.dense1 = torch.nn.Linear(self.final_flat, encoding_size)
self.dense2 = torch.nn.Linear(encoding_size, output_size)
def forward(self, visual_obs: torch.tensor):
visual_obs = visual_obs.permute(0, 3, 1, 2)
conv_1 = torch.relu(self.conv1(visual_obs))
conv_2 = torch.relu(self.conv2(conv_1))
hidden = self.dense1(conv_2.reshape([-1, self.final_flat]))
hidden = torch.relu(hidden)
hidden = self.dense2(hidden)
return hidden
@staticmethod
def conv_output_shape(
h_w: Tuple[int, int],
kernel_size: int = 1,
stride: int = 1,
pad: int = 0,
dilation: int = 1,
):
"""
Computes the height and width of the output of a convolution layer.
"""
h = floor(
((h_w[0] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1
)
w = floor(
((h_w[1] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1
)
return h, w
"""We will now create a few classes to help us store the data we will use to train the Q-Learning algorithm."""
import numpy as np
from typing import NamedTuple, List
class Experience(NamedTuple):
"""
An experience contains the data of one Agent transition.
- Observation
- Action
- Reward
- Done flag
- Next Observation
"""
obs: np.ndarray
action: np.ndarray
reward: float
done: bool
next_obs: np.ndarray
# A Trajectory is an ordered sequence of Experiences
Trajectory = List[Experience]
# A Buffer is an unordered list of Experiences from multiple Trajectories
Buffer = List[Experience]
"""Now, we can create our trainer class. The role of this trainer is to collect data from the Environment according to a Policy, and then train the Q-Network with that data."""
from mlagents_envs.environment import ActionTuple, BaseEnv
from typing import Dict
import random
class Trainer:
@staticmethod
def generate_trajectories(
env: BaseEnv, q_net: VisualQNetwork, buffer_size: int, epsilon: float
):
"""
Given a Unity Environment and a Q-Network, this method will generate a
buffer of Experiences obtained by running the Environment with the Policy
derived from the Q-Network.
:param env: The UnityEnvironment used.
:param q_net: The Q-Network used to collect the data.
:param buffer_size: The minimum size of the buffer this method will return.
:param epsilon: Will add a random normal variable with standard deviation
epsilon to the value heads of the Q-Network to encourage exploration.
:returns: a Tuple containing the created buffer and the average cumulative reward
the Agents obtained.
"""
# Create an empty Buffer
buffer: Buffer = []
# Reset the environment
env.reset()
# Read and store the Behavior Name of the Environment
behavior_name = list(env.behavior_specs)[0]
# Read and store the Behavior Specs of the Environment
spec = env.behavior_specs[behavior_name]
# Create a Mapping from AgentId to Trajectories. This will help us create
# trajectories for each Agents
dict_trajectories_from_agent: Dict[int, Trajectory] = {}
# Create a Mapping from AgentId to the last observation of the Agent
dict_last_obs_from_agent: Dict[int, np.ndarray] = {}
# Create a Mapping from AgentId to the last action of the Agent
dict_last_action_from_agent: Dict[int, np.ndarray] = {}
# Create a Mapping from AgentId to cumulative reward (Only for reporting)
dict_cumulative_reward_from_agent: Dict[int, float] = {}
# Create a list to store the cumulative rewards obtained so far
cumulative_rewards: List[float] = []
while len(buffer) < buffer_size: # While not enough data in the buffer
# Get the Decision Steps and Terminal Steps of the Agents
decision_steps, terminal_steps = env.get_steps(behavior_name) | for agent_id_terminated in terminal_steps:
# Create its last experience (is last because the Agent terminated)
last_experience = Experience(
obs=dict_last_obs_from_agent[agent_id_terminated].copy(),
reward=terminal_steps[agent_id_terminated].reward,
done=not terminal_steps[agent_id_terminated].interrupted,
action=dict_last_action_from_agent[agent_id_terminated].copy(),
next_obs=terminal_steps[agent_id_terminated].obs[0],
)
# Clear its last observation and action (Since the trajectory is over)
dict_last_obs_from_agent.pop(agent_id_terminated)
dict_last_action_from_agent.pop(agent_id_terminated)
# Report the cumulative reward
cumulative_reward = (
dict_cumulative_reward_from_agent.pop(agent_id_terminated)
+ terminal_steps[agent_id_terminated].reward
)
cumulative_rewards.append(cumulative_reward)
# Add the Trajectory and the last experience to the buffer
buffer.extend(dict_trajectories_from_agent.pop(agent_id_terminated))
buffer.append(last_experience)
# For all Agents with a Decision Step:
for agent_id_decisions in decision_steps:
# If the Agent does not have a Trajectory, create an empty one
if agent_id_decisions not in dict_trajectories_from_agent:
dict_trajectories_from_agent[agent_id_decisions] = []
dict_cumulative_reward_from_agent[agent_id_decisions] = 0
# If the Agent requesting a decision has a "last observation"
if agent_id_decisions in dict_last_obs_from_agent:
# Create an Experience from the last observation and the Decision Step
exp = Experience(
obs=dict_last_obs_from_agent[agent_id_decisions].copy(),
reward=decision_steps[agent_id_decisions].reward,
done=False,
action=dict_last_action_from_agent[agent_id_decisions].copy(),
next_obs=decision_steps[agent_id_decisions].obs[0],
)
# Update the Trajectory of the Agent and its cumulative reward
dict_trajectories_from_agent[agent_id_decisions].append(exp)
dict_cumulative_reward_from_agent[agent_id_decisions] += (
decision_steps[agent_id_decisions].reward
)
# Store the observation as the new "last observation"
dict_last_obs_from_agent[agent_id_decisions] = (
decision_steps[agent_id_decisions].obs[0]
)
# Generate an action for all the Agents that requested a decision
# Compute the values for each action given the observation
actions_values = (
q_net(torch.from_numpy(decision_steps.obs[0])).detach().numpy()
)
# Pick the best action using argmax
print("ACTION VALS", actions_values)
actions_values += epsilon * (
np.random.randn(actions_values.shape[0], actions_values.shape[1])
).astype(np.float32)
actions = np.argmax(actions_values, axis=1)
actions.resize((len(decision_steps), 1))
# Store the action that was picked, it will be put in the trajectory later
for agent_index, agent_id in enumerate(decision_steps.agent_id):
dict_last_action_from_agent[agent_id] = actions[agent_index]
# Set the actions in the environment
# Unity Environments expect ActionTuple instances.
action_tuple = ActionTuple()
action_tuple.add_discrete(actions)
env.set_actions(behavior_name, action_tuple)
# Perform a step in the simulation
env.step()
return buffer, np.mean(cumulative_rewards)
@staticmethod
def update_q_net(
q_net: VisualQNetwork,
optimizer: torch.optim,
buffer: Buffer,
action_size: int
):
"""
Performs an update of the Q-Network using the provided optimizer and buffer
"""
BATCH_SIZE = 1000
NUM_EPOCH = 3
GAMMA = 0.9
batch_size = min(len(buffer), BATCH_SIZE)
random.shuffle(buffer)
# Split the buffer into batches
batches = [
buffer[batch_size * start : batch_size * (start + 1)]
for start in range(int(len(buffer) / batch_size))
]
for _ in range(NUM_EPOCH):
for batch in batches:
# Create the Tensors that will be fed in the network
obs = torch.from_numpy(np.stack([ex.obs for ex in batch]))
reward = torch.from_numpy(
np.array([ex.reward for ex in batch], dtype=np.float32).reshape(-1, 1)
)
done = torch.from_numpy(
np.array([ex.done for ex in batch], dtype=np.float32).reshape(-1, 1)
)
action = torch.from_numpy(np.stack([ex.action for ex in batch]))
next_obs = torch.from_numpy(np.stack([ex.next_obs for ex in batch]))
# Use the Bell |
# For all Agents with a Terminal Step: | random_line_split |
spider.py | ", norm_to="PM_PON")
########################################################################################
# .. seealso:: `Normalisation <../geochem/normalization.html>`__
#
########################################################################################
# Basic spider plots are straightforward to produce:
#
import pyrolite.plot
ax = normdf.pyroplot.spider(color="0.5", alpha=0.5, unity_line=True, figsize=(10, 4))
ax.set_ylabel("X / $X_{Primitive Mantle}$")
plt.show()
########################################################################################
# Index Ordering
# --------------
#
# The default ordering here follows that of the dataframe columns, but we typically
# want to reorder these based on some physical ordering. A :code:`index_order` keyword
# argument can be used to supply a function which will reorder the elements before
# plotting. Here we order the elements by relative incompatibility (using
# :func:`pyrolite.geochem.ind.by_incompatibility` behind the scenes):
from pyrolite.geochem.ind import by_incompatibility
ax = normdf.pyroplot.spider(
color="k",
alpha=0.1,
unity_line=True,
index_order="incompatibility",
figsize=(10, 4),
)
ax.set_ylabel("X / $X_{Primitive Mantle}$")
plt.show()
########################################################################################
# Similarly, you can also rearrange elements to be in order of atomic number:
#
from pyrolite.geochem.ind import by_number
ax = normdf.pyroplot.spider(
color="k",
alpha=0.1,
unity_line=True,
index_order="number",
figsize=(10, 4),
)
ax.set_ylabel("X / $X_{Primitive Mantle}$")
plt.show()
########################################################################################
# Color Mapping
# -------------
#
# We can also specify either continuous or categorical values to use for the colors,
# and even map categorical values to specific colors where useful:
#
fig, ax = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(10, 8))
ax[0].set_title("Continuous Values")
normdf.pyroplot.spider(
ax=ax[0],
unity_line=True,
index_order="incompatibility",
cmap="plasma",
alpha=0.1,
color=np.log(normdf["Li"]), # a range of continous values
)
ax[1].set_title("Boolean/Categorical Values")
normdf.pyroplot.spider(
ax=ax[1],
alpha=0.1,
unity_line=True,
index_order="incompatibility",
color=normdf["Cs"] > 3.5, # a boolean/categorical set of values
)
ax[2].set_title("Boolean/Categorical Values with Color Mapping")
normdf.pyroplot.spider(
ax=ax[2],
alpha=0.1,
unity_line=True,
index_order="incompatibility",
color=normdf["Cs"] > 3.5, # a boolean/categorical set of values
color_mappings={ # mapping the boolean values to specific colors
"color": {True: "green", False: "purple"}
},
)
[a.set_ylabel("X / $X_{Primitive Mantle}$") for a in ax]
plt.show()
########################################################################################
# Legend Proxies for Spiderplots
# ------------------------------
#
# While it's relatively straightforward to style spider plots as you wish, for the
# moment it can be a bit more involved to create a legend for these styles. Where you're
# creating styles based on a set of categories or labels, a few of pyrolite's utility
# functions might come in handy. Below we'll go through such an example, after creating
# a few labels (here based on a binning of the Cs abundance):
#
labels = pd.cut(
np.log(normdf["Cs"]), bins=4, labels=["Low", "Mid. Low", "Mid High", "High"]
)
pd.unique(labels)
########################################################################################
# Below we'll use :func:`~pyrolite.plot.color.process_color` and
# :func:`~pyrolite.util.plot.legend.proxy_line` to construct a set of legend proxies.
# Note that we need to pass the same configuration to both
# :func:`~pyrolite.plot.pyroplot.spider` and :func:`~pyrolite.plot.color.process_color`
# in order to get the same results, and that the order of labels in the legend
# will depend on which labels appear first in your dataframe or series (and hence the
# ordering of the unique values which are returned).
#
from pyrolite.plot.color import process_color
from pyrolite.util.plot.legend import proxy_line
ax = normdf.pyroplot.spider(
unity_line=True,
index_order="incompatibility",
color=labels, # a categorical set of values
cmap="Paired",
alpha=0.5,
figsize=(11, 4),
)
legend_labels = pd.unique(labels) # process_color uses this behind the scenes
proxy_colors = process_color(color=legend_labels, cmap="Paired", alpha=0.5)["c"]
legend_proxies = [proxy_line(color=c, marker="D") for c in proxy_colors]
ax.legend(legend_proxies, legend_labels)
plt.show()
########################################################################################
# If the specific order of the labels in your legend is important or you only want to
# include some of the legend entries for some reason, you could use a dictionary to
# store the key-value pairs and remap the order of the legend entries manually:
#
proxies = {
label: proxy_line(color=c, marker="D")
for label, c in zip(legend_labels, proxy_colors)
}
ordered_labels = ["High", "Mid High", "Mid. Low", "Low"]
ax.legend([proxies[l] for l in ordered_labels], ordered_labels)
plt.show()
########################################################################################
# Split Configuration
# -------------------
#
# If you have potential conflicts between desired configurations for the lines and
# markers of your plots, you can explicitly separate the configuration using the
# :code:`scatter_kw` and :code:`line_kw` keyword arguments:
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True, figsize=(10, 4))
ax.set_title("Split Configuration")
normdf.pyroplot.spider(
ax=ax,
unity_line=True,
index_order="incompatibility",
scatter_kw=dict(cmap="magma_r", color=np.log(normdf["Li"])),
line_kw=dict(
color=normdf["Cs"] > 5,
color_mappings={"color": {True: "green", False: "purple"}},
),
alpha=0.2, # common alpha config between lines and markers
s=25, # argument for scatter which won't be passed to lines
)
plt.show()
########################################################################################
# Filled Ranges
# -------------
#
# The spiderplot can be extended to provide visualisations of ranges and density via the
# various modes. We could now plot the range of compositions as a filled range:
#
ax = normdf.pyroplot.spider(
mode="fill",
color="green",
alpha=0.5,
unity_line=True,
index_order="incompatibility",
figsize=(10, 4),
)
ax.set_ylabel("X / $X_{Primitive Mantle}$")
plt.show()
########################################################################################
# Spider Density Plots
# --------------------
#
# Alternatively, we can plot a conditional density spider plot:
#
fig, ax = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(10, 6))
normdf.pyroplot.spider(
ax=ax[0], color="k", alpha=0.05, unity_line=True, index_order=by_incompatibility
)
normdf.pyroplot.spider(
ax=ax[1],
mode="binkde",
vmin=0.05, # 95th percentile,
resolution=10,
unity_line=True,
index_order="incompatibility",
)
[a.set_ylabel("X / $X_{Primitive Mantle}$") for a in ax]
plt.show()
########################################################################################
# We can now assemble a more complete comparison of some of the conditional density
# modes for spider plots:
#
modes = [
("plot", "plot", [], dict(color="k", alpha=0.01)),
("fill", "fill", [], dict(color="k", alpha=0.5)),
("binkde", "binkde", [], dict(resolution=5)),
(
"binkde",
"binkde contours specified",
[],
dict(contours=[0.95], resolution=5), # 95th percentile contour
),
("histogram", "histogram", [], dict(resolution=5, bins=30)),
]
########################################################################################
down, across = len(modes), 1
fig, ax = plt.subplots(
down, across, sharey=True, sharex=True, figsize=(across * 8, 2 * down)
)
[a.set_ylabel("X / $X_{Primitive Mantle}$") for a in ax]
for a, (m, name, args, kwargs) in zip(ax, modes):
| a.annotate( # label the axes rows
"Mode: {}".format(name),
xy=(0.1, 1.05),
xycoords=a.transAxes,
fontsize=8,
ha="left",
va="bottom",
) | conditional_block |
|
spider.py | import pandas as pd
# sphinx_gallery_thumbnail_number = 4
########################################################################################
# Here we'll set up an example which uses EMORB as a starting point. Typically we'll
# normalise trace element compositions to a reference composition
# to be able to link the diagram to 'relative enrichment' occurring during geological
# processes, so here we're normalising to a Primitive Mantle composition first.
# We're here taking this normalised composition and adding some noise in log-space to
# generate multiple compositions about this mean (i.e. a compositional distribution).
# For simplicity, this is handled by
# :func:`~pyrolite.util.synthetic.example_spider_data`:
#
from pyrolite.util.synthetic import example_spider_data
normdf = example_spider_data(start="EMORB_SM89", norm_to="PM_PON")
########################################################################################
# .. seealso:: `Normalisation <../geochem/normalization.html>`__
#
########################################################################################
# Basic spider plots are straightforward to produce:
#
import pyrolite.plot
ax = normdf.pyroplot.spider(color="0.5", alpha=0.5, unity_line=True, figsize=(10, 4))
ax.set_ylabel("X / $X_{Primitive Mantle}$")
plt.show()
########################################################################################
# Index Ordering
# --------------
#
# The default ordering here follows that of the dataframe columns, but we typically
# want to reorder these based on some physical ordering. A :code:`index_order` keyword
# argument can be used to supply a function which will reorder the elements before
# plotting. Here we order the elements by relative incompatibility (using
# :func:`pyrolite.geochem.ind.by_incompatibility` behind the scenes):
from pyrolite.geochem.ind import by_incompatibility
ax = normdf.pyroplot.spider(
color="k",
alpha=0.1,
unity_line=True,
index_order="incompatibility",
figsize=(10, 4),
)
ax.set_ylabel("X / $X_{Primitive Mantle}$")
plt.show()
########################################################################################
# Similarly, you can also rearrange elements to be in order of atomic number:
#
from pyrolite.geochem.ind import by_number
ax = normdf.pyroplot.spider(
color="k",
alpha=0.1,
unity_line=True,
index_order="number",
figsize=(10, 4),
)
ax.set_ylabel("X / $X_{Primitive Mantle}$")
plt.show()
########################################################################################
# Color Mapping
# -------------
#
# We can also specify either continuous or categorical values to use for the colors,
# and even map categorical values to specific colors where useful:
#
fig, ax = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(10, 8))
ax[0].set_title("Continuous Values")
normdf.pyroplot.spider(
ax=ax[0],
unity_line=True,
index_order="incompatibility",
cmap="plasma",
alpha=0.1,
    color=np.log(normdf["Li"]),  # a range of continuous values
)
ax[1].set_title("Boolean/Categorical Values")
normdf.pyroplot.spider(
ax=ax[1],
alpha=0.1,
unity_line=True,
index_order="incompatibility",
color=normdf["Cs"] > 3.5, # a boolean/categorical set of values
)
ax[2].set_title("Boolean/Categorical Values with Color Mapping")
normdf.pyroplot.spider(
ax=ax[2],
alpha=0.1,
unity_line=True,
index_order="incompatibility",
color=normdf["Cs"] > 3.5, # a boolean/categorical set of values
color_mappings={ # mapping the boolean values to specific colors
"color": {True: "green", False: "purple"}
},
)
[a.set_ylabel("X / $X_{Primitive Mantle}$") for a in ax]
plt.show()
########################################################################################
# Legend Proxies for Spiderplots
# ------------------------------
#
# While it's relatively straightforward to style spider plots as you wish, for the
# moment it can be a bit more involved to create a legend for these styles. Where you're
# creating styles based on a set of categories or labels, a few of pyrolite's utility
# functions might come in handy. Below we'll go through such an example, after creating
# a few labels (here based on a binning of the Cs abundance):
#
labels = pd.cut(
np.log(normdf["Cs"]), bins=4, labels=["Low", "Mid. Low", "Mid High", "High"]
)
pd.unique(labels)
########################################################################################
# Below we'll use :func:`~pyrolite.plot.color.process_color` and
# :func:`~pyrolite.util.plot.legend.proxy_line` to construct a set of legend proxies.
# Note that we need to pass the same configuration to both
# :func:`~pyrolite.plot.pyroplot.spider` and :func:`~pyrolite.plot.color.process_color`
# in order to get the same results, and that the order of labels in the legend
# will depend on which labels appear first in your dataframe or series (and hence the
# ordering of the unique values which are returned).
#
from pyrolite.plot.color import process_color
from pyrolite.util.plot.legend import proxy_line
ax = normdf.pyroplot.spider(
unity_line=True,
index_order="incompatibility",
color=labels, # a categorical set of values
cmap="Paired",
alpha=0.5,
figsize=(11, 4),
)
legend_labels = pd.unique(labels) # process_color uses this behind the scenes
proxy_colors = process_color(color=legend_labels, cmap="Paired", alpha=0.5)["c"]
legend_proxies = [proxy_line(color=c, marker="D") for c in proxy_colors]
ax.legend(legend_proxies, legend_labels)
plt.show()
########################################################################################
# If the specific order of the labels in your legend is important or you only want to
# include some of the legend entries for some reason, you could use a dictionary to
# store the key-value pairs and remap the order of the legend entries manually:
#
proxies = {
label: proxy_line(color=c, marker="D")
for label, c in zip(legend_labels, proxy_colors)
}
ordered_labels = ["High", "Mid High", "Mid. Low", "Low"]
ax.legend([proxies[l] for l in ordered_labels], ordered_labels)
plt.show()
########################################################################################
# Split Configuration
# -------------------
#
# If you have potential conflicts between desired configurations for the lines and
# markers of your plots, you can explicitly separate the configuration using the
# :code:`scatter_kw` and :code:`line_kw` keyword arguments:
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True, figsize=(10, 4))
ax.set_title("Split Configuration")
normdf.pyroplot.spider(
ax=ax,
unity_line=True,
index_order="incompatibility",
scatter_kw=dict(cmap="magma_r", color=np.log(normdf["Li"])),
line_kw=dict(
color=normdf["Cs"] > 5,
color_mappings={"color": {True: "green", False: "purple"}},
),
alpha=0.2, # common alpha config between lines and markers
s=25, # argument for scatter which won't be passed to lines
)
plt.show()
########################################################################################
# Filled Ranges
# -------------
#
# The spiderplot can be extended to provide visualisations of ranges and density via the
# various modes. We could now plot the range of compositions as a filled range:
#
ax = normdf.pyroplot.spider(
mode="fill",
color="green",
alpha=0.5,
unity_line=True,
index_order="incompatibility",
figsize=(10, 4),
)
ax.set_ylabel("X / $X_{Primitive Mantle}$")
plt.show()
########################################################################################
# Spider Density Plots
# --------------------
#
# Alternatively, we can plot a conditional density spider plot:
#
fig, ax = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(10, 6))
normdf.pyroplot.spider(
ax=ax[0], color="k", alpha=0.05, unity_line=True, index_order=by_incompatibility
)
normdf.pyroplot.spider(
ax=ax[1],
mode="binkde",
vmin=0.05, # 95th percentile,
resolution=10,
unity_line=True,
index_order="incompatibility",
)
[a.set_ylabel("X / $X_{Primitive Mantle}$") for a in ax]
plt.show()
########################################################################################
# We can now assemble a more complete comparison of some of the conditional density
# modes for spider plots:
#
modes = [
("plot", "plot", [], dict(color="k", alpha=0.01)),
("fill", "fill", [], dict(color="k", alpha=0.5)),
("binkde", "binkde", [], dict(resolution=5)),
(
"binkde",
"binkde contours specified",
[],
dict(contours | """
import matplotlib.pyplot as plt
import numpy as np | random_line_split |
|
types.go | // Exactly one member can be set.
type VolumeAttachmentSource struct {
// persistentVolumeName represents the name of the persistent volume to attach.
// +optional
PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"`
// inlineVolumeSpec contains all the information necessary to attach
// a persistent volume defined by a pod's inline VolumeSource. This field
// is populated only for the CSIMigration feature. It contains
// translated fields from a pod's inline VolumeSource to a
// PersistentVolumeSpec. This field is beta-level and is only
// honored by servers that enabled the CSIMigration feature.
// +optional
InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"`
}
// VolumeAttachmentStatus is the status of a VolumeAttachment request.
type VolumeAttachmentStatus struct {
// attached indicates the volume is successfully attached.
// This field must only be set by the entity completing the attach
// operation, i.e. the external-attacher.
Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"`
// attachmentMetadata is populated with any
// information returned by the attach operation, upon successful attach, that must be passed
// into subsequent WaitForAttach or Mount calls.
// This field must only be set by the entity completing the attach
// operation, i.e. the external-attacher.
// +optional
AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"`
// attachError represents the last error encountered during attach operation, if any.
// This field must only be set by the entity completing the attach
// operation, i.e. the external-attacher.
// +optional
AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"`
// detachError represents the last error encountered during detach operation, if any.
// This field must only be set by the entity completing the detach
// operation, i.e. the external-attacher.
// +optional
DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"`
}
// VolumeError captures an error encountered during a volume operation.
type VolumeError struct {
// time represents the time the error was encountered.
// +optional
Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"`
// message represents the error encountered during Attach or Detach operation.
// This string may be logged, so it should not contain sensitive
// information.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CSIDriver captures information about a Container Storage Interface (CSI)
// volume driver deployed on the cluster.
// Kubernetes attach detach controller uses this object to determine whether attach is required.
// Kubelet uses this object to determine whether pod information needs to be passed on mount.
// CSIDriver objects are non-namespaced.
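//
// For illustration, a minimal object of this type could be constructed as follows
// (the driver name and the attachRequired/podInfoOnMount values are example
// assumptions, not defaults defined by this package):
//
//	attachRequired := false
//	podInfoOnMount := true
//	driver := CSIDriver{
//		ObjectMeta: metav1.ObjectMeta{Name: "hostpath.csi.example.com"},
//		Spec: CSIDriverSpec{
//			AttachRequired: &attachRequired,
//			PodInfoOnMount: &podInfoOnMount,
//		},
//	}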
type CSIDriver struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata.
// metadata.Name indicates the name of the CSI driver that this object
// refers to; it MUST be the same name returned by the CSI GetPluginName()
// call for that driver.
// The driver name must be 63 characters or less, beginning and ending with
// an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and
// alphanumerics between.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// spec represents the specification of the CSI Driver.
Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CSIDriverList is a collection of CSIDriver objects.
type CSIDriverList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// items is the list of CSIDriver
Items []CSIDriver `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// CSIDriverSpec is the specification of a CSIDriver.
type CSIDriverSpec struct {
// attachRequired indicates this CSI volume driver requires an attach
// operation (because it implements the CSI ControllerPublishVolume()
// method), and that the Kubernetes attach detach controller should call
// the attach volume interface which checks the volumeattachment status
// and waits until the volume is attached before proceeding to mounting.
// The CSI external-attacher coordinates with CSI volume driver and updates
// the volumeattachment status when the attach operation is complete.
// If the CSIDriverRegistry feature gate is enabled and the value is
// specified to false, the attach operation will be skipped.
// Otherwise the attach operation will be called.
//
// This field is immutable.
//
// +optional
AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"`
// podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.)
// during mount operations, if set to true.
// If set to false, pod information will not be passed on mount.
// Default is false.
//
// The CSI driver specifies podInfoOnMount as part of driver deployment.
// If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls.
// The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.
//
	// The following VolumeContext will be passed if podInfoOnMount is set to true.
// This list might grow, but the prefix will be used.
// "csi.storage.k8s.io/pod.name": pod.Name
// "csi.storage.k8s.io/pod.namespace": pod.Namespace
// "csi.storage.k8s.io/pod.uid": string(pod.UID)
// "csi.storage.k8s.io/ephemeral": "true" if the volume is an ephemeral inline volume
// defined by a CSIVolumeSource, otherwise "false"
//
// "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only
// required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode.
// Other drivers can leave pod info disabled and/or ignore this field.
// As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when
// deployed on such a cluster and the deployment determines which mode that is, for example
// via a command line parameter of the driver.
//
// This field is immutable.
//
// +optional
PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"`
// volumeLifecycleModes defines what kind of volumes this CSI volume driver supports.
// The default if the list is empty is "Persistent", which is the usage defined by the
// CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.
//
// The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec
// with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod.
// A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.
//
// For more information about implementing this mode, see
// https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html
// A driver can support one or more of these modes and more modes may be added in the future.
//
// This field is beta.
// This field is immutable.
//
// +optional
// +listType=set
VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"`
// storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage
// capacity that the driver deployment will report by creating
// CSIStorageCapacity objects with capacity information, if set to true.
//
// The check can be enabled immediately when deploying a driver.
// In that case, provisioning new volumes with late binding
// will pause until the driver deployment has published
// some suitable CSIStorageCapacity | // VolumeAttachmentSource represents a volume that should be attached.
// Right now only PersistentVolumes can be attached via external attacher,
// in future we may allow also inline volumes in pods. | random_line_split |
|
DatePicker.ts | -date-picker',
providers: [DATE_PICKER_VALUE_ACCESSOR],
animations: [
trigger('startDateTextState', [
state(
'startDate',
style({
opacity: '1.0',
}),
),
state(
'endDate',
style({
opacity: '0.6',
}),
),
transition('startDate <=> endDate', animate('200ms ease-in')),
]),
trigger('endDateTextState', [
state(
'startDate',
style({
opacity: '0.6',
}),
),
state(
'endDate',
style({
opacity: '1.0',
}),
),
transition('startDate <=> endDate', animate('200ms ease-in')),
]),
trigger('indicatorState', [
state(
'startDate',
style({
transform: 'translateX(0%)',
}),
),
state(
'endDate',
style({
transform: 'translateX(100%)',
}),
),
transition('startDate <=> endDate', animate('200ms ease-in')),
]),
],
template: `
<div class="date-picker-container">
<div class="date-range-tabs" *ngIf="range" [class.week-select-mode]="weekRangeSelect">
<span
class="range-tab"
(click)="toggleRangeSelect('startDate')"
[@startDateTextState]="rangeSelectMode"
data-automation-id="calendar-start-date"
>{{ startDateLabel }}</span
>
<span
class="range-tab"
(click)="toggleRangeSelect('endDate')"
[@endDateTextState]="rangeSelectMode"
data-automation-id="calendar-end-date"
>{{ endDateLabel }}</span
>
<i class="indicator" [@indicatorState]="rangeSelectMode"></i>
</div>
<novo-calendar
[activeDate]="activeDate"
[(selected)]="selection"
(selectedChange)="updateSelection($event)"
[mode]="mode"
[numberOfMonths]="numberOfMonths"
[weekStartsOn]="weekStart"
[disabledDateMessage]="disabledDateMessage"
[minDate]="start"
[maxDate]="end"
></novo-calendar>
<div class="calendar-footer" [hidden]="hideFooter">
<novo-button (click)="setToday()" class="today" size="small" data-automation-id="calendar-today">{{ labels.today }}</novo-button>
</div>
</div>
`,
styleUrls: ['./DatePicker.scss'],
})
export class NovoDatePickerElement implements ControlValueAccessor, OnInit {
/**
* The minimum year to allow selected in year select view
**/
@Input()
minYear: string | number;
/**
* The maximum year to allow selected in year select view
**/
@Input()
maxYear: string | number;
/**
* The minimum date that can be selected.
**/
@Input()
start: Date;
/**
* The maximum date that can be selected.
**/
@Input()
end: Date;
/**
* **Deprecated** Whether the date-picker is used outside of an overlay.
**/
@Input()
@BooleanInput()
inline: boolean;
/**
* Day of the week the calendar should display first, Sunday=0...Saturday=6
**/
@Input()
weekStart: Day = 0;
/**
* Certain dates that are already selected.
**/
@Input()
preselected: Date[] = [];
/**
* Whether the days for the previous and next month should be hidden.
**/
@Input()
@BooleanInput()
@HostBinding('class.hide-overflow-days')
public hideOverflowDays: boolean = false;
/**
* Whether the footer which contains `today` button should be hidden.
**/
@Input()
@BooleanInput()
public hideFooter: boolean = false;
@Input()
disabledDateMessage: string;
// Select callback for output
@Output()
onSelect: EventEmitter<any> = new EventEmitter(false);
_mode: DatePickerSelectModes = 'single';
_range: boolean;
_weekRangeSelect: boolean;
_numberOfMonths: number[] = [0];
/**
* Number of months to display at once.
* @default 1
**/
@Input()
get numberOfMonths(): number {
return this._numberOfMonths.length;
}
set numberOfMonths(value) {
this._numberOfMonths = Array.from(Array(Number(value)).keys());
}
/**
* How the date selection should work.
* @default single
**/
@Input()
get mode(): DatePickerSelectModes {
return this._mode;
}
set mode(value) {
if (this._mode !== value) {
this._mode = value;
}
}
/**
* **deprecated** please use `mode="range"`.
**/
@Input()
get range(): boolean {
return ['range', 'week'].includes(this.mode) || this._range;
}
set range(value) {
console.warn(`'range' property is deprecated, please use 'mode="range"'.`);
if (this._range !== value) {
this._range = value;
this.mode = 'range';
}
}
/**
* **deprecated** please use `mode="week"`.
**/
@Input()
get weekRangeSelect(): boolean {
return this._mode === 'week' || this._weekRangeSelect;
}
set weekRangeSelect(value) {
console.warn(`'weekRangeSelect' property is deprecated, please use 'mode="week"'.`);
if (this._weekRangeSelect !== value) {
this._weekRangeSelect = value;
this.mode = 'week';
}
}
// @HostBinding('style.width')
// get hb_width() {
// return this._sanitizer.bypassSecurityTrustStyle(`${this.numberOfMonths * 228}px`);
// }
model: modelTypes;
activeDate: Date;
_selection: Date[] = [];
preview: Date[] = [];
startDateLabel: string;
endDateLabel: string;
rangeSelectMode: rangeSelectModes = 'startDate';
_onChange: Function = () => {};
_onTouched: Function = () => {};
get selection(): Date[] {
return this._selection;
}
set selection(value) {
this._selection = value ? value.filter(isDate).map((d) => DateUtil.startOfDay(d)) : [];
}
constructor(
public labels: NovoLabelService,
private element: ElementRef,
private cdr: ChangeDetectorRef,
private _sanitizer: DomSanitizer,
) {}
ngOnInit() {
// Determine the year array
const now = new Date();
// Set labels
if (this.model) {
this.modelToSelection(this.model);
}
if (this.selection && this.selection.length) {
this.updateView(this.selection[0]);
}
}
updateView(date) {
const value: any = date ? new Date(date) : new Date();
this.activeDate = new Date(value);
}
updateSelection(selected: Date[], fireEvents = true) {
// Helpers.swallowEvent(event);
this.selection = selected;
this.startDateLabel = this.labels.formatDateWithFormat(this.selection[0], {
month: 'short',
day: '2-digit',
year: 'numeric',
});
this.endDateLabel = this.labels.formatDateWithFormat(this.selection[1], {
month: 'short',
day: '2-digit',
year: 'numeric',
});
if (fireEvents) {
switch (this.mode) {
case 'multiple':
this.fireSelect();
// Also, update the ngModel
this._onChange(this.selection);
this.model = this.selection;
break;
case 'range':
case 'week':
if (this.selection.filter(Boolean).length === 2) {
this.fireRangeSelect();
// Also, update the ngModel
const model = {
startDate: this.selection[0],
endDate: this.selection[1],
};
this._onChange(model);
this.model = model;
}
break;
case 'single':
default:
this.fireSelect();
// Also, update the ngModel
this._onChange(this.selection[0]);
this.model = this.selection[0];
break;
}
}
this.cdr.markForCheck();
}
eventData(date: Date) {
return {
year: date.getFullYear(),
month: this.labels.formatDateWithFormat(date, { month: 'long' }),
day: this.labels.formatDateWithFormat(date, { weekday: 'long' }),
date,
};
}
fireSelect() {
if (this.mode === 'multiple') {
this.onSelect.next(this.selection);
} else {
this.onSelect.next(this.eventData(this.selection[0]));
}
}
fireRangeSelect() {
// Make sure the start date is before the end date
if (this.selection.filter(Boolean).length === 2) {
const [start, end] = this.selection;
this.onSelect.next({
startDate: this.eventData(start),
endDate: this.eventData(end),
});
}
}
setToday() {
const tmp = new Date();
this.updateView(tmp);
this.updateSelection(Array.of(tmp));
}
| toggleRangeSelect | identifier_name |
|
DatePicker.ts | multi: true,
};
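/**
 * Example usage (illustrative; the surrounding form bindings are assumptions, not
 * part of this file):
 *
 *   <novo-date-picker [(ngModel)]="dateRange" mode="range" [weekStart]="1"></novo-date-picker>
 *
 * `mode` may be 'single', 'multiple', 'range' or 'week', and the emitted model shape
 * changes accordingly (a single Date, a Date[], or a { startDate, endDate } object).
 */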
@Component({
selector: 'novo-date-picker',
providers: [DATE_PICKER_VALUE_ACCESSOR],
animations: [
trigger('startDateTextState', [
state(
'startDate',
style({
opacity: '1.0',
}),
),
state(
'endDate',
style({
opacity: '0.6',
}),
),
transition('startDate <=> endDate', animate('200ms ease-in')),
]),
trigger('endDateTextState', [
state(
'startDate',
style({
opacity: '0.6',
}),
),
state(
'endDate',
style({
opacity: '1.0',
}),
),
transition('startDate <=> endDate', animate('200ms ease-in')),
]),
trigger('indicatorState', [
state(
'startDate',
style({
transform: 'translateX(0%)',
}),
),
state(
'endDate',
style({
transform: 'translateX(100%)',
}),
),
transition('startDate <=> endDate', animate('200ms ease-in')),
]),
],
template: `
<div class="date-picker-container">
<div class="date-range-tabs" *ngIf="range" [class.week-select-mode]="weekRangeSelect">
<span
class="range-tab"
(click)="toggleRangeSelect('startDate')"
[@startDateTextState]="rangeSelectMode"
data-automation-id="calendar-start-date"
>{{ startDateLabel }}</span
>
<span
class="range-tab"
(click)="toggleRangeSelect('endDate')"
[@endDateTextState]="rangeSelectMode"
data-automation-id="calendar-end-date"
>{{ endDateLabel }}</span
>
<i class="indicator" [@indicatorState]="rangeSelectMode"></i>
</div>
<novo-calendar
[activeDate]="activeDate"
[(selected)]="selection"
(selectedChange)="updateSelection($event)"
[mode]="mode"
[numberOfMonths]="numberOfMonths"
[weekStartsOn]="weekStart"
[disabledDateMessage]="disabledDateMessage"
[minDate]="start"
[maxDate]="end"
></novo-calendar>
<div class="calendar-footer" [hidden]="hideFooter">
<novo-button (click)="setToday()" class="today" size="small" data-automation-id="calendar-today">{{ labels.today }}</novo-button>
</div>
</div>
`,
styleUrls: ['./DatePicker.scss'],
})
export class NovoDatePickerElement implements ControlValueAccessor, OnInit {
/**
* The minimum year to allow selected in year select view
**/
@Input()
minYear: string | number;
/**
* The maximum year to allow selected in year select view
**/
@Input()
maxYear: string | number;
/**
* The minimum date that can be selected.
**/
@Input()
start: Date;
/**
* The maximum date that can be selected.
**/
@Input()
end: Date;
/**
* **Deprecated** Whether the date-picker is used outside of an overlay.
**/
@Input()
@BooleanInput()
inline: boolean;
/**
* Day of the week the calendar should display first, Sunday=0...Saturday=6
**/
@Input()
weekStart: Day = 0;
/**
* Certain dates that are already selected.
**/
@Input()
preselected: Date[] = [];
/**
* Whether the days for the previous and next month should be hidden.
**/
@Input()
@BooleanInput()
@HostBinding('class.hide-overflow-days')
public hideOverflowDays: boolean = false;
/**
* Whether the footer which contains `today` button should be hidden.
**/
@Input()
@BooleanInput()
public hideFooter: boolean = false;
@Input()
disabledDateMessage: string;
// Select callback for output
@Output()
onSelect: EventEmitter<any> = new EventEmitter(false);
_mode: DatePickerSelectModes = 'single';
_range: boolean;
_weekRangeSelect: boolean;
_numberOfMonths: number[] = [0];
/**
* Number of months to display at once.
* @default 1
**/
@Input()
get numberOfMonths(): number {
return this._numberOfMonths.length;
}
set numberOfMonths(value) {
this._numberOfMonths = Array.from(Array(Number(value)).keys());
}
/**
* How the date selection should work.
* @default single
**/
@Input()
get mode(): DatePickerSelectModes {
return this._mode;
}
set mode(value) {
if (this._mode !== value) {
this._mode = value;
}
}
/**
* **deprecated** please use `mode="range"`.
**/
@Input()
get range(): boolean {
return ['range', 'week'].includes(this.mode) || this._range;
}
set range(value) {
console.warn(`'range' property is deprecated, please use 'mode="range"'.`);
if (this._range !== value) {
this._range = value;
this.mode = 'range';
}
}
/**
* **deprecated** please use `mode="week"`.
**/
@Input()
get weekRangeSelect(): boolean {
return this._mode === 'week' || this._weekRangeSelect;
}
set weekRangeSelect(value) {
console.warn(`'weekRangeSelect' property is deprecated, please use 'mode="week"'.`);
if (this._weekRangeSelect !== value) {
this._weekRangeSelect = value;
this.mode = 'week';
}
}
// @HostBinding('style.width')
// get hb_width() {
// return this._sanitizer.bypassSecurityTrustStyle(`${this.numberOfMonths * 228}px`);
// }
model: modelTypes;
activeDate: Date;
_selection: Date[] = [];
preview: Date[] = [];
startDateLabel: string;
endDateLabel: string;
rangeSelectMode: rangeSelectModes = 'startDate';
_onChange: Function = () => {};
_onTouched: Function = () => {};
get selection(): Date[] {
return this._selection;
}
set selection(value) {
this._selection = value ? value.filter(isDate).map((d) => DateUtil.startOfDay(d)) : [];
}
constructor(
public labels: NovoLabelService,
private element: ElementRef,
private cdr: ChangeDetectorRef,
private _sanitizer: DomSanitizer,
) {}
ngOnInit() {
// Determine the year array
const now = new Date();
// Set labels
if (this.model) {
this.modelToSelection(this.model);
}
if (this.selection && this.selection.length) {
this.updateView(this.selection[0]);
}
}
updateView(date) {
const value: any = date ? new Date(date) : new Date();
this.activeDate = new Date(value);
}
updateSelection(selected: Date[], fireEvents = true) {
// Helpers.swallowEvent(event);
this.selection = selected;
this.startDateLabel = this.labels.formatDateWithFormat(this.selection[0], {
month: 'short',
day: '2-digit',
year: 'numeric',
});
this.endDateLabel = this.labels.formatDateWithFormat(this.selection[1], {
month: 'short',
day: '2-digit',
year: 'numeric',
});
if (fireEvents) {
switch (this.mode) {
case 'multiple':
this.fireSelect();
// Also, update the ngModel
this._onChange(this.selection);
this.model = this.selection;
break;
case 'range':
case 'week':
if (this.selection.filter(Boolean).length === 2) {
this.fireRangeSelect();
// Also, update the ngModel
const model = {
startDate: this.selection[0],
endDate: this.selection[1],
};
this._onChange(model);
this.model = model;
}
break;
case 'single':
default:
this.fireSelect();
// Also, update the ngModel
this._onChange(this.selection[0]);
this.model = this.selection[0];
break;
}
}
this.cdr.markForCheck();
}
eventData(date: Date) |
fireSelect() {
if (this.mode === 'multiple') {
this.onSelect.next(this.selection);
} else {
this.onSelect.next(this.eventData(this.selection[0]));
}
}
fireRangeSelect() {
// Make sure the start date is before the end date
if (this.selection.filter(Boolean).length === 2) {
const [start, end] = this.selection;
this.onSelect.next({
startDate: this.eventData(start),
endDate: this.eventData(end),
});
}
}
setToday() {
const tmp = new Date();
this.updateView(tmp | {
return {
year: date.getFullYear(),
month: this.labels.formatDateWithFormat(date, { month: 'long' }),
day: this.labels.formatDateWithFormat(date, { weekday: 'long' }),
date,
};
} | identifier_body |
DatePicker.ts | [(selected)]="selection"
(selectedChange)="updateSelection($event)"
[mode]="mode"
[numberOfMonths]="numberOfMonths"
[weekStartsOn]="weekStart"
[disabledDateMessage]="disabledDateMessage"
[minDate]="start"
[maxDate]="end"
></novo-calendar>
<div class="calendar-footer" [hidden]="hideFooter">
<novo-button (click)="setToday()" class="today" size="small" data-automation-id="calendar-today">{{ labels.today }}</novo-button>
</div>
</div>
`,
styleUrls: ['./DatePicker.scss'],
})
export class NovoDatePickerElement implements ControlValueAccessor, OnInit {
/**
* The minimum year to allow selected in year select view
**/
@Input()
minYear: string | number;
/**
* The maximum year to allow selected in year select view
**/
@Input()
maxYear: string | number;
/**
* The minimum date that can be selected.
**/
@Input()
start: Date;
/**
* The maximum date that can be selected.
**/
@Input()
end: Date;
/**
* **Deprecated** Whether the date-picker is used outside of an overlay.
**/
@Input()
@BooleanInput()
inline: boolean;
/**
* Day of the week the calendar should display first, Sunday=0...Saturday=6
**/
@Input()
weekStart: Day = 0;
/**
* Certain dates that are already selected.
**/
@Input()
preselected: Date[] = [];
/**
* Whether the days for the previous and next month should be hidden.
**/
@Input()
@BooleanInput()
@HostBinding('class.hide-overflow-days')
public hideOverflowDays: boolean = false;
/**
* Whether the footer which contains `today` button should be hidden.
**/
@Input()
@BooleanInput()
public hideFooter: boolean = false;
@Input()
disabledDateMessage: string;
// Select callback for output
@Output()
onSelect: EventEmitter<any> = new EventEmitter(false);
_mode: DatePickerSelectModes = 'single';
_range: boolean;
_weekRangeSelect: boolean;
_numberOfMonths: number[] = [0];
/**
* Number of months to display at once.
* @default 1
**/
@Input()
get numberOfMonths(): number {
return this._numberOfMonths.length;
}
set numberOfMonths(value) {
this._numberOfMonths = Array.from(Array(Number(value)).keys());
}
/**
* How the date selection should work.
* @default single
**/
@Input()
get mode(): DatePickerSelectModes {
return this._mode;
}
set mode(value) {
if (this._mode !== value) {
this._mode = value;
}
}
/**
* **deprecated** please use `mode="range"`.
**/
@Input()
get range(): boolean {
return ['range', 'week'].includes(this.mode) || this._range;
}
set range(value) {
console.warn(`'range' property is deprecated, please use 'mode="range"'.`);
if (this._range !== value) {
this._range = value;
this.mode = 'range';
}
}
/**
* **deprecated** please use `mode="week"`.
**/
@Input()
get weekRangeSelect(): boolean {
return this._mode === 'week' || this._weekRangeSelect;
}
set weekRangeSelect(value) {
console.warn(`'weekRangeSelect' property is deprecated, please use 'mode="week"'.`);
if (this._weekRangeSelect !== value) {
this._weekRangeSelect = value;
this.mode = 'week';
}
}
// @HostBinding('style.width')
// get hb_width() {
// return this._sanitizer.bypassSecurityTrustStyle(`${this.numberOfMonths * 228}px`);
// }
model: modelTypes;
activeDate: Date;
_selection: Date[] = [];
preview: Date[] = [];
startDateLabel: string;
endDateLabel: string;
rangeSelectMode: rangeSelectModes = 'startDate';
_onChange: Function = () => {};
_onTouched: Function = () => {};
get selection(): Date[] {
return this._selection;
}
set selection(value) {
this._selection = value ? value.filter(isDate).map((d) => DateUtil.startOfDay(d)) : [];
}
constructor(
public labels: NovoLabelService,
private element: ElementRef,
private cdr: ChangeDetectorRef,
private _sanitizer: DomSanitizer,
) {}
ngOnInit() {
// Determine the year array
const now = new Date();
// Set labels
if (this.model) {
this.modelToSelection(this.model);
}
if (this.selection && this.selection.length) {
this.updateView(this.selection[0]);
}
}
updateView(date) {
const value: any = date ? new Date(date) : new Date();
this.activeDate = new Date(value);
}
updateSelection(selected: Date[], fireEvents = true) {
// Helpers.swallowEvent(event);
this.selection = selected;
this.startDateLabel = this.labels.formatDateWithFormat(this.selection[0], {
month: 'short',
day: '2-digit',
year: 'numeric',
});
this.endDateLabel = this.labels.formatDateWithFormat(this.selection[1], {
month: 'short',
day: '2-digit',
year: 'numeric',
});
if (fireEvents) {
switch (this.mode) {
case 'multiple':
this.fireSelect();
// Also, update the ngModel
this._onChange(this.selection);
this.model = this.selection;
break;
case 'range':
case 'week':
if (this.selection.filter(Boolean).length === 2) {
this.fireRangeSelect();
// Also, update the ngModel
const model = {
startDate: this.selection[0],
endDate: this.selection[1],
};
this._onChange(model);
this.model = model;
}
break;
case 'single':
default:
this.fireSelect();
// Also, update the ngModel
this._onChange(this.selection[0]);
this.model = this.selection[0];
break;
}
}
this.cdr.markForCheck();
}
eventData(date: Date) {
return {
year: date.getFullYear(),
month: this.labels.formatDateWithFormat(date, { month: 'long' }),
day: this.labels.formatDateWithFormat(date, { weekday: 'long' }),
date,
};
}
fireSelect() {
if (this.mode === 'multiple') {
this.onSelect.next(this.selection);
} else {
this.onSelect.next(this.eventData(this.selection[0]));
}
}
fireRangeSelect() {
// Make sure the start date is before the end date
if (this.selection.filter(Boolean).length === 2) {
const [start, end] = this.selection;
this.onSelect.next({
startDate: this.eventData(start),
endDate: this.eventData(end),
});
}
}
setToday() {
const tmp = new Date();
this.updateView(tmp);
this.updateSelection(Array.of(tmp));
}
toggleRangeSelect(range: rangeSelectModes): void {
this.rangeSelectMode = range;
if (range === 'startDate' && this.selection.length) {
this.updateView(this.selection[0]);
}
if (range === 'endDate' && this.selection.length === 2) {
this.updateView(this.selection[1]);
}
}
modelToSelection(model: modelTypes) {
switch (this.mode) {
case 'multiple':
this.selection = model as Date[];
break;
case 'range':
case 'week':
        this.setRangeSelection();
        break;
case 'single':
default:
this.selection = [model as Date];
break;
}
}
// ValueAccessor Functions
writeValue(model: modelTypes): void {
this.model = model;
if (this.mode === 'multiple') {
this.selection = this.model as Date[];
}
if (this.mode === 'range') {
this.setRangeSelection();
}
if (Helpers.isDate(model)) {
this.updateView(model);
this.modelToSelection(model);
} else if (Helpers.isString(model)) {
const date = DateUtil.parse(model as any);
if (isValid(date)) {
this.updateView(date);
this.modelToSelection(date);
}
}
}
setRangeSelection() {
if (this.model?.hasOwnProperty('startDate')) {
// coming from standalone date picker
const range = this.model as RangeModel;
this.selection = [range.startDate, range.endDate].filter(Boolean);
} else if (this.model?.hasOwnProperty('min')) {
// coming from data-table filter where model end date is the beginning of the next day
const range = this.model as DataTableRangeModel;
this.selection = [range.min, subDays(range.max, 1)].filter(Boolean);
}
}
| registerOnChange(fn: Function): void {
this._onChange = fn; | random_line_split |
|
cluster.go | starting build instance: %s", err)
}
c.log("Waiting for instance to boot...")
if err := buildFlynn(build, commit, merge, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("error running build script: %s", err)
}
if runTests {
if err := runUnitTests(build, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("unit tests failed: %s", err)
}
}
if err := build.Shutdown(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error while stopping build instance: %s", err)
}
c.rootFS = build.Drive("hda").FS
return c.rootFS, nil
}
type BootResult struct {
ControllerDomain string
ControllerPin string
ControllerKey string
Instances []*Instance
}
func (c *Cluster) Boot(typ ClusterType, count int, dumpLogs io.Writer, killOnFailure bool) (res *BootResult, err error) {
if err := c.setup(); err != nil {
return nil, err
}
defer func() {
if err != nil {
if dumpLogs != nil && len(c.Instances) > 0 {
c.DumpLogs(dumpLogs)
}
if killOnFailure {
c.Shutdown()
}
}
}()
c.log("Booting", count, "VMs")
instances, err := c.startVMs(typ, c.rootFS, count, true)
if err != nil {
return nil, err
}
for _, inst := range instances {
if err := c.startFlynnHost(inst, instances); err != nil {
return nil, err
}
}
c.log("Bootstrapping layer 1...")
if err := c.bootstrapLayer1(instances); err != nil {
return nil, err
}
return &BootResult{
ControllerDomain: c.ControllerDomain(),
ControllerPin: c.ControllerPin,
ControllerKey: c.ControllerKey,
Instances: instances,
}, nil
}
func (c *Cluster) BridgeIP() string {
if c.bridge == nil {
return ""
}
return c.bridge.IP()
}
func (c *Cluster) AddHost() (*Instance, error) {
if c.rootFS == "" {
return nil, errors.New("cluster not yet booted")
}
c.log("Booting 1 VM")
instances, err := c.startVMs(ClusterTypeDefault, c.rootFS, 1, false)
if err != nil {
return nil, err
}
inst := instances[0]
if err := c.startFlynnHost(inst, c.defaultInstances); err != nil {
return nil, err
}
return inst, err
}
func (c *Cluster) AddVanillaHost(rootFS string) (*Instance, error) {
c.log("Booting 1 VM")
	instances, err := c.startVMs(ClusterTypeNone, rootFS, 1, false)
	if err != nil {
		return nil, err
	}
	return instances[0], nil
}
// RemoveHost stops flynn-host on the instance but leaves it running so the logs
// are still available if we need to dump them later.
func (c *Cluster) RemoveHost(id string) error {
inst, err := c.Instances.Get(id)
if err != nil {
return err
}
c.log("removing host", id)
// Clean shutdown requires waiting for that host to unadvertise on discoverd.
// Specifically: Wait for router-api services to disappear to indicate host
// removal (rather than using StreamHostEvents), so that other
// tests won't try and connect to this host via service discovery.
ip := c.defaultInstances[0].IP
events := make(chan *discoverd.Event)
stream, err := c.discoverdClient(ip).Service("router-api").Watch(events)
if err != nil {
return err
}
defer stream.Close()
// ssh into the host and tell the flynn-host daemon to stop
var cmd string
switch c.bc.Backend {
case "libvirt-lxc":
// manually kill containers after stopping flynn-host due to https://github.com/flynn/flynn/issues/1177
cmd = "sudo start-stop-daemon --stop --pidfile /var/run/flynn-host.pid --retry 15 && (virsh -c lxc:/// list --name | xargs -L 1 virsh -c lxc:/// destroy || true)"
}
if err := inst.Run(cmd, nil); err != nil {
return err
}
loop:
for {
select {
case event := <-events:
if event.Kind == discoverd.EventKindDown {
break loop
}
case <-time.After(20 * time.Second):
return fmt.Errorf("timed out waiting for host removal")
}
}
return nil
}
func (c *Cluster) Size() int {
return len(c.Instances)
}
func (c *Cluster) startVMs(typ ClusterType, rootFS string, count int, initial bool) ([]*Instance, error) {
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return nil, err
}
instances := make([]*Instance, count)
for i := 0; i < count; i++ {
inst, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "2048",
Cores: 2,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: true},
},
})
if err != nil {
return nil, fmt.Errorf("error creating instance %d: %s", i, err)
}
if err = inst.Start(); err != nil {
return nil, fmt.Errorf("error starting instance %d: %s", i, err)
}
inst.initial = initial
instances[i] = inst
c.Instances = append(c.Instances, inst)
switch typ {
case ClusterTypeDefault:
c.defaultInstances = append(c.defaultInstances, inst)
case ClusterTypeRelease:
c.releaseInstances = append(c.releaseInstances, inst)
}
}
return instances, nil
}
func (c *Cluster) startFlynnHost(inst *Instance, peerInstances []*Instance) error {
tmpl, ok := flynnHostScripts[c.bc.Backend]
if !ok {
return fmt.Errorf("unknown host backend: %s", c.bc.Backend)
}
peers := make([]string, 0, len(peerInstances))
for _, inst := range peerInstances {
if !inst.initial |
peers = append(peers, fmt.Sprintf("%s=http://%s:2380", inst.ID, inst.IP))
}
var script bytes.Buffer
data := hostScriptData{
ID: inst.ID,
IP: inst.IP,
Peers: strings.Join(peers, ","),
EtcdProxy: !inst.initial,
}
tmpl.Execute(&script, data)
c.logf("Starting flynn-host on %s [id: %s]\n", inst.IP, inst.ID)
return inst.Run("bash", &Streams{Stdin: &script, Stdout: c.out, Stderr: os.Stderr})
}
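// setup validates the configured kernel path and lazily creates the network
// bridge and VM manager used to boot instances.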
func (c *Cluster) setup() error {
if _, err := os.Stat(c.bc.Kernel); os.IsNotExist(err) {
return fmt.Errorf("cluster: not a kernel file: %s", c.bc.Kernel)
}
if c.bridge == nil {
var err error
name := "flynnbr." + random.String(5)
c.logf("creating network bridge %s\n", name)
c.bridge, err = createBridge(name, c.bc.Network, c.bc.NatIface)
if err != nil {
return fmt.Errorf("could not create network bridge: %s", err)
}
}
c.vm = NewVMManager(c.bridge)
return nil
}
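// Run executes a command on the first booted instance in the cluster.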
func (c *Cluster) Run(command string, s *Streams) error {
return c.run(command, s, nil)
}
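// RunWithEnv executes a command on the first booted instance with the given
// environment variables set.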
func (c *Cluster) RunWithEnv(command string, s *Streams, env map[string]string) error {
return c.run(command, s, env)
}
func (c *Cluster) run(command string, s *Streams, env map[string]string) error {
if len(c.Instances) == 0 {
return errors.New("no booted servers in cluster")
}
return c.Instances[0].RunWithEnv(command, s, env)
}
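// CLIConfig returns a flynn CLI configuration with a single "default" cluster
// pointing at this cluster's controller and git host.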
func (c *Cluster) CLIConfig() (*config.Config, error) {
conf := &config.Config{}
s := &config.Cluster{
Name: "default",
URL: "https://" + c.ControllerDomain(),
Key: c.ControllerKey,
GitHost: c.ClusterDomain + ":2222",
TLSPin: c.ControllerPin,
}
if err := conf.Add(s, true /*force*/); err != nil {
return nil, err
}
return conf, nil
}
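// Shutdown stops every instance in the cluster.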
func (c *Cluster) Shutdown() {
for i, inst := range c.Instances {
c.logf(" | {
continue
} | conditional_block |
cluster.go | (f string, a ...interface{}) (int, error) {
return fmt.Fprintf(c.out, strings.Join([]string{"++", time.Now().Format("15:04:05.000"), f}, " "), a...)
}
func (c *Cluster) BuildFlynn(rootFS, commit string, merge bool, runTests bool) (string, error) {
c.log("Building Flynn...")
if err := c.setup(); err != nil {
return "", err
}
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return "", err
}
build, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "4096",
Cores: 8,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: false},
},
})
if err != nil {
return build.Drive("hda").FS, err
}
c.log("Booting build instance...")
if err := build.Start(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error starting build instance: %s", err)
}
c.log("Waiting for instance to boot...")
if err := buildFlynn(build, commit, merge, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("error running build script: %s", err)
}
if runTests {
if err := runUnitTests(build, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("unit tests failed: %s", err)
}
}
if err := build.Shutdown(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error while stopping build instance: %s", err)
}
c.rootFS = build.Drive("hda").FS
return c.rootFS, nil
}
type BootResult struct {
ControllerDomain string
ControllerPin string
ControllerKey string
Instances []*Instance
}
func (c *Cluster) Boot(typ ClusterType, count int, dumpLogs io.Writer, killOnFailure bool) (res *BootResult, err error) {
if err := c.setup(); err != nil {
return nil, err
}
defer func() {
if err != nil {
if dumpLogs != nil && len(c.Instances) > 0 {
c.DumpLogs(dumpLogs)
}
if killOnFailure {
c.Shutdown()
}
}
}()
c.log("Booting", count, "VMs")
instances, err := c.startVMs(typ, c.rootFS, count, true)
if err != nil {
return nil, err
}
for _, inst := range instances {
if err := c.startFlynnHost(inst, instances); err != nil {
return nil, err
}
}
c.log("Bootstrapping layer 1...")
if err := c.bootstrapLayer1(instances); err != nil {
return nil, err
}
return &BootResult{
ControllerDomain: c.ControllerDomain(),
ControllerPin: c.ControllerPin,
ControllerKey: c.ControllerKey,
Instances: instances,
}, nil
}
func (c *Cluster) BridgeIP() string {
if c.bridge == nil {
return ""
}
return c.bridge.IP()
}
func (c *Cluster) AddHost() (*Instance, error) {
if c.rootFS == "" {
return nil, errors.New("cluster not yet booted")
}
c.log("Booting 1 VM")
instances, err := c.startVMs(ClusterTypeDefault, c.rootFS, 1, false)
if err != nil {
return nil, err
}
inst := instances[0]
if err := c.startFlynnHost(inst, c.defaultInstances); err != nil {
return nil, err
}
return inst, err
}
func (c *Cluster) AddVanillaHost(rootFS string) (*Instance, error) {
c.log("Booting 1 VM")
	instances, err := c.startVMs(ClusterTypeNone, rootFS, 1, false)
	if err != nil {
		return nil, err
	}
	return instances[0], nil
}
// RemoveHost stops flynn-host on the instance but leaves it running so the logs
// are still available if we need to dump them later.
func (c *Cluster) RemoveHost(id string) error {
inst, err := c.Instances.Get(id)
if err != nil {
return err
}
c.log("removing host", id)
// Clean shutdown requires waiting for that host to unadvertise on discoverd.
// Specifically: Wait for router-api services to disappear to indicate host
// removal (rather than using StreamHostEvents), so that other
// tests won't try and connect to this host via service discovery.
ip := c.defaultInstances[0].IP
events := make(chan *discoverd.Event)
stream, err := c.discoverdClient(ip).Service("router-api").Watch(events)
if err != nil {
return err
}
defer stream.Close()
// ssh into the host and tell the flynn-host daemon to stop
var cmd string
switch c.bc.Backend {
case "libvirt-lxc":
// manually kill containers after stopping flynn-host due to https://github.com/flynn/flynn/issues/1177
cmd = "sudo start-stop-daemon --stop --pidfile /var/run/flynn-host.pid --retry 15 && (virsh -c lxc:/// list --name | xargs -L 1 virsh -c lxc:/// destroy || true)"
}
if err := inst.Run(cmd, nil); err != nil {
return err
}
loop:
for {
select {
case event := <-events:
if event.Kind == discoverd.EventKindDown {
break loop
}
case <-time.After(20 * time.Second):
return fmt.Errorf("timed out waiting for host removal")
}
}
return nil
}
func (c *Cluster) Size() int {
return len(c.Instances)
}
func (c *Cluster) startVMs(typ ClusterType, rootFS string, count int, initial bool) ([]*Instance, error) {
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return nil, err
}
instances := make([]*Instance, count)
for i := 0; i < count; i++ {
inst, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "2048",
Cores: 2,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: true},
},
})
if err != nil {
return nil, fmt.Errorf("error creating instance %d: %s", i, err)
}
if err = inst.Start(); err != nil {
return nil, fmt.Errorf("error starting instance %d: %s", i, err)
}
inst.initial = initial
instances[i] = inst
c.Instances = append(c.Instances, inst)
switch typ {
case ClusterTypeDefault:
c.defaultInstances = append(c.defaultInstances, inst)
case ClusterTypeRelease:
c.releaseInstances = append(c.releaseInstances, inst)
}
}
return instances, nil
}
func (c *Cluster) startFlynnHost(inst *Instance, peerInstances []*Instance) error {
tmpl, ok := flynnHostScripts[c.bc.Backend]
if !ok {
return fmt.Errorf("unknown host backend: %s", c.bc.Backend)
}
peers := make([]string, 0, len(peerInstances))
for _, inst := range peerInstances {
if !inst.initial {
continue
}
peers = append(peers, fmt.Sprintf("%s=http://%s:2380", inst.ID, inst.IP))
}
var script bytes.Buffer
data := hostScriptData{
ID: inst.ID,
IP: inst.IP,
Peers: strings.Join(peers, ","),
EtcdProxy: !inst.initial,
}
tmpl.Execute(&script, data)
c.logf("Starting flynn-host on %s [id: %s]\n", inst.IP, inst.ID)
return inst.Run("bash", &Streams{Stdin: &script, Stdout: c.out, Stderr: os.Stderr})
}
func (c *Cluster) setup() error {
if _, err := os.Stat(c.bc.Kernel); os.IsNotExist(err) {
return fmt.Errorf("cluster: not a kernel file: %s", c.bc.Kernel)
}
if c.bridge == nil {
var err error
name := "flynnbr." + random.String(5)
c.logf("creating network bridge %s\n", name)
c.bridge, err = createBridge(name, c.bc.Network, c.bc.NatIface)
if err != nil {
return fmt.Errorf("could not create network bridge: %s", err)
}
}
| logf | identifier_name |
|
cluster.go | },
})
if err != nil {
return build.Drive("hda").FS, err
}
c.log("Booting build instance...")
if err := build.Start(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error starting build instance: %s", err)
}
c.log("Waiting for instance to boot...")
if err := buildFlynn(build, commit, merge, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("error running build script: %s", err)
}
if runTests {
if err := runUnitTests(build, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("unit tests failed: %s", err)
}
}
if err := build.Shutdown(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error while stopping build instance: %s", err)
}
c.rootFS = build.Drive("hda").FS
return c.rootFS, nil
}
type BootResult struct {
ControllerDomain string
ControllerPin string
ControllerKey string
Instances []*Instance
}
func (c *Cluster) Boot(typ ClusterType, count int, dumpLogs io.Writer, killOnFailure bool) (res *BootResult, err error) {
if err := c.setup(); err != nil {
return nil, err
}
defer func() {
if err != nil {
if dumpLogs != nil && len(c.Instances) > 0 {
c.DumpLogs(dumpLogs)
}
if killOnFailure {
c.Shutdown()
}
}
}()
c.log("Booting", count, "VMs")
instances, err := c.startVMs(typ, c.rootFS, count, true)
if err != nil {
return nil, err
}
for _, inst := range instances {
if err := c.startFlynnHost(inst, instances); err != nil {
return nil, err
}
}
c.log("Bootstrapping layer 1...")
if err := c.bootstrapLayer1(instances); err != nil {
return nil, err
}
return &BootResult{
ControllerDomain: c.ControllerDomain(),
ControllerPin: c.ControllerPin,
ControllerKey: c.ControllerKey,
Instances: instances,
}, nil
}
func (c *Cluster) BridgeIP() string {
if c.bridge == nil {
return ""
}
return c.bridge.IP()
}
func (c *Cluster) AddHost() (*Instance, error) {
if c.rootFS == "" {
return nil, errors.New("cluster not yet booted")
}
c.log("Booting 1 VM")
instances, err := c.startVMs(ClusterTypeDefault, c.rootFS, 1, false)
if err != nil {
return nil, err
}
inst := instances[0]
if err := c.startFlynnHost(inst, c.defaultInstances); err != nil {
return nil, err
}
return inst, err
}
func (c *Cluster) AddVanillaHost(rootFS string) (*Instance, error) {
c.log("Booting 1 VM")
	instances, err := c.startVMs(ClusterTypeNone, rootFS, 1, false)
	if err != nil {
		return nil, err
	}
	return instances[0], nil
}
// RemoveHost stops flynn-host on the instance but leaves it running so the logs
// are still available if we need to dump them later.
func (c *Cluster) RemoveHost(id string) error {
inst, err := c.Instances.Get(id)
if err != nil {
return err
}
c.log("removing host", id)
// Clean shutdown requires waiting for that host to unadvertise on discoverd.
// Specifically: Wait for router-api services to disappear to indicate host
// removal (rather than using StreamHostEvents), so that other
// tests won't try and connect to this host via service discovery.
ip := c.defaultInstances[0].IP
events := make(chan *discoverd.Event)
stream, err := c.discoverdClient(ip).Service("router-api").Watch(events)
if err != nil {
return err
}
defer stream.Close()
// ssh into the host and tell the flynn-host daemon to stop
var cmd string
switch c.bc.Backend {
case "libvirt-lxc":
// manually kill containers after stopping flynn-host due to https://github.com/flynn/flynn/issues/1177
cmd = "sudo start-stop-daemon --stop --pidfile /var/run/flynn-host.pid --retry 15 && (virsh -c lxc:/// list --name | xargs -L 1 virsh -c lxc:/// destroy || true)"
}
if err := inst.Run(cmd, nil); err != nil {
return err
}
loop:
for {
select {
case event := <-events:
if event.Kind == discoverd.EventKindDown {
break loop
}
case <-time.After(20 * time.Second):
return fmt.Errorf("timed out waiting for host removal")
}
}
return nil
}
func (c *Cluster) Size() int {
return len(c.Instances)
}
func (c *Cluster) startVMs(typ ClusterType, rootFS string, count int, initial bool) ([]*Instance, error) {
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return nil, err
}
instances := make([]*Instance, count)
for i := 0; i < count; i++ {
inst, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "2048",
Cores: 2,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: true},
},
})
if err != nil {
return nil, fmt.Errorf("error creating instance %d: %s", i, err)
}
if err = inst.Start(); err != nil {
return nil, fmt.Errorf("error starting instance %d: %s", i, err)
}
inst.initial = initial
instances[i] = inst
c.Instances = append(c.Instances, inst)
switch typ {
case ClusterTypeDefault:
c.defaultInstances = append(c.defaultInstances, inst)
case ClusterTypeRelease:
c.releaseInstances = append(c.releaseInstances, inst)
}
}
return instances, nil
}
func (c *Cluster) startFlynnHost(inst *Instance, peerInstances []*Instance) error {
tmpl, ok := flynnHostScripts[c.bc.Backend]
if !ok {
return fmt.Errorf("unknown host backend: %s", c.bc.Backend)
}
peers := make([]string, 0, len(peerInstances))
for _, inst := range peerInstances {
if !inst.initial {
continue
}
peers = append(peers, fmt.Sprintf("%s=http://%s:2380", inst.ID, inst.IP))
}
var script bytes.Buffer
data := hostScriptData{
ID: inst.ID,
IP: inst.IP,
Peers: strings.Join(peers, ","),
EtcdProxy: !inst.initial,
}
tmpl.Execute(&script, data)
c.logf("Starting flynn-host on %s [id: %s]\n", inst.IP, inst.ID)
return inst.Run("bash", &Streams{Stdin: &script, Stdout: c.out, Stderr: os.Stderr})
}
func (c *Cluster) setup() error {
if _, err := os.Stat(c.bc.Kernel); os.IsNotExist(err) {
return fmt.Errorf("cluster: not a kernel file: %s", c.bc.Kernel)
}
if c.bridge == nil {
var err error
name := "flynnbr." + random.String(5)
c.logf("creating network bridge %s\n", name)
c.bridge, err = createBridge(name, c.bc.Network, c.bc.NatIface)
if err != nil {
return fmt.Errorf("could not create network bridge: %s", err)
}
}
c.vm = NewVMManager(c.bridge)
return nil
}
func (c *Cluster) Run(command string, s *Streams) error {
return c.run(command, s, nil)
}
func (c *Cluster) RunWithEnv(command string, s *Streams, env map[string]string) error {
return c.run(command, s, env)
}
func (c *Cluster) run(command | {
c.log("Building Flynn...")
if err := c.setup(); err != nil {
return "", err
}
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return "", err
}
build, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "4096",
Cores: 8,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: false}, | identifier_body |
|
cluster.go | User: uid,
Group: gid,
Memory: "4096",
Cores: 8,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: false},
},
})
if err != nil {
return build.Drive("hda").FS, err
}
c.log("Booting build instance...")
if err := build.Start(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error starting build instance: %s", err)
}
c.log("Waiting for instance to boot...")
if err := buildFlynn(build, commit, merge, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("error running build script: %s", err)
}
if runTests {
if err := runUnitTests(build, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("unit tests failed: %s", err)
}
}
if err := build.Shutdown(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error while stopping build instance: %s", err)
}
c.rootFS = build.Drive("hda").FS
return c.rootFS, nil
}
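// BootResult describes a booted cluster: the controller connection details and
// the instances that were started.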
type BootResult struct {
ControllerDomain string
ControllerPin string
ControllerKey string
Instances []*Instance
}
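// Boot starts count VMs of the given cluster type from the built root
// filesystem, runs flynn-host on each of them and bootstraps layer 1. On
// failure the cluster logs are dumped to dumpLogs (if non-nil) and the cluster
// is shut down if killOnFailure is set.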
func (c *Cluster) Boot(typ ClusterType, count int, dumpLogs io.Writer, killOnFailure bool) (res *BootResult, err error) {
if err := c.setup(); err != nil {
return nil, err
}
defer func() {
if err != nil {
if dumpLogs != nil && len(c.Instances) > 0 {
c.DumpLogs(dumpLogs)
}
if killOnFailure {
c.Shutdown()
}
}
}()
c.log("Booting", count, "VMs")
instances, err := c.startVMs(typ, c.rootFS, count, true)
if err != nil {
return nil, err
}
for _, inst := range instances {
if err := c.startFlynnHost(inst, instances); err != nil {
return nil, err
}
}
c.log("Bootstrapping layer 1...")
if err := c.bootstrapLayer1(instances); err != nil {
return nil, err
}
return &BootResult{
ControllerDomain: c.ControllerDomain(),
ControllerPin: c.ControllerPin,
ControllerKey: c.ControllerKey,
Instances: instances,
}, nil
}
func (c *Cluster) BridgeIP() string {
if c.bridge == nil {
return ""
}
return c.bridge.IP()
}
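// AddHost boots one more VM from the cached root filesystem and starts
// flynn-host on it, using the default instances as its etcd peers.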
func (c *Cluster) AddHost() (*Instance, error) {
if c.rootFS == "" {
return nil, errors.New("cluster not yet booted")
}
c.log("Booting 1 VM")
instances, err := c.startVMs(ClusterTypeDefault, c.rootFS, 1, false)
if err != nil {
return nil, err
}
inst := instances[0]
if err := c.startFlynnHost(inst, c.defaultInstances); err != nil {
return nil, err
}
return inst, err
}
func (c *Cluster) AddVanillaHost(rootFS string) (*Instance, error) {
c.log("Booting 1 VM")
instances, err := c.startVMs(ClusterTypeNone, rootFS, 1, false)
return instances[0], err
}
// RemoveHost stops flynn-host on the instance but leaves the instance running so the logs
// are still available if we need to dump them later.
func (c *Cluster) RemoveHost(id string) error {
inst, err := c.Instances.Get(id)
if err != nil {
return err
}
c.log("removing host", id)
// Clean shutdown requires waiting for that host to unadvertise on discoverd.
// Specifically: Wait for router-api services to disappear to indicate host
// removal (rather than using StreamHostEvents), so that other
// tests won't try to connect to this host via service discovery.
ip := c.defaultInstances[0].IP
events := make(chan *discoverd.Event)
stream, err := c.discoverdClient(ip).Service("router-api").Watch(events)
if err != nil {
return err
}
defer stream.Close()
// ssh into the host and tell the flynn-host daemon to stop
var cmd string
switch c.bc.Backend {
case "libvirt-lxc":
// manually kill containers after stopping flynn-host due to https://github.com/flynn/flynn/issues/1177
cmd = "sudo start-stop-daemon --stop --pidfile /var/run/flynn-host.pid --retry 15 && (virsh -c lxc:/// list --name | xargs -L 1 virsh -c lxc:/// destroy || true)"
}
if err := inst.Run(cmd, nil); err != nil {
return err
}
loop:
for {
select {
case event := <-events:
if event.Kind == discoverd.EventKindDown {
break loop
}
case <-time.After(20 * time.Second):
return fmt.Errorf("timed out waiting for host removal")
}
}
return nil
}
func (c *Cluster) Size() int {
return len(c.Instances)
}
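// startVMs boots count VMs backed by copy-on-write clones of rootFS, records
// them on the cluster and tags them according to the given cluster type.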
func (c *Cluster) startVMs(typ ClusterType, rootFS string, count int, initial bool) ([]*Instance, error) {
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return nil, err
}
instances := make([]*Instance, count)
for i := 0; i < count; i++ {
inst, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "2048",
Cores: 2,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: true},
},
})
if err != nil {
return nil, fmt.Errorf("error creating instance %d: %s", i, err)
}
if err = inst.Start(); err != nil {
return nil, fmt.Errorf("error starting instance %d: %s", i, err)
}
inst.initial = initial
instances[i] = inst
c.Instances = append(c.Instances, inst)
switch typ {
case ClusterTypeDefault:
c.defaultInstances = append(c.defaultInstances, inst)
case ClusterTypeRelease:
c.releaseInstances = append(c.releaseInstances, inst)
}
}
return instances, nil
}
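// startFlynnHost renders the backend-specific flynn-host boot script with the
// instance's ID, IP and initial etcd peers, then runs it on the instance.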
func (c *Cluster) startFlynnHost(inst *Instance, peerInstances []*Instance) error {
tmpl, ok := flynnHostScripts[c.bc.Backend]
if !ok {
return fmt.Errorf("unknown host backend: %s", c.bc.Backend)
}
peers := make([]string, 0, len(peerInstances))
for _, inst := range peerInstances {
if !inst.initial {
continue
}
peers = append(peers, fmt.Sprintf("%s=http://%s:2380", inst.ID, inst.IP))
}
var script bytes.Buffer
data := hostScriptData{
ID: inst.ID,
IP: inst.IP,
Peers: strings.Join(peers, ","),
EtcdProxy: !inst.initial,
}
tmpl.Execute(&script, data)
c.logf("Starting flynn-host on %s [id: %s]\n", inst.IP, inst.ID)
return inst.Run("bash", &Streams{Stdin: &script, Stdout: c.out, Stderr: os.Stderr})
}
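// setup verifies the configured kernel file exists, creates the network bridge
// if needed and initializes the VM manager.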
func (c *Cluster) setup() error {
if _, err := os.Stat(c.bc.Kernel); os.IsNotExist(err) {
return fmt.Errorf("cluster: not a kernel file: %s", c.bc.Kernel)
}
if c.bridge == nil {
var err error
name := "flynnbr." + random.String(5)
c.logf("creating network bridge %s\n", name)
c.bridge, err = createBridge(name, c.bc.Network, c.bc.NatIface)
if err != nil {
return fmt.Errorf("could not create network bridge: %s", err)
}
}
c.vm = NewVMManager(c.bridge)
return nil
}
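// Run runs the given command on the first booted instance of the cluster.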
func (c *Cluster) Run(command string, s *Streams) error {
return c.run(command, s, nil)
}
func (c *Cluster) RunWithEnv(command string, s *Streams, env map[string]string) error {
return c.run(command, s, env)
}
func (c *Cluster) run(command string, s *Streams, env map[string]string) error {
if len(c.Instances) == 0 {
return errors.New("no booted servers in cluster")
}
return c.Instances[0].RunWithEnv(command, s |
build, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel, | random_line_split |
|
electron-oidc.ts | require-imports
import electron = require('electron');
const BrowserWindow = electron.BrowserWindow || electron.remote.BrowserWindow;
const authoritiesToRefresh: Array<string> = [];
const refreshTimeouts: Map<string, any> = new Map();
export default (
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): {
getTokenObject: (authorityUrl: string) => Promise<ITokenObject>;
logout: (tokenObject: ITokenObject, authorityUrl: string) => Promise<boolean>;
startSilentRefreshing: (authorityUrl: string, tokenObject: ITokenObject, refreshCallback: Function) => void;
} => {
function getTokenObjectForAuthorityUrl(authorityUrl): Promise<any> {
return getTokenObject(authorityUrl, config, windowParams);
}
function logoutViaTokenObjectAndAuthorityUrl(tokenObject: ITokenObject, authorityUrl: string): Promise<boolean> {
return logout(tokenObject, authorityUrl, config, windowParams);
}
function refreshTokenViaSilentRefresh(
authorityUrl: string,
tokenObject: ITokenObject,
refreshCallback: Function,
): void {
return startSilentRefreshing(authorityUrl, config, tokenObject, refreshCallback);
}
return {
getTokenObject: getTokenObjectForAuthorityUrl,
logout: logoutViaTokenObjectAndAuthorityUrl,
startSilentRefreshing: refreshTokenViaSilentRefresh,
};
};
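// getTokenObject opens a new BrowserWindow for the authorization request and
// resolves with the tokens parsed from the redirect fragment.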
function getTokenObject(
authorityUrl: string,
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): Promise<{
idToken: string;
accessToken: string;
}> {
// Build the Url Params from the Config.
const urlParams = {
client_id: config.clientId,
redirect_uri: config.redirectUri, | nonce: getRandomString(16),
};
const urlToLoad: string = `${authorityUrl}connect/authorize?${queryString.stringify(urlParams)}`;
return new Promise((resolve: Function, reject: Function): void => {
// Open a new browser window and load the previously constructed url.
const authWindow = new BrowserWindow(windowParams || {useContentSize: true});
authWindow.loadURL(urlToLoad);
authWindow.show();
// Reject the Promise when the user closes the new window.
authWindow.on('closed', (): void => {
reject(new Error('window was closed by user'));
});
/**
* This will trigger every time the new window redirects.
* Important: Not AFTER it redirects but BEFORE.
* This gives us the possibility to intercept the redirect to
* the specified redirect uri, which would lead to faulty behaviour
* due to security aspects in chromium.
*
* If that redirect is about to start, we stop it by preventing the default
* behaviour and instead parse its parameters in the
* "onCallback"-function.
*/
authWindow.webContents.on('will-redirect', (event: ElectronEvent, url: string): void => {
if (url.includes(config.redirectUri)) {
event.preventDefault();
}
redirectCallback(url, authWindow, config, resolve, reject);
});
});
}
// Handle the different callbacks.
function redirectCallback(
url: string,
authWindow: electron.BrowserWindow,
config: IOidcConfig,
resolve: Function,
reject: Function,
): void {
// Parse callback url into its parts.
const urlParts = nodeUrl.parse(url, true);
const href = urlParts.href;
/**
* If there was an error:
* - Reject the promise with the error.
* - Close the window.
*
* If the href includes the callback uri:
* - Load that href in the window.
*
* If the href includes the specified redirect uri:
* - Parse the hash into its parts.
* - Add those parts to a new object.
* - Resolve the promise with this object.
* - Close the window.
*/
if (href === null) {
reject(new Error(`Could not parse url: ${url}`));
authWindow.removeAllListeners('closed');
setImmediate(() => {
authWindow.close();
});
} else if (href.includes('/connect/authorize/callback')) {
authWindow.loadURL(href);
} else if (href.includes(config.redirectUri)) {
const identityParameter = urlParts.hash;
const parameterAsArray = identityParameter.split('&');
if (parameterAsArray[0].includes('login_required')) {
reject(new Error('User is no longer logged in.'));
return;
}
const idToken = parameterAsArray[0].split('=')[1];
const accessToken = parameterAsArray[1].split('=')[1];
const expiresIn = parameterAsArray.find((parameter) => parameter.startsWith('expires_in=')).split('=')[1];
const tokenObject = {
idToken,
accessToken,
expiresIn,
};
resolve(tokenObject);
authWindow.removeAllListeners('closed');
setImmediate(() => {
authWindow.close();
});
}
}
function logout(
tokenObject: ITokenObject,
authorityUrl: string,
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): Promise<boolean> {
const urlParams = {
id_token_hint: tokenObject.userId,
post_logout_redirect_uri: config.logoutRedirectUri,
};
const endSessionUrl = `${authorityUrl}connect/endsession?${queryString.stringify(urlParams)}`;
stopSilentRefreshing(authorityUrl);
return new Promise(
async (resolve: Function): Promise<void> => {
const response: fetch.Response = await fetch(endSessionUrl);
const logoutWindow = new BrowserWindow(windowParams || {useContentSize: true});
logoutWindow.webContents.on('will-navigate', (event, url) => {
if (url.includes(config.logoutRedirectUri)) {
event.preventDefault();
resolve(true);
logoutWindow.close();
}
});
logoutWindow.on('closed', () => {
resolve(true);
});
logoutWindow.loadURL(response.url);
logoutWindow.show();
},
);
}
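// silentRefresh waits for roughly 75% of the token lifetime, then repeats the
// authorization request in a hidden window with prompt=none and passes the
// fresh tokens to refreshCallback before scheduling the next refresh.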
async function silentRefresh(
authorityUrl: string,
config: IOidcConfig,
tokenObject: ITokenObject,
refreshCallback: Function,
): Promise<void> {
// Token refresh factor is set as described at https://github.com/manfredsteyer/angular-oauth2-oidc/blob/master/docs-src/silent-refresh.md#automatically-refreshing-a-token-when-before-it-expires-code-flow-and-implicit-flow
const tokenRefreshFactor = 0.75;
const secondsInMilisecondsFactor = 1000;
const tokenRefreshInterval = tokenObject.expiresIn * tokenRefreshFactor * secondsInMilisecondsFactor;
const timeout = wait(tokenRefreshInterval);
refreshTimeouts.set(authorityUrl, timeout);
await timeout;
if (!authoritiesToRefresh.includes(authorityUrl)) {
return;
}
// Build the Url Params from the Config.
const urlParams = {
client_id: config.clientId,
redirect_uri: config.redirectUri,
response_type: config.responseType,
scope: config.scope,
state: getRandomString(16),
nonce: getRandomString(16),
prompt: 'none',
};
const urlToLoad: string = `${authorityUrl}connect/authorize?${queryString.stringify(urlParams)}`;
// Open a new browser window and load the previously constructed url.
const authWindow = new BrowserWindow({show: false});
authWindow.loadURL(urlToLoad);
// Throw an error if the user closes the new window.
authWindow.on('closed', (): void => {
throw new Error('window was closed by user');
});
/**
* This will trigger every time the new window redirects.
* Important: Not AFTER it redirects but BEFORE.
* This gives us the possibility to intercept the redirect to
* the specified redirect uri, which would lead to faulty behaviour
* due to security aspects in chromium.
*
* If that redirect is about to start, we stop it by preventing the default
* behaviour and instead parse its parameters in the
* "onCallback"-function.
*/
authWindow.webContents.on('will-redirect', (event: ElectronEvent, url: string): void => {
if (url.includes(config.redirectUri)) {
event.preventDefault();
}
const redirectCallbackResolved = (token: ITokenObject): void => {
refreshCallback(token);
silentRefresh(authorityUrl, config, tokenObject, refreshCallback);
};
const redirectCallbackRejected = (error: Error): void => {
if (error.message !== 'User is no longer logged in.') {
throw error;
}
stopSilentRefreshing(authorityUrl);
};
redirectCallback(url, authWindow, config, redirectCallbackResolved, redirectCallbackRejected);
});
}
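// startSilentRefreshing marks the authority for background refreshing and
// starts the silent refresh loop for it.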
function startSilentRefreshing(
authorityUrl: string,
config: IOidcConfig,
tokenObject: ITokenObject,
refreshCallback: Function,
): void {
authoritiesToRefresh.push(authorityUrl);
silentRefresh(authorityUrl, config, tokenObject, refreshCallback);
}
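// stopSilentRefreshing cancels any pending refresh timeout and removes the
// authority from the list of authorities being refreshed.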
function stopSilentRefreshing(authorityUrl: string): void {
if (refreshTimeouts.has(authorityUrl)) {
refreshTimeouts.get(authorityUrl).cancel();
refreshTimeouts.delete(authorityUrl);
}
if (authoritiesToRefresh.includes(authorityUrl)) {
const authorityToRemove = authoritiesToRefresh.findIndex((authority) => | response_type: config.responseType,
scope: config.scope,
state: getRandomString(16), | random_line_split |
electron-oidc.ts | require-imports
import electron = require('electron');
const BrowserWindow = electron.BrowserWindow || electron.remote.BrowserWindow;
const authoritiesToRefresh: Array<string> = [];
const refreshTimeouts: Map<string, any> = new Map();
export default (
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): {
getTokenObject: (authorityUrl: string) => Promise<ITokenObject>;
logout: (tokenObject: ITokenObject, authorityUrl: string) => Promise<boolean>;
startSilentRefreshing: (authorityUrl: string, tokenObject: ITokenObject, refreshCallback: Function) => void;
} => {
function getTokenObjectForAuthorityUrl(authorityUrl): Promise<any> {
return getTokenObject(authorityUrl, config, windowParams);
}
function logoutViaTokenObjectAndAuthorityUrl(tokenObject: ITokenObject, authorityUrl: string): Promise<boolean> {
return logout(tokenObject, authorityUrl, config, windowParams);
}
function refreshTokenViaSilentRefresh(
authorityUrl: string,
tokenObject: ITokenObject,
refreshCallback: Function,
): void {
return startSilentRefreshing(authorityUrl, config, tokenObject, refreshCallback);
}
return {
getTokenObject: getTokenObjectForAuthorityUrl,
logout: logoutViaTokenObjectAndAuthorityUrl,
startSilentRefreshing: refreshTokenViaSilentRefresh,
};
};
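// getTokenObject opens a new BrowserWindow for the authorization request and
// resolves with the tokens parsed from the redirect fragment.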
function getTokenObject(
authorityUrl: string,
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): Promise<{
idToken: string;
accessToken: string;
}> {
// Build the Url Params from the Config.
const urlParams = {
client_id: config.clientId,
redirect_uri: config.redirectUri,
response_type: config.responseType,
scope: config.scope,
state: getRandomString(16),
nonce: getRandomString(16),
};
const urlToLoad: string = `${authorityUrl}connect/authorize?${queryString.stringify(urlParams)}`;
return new Promise((resolve: Function, reject: Function): void => {
// Open a new browser window and load the previously constructed url.
const authWindow = new BrowserWindow(windowParams || {useContentSize: true});
authWindow.loadURL(urlToLoad);
authWindow.show();
// Reject the Promise when the user closes the new window.
authWindow.on('closed', (): void => {
reject(new Error('window was closed by user'));
});
/**
* This will trigger every time the new window redirects.
* Important: Not AFTER it redirects but BEFORE.
* This gives us the possibility to intercept the redirect to
* the specified redirect uri, which would lead to faulty behaviour
* due to security aspects in chromium.
*
* If that redirect is about to start, we stop it by preventing the default
* behaviour and instead parse its parameters in the
* "onCallback"-function.
*/
authWindow.webContents.on('will-redirect', (event: ElectronEvent, url: string): void => {
if (url.includes(config.redirectUri)) {
event.preventDefault();
}
redirectCallback(url, authWindow, config, resolve, reject);
});
});
}
// Handle the different callbacks.
function redirectCallback(
url: string,
authWindow: electron.BrowserWindow,
config: IOidcConfig,
resolve: Function,
reject: Function,
): void {
// Parse callback url into its parts.
const urlParts = nodeUrl.parse(url, true);
const href = urlParts.href;
/**
* If there was an error:
* - Reject the promise with the error.
* - Close the window.
*
* If the href includes the callback uri:
* - Load that href in the window.
*
* If the href includes the specified redirect uri:
* - Parse the hash into its parts.
* - Add those parts to a new object.
* - Resolve the promise with this object.
* - Close the window.
*/
if (href === null) {
reject(new Error(`Could not parse url: ${url}`));
authWindow.removeAllListeners('closed');
setImmediate(() => {
authWindow.close();
});
} else if (href.includes('/connect/authorize/callback')) {
authWindow.loadURL(href);
} else if (href.includes(config.redirectUri)) {
const identityParameter = urlParts.hash;
const parameterAsArray = identityParameter.split('&');
if (parameterAsArray[0].includes('login_required')) {
reject(new Error('User is no longer logged in.'));
return;
}
const idToken = parameterAsArray[0].split('=')[1];
const accessToken = parameterAsArray[1].split('=')[1];
const expiresIn = parameterAsArray.find((parameter) => parameter.startsWith('expires_in=')).split('=')[1];
const tokenObject = {
idToken,
accessToken,
expiresIn,
};
resolve(tokenObject);
authWindow.removeAllListeners('closed');
setImmediate(() => {
authWindow.close();
});
}
}
function | (
tokenObject: ITokenObject,
authorityUrl: string,
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): Promise<boolean> {
const urlParams = {
id_token_hint: tokenObject.userId,
post_logout_redirect_uri: config.logoutRedirectUri,
};
const endSessionUrl = `${authorityUrl}connect/endsession?${queryString.stringify(urlParams)}`;
stopSilentRefreshing(authorityUrl);
return new Promise(
async (resolve: Function): Promise<void> => {
const response: fetch.Response = await fetch(endSessionUrl);
const logoutWindow = new BrowserWindow(windowParams || {useContentSize: true});
logoutWindow.webContents.on('will-navigate', (event, url) => {
if (url.includes(config.logoutRedirectUri)) {
event.preventDefault();
resolve(true);
logoutWindow.close();
}
});
logoutWindow.on('closed', () => {
resolve(true);
});
logoutWindow.loadURL(response.url);
logoutWindow.show();
},
);
}
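// silentRefresh waits for roughly 75% of the token lifetime, then repeats the
// authorization request in a hidden window with prompt=none and passes the
// fresh tokens to refreshCallback before scheduling the next refresh.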
async function silentRefresh(
authorityUrl: string,
config: IOidcConfig,
tokenObject: ITokenObject,
refreshCallback: Function,
): Promise<void> {
// Token refresh factor is set as described at https://github.com/manfredsteyer/angular-oauth2-oidc/blob/master/docs-src/silent-refresh.md#automatically-refreshing-a-token-when-before-it-expires-code-flow-and-implicit-flow
const tokenRefreshFactor = 0.75;
const secondsInMilisecondsFactor = 1000;
const tokenRefreshInterval = tokenObject.expiresIn * tokenRefreshFactor * secondsInMilisecondsFactor;
const timeout = wait(tokenRefreshInterval);
refreshTimeouts.set(authorityUrl, timeout);
await timeout;
if (!authoritiesToRefresh.includes(authorityUrl)) {
return;
}
// Build the Url Params from the Config.
const urlParams = {
client_id: config.clientId,
redirect_uri: config.redirectUri,
response_type: config.responseType,
scope: config.scope,
state: getRandomString(16),
nonce: getRandomString(16),
prompt: 'none',
};
const urlToLoad: string = `${authorityUrl}connect/authorize?${queryString.stringify(urlParams)}`;
// Open a new browser window and load the previously constructed url.
const authWindow = new BrowserWindow({show: false});
authWindow.loadURL(urlToLoad);
// Throw an error if the user closes the new window.
authWindow.on('closed', (): void => {
throw new Error('window was closed by user');
});
/**
* This will trigger every time the new window redirects.
* Important: Not AFTER it redirects but BEFORE.
* This gives us the possibility to intercept the redirect to
* the specified redirect uri, which would lead to faulty behaviour
* due to security aspects in chromium.
*
* If that redirect is about to start, we stop it by preventing the default
* behaviour and instead parse its parameters in the
* "onCallback"-function.
*/
authWindow.webContents.on('will-redirect', (event: ElectronEvent, url: string): void => {
if (url.includes(config.redirectUri)) {
event.preventDefault();
}
const redirectCallbackResolved = (token: ITokenObject): void => {
refreshCallback(token);
silentRefresh(authorityUrl, config, tokenObject, refreshCallback);
};
const redirectCallbackRejected = (error: Error): void => {
if (error.message !== 'User is no longer logged in.') {
throw error;
}
stopSilentRefreshing(authorityUrl);
};
redirectCallback(url, authWindow, config, redirectCallbackResolved, redirectCallbackRejected);
});
}
function startSilentRefreshing(
authorityUrl: string,
config: IOidcConfig,
tokenObject: ITokenObject,
refreshCallback: Function,
): void {
authoritiesToRefresh.push(authorityUrl);
silentRefresh(authorityUrl, config, tokenObject, refreshCallback);
}
function stopSilentRefreshing(authorityUrl: string): void {
if (refreshTimeouts.has(authorityUrl)) {
refreshTimeouts.get(authorityUrl).cancel();
refreshTimeouts.delete(authorityUrl);
}
if (authoritiesToRefresh.includes(authorityUrl)) {
const authorityToRemove = authoritiesToRefresh.findIndex((authority) | logout | identifier_name |
electron-oidc.ts | require-imports
import electron = require('electron');
const BrowserWindow = electron.BrowserWindow || electron.remote.BrowserWindow;
const authoritiesToRefresh: Array<string> = [];
const refreshTimeouts: Map<string, any> = new Map();
export default (
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): {
getTokenObject: (authorityUrl: string) => Promise<ITokenObject>;
logout: (tokenObject: ITokenObject, authorityUrl: string) => Promise<boolean>;
startSilentRefreshing: (authorityUrl: string, tokenObject: ITokenObject, refreshCallback: Function) => void;
} => {
function getTokenObjectForAuthorityUrl(authorityUrl): Promise<any> {
return getTokenObject(authorityUrl, config, windowParams);
}
function logoutViaTokenObjectAndAuthorityUrl(tokenObject: ITokenObject, authorityUrl: string): Promise<boolean> {
return logout(tokenObject, authorityUrl, config, windowParams);
}
function refreshTokenViaSilentRefresh(
authorityUrl: string,
tokenObject: ITokenObject,
refreshCallback: Function,
): void {
return startSilentRefreshing(authorityUrl, config, tokenObject, refreshCallback);
}
return {
getTokenObject: getTokenObjectForAuthorityUrl,
logout: logoutViaTokenObjectAndAuthorityUrl,
startSilentRefreshing: refreshTokenViaSilentRefresh,
};
};
function getTokenObject(
authorityUrl: string,
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): Promise<{
idToken: string;
accessToken: string;
}> {
// Build the Url Params from the Config.
const urlParams = {
client_id: config.clientId,
redirect_uri: config.redirectUri,
response_type: config.responseType,
scope: config.scope,
state: getRandomString(16),
nonce: getRandomString(16),
};
const urlToLoad: string = `${authorityUrl}connect/authorize?${queryString.stringify(urlParams)}`;
return new Promise((resolve: Function, reject: Function): void => {
// Open a new browser window and load the previously constructed url.
const authWindow = new BrowserWindow(windowParams || {useContentSize: true});
authWindow.loadURL(urlToLoad);
authWindow.show();
// Reject the Promise when the user closes the new window.
authWindow.on('closed', (): void => {
reject(new Error('window was closed by user'));
});
/**
* This will trigger every time the new window redirects.
* Important: Not AFTER it redirects but BEFORE.
* This gives us the possibility to intercept the redirect to
* the specified redirect uri, which would lead to faulty behaviour
* due to security aspects in chromium.
*
* If that redirect is about to start, we stop it by preventing the default
* behaviour and instead parse its parameters in the
* "onCallback"-function.
*/
authWindow.webContents.on('will-redirect', (event: ElectronEvent, url: string): void => {
if (url.includes(config.redirectUri)) {
event.preventDefault();
}
redirectCallback(url, authWindow, config, resolve, reject);
});
});
}
// Handle the different callbacks.
function redirectCallback(
url: string,
authWindow: electron.BrowserWindow,
config: IOidcConfig,
resolve: Function,
reject: Function,
): void {
// Parse callback url into its parts.
const urlParts = nodeUrl.parse(url, true);
const href = urlParts.href;
/**
* If there was an error:
* - Reject the promise with the error.
* - Close the window.
*
* If the href includes the callback uri:
* - Load that href in the window.
*
* If the href includes the specified redirect uri:
* - Parse the hash into its parts.
* - Add those parts to a new object.
* - Resolve the promise with this object.
* - Close the window.
*/
if (href === null) {
reject(new Error(`Could not parse url: ${url}`));
authWindow.removeAllListeners('closed');
setImmediate(() => {
authWindow.close();
});
} else if (href.includes('/connect/authorize/callback')) {
authWindow.loadURL(href);
} else if (href.includes(config.redirectUri)) {
const identityParameter = urlParts.hash;
const parameterAsArray = identityParameter.split('&');
if (parameterAsArray[0].includes('login_required')) {
reject(new Error('User is no longer logged in.'));
return;
}
const idToken = parameterAsArray[0].split('=')[1];
const accessToken = parameterAsArray[1].split('=')[1];
const expiresIn = parameterAsArray.find((parameter) => parameter.startsWith('expires_in=')).split('=')[1];
const tokenObject = {
idToken,
accessToken,
expiresIn,
};
resolve(tokenObject);
authWindow.removeAllListeners('closed');
setImmediate(() => {
authWindow.close();
});
}
}
function logout(
tokenObject: ITokenObject,
authorityUrl: string,
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): Promise<boolean> {
const urlParams = {
id_token_hint: tokenObject.userId,
post_logout_redirect_uri: config.logoutRedirectUri,
};
const endSessionUrl = `${authorityUrl}connect/endsession?${queryString.stringify(urlParams)}`;
stopSilentRefreshing(authorityUrl);
return new Promise(
async (resolve: Function): Promise<void> => {
const response: fetch.Response = await fetch(endSessionUrl);
const logoutWindow = new BrowserWindow(windowParams || {useContentSize: true});
logoutWindow.webContents.on('will-navigate', (event, url) => {
if (url.includes(config.logoutRedirectUri)) {
event.preventDefault();
resolve(true);
logoutWindow.close();
}
});
logoutWindow.on('closed', () => {
resolve(true);
});
logoutWindow.loadURL(response.url);
logoutWindow.show();
},
);
}
async function silentRefresh(
authorityUrl: string,
config: IOidcConfig,
tokenObject: ITokenObject,
refreshCallback: Function,
): Promise<void> | state: getRandomString(16),
nonce: getRandomString(16),
prompt: 'none',
};
const urlToLoad: string = `${authorityUrl}connect/authorize?${queryString.stringify(urlParams)}`;
// Open a new browser window and load the previously constructed url.
const authWindow = new BrowserWindow({show: false});
authWindow.loadURL(urlToLoad);
// Throw an error if the user closes the new window.
authWindow.on('closed', (): void => {
throw new Error('window was closed by user');
});
/**
* This will trigger every time the new window redirects.
* Important: Not AFTER it redirects but BEFORE.
* This gives us the possibility to intercept the redirect to
* the specified redirect uri, which would lead to faulty behaviour
* due to security aspects in chromium.
*
* If that redirect is about to start, we stop it by preventing the default
* behaviour and instead parse its parameters in the
* "onCallback"-function.
*/
authWindow.webContents.on('will-redirect', (event: ElectronEvent, url: string): void => {
if (url.includes(config.redirectUri)) {
event.preventDefault();
}
const redirectCallbackResolved = (token: ITokenObject): void => {
refreshCallback(token);
silentRefresh(authorityUrl, config, tokenObject, refreshCallback);
};
const redirectCallbackRejected = (error: Error): void => {
if (error.message !== 'User is no longer logged in.') {
throw error;
}
stopSilentRefreshing(authorityUrl);
};
redirectCallback(url, authWindow, config, redirectCallbackResolved, redirectCallbackRejected);
});
}
function startSilentRefreshing(
authorityUrl: string,
config: IOidcConfig,
tokenObject: ITokenObject,
refreshCallback: Function,
): void {
authoritiesToRefresh.push(authorityUrl);
silentRefresh(authorityUrl, config, tokenObject, refreshCallback);
}
function stopSilentRefreshing(authorityUrl: string): void {
if (refreshTimeouts.has(authorityUrl)) {
refreshTimeouts.get(authorityUrl).cancel();
refreshTimeouts.delete(authorityUrl);
}
if (authoritiesToRefresh.includes(authorityUrl)) {
const authorityToRemove = authoritiesToRefresh.findIndex((authority) | {
// Token refresh factor is set as described at https://github.com/manfredsteyer/angular-oauth2-oidc/blob/master/docs-src/silent-refresh.md#automatically-refreshing-a-token-when-before-it-expires-code-flow-and-implicit-flow
const tokenRefreshFactor = 0.75;
const secondsInMilisecondsFactor = 1000;
const tokenRefreshInterval = tokenObject.expiresIn * tokenRefreshFactor * secondsInMilisecondsFactor;
const timeout = wait(tokenRefreshInterval);
refreshTimeouts.set(authorityUrl, timeout);
await timeout;
if (!authoritiesToRefresh.includes(authorityUrl)) {
return;
}
// Build the Url Params from the Config.
const urlParams = {
client_id: config.clientId,
redirect_uri: config.redirectUri,
response_type: config.responseType,
scope: config.scope, | identifier_body |
electron-oidc.ts | require-imports
import electron = require('electron');
const BrowserWindow = electron.BrowserWindow || electron.remote.BrowserWindow;
const authoritiesToRefresh: Array<string> = [];
const refreshTimeouts: Map<string, any> = new Map();
export default (
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): {
getTokenObject: (authorityUrl: string) => Promise<ITokenObject>;
logout: (tokenObject: ITokenObject, authorityUrl: string) => Promise<boolean>;
startSilentRefreshing: (authorityUrl: string, tokenObject: ITokenObject, refreshCallback: Function) => void;
} => {
function getTokenObjectForAuthorityUrl(authorityUrl): Promise<any> {
return getTokenObject(authorityUrl, config, windowParams);
}
function logoutViaTokenObjectAndAuthorityUrl(tokenObject: ITokenObject, authorityUrl: string): Promise<boolean> {
return logout(tokenObject, authorityUrl, config, windowParams);
}
function refreshTokenViaSilentRefresh(
authorityUrl: string,
tokenObject: ITokenObject,
refreshCallback: Function,
): void {
return startSilentRefreshing(authorityUrl, config, tokenObject, refreshCallback);
}
return {
getTokenObject: getTokenObjectForAuthorityUrl,
logout: logoutViaTokenObjectAndAuthorityUrl,
startSilentRefreshing: refreshTokenViaSilentRefresh,
};
};
function getTokenObject(
authorityUrl: string,
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): Promise<{
idToken: string;
accessToken: string;
}> {
// Build the Url Params from the Config.
const urlParams = {
client_id: config.clientId,
redirect_uri: config.redirectUri,
response_type: config.responseType,
scope: config.scope,
state: getRandomString(16),
nonce: getRandomString(16),
};
const urlToLoad: string = `${authorityUrl}connect/authorize?${queryString.stringify(urlParams)}`;
return new Promise((resolve: Function, reject: Function): void => {
// Open a new browser window and load the previously constructed url.
const authWindow = new BrowserWindow(windowParams || {useContentSize: true});
authWindow.loadURL(urlToLoad);
authWindow.show();
// Reject the Promise when the user closes the new window.
authWindow.on('closed', (): void => {
reject(new Error('window was closed by user'));
});
/**
* This will trigger every time the new window redirects.
* Important: Not AFTER it redirects but BEFORE.
* This gives us the possibility to intercept the redirect to
* the specified redirect uri, which would lead to faulty behaviour
* due to security aspects in chromium.
*
* If that redirect is about to start, we stop it by preventing the default
* behaviour and instead parse its parameters in the
* "onCallback"-function.
*/
authWindow.webContents.on('will-redirect', (event: ElectronEvent, url: string): void => {
if (url.includes(config.redirectUri)) {
event.preventDefault();
}
redirectCallback(url, authWindow, config, resolve, reject);
});
});
}
// Handle the different callbacks.
function redirectCallback(
url: string,
authWindow: electron.BrowserWindow,
config: IOidcConfig,
resolve: Function,
reject: Function,
): void {
// Parse callback url into its parts.
const urlParts = nodeUrl.parse(url, true);
const href = urlParts.href;
/**
* If there was an error:
* - Reject the promise with the error.
* - Close the window.
*
* If the href includes the callback uri:
* - Load that href in the window.
*
* If the href includes the specified redirect uri:
* - Parse the hash into its parts.
* - Add those parts to a new object.
* - Resolve the promise with this object.
* - Close the window.
*/
if (href === null) {
reject(new Error(`Could not parse url: ${url}`));
authWindow.removeAllListeners('closed');
setImmediate(() => {
authWindow.close();
});
} else if (href.includes('/connect/authorize/callback')) {
authWindow.loadURL(href);
} else if (href.includes(config.redirectUri)) {
const identityParameter = urlParts.hash;
const parameterAsArray = identityParameter.split('&');
if (parameterAsArray[0].includes('login_required')) {
reject(new Error('User is no longer logged in.'));
return;
}
const idToken = parameterAsArray[0].split('=')[1];
const accessToken = parameterAsArray[1].split('=')[1];
const expiresIn = parameterAsArray.find((parameter) => parameter.startsWith('expires_in=')).split('=')[1];
const tokenObject = {
idToken,
accessToken,
expiresIn,
};
resolve(tokenObject);
authWindow.removeAllListeners('closed');
setImmediate(() => {
authWindow.close();
});
}
}
function logout(
tokenObject: ITokenObject,
authorityUrl: string,
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): Promise<boolean> {
const urlParams = {
id_token_hint: tokenObject.userId,
post_logout_redirect_uri: config.logoutRedirectUri,
};
const endSessionUrl = `${authorityUrl}connect/endsession?${queryString.stringify(urlParams)}`;
stopSilentRefreshing(authorityUrl);
return new Promise(
async (resolve: Function): Promise<void> => {
const response: fetch.Response = await fetch(endSessionUrl);
const logoutWindow = new BrowserWindow(windowParams || {useContentSize: true});
logoutWindow.webContents.on('will-navigate', (event, url) => {
if (url.includes(config.logoutRedirectUri)) {
event.preventDefault();
resolve(true);
logoutWindow.close();
}
});
logoutWindow.on('closed', () => {
resolve(true);
});
logoutWindow.loadURL(response.url);
logoutWindow.show();
},
);
}
async function silentRefresh(
authorityUrl: string,
config: IOidcConfig,
tokenObject: ITokenObject,
refreshCallback: Function,
): Promise<void> {
// Token refresh factor is set as described at https://github.com/manfredsteyer/angular-oauth2-oidc/blob/master/docs-src/silent-refresh.md#automatically-refreshing-a-token-when-before-it-expires-code-flow-and-implicit-flow
const tokenRefreshFactor = 0.75;
const secondsInMilisecondsFactor = 1000;
const tokenRefreshInterval = tokenObject.expiresIn * tokenRefreshFactor * secondsInMilisecondsFactor;
const timeout = wait(tokenRefreshInterval);
refreshTimeouts.set(authorityUrl, timeout);
await timeout;
if (!authoritiesToRefresh.includes(authorityUrl)) {
return;
}
// Build the Url Params from the Config.
const urlParams = {
client_id: config.clientId,
redirect_uri: config.redirectUri,
response_type: config.responseType,
scope: config.scope,
state: getRandomString(16),
nonce: getRandomString(16),
prompt: 'none',
};
const urlToLoad: string = `${authorityUrl}connect/authorize?${queryString.stringify(urlParams)}`;
// Open a new browser window and load the previously constructed url.
const authWindow = new BrowserWindow({show: false});
authWindow.loadURL(urlToLoad);
// Throw an error if the user closes the new window.
authWindow.on('closed', (): void => {
throw new Error('window was closed by user');
});
/**
* This will trigger every time the new window redirects.
* Important: Not AFTER it redirects but BEFORE.
* This gives us the possibility to intercept the redirect to
* the specified redirect uri, which would lead to faulty behaviour
* due to security aspects in chromium.
*
* If that redirect is about to start, we stop it by preventing the default
* behaviour and instead parse its parameters in the
* "onCallback"-function.
*/
authWindow.webContents.on('will-redirect', (event: ElectronEvent, url: string): void => {
if (url.includes(config.redirectUri)) {
event.preventDefault();
}
const redirectCallbackResolved = (token: ITokenObject): void => {
refreshCallback(token);
silentRefresh(authorityUrl, config, tokenObject, refreshCallback);
};
const redirectCallbackRejected = (error: Error): void => {
if (error.message !== 'User is no longer logged in.') |
stopSilentRefreshing(authorityUrl);
};
redirectCallback(url, authWindow, config, redirectCallbackResolved, redirectCallbackRejected);
});
}
function startSilentRefreshing(
authorityUrl: string,
config: IOidcConfig,
tokenObject: ITokenObject,
refreshCallback: Function,
): void {
authoritiesToRefresh.push(authorityUrl);
silentRefresh(authorityUrl, config, tokenObject, refreshCallback);
}
function stopSilentRefreshing(authorityUrl: string): void {
if (refreshTimeouts.has(authorityUrl)) {
refreshTimeouts.get(authorityUrl).cancel();
refreshTimeouts.delete(authorityUrl);
}
if (authoritiesToRefresh.includes(authorityUrl)) {
const authorityToRemove = authoritiesToRefresh.findIndex((authority | {
throw error;
} | conditional_block |
clusterapi_utils.go | BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterapi
import (
"fmt"
"strconv"
"strings"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
const (
cpuKey = "capacity.cluster-autoscaler.kubernetes.io/cpu"
memoryKey = "capacity.cluster-autoscaler.kubernetes.io/memory"
diskCapacityKey = "capacity.cluster-autoscaler.kubernetes.io/ephemeral-disk"
gpuTypeKey = "capacity.cluster-autoscaler.kubernetes.io/gpu-type"
gpuCountKey = "capacity.cluster-autoscaler.kubernetes.io/gpu-count"
maxPodsKey = "capacity.cluster-autoscaler.kubernetes.io/maxPods"
taintsKey = "capacity.cluster-autoscaler.kubernetes.io/taints"
labelsKey = "capacity.cluster-autoscaler.kubernetes.io/labels"
)
var (
// clusterNameLabel is the label applied to objects (Machine, MachineSet, MachineDeployment)
// to identify which cluster they are owned by. Because the label can be
// affected by the CAPI_GROUP environment variable, it is initialized here.
clusterNameLabel = getClusterNameLabel()
// errMissingMinAnnotation is the error returned when a
// machine set does not have an annotation keyed by
// nodeGroupMinSizeAnnotationKey.
errMissingMinAnnotation = errors.New("missing min annotation")
// errMissingMaxAnnotation is the error returned when a
// machine set does not have an annotation keyed by
// nodeGroupMaxSizeAnnotationKey.
errMissingMaxAnnotation = errors.New("missing max annotation")
// errInvalidMinAnnotation is the error returned when a
// machine set has a non-integral min annotation value.
errInvalidMinAnnotation = errors.New("invalid min annotation")
// errInvalidMaxAnnotation is the error returned when a
// machine set has a non-integral max annotation value.
errInvalidMaxAnnotation = errors.New("invalid max annotation")
// machineDeleteAnnotationKey is the annotation used by cluster-api to indicate
// that a machine should be deleted. Because this key can be affected by the
// CAPI_GROUP env variable, it is initialized here.
machineDeleteAnnotationKey = getMachineDeleteAnnotationKey()
// machineAnnotationKey is the annotation used by the cluster-api on Node objects
// to specify the name of the related Machine object. Because this can be affected
// by the CAPI_GROUP env variable, it is initialized here.
machineAnnotationKey = getMachineAnnotationKey()
// nodeGroupMinSizeAnnotationKey and nodeGroupMaxSizeAnnotationKey are the keys
// used in MachineSet and MachineDeployment annotations to specify the limits
// for the node group. Because the keys can be affected by the CAPI_GROUP env
// variable, they are initialized here.
nodeGroupMinSizeAnnotationKey = getNodeGroupMinSizeAnnotationKey()
nodeGroupMaxSizeAnnotationKey = getNodeGroupMaxSizeAnnotationKey()
zeroQuantity = resource.MustParse("0")
)
type normalizedProviderID string
// minSize returns the minimum value encoded in the annotations keyed
// by nodeGroupMinSizeAnnotationKey. Returns errMissingMinAnnotation
// if the annotation doesn't exist or errInvalidMinAnnotation if the
// value is not of type int.
func minSize(annotations map[string]string) (int, error) {
val, found := annotations[nodeGroupMinSizeAnnotationKey]
if !found {
return 0, errMissingMinAnnotation
}
i, err := strconv.Atoi(val)
if err != nil {
return 0, errors.Wrapf(err, "%s", errInvalidMinAnnotation)
}
return i, nil
}
// maxSize returns the maximum value encoded in the annotations keyed
// by nodeGroupMaxSizeAnnotationKey. Returns errMissingMaxAnnotation
// if the annotation doesn't exist or errInvalidMaxAnnotation if the
// value is not of type int.
func maxSize(annotations map[string]string) (int, error) {
val, found := annotations[nodeGroupMaxSizeAnnotationKey]
if !found {
return 0, errMissingMaxAnnotation
}
i, err := strconv.Atoi(val)
if err != nil {
return 0, errors.Wrapf(err, "%s", errInvalidMaxAnnotation)
}
return i, nil
}
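// parseScalingBounds returns the min and max sizes encoded in the annotations,
// treating missing annotations as zero and rejecting negative or inverted
// bounds.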
func parseScalingBounds(annotations map[string]string) (int, int, error) {
minSize, err := minSize(annotations)
if err != nil && err != errMissingMinAnnotation {
return 0, 0, err
}
if minSize < 0 {
return 0, 0, errInvalidMinAnnotation
}
maxSize, err := maxSize(annotations)
if err != nil && err != errMissingMaxAnnotation {
return 0, 0, err
}
if maxSize < 0 {
return 0, 0, errInvalidMaxAnnotation
}
if maxSize < minSize {
return 0, 0, errInvalidMaxAnnotation
}
return minSize, maxSize, nil
}
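// getOwnerForKind returns a copy of the first owner reference on u with the
// given kind, or nil if there is none.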
func getOwnerForKind(u *unstructured.Unstructured, kind string) *metav1.OwnerReference {
if u != nil {
for _, ref := range u.GetOwnerReferences() {
if ref.Kind == kind && ref.Name != "" {
return ref.DeepCopy()
}
}
}
return nil
}
func machineOwnerRef(machine *unstructured.Unstructured) *metav1.OwnerReference {
return getOwnerForKind(machine, machineSetKind)
}
func machineSetOwnerRef(machineSet *unstructured.Unstructured) *metav1.OwnerReference {
return getOwnerForKind(machineSet, machineDeploymentKind)
}
func machineSetHasMachineDeploymentOwnerRef(machineSet *unstructured.Unstructured) bool {
return machineSetOwnerRef(machineSet) != nil
}
// normalizedProviderString splits s on '/' returning everything after
// the last '/'.
func normalizedProviderString(s string) normalizedProviderID {
split := strings.Split(s, "/")
return normalizedProviderID(split[len(split)-1])
}
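// parseKey parses the resource quantity stored under key, returning a zero
// quantity if the annotation is absent or empty.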
func parseKey(annotations map[string]string, key string) (resource.Quantity, error) {
if val, exists := annotations[key]; exists && val != "" {
return resource.ParseQuantity(val)
}
return zeroQuantity.DeepCopy(), nil
}
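// parseIntKey is like parseKey but requires the annotation value to be a plain
// integer.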
func parseIntKey(annotations map[string]string, key string) (resource.Quantity, error) {
if val, exists := annotations[key]; exists && val != "" {
valInt, err := strconv.ParseInt(val, 10, 0)
if err != nil {
return zeroQuantity.DeepCopy(), fmt.Errorf("value %q from annotation %q expected to be an integer: %v", val, key, err)
}
return *resource.NewQuantity(valInt, resource.DecimalSI), nil
}
return zeroQuantity.DeepCopy(), nil
}
func | (annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, cpuKey)
}
func parseMemoryCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, memoryKey)
}
func parseEphemeralDiskCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, diskCapacityKey)
}
func parseGPUCount(annotations map[string]string) (resource.Quantity, error) {
return parseIntKey(annotations, gpuCountKey)
}
// The GPU type is not currently considered by the autoscaler when planning
// expansion, but most likely will be in the future. This method is being added
// in expectation of that arrival.
// see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/utils/gpu/gpu.go
func parseGPUType(annotations map[string]string) string {
if val, found := annotations[gpuTypeKey]; found {
return val
}
return ""
}
func parseMaxPodsCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseIntKey(annotations, maxPodsKey)
}
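// clusterNameFromResource returns the name of the cluster that owns r,
// preferring spec.clusterName and falling back to the cluster name label.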
func clusterNameFromResource(r *unstructured.Unstructured) string {
// Use Spec.ClusterName if defined (only available on v1alpha3+ types)
clusterName, found, err := unstructured.NestedString(r.Object, "spec", "clusterName")
if err != nil {
return ""
}
if found {
return clusterName
}
// Fallback to value of clusterNameLabel
if clusterName, ok := r.GetLabels()[clusterNameLabel]; ok {
return clusterName
}
return ""
}
// getNodeGroupMinSizeAnnotationKey returns the key that is used for the
// node group minimum size annotation. This function is needed because the user can
// change the default group name by using the CAPI_GROUP environment variable.
func getNodeGroupMinSizeAnnotationKey() string {
key := fmt.Sprintf("%s/cluster-api-autoscaler-node-group-min-size", getCAPIGroup())
return key
}
// getNodeGroupMaxSizeAnnotationKey returns the key that is used for the
// node group maximum size annotation. This function is needed because the user can
// change the default group name by using the CAPI_GROUP environment variable.
func getNodeGroupMaxSizeAnnotationKey() string {
key := fmt.Sprintf("%s/cluster-api-autoscaler-node-group-max-size", getCAPIGroup())
return key
}
// getMachineDeleteAnnotationKey returns the key that is used by cluster-api for marking
// | parseCPUCapacity | identifier_name |
clusterapi_utils.go | ,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterapi
import (
"fmt"
"strconv"
"strings"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
const (
cpuKey = "capacity.cluster-autoscaler.kubernetes.io/cpu"
memoryKey = "capacity.cluster-autoscaler.kubernetes.io/memory"
diskCapacityKey = "capacity.cluster-autoscaler.kubernetes.io/ephemeral-disk"
gpuTypeKey = "capacity.cluster-autoscaler.kubernetes.io/gpu-type"
gpuCountKey = "capacity.cluster-autoscaler.kubernetes.io/gpu-count"
maxPodsKey = "capacity.cluster-autoscaler.kubernetes.io/maxPods"
taintsKey = "capacity.cluster-autoscaler.kubernetes.io/taints"
labelsKey = "capacity.cluster-autoscaler.kubernetes.io/labels"
)
var (
// clusterNameLabel is the label applied to objects (Machine, MachineSet, MachineDeployment)
// to identify which cluster they are owned by. Because the label can be
// affected by the CAPI_GROUP environment variable, it is initialized here.
clusterNameLabel = getClusterNameLabel()
// errMissingMinAnnotation is the error returned when a
// machine set does not have an annotation keyed by
// nodeGroupMinSizeAnnotationKey.
errMissingMinAnnotation = errors.New("missing min annotation")
// errMissingMaxAnnotation is the error returned when a
// machine set does not have an annotation keyed by
// nodeGroupMaxSizeAnnotationKey.
errMissingMaxAnnotation = errors.New("missing max annotation")
// errInvalidMinAnnotation is the error returned when a
// machine set has a non-integral min annotation value.
errInvalidMinAnnotation = errors.New("invalid min annotation")
// errInvalidMaxAnnotation is the error returned when a
// machine set has a non-integral max annotation value.
errInvalidMaxAnnotation = errors.New("invalid max annotation")
// machineDeleteAnnotationKey is the annotation used by cluster-api to indicate
// that a machine should be deleted. Because this key can be affected by the
// CAPI_GROUP env variable, it is initialized here.
machineDeleteAnnotationKey = getMachineDeleteAnnotationKey()
// machineAnnotationKey is the annotation used by the cluster-api on Node objects
// to specify the name of the related Machine object. Because this can be affected
// by the CAPI_GROUP env variable, it is initialized here.
machineAnnotationKey = getMachineAnnotationKey()
// nodeGroupMinSizeAnnotationKey and nodeGroupMaxSizeAnnotationKey are the keys
// used in MachineSet and MachineDeployment annotations to specify the limits
// for the node group. Because the keys can be affected by the CAPI_GROUP env
// variable, they are initialized here.
nodeGroupMinSizeAnnotationKey = getNodeGroupMinSizeAnnotationKey()
nodeGroupMaxSizeAnnotationKey = getNodeGroupMaxSizeAnnotationKey()
zeroQuantity = resource.MustParse("0")
)
type normalizedProviderID string
// minSize returns the minimum value encoded in the annotations keyed
// by nodeGroupMinSizeAnnotationKey. Returns errMissingMinAnnotation
// if the annotation doesn't exist or errInvalidMinAnnotation if the
// value is not of type int.
func minSize(annotations map[string]string) (int, error) {
val, found := annotations[nodeGroupMinSizeAnnotationKey]
if !found {
return 0, errMissingMinAnnotation
}
i, err := strconv.Atoi(val)
if err != nil {
return 0, errors.Wrapf(err, "%s", errInvalidMinAnnotation)
}
return i, nil
}
// maxSize returns the maximum value encoded in the annotations keyed
// by nodeGroupMaxSizeAnnotationKey. Returns errMissingMaxAnnotation
// if the annotation doesn't exist or errInvalidMaxAnnotation if the
// value is not of type int.
func maxSize(annotations map[string]string) (int, error) {
val, found := annotations[nodeGroupMaxSizeAnnotationKey]
if !found {
return 0, errMissingMaxAnnotation
}
i, err := strconv.Atoi(val)
if err != nil {
return 0, errors.Wrapf(err, "%s", errInvalidMaxAnnotation)
}
return i, nil
}
func parseScalingBounds(annotations map[string]string) (int, int, error) {
minSize, err := minSize(annotations)
if err != nil && err != errMissingMinAnnotation {
return 0, 0, err
}
if minSize < 0 {
return 0, 0, errInvalidMinAnnotation
}
maxSize, err := maxSize(annotations)
if err != nil && err != errMissingMaxAnnotation {
return 0, 0, err
}
if maxSize < 0 {
return 0, 0, errInvalidMaxAnnotation
}
if maxSize < minSize {
return 0, 0, errInvalidMaxAnnotation
}
return minSize, maxSize, nil
}
func getOwnerForKind(u *unstructured.Unstructured, kind string) *metav1.OwnerReference {
if u != nil {
for _, ref := range u.GetOwnerReferences() {
if ref.Kind == kind && ref.Name != "" {
return ref.DeepCopy()
}
}
}
return nil
}
func machineOwnerRef(machine *unstructured.Unstructured) *metav1.OwnerReference {
return getOwnerForKind(machine, machineSetKind)
}
func machineSetOwnerRef(machineSet *unstructured.Unstructured) *metav1.OwnerReference {
return getOwnerForKind(machineSet, machineDeploymentKind)
}
func machineSetHasMachineDeploymentOwnerRef(machineSet *unstructured.Unstructured) bool {
return machineSetOwnerRef(machineSet) != nil
}
// normalizedProviderString splits s on '/' returning everything after
// the last '/'.
func normalizedProviderString(s string) normalizedProviderID {
split := strings.Split(s, "/")
return normalizedProviderID(split[len(split)-1])
}
func parseKey(annotations map[string]string, key string) (resource.Quantity, error) {
if val, exists := annotations[key]; exists && val != "" |
return zeroQuantity.DeepCopy(), nil
}
func parseIntKey(annotations map[string]string, key string) (resource.Quantity, error) {
if val, exists := annotations[key]; exists && val != "" {
valInt, err := strconv.ParseInt(val, 10, 0)
if err != nil {
return zeroQuantity.DeepCopy(), fmt.Errorf("value %q from annotation %q expected to be an integer: %v", val, key, err)
}
return *resource.NewQuantity(valInt, resource.DecimalSI), nil
}
return zeroQuantity.DeepCopy(), nil
}
func parseCPUCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, cpuKey)
}
func parseMemoryCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, memoryKey)
}
func parseEphemeralDiskCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, diskCapacityKey)
}
func parseGPUCount(annotations map[string]string) (resource.Quantity, error) {
return parseIntKey(annotations, gpuCountKey)
}
// The GPU type is not currently considered by the autoscaler when planning
// expansion, but most likely will be in the future. This method is being added
// in expectation of that arrival.
// see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/utils/gpu/gpu.go
func parseGPUType(annotations map[string]string) string {
if val, found := annotations[gpuTypeKey]; found {
return val
}
return ""
}
func parseMaxPodsCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseIntKey(annotations, maxPodsKey)
}
func clusterNameFromResource(r *unstructured.Unstructured) string {
// Use Spec.ClusterName if defined (only available on v1alpha3+ types)
clusterName, found, err := unstructured.NestedString(r.Object, "spec", "clusterName")
if err != nil {
return ""
}
if found {
return clusterName
}
// Fallback to value of clusterNameLabel
if clusterName, ok := r.GetLabels()[clusterNameLabel]; ok {
return clusterName
}
return ""
}
// getNodeGroupMinSizeAnnotationKey returns the key that is used for the
// node group minimum size annotation. This function is needed because the user can
// change the default group name by using the CAPI_GROUP environment variable.
func getNodeGroupMinSizeAnnotationKey() string {
key := fmt.Sprintf("%s/cluster-api-autoscaler-node-group-min-size", getCAPIGroup())
return key
}
// getNodeGroupMaxSizeAnnotationKey returns the key that is used for the
// node group maximum size annotation. This function is needed because the user can
// change the default group name by using the CAPI_GROUP environment variable.
func getNodeGroupMaxSizeAnnotationKey() string {
key := fmt.Sprintf("%s/cluster-api-autoscaler-node-group-max-size", getCAPIGroup())
return key
}
// getMachineDeleteAnnotationKey returns the key that is used by cluster-api for marking
// | {
return resource.ParseQuantity(val)
} | conditional_block |
clusterapi_utils.go | " BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterapi
import (
"fmt"
"strconv"
"strings"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
const (
cpuKey = "capacity.cluster-autoscaler.kubernetes.io/cpu"
memoryKey = "capacity.cluster-autoscaler.kubernetes.io/memory"
diskCapacityKey = "capacity.cluster-autoscaler.kubernetes.io/ephemeral-disk"
gpuTypeKey = "capacity.cluster-autoscaler.kubernetes.io/gpu-type"
gpuCountKey = "capacity.cluster-autoscaler.kubernetes.io/gpu-count"
maxPodsKey = "capacity.cluster-autoscaler.kubernetes.io/maxPods"
taintsKey = "capacity.cluster-autoscaler.kubernetes.io/taints"
labelsKey = "capacity.cluster-autoscaler.kubernetes.io/labels"
)
var (
// clusterNameLabel is the label applied to objects (Machine, MachineSet, MachineDeployment)
// to identify which cluster they are owned by. Because the label can be
// affected by the CAPI_GROUP environment variable, it is initialized here.
clusterNameLabel = getClusterNameLabel()
// errMissingMinAnnotation is the error returned when a
// machine set does not have an annotation keyed by
// nodeGroupMinSizeAnnotationKey.
errMissingMinAnnotation = errors.New("missing min annotation")
// errMissingMaxAnnotation is the error returned when a
// machine set does not have an annotation keyed by
// nodeGroupMaxSizeAnnotationKey.
errMissingMaxAnnotation = errors.New("missing max annotation")
// errInvalidMinAnnotation is the error returned when a
// machine set has a non-integral min annotation value.
errInvalidMinAnnotation = errors.New("invalid min annotation")
// errInvalidMaxAnnotation is the error returned when a
// machine set has a non-integral max annotation value.
errInvalidMaxAnnotation = errors.New("invalid max annotation")
// machineDeleteAnnotationKey is the annotation used by cluster-api to indicate
// that a machine should be deleted. Because this key can be affected by the | // by the CAPI_GROUP env variable, it is initialized here.
machineAnnotationKey = getMachineAnnotationKey()
// nodeGroupMinSizeAnnotationKey and nodeGroupMaxSizeAnnotationKey are the keys
// used in MachineSet and MachineDeployment annotations to specify the limits
// for the node group. Because the keys can be affected by the CAPI_GROUP env
// variable, they are initialized here.
nodeGroupMinSizeAnnotationKey = getNodeGroupMinSizeAnnotationKey()
nodeGroupMaxSizeAnnotationKey = getNodeGroupMaxSizeAnnotationKey()
zeroQuantity = resource.MustParse("0")
)
type normalizedProviderID string
// minSize returns the minimum value encoded in the annotations keyed
// by nodeGroupMinSizeAnnotationKey. Returns errMissingMinAnnotation
// if the annotation doesn't exist or errInvalidMinAnnotation if the
// value is not of type int.
func minSize(annotations map[string]string) (int, error) {
val, found := annotations[nodeGroupMinSizeAnnotationKey]
if !found {
return 0, errMissingMinAnnotation
}
i, err := strconv.Atoi(val)
if err != nil {
return 0, errors.Wrapf(err, "%s", errInvalidMinAnnotation)
}
return i, nil
}
// maxSize returns the maximum value encoded in the annotations keyed
// by nodeGroupMaxSizeAnnotationKey. Returns errMissingMaxAnnotation
// if the annotation doesn't exist or errInvalidMaxAnnotation if the
// value is not of type int.
func maxSize(annotations map[string]string) (int, error) {
val, found := annotations[nodeGroupMaxSizeAnnotationKey]
if !found {
return 0, errMissingMaxAnnotation
}
i, err := strconv.Atoi(val)
if err != nil {
return 0, errors.Wrapf(err, "%s", errInvalidMaxAnnotation)
}
return i, nil
}
func parseScalingBounds(annotations map[string]string) (int, int, error) {
minSize, err := minSize(annotations)
if err != nil && err != errMissingMinAnnotation {
return 0, 0, err
}
if minSize < 0 {
return 0, 0, errInvalidMinAnnotation
}
maxSize, err := maxSize(annotations)
if err != nil && err != errMissingMaxAnnotation {
return 0, 0, err
}
if maxSize < 0 {
return 0, 0, errInvalidMaxAnnotation
}
if maxSize < minSize {
return 0, 0, errInvalidMaxAnnotation
}
return minSize, maxSize, nil
}
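// exampleScalingBounds is an illustrative sketch added as commentary (it is not
// part of the original file): it shows how the min/max size annotations feed
// parseScalingBounds. The annotation values "1" and "5" are hypothetical.
func exampleScalingBounds() (int, int, error) {
	annotations := map[string]string{
		nodeGroupMinSizeAnnotationKey: "1",
		nodeGroupMaxSizeAnnotationKey: "5",
	}
	// expected result for these values: min=1, max=5, err=nil
	return parseScalingBounds(annotations)
}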
func getOwnerForKind(u *unstructured.Unstructured, kind string) *metav1.OwnerReference {
if u != nil {
for _, ref := range u.GetOwnerReferences() {
if ref.Kind == kind && ref.Name != "" {
return ref.DeepCopy()
}
}
}
return nil
}
func machineOwnerRef(machine *unstructured.Unstructured) *metav1.OwnerReference {
return getOwnerForKind(machine, machineSetKind)
}
func machineSetOwnerRef(machineSet *unstructured.Unstructured) *metav1.OwnerReference {
return getOwnerForKind(machineSet, machineDeploymentKind)
}
func machineSetHasMachineDeploymentOwnerRef(machineSet *unstructured.Unstructured) bool {
return machineSetOwnerRef(machineSet) != nil
}
// normalizedProviderString splits s on '/' returning everything after
// the last '/'.
func normalizedProviderString(s string) normalizedProviderID {
split := strings.Split(s, "/")
return normalizedProviderID(split[len(split)-1])
}
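// For illustration (added commentary; the provider ID format below is hypothetical):
//	normalizedProviderString("aws:///us-east-1a/i-0123456789abcdef0") == "i-0123456789abcdef0"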
func parseKey(annotations map[string]string, key string) (resource.Quantity, error) {
if val, exists := annotations[key]; exists && val != "" {
return resource.ParseQuantity(val)
}
return zeroQuantity.DeepCopy(), nil
}
func parseIntKey(annotations map[string]string, key string) (resource.Quantity, error) {
if val, exists := annotations[key]; exists && val != "" {
valInt, err := strconv.ParseInt(val, 10, 0)
if err != nil {
return zeroQuantity.DeepCopy(), fmt.Errorf("value %q from annotation %q expected to be an integer: %v", val, key, err)
}
return *resource.NewQuantity(valInt, resource.DecimalSI), nil
}
return zeroQuantity.DeepCopy(), nil
}
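// For illustration (added commentary; the annotation value is hypothetical):
//	parseIntKey(map[string]string{maxPodsKey: "110"}, maxPodsKey) // -> Quantity "110", nil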
func parseCPUCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, cpuKey)
}
func parseMemoryCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, memoryKey)
}
func parseEphemeralDiskCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, diskCapacityKey)
}
func parseGPUCount(annotations map[string]string) (resource.Quantity, error) {
return parseIntKey(annotations, gpuCountKey)
}
// The GPU type is not currently considered by the autoscaler when planning
// expansion, but most likely will be in the future. This method is being added
// in expectation of that arrival.
// see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/utils/gpu/gpu.go
func parseGPUType(annotations map[string]string) string {
if val, found := annotations[gpuTypeKey]; found {
return val
}
return ""
}
func parseMaxPodsCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseIntKey(annotations, maxPodsKey)
}
func clusterNameFromResource(r *unstructured.Unstructured) string {
// Use Spec.ClusterName if defined (only available on v1alpha3+ types)
clusterName, found, err := unstructured.NestedString(r.Object, "spec", "clusterName")
if err != nil {
return ""
}
if found {
return clusterName
}
// Fallback to value of clusterNameLabel
if clusterName, ok := r.GetLabels()[clusterNameLabel]; ok {
return clusterName
}
return ""
}
// getNodeGroupMinSizeAnnotationKey returns the key that is used for the
// node group minimum size annotation. This function is needed because the user can
// change the default group name by using the CAPI_GROUP environment variable.
func getNodeGroupMinSizeAnnotationKey() string {
key := fmt.Sprintf("%s/cluster-api-autoscaler-node-group-min-size", getCAPIGroup())
return key
}
// getNodeGroupMaxSizeAnnotationKey returns the key that is used for the
// node group maximum size annotation. This function is needed because the user can
// change the default group name by using the CAPI_GROUP environment variable.
func getNodeGroupMaxSizeAnnotationKey() string {
key := fmt.Sprintf("%s/cluster-api-autoscaler-node-group-max-size", getCAPIGroup())
return key
}
// getMachineDeleteAnnotationKey returns the key that is used by cluster-api for marking
// machines to | // CAPI_GROUP env variable, it is initialized here.
machineDeleteAnnotationKey = getMachineDeleteAnnotationKey()
// machineAnnotationKey is the annotation used by the cluster-api on Node objects
// to specify the name of the related Machine object. Because this can be affected | random_line_split |
clusterapi_utils.go | ,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterapi
import (
"fmt"
"strconv"
"strings"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
const (
cpuKey = "capacity.cluster-autoscaler.kubernetes.io/cpu"
memoryKey = "capacity.cluster-autoscaler.kubernetes.io/memory"
diskCapacityKey = "capacity.cluster-autoscaler.kubernetes.io/ephemeral-disk"
gpuTypeKey = "capacity.cluster-autoscaler.kubernetes.io/gpu-type"
gpuCountKey = "capacity.cluster-autoscaler.kubernetes.io/gpu-count"
maxPodsKey = "capacity.cluster-autoscaler.kubernetes.io/maxPods"
taintsKey = "capacity.cluster-autoscaler.kubernetes.io/taints"
labelsKey = "capacity.cluster-autoscaler.kubernetes.io/labels"
)
var (
// clusterNameLabel is the label applied to objects (Machine, MachineSet, MachineDeployment)
// to identify which cluster they are owned by. Because the label can be
// affected by the CAPI_GROUP environment variable, it is initialized here.
clusterNameLabel = getClusterNameLabel()
// errMissingMinAnnotation is the error returned when a
// machine set does not have an annotation keyed by
// nodeGroupMinSizeAnnotationKey.
errMissingMinAnnotation = errors.New("missing min annotation")
// errMissingMaxAnnotation is the error returned when a
// machine set does not have an annotation keyed by
// nodeGroupMaxSizeAnnotationKey.
errMissingMaxAnnotation = errors.New("missing max annotation")
// errInvalidMinAnnotation is the error returned when a
// machine set has a non-integral min annotation value.
errInvalidMinAnnotation = errors.New("invalid min annotation")
// errInvalidMaxAnnotation is the error returned when a
// machine set has a non-integral max annotation value.
errInvalidMaxAnnotation = errors.New("invalid max annotation")
// machineDeleteAnnotationKey is the annotation used by cluster-api to indicate
// that a machine should be deleted. Because this key can be affected by the
// CAPI_GROUP env variable, it is initialized here.
machineDeleteAnnotationKey = getMachineDeleteAnnotationKey()
// machineAnnotationKey is the annotation used by the cluster-api on Node objects
// to specify the name of the related Machine object. Because this can be affected
// by the CAPI_GROUP env variable, it is initialized here.
machineAnnotationKey = getMachineAnnotationKey()
// nodeGroupMinSizeAnnotationKey and nodeGroupMaxSizeAnnotationKey are the keys
// used in MachineSet and MachineDeployment annotations to specify the limits
// for the node group. Because the keys can be affected by the CAPI_GROUP env
// variable, they are initialized here.
nodeGroupMinSizeAnnotationKey = getNodeGroupMinSizeAnnotationKey()
nodeGroupMaxSizeAnnotationKey = getNodeGroupMaxSizeAnnotationKey()
zeroQuantity = resource.MustParse("0")
)
type normalizedProviderID string
// minSize returns the minimum value encoded in the annotations keyed
// by nodeGroupMinSizeAnnotationKey. Returns errMissingMinAnnotation
// if the annotation doesn't exist or errInvalidMinAnnotation if the
// value is not of type int.
func minSize(annotations map[string]string) (int, error) {
val, found := annotations[nodeGroupMinSizeAnnotationKey]
if !found {
return 0, errMissingMinAnnotation
}
i, err := strconv.Atoi(val)
if err != nil {
return 0, errors.Wrapf(err, "%s", errInvalidMinAnnotation)
}
return i, nil
}
// maxSize returns the maximum value encoded in the annotations keyed
// by nodeGroupMaxSizeAnnotationKey. Returns errMissingMaxAnnotation
// if the annotation doesn't exist or errInvalidMaxAnnotation if the
// value is not of type int.
func maxSize(annotations map[string]string) (int, error) {
val, found := annotations[nodeGroupMaxSizeAnnotationKey]
if !found {
return 0, errMissingMaxAnnotation
}
i, err := strconv.Atoi(val)
if err != nil {
return 0, errors.Wrapf(err, "%s", errInvalidMaxAnnotation)
}
return i, nil
}
func parseScalingBounds(annotations map[string]string) (int, int, error) {
minSize, err := minSize(annotations)
if err != nil && err != errMissingMinAnnotation {
return 0, 0, err
}
if minSize < 0 {
return 0, 0, errInvalidMinAnnotation
}
maxSize, err := maxSize(annotations)
if err != nil && err != errMissingMaxAnnotation {
return 0, 0, err
}
if maxSize < 0 {
return 0, 0, errInvalidMaxAnnotation
}
if maxSize < minSize {
return 0, 0, errInvalidMaxAnnotation
}
return minSize, maxSize, nil
}
func getOwnerForKind(u *unstructured.Unstructured, kind string) *metav1.OwnerReference {
if u != nil {
for _, ref := range u.GetOwnerReferences() {
if ref.Kind == kind && ref.Name != "" {
return ref.DeepCopy()
}
}
}
return nil
}
func machineOwnerRef(machine *unstructured.Unstructured) *metav1.OwnerReference {
return getOwnerForKind(machine, machineSetKind)
}
func machineSetOwnerRef(machineSet *unstructured.Unstructured) *metav1.OwnerReference {
return getOwnerForKind(machineSet, machineDeploymentKind)
}
func machineSetHasMachineDeploymentOwnerRef(machineSet *unstructured.Unstructured) bool {
return machineSetOwnerRef(machineSet) != nil
}
// normalizedProviderString splits s on '/' returning everything after
// the last '/'.
func normalizedProviderString(s string) normalizedProviderID {
split := strings.Split(s, "/")
return normalizedProviderID(split[len(split)-1])
}
func parseKey(annotations map[string]string, key string) (resource.Quantity, error) {
if val, exists := annotations[key]; exists && val != "" {
return resource.ParseQuantity(val)
}
return zeroQuantity.DeepCopy(), nil
}
func parseIntKey(annotations map[string]string, key string) (resource.Quantity, error) {
if val, exists := annotations[key]; exists && val != "" {
valInt, err := strconv.ParseInt(val, 10, 0)
if err != nil {
return zeroQuantity.DeepCopy(), fmt.Errorf("value %q from annotation %q expected to be an integer: %v", val, key, err)
}
return *resource.NewQuantity(valInt, resource.DecimalSI), nil
}
return zeroQuantity.DeepCopy(), nil
}
func parseCPUCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, cpuKey)
}
func parseMemoryCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, memoryKey)
}
func parseEphemeralDiskCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, diskCapacityKey)
}
func parseGPUCount(annotations map[string]string) (resource.Quantity, error) {
return parseIntKey(annotations, gpuCountKey)
}
// The GPU type is not currently considered by the autoscaler when planning
// expansion, but most likely will be in the future. This method is being added
// in expectation of that arrival.
// see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/utils/gpu/gpu.go
func parseGPUType(annotations map[string]string) string {
if val, found := annotations[gpuTypeKey]; found {
return val
}
return ""
}
func parseMaxPodsCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseIntKey(annotations, maxPodsKey)
}
func clusterNameFromResource(r *unstructured.Unstructured) string {
// Use Spec.ClusterName if defined (only available on v1alpha3+ types)
clusterName, found, err := unstructured.NestedString(r.Object, "spec", "clusterName")
if err != nil {
return ""
}
if found {
return clusterName
}
// Fallback to value of clusterNameLabel
if clusterName, ok := r.GetLabels()[clusterNameLabel]; ok {
return clusterName
}
return ""
}
// getNodeGroupMinSizeAnnotationKey returns the key that is used for the
// node group minimum size annotation. This function is needed because the user can
// change the default group name by using the CAPI_GROUP environment variable.
func getNodeGroupMinSizeAnnotationKey() string |
// getNodeGroupMaxSizeAnnotationKey returns the key that is used for the
// node group maximum size annotation. This function is needed because the user can
// change the default group name by using the CAPI_GROUP environment variable.
func getNodeGroupMaxSizeAnnotationKey() string {
key := fmt.Sprintf("%s/cluster-api-autoscaler-node-group-max-size", getCAPIGroup())
return key
}
// getMachineDeleteAnnotationKey returns the key that is used by cluster-api for marking
// | {
key := fmt.Sprintf("%s/cluster-api-autoscaler-node-group-min-size", getCAPIGroup())
return key
} | identifier_body |
analysisTools.py | 04-rgamma)*xval + rgamma*xval*xval)
# add systematic error for SPHEREx photometric extractions (in mag)
sysErr = 0.01 ## somewhat arbitrary, but realistic
return np.sqrt(err_rand**2 + sysErr**2)
# similar to getSPHERExSED, but already has noise-free static model
def getObsSED(wavSPH, magTrue, mjd0, Amp, Per, sysErr, dataDIR, BrendanFile):
# Brendan's file with SPHEREx observations
mjdObs, waveObs, m5Obs = getBrendanSpectrum(dataDIR, BrendanFile)
# interpolate true mags to observed wavelengths
magTrueObs = np.interp(waveObs, wavSPH, magTrue)
## photometric errors
magErr = getPhotomErrors(magTrueObs, m5Obs)
# draw from a Gaussian distribution
dmNoise = np.random.normal(0, magErr)
## generate light curve offsets
dmOff = Amp*np.sin(2*np.pi*(mjdObs-mjd0)/Per)
# raw magnitudes: true mags with added variability and photometric noise
magRaw = magTrueObs + dmNoise + dmOff
return waveObs, mjdObs, magRaw, magErr, dmOff, dmNoise
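# --- Illustrative usage sketch (added commentary, not part of the original
# analysis). The amplitude, period, MJD zero point and systematic error below are
# hypothetical; dataDIR must contain the SPHEREx sensitivity table and BrendanFile
# the simulated observation list.
def exampleObsSED(wavSPH, magTrue, dataDIR, BrendanFile):
    # 0.1 mag sinusoidal variability with a 6-hour (0.25 d) period
    return getObsSED(wavSPH, magTrue, 59000.0, 0.1, 0.25, 0.01, dataDIR, BrendanFile)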
def getSPHERExSensitivity(dataDIR):
# read data from Olivier Dore's Point_Source_Sensitivity_v28_base_cbe.txt file
# wavelengthSPHEREx (in micron) is the first column
# m5SPHEREx (AB magnitude) is the second column
dataFile = dataDIR + 'Point_Source_Sensitivity_v28_base_cbe.txt'
SPHERExSensitivity = np.loadtxt(dataFile)
return SPHERExSensitivity.T[0], SPHERExSensitivity.T[1]
def getATMBusSED(SEDfile, waveSPH, BusTaxi, Dast, rAU, BusDIR):
# given ATM model, scale by Dast and interpolate to waveSPH
magTrue, eps, alb = getATMmodelMag(SEDfile, Dast, waveSPH)
# if requested, correct with Bus-DeMeo reflectivity curve
if (BusTaxi!=''):
# read reflectivity curve
file = BusDIR + "/" + "reflectivity" + BusTaxi + ".dat"
refldata = np.loadtxt(file, skiprows=1)
waveSPHrefl, reflectivity = refldata.T[0], refldata.T[1]
print('read in', file)
if (waveSPHrefl.size != waveSPH.size):
print('ERROR: different standard SPHEREx wavelength grids!')
# assumption is that emission is negligible below this wavelength
wavMinEm = 2.0 # micron, OK outside Earth's orbit
# compute and apply correction
magTrue += getBusAKARIMagCorr(waveSPH, reflectivity, magTrue, wavMinEm)
return magTrue
# no-noise version
def getATMmodelMag(SEDfile, Dast, waveSPH):
# 1) read wavelength (in m), flux (F_lambda in W/m2/m), emissivity and albedo
# 2) correct flux from the fiducial D=1km to Dast
# 3) given input wavelength array, compute AB magnitudes
# 4) return true AB magnitudes, epsilon, and albedo interpolated to waveSPH values
# 1) read data
wavelength, flux, epsilon, albedo = np.loadtxt(SEDfile)
# 2) correct for Dast and translate flux to AB mags
magAB = getABmag(wavelength, flux*Dast**2)
# 3) interpolate magAB, epsilon and albedo to waveSPH
SPHmagAB = np.interp(waveSPH, 1.0e6*wavelength, magAB)
SPHeps = np.interp(waveSPH, 1.0e6*wavelength, epsilon)
SPHalb = np.interp(waveSPH, 1.0e6*wavelength, albedo) | return SPHmagAB, SPHeps, SPHalb
def getBusAKARIMagCorr(wave, refl, magTot, wavMax):
## compute additive correction to magTot because of a different
## reflectivity curve affecting the scattered flux; the correction
## vanishes at the first wavelength in the SPHEREx standard grid
refl0 = refl/refl[0]
# part 1: emission negligible at short wavelengths
dmag1 = -2.5*np.log10(refl0)
# part 2: extrapolate the scattered component from short wavelengths
# compute fraction of the total flux due to scattered component
magTotATwavMax = np.interp(wavMax, wave, magTot)
ftotATwavMax = 10**(0.4*(magTot-magTotATwavMax))
# and extrapolate as Rayleigh-Jeans tail
fCorr = 1 - (1-refl0) * ftotATwavMax * (wavMax/wave)**2
dmag2 = -2.5*np.log10(fCorr)
return np.where(wave < wavMax, dmag1, dmag2)
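# Compact restatement of the correction above (added commentary): with
# refl0 = refl/refl[0],
#   dmag1 = -2.5*log10(refl0)                                      for wave <  wavMax
#   dmag2 = -2.5*log10(1 - (1-refl0)*f(wavMax)*(wavMax/wave)**2)   for wave >= wavMax
# where f(wavMax) = 10**(0.4*(magTot - magTot(wavMax))) and the (wavMax/wave)**2
# factor is the Rayleigh-Jeans extrapolation of the scattered component.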
# read MJD and wavelength from Brendan's file, and regrid wavelengths
# to the standard wavelength; return MJD and corresponding standard
# wavelength and 5-sigma SPHEREx depth
def getBrendanSpectrum(dataDIR, dataFile, singleSurvey=True):
b = np.loadtxt(dataFile, skiprows=1)
mjdBrendan = b.T[0]
wavBrendan = b.T[1]
waveSPH, m5SPH = getSPHERExSensitivity(dataDIR)
wavBrendanSPH = getStandardLambda(wavBrendan, waveSPH)
m5BrendanSPH = np.interp(wavBrendanSPH, waveSPH, m5SPH)
if (singleSurvey):
# since asteroids move, we need to increase errors by sqrt(4)
# because we don't have 4 surveys as for static sources, or
# the 5-sigma limiting depth is shallower by ~0.75 mag
m5BrendanSPH -= 1.25*np.log10(4)
return mjdBrendan, wavBrendanSPH, m5BrendanSPH
# wrapper around getOrbitInfoFromBrendanSpectrum since wav is not needed
def getOrbitInfoFromBrendansMJDs(mjd):
return getOrbitInfoFromBrendanSpectrum(mjd, mjd)
# given BrendanSpectrum (mjd, wavelength), for each SPHEREx season/survey
# (separated by >100d), find for all its orbits (<0.05d) how many pairs of
# fluxes per orbit; return as (j=0..Nseason; k=0..Norbits)
# NoPairs[j,k], MJDmin[j,k], MJDmax[j,k]
def getOrbitInfoFromBrendanSpectrum(mjd,wav):
Norbits = []
NoPairs = []
MJDmin = []
MJDmax = []
Mmin = []
Mmax = []
nps = []
Mmin.append(mjd[0])
k = 1
Nobs = 0
for i in range(0,len(mjd)):
Nobs += 1
dt = mjd[i] - mjd[i-1]
if (dt>0.05):
# new orbit...
Mmax.append(mjd[i-1])
nps.append(int(k/2))
k = 1
if (dt>100):
# and also a new season
MJDmin.append(Mmin)
MJDmax.append(Mmax)
NoPairs.append(nps)
Mmin = []
Mmax = []
nps = []
Mmin.append(mjd[i])
else:
# not new orbit, simply count the point
k += 1
if (i == (len(mjd)-1)):
# special case of the last point
Mmax.append(mjd[i])
MJDmin.append(Mmin)
MJDmax.append(Mmax)
nps.append(int(k/2))
NoPairs.append(nps)
return NoPairs, MJDmin, MJDmax
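# Shape of the returned lists (added commentary; the numbers are hypothetical):
#   NoPairs[j][k] -> number of flux pairs in orbit k of season j, e.g. [[2, 2, 1], [3, 2]]
#   MJDmin[j][k]  -> MJD of the first observation in that orbit
#   MJDmax[j][k]  -> MJD of the last observation in that orbit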
def getSPHERExSeasons(NoPairs, MJDmin, MJDmax, verbose=False):
Nseasons = len(MJDmin)
Nobs = 0
for i in range(0, Nseasons):
Norbits=len(MJDmin[i])
Nobs += 2*np.sum(NoPairs[i])
dt = []
if verbose:
for j in range(0,len(NoPairs[i])):
dMJD = int(60*24*(MJDmax[i][j] - MJDmin[i][j]))
dt.append(d | random_line_split |
|
analysisTools.py | return magTrue
# no-noise version
def getATMmodelMag(SEDfile, Dast, waveSPH):
# 1) read wavelength (in m), flux (F_lambda in W/m2/m), emissivity and albedo
# 2) correct flux from the fiducial D=1km to Dast
# 3) given input wavelength array, compute AB magnitudes
# 4) return true AB magnitudes, epsilon, and albedo interpolated to waveSPH values
# 1) read data
wavelength, flux, epsilon, albedo = np.loadtxt(SEDfile)
# 2) correct for Dast and translate flux to AB mags
magAB = getABmag(wavelength, flux*Dast**2)
# 3) interpolate magAB, epsilon and albedo to waveSPH
SPHmagAB = np.interp(waveSPH, 1.0e6*wavelength, magAB)
SPHeps = np.interp(waveSPH, 1.0e6*wavelength, epsilon)
SPHalb = np.interp(waveSPH, 1.0e6*wavelength, albedo)
return SPHmagAB, SPHeps, SPHalb
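# Worked note (added commentary): scaling the fiducial 1 km flux by Dast**2 is the
# same as shifting the magnitudes by -2.5*log10(Dast**2) = -5*log10(Dast), so a
# hypothetical Dast = 10 km asteroid comes out 5 mag brighter than the 1 km model.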
def getBusAKARIMagCorr(wave, refl, magTot, wavMax):
## compute additive correction to magTot because of a different
## reflectivity curve affecting the scattered flux; the correction
## vanishes at the first wavelength in the SPHEREx standard grid
refl0 = refl/refl[0]
# part 1: emission negligible at short wavelengths
dmag1 = -2.5*np.log10(refl0)
# part 2: extrapolate the scattered component from short wavelengths
# compute fraction of the total flux due to scattered component
magTotATwavMax = np.interp(wavMax, wave, magTot)
ftotATwavMax = 10**(0.4*(magTot-magTotATwavMax))
# and extrapolate as Rayleigh-Jeans tail
fCorr = 1 - (1-refl0) * ftotATwavMax * (wavMax/wave)**2
dmag2 = -2.5*np.log10(fCorr)
return np.where(wave < wavMax, dmag1, dmag2)
# read MJD and wavelength from Brendan's file, and regrid wavelengths
# to the standard wavelength; return MJD and corresponding standard
# wavelength and 5-sigma SPHEREx depth
def getBrendanSpectrum(dataDIR, dataFile, singleSurvey=True):
b = np.loadtxt(dataFile, skiprows=1)
mjdBrendan = b.T[0]
wavBrendan = b.T[1]
waveSPH, m5SPH = getSPHERExSensitivity(dataDIR)
wavBrendanSPH = getStandardLambda(wavBrendan, waveSPH)
m5BrendanSPH = np.interp(wavBrendanSPH, waveSPH, m5SPH)
if (singleSurvey):
# since asteroids move, we need to increase errors by sqrt(4)
# because we don't have 4 surveys as for static sources, or
# the 5-sigma limiting depth is shallower by ~0.75 mag
m5BrendanSPH -= 1.25*np.log10(4)
return mjdBrendan, wavBrendanSPH, m5BrendanSPH
# wrapper around getOrbitInfoFromBrendanSpectrum since wav is not needed
def getOrbitInfoFromBrendansMJDs(mjd):
return getOrbitInfoFromBrendanSpectrum(mjd, mjd)
# given BrendanSpectrum (mjd, wavelength), for each SPHEREx season/survey
# (separated by >100d), find for all its orbits (<0.05d) how many pairs of
# fluxes per orbit; return as (j=0..Nseason; k=0..Norbits)
# NoPairs[j,k], MJDmin[j,k], MJDmax[j,k]
def getOrbitInfoFromBrendanSpectrum(mjd,wav):
Norbits = []
NoPairs = []
MJDmin = []
MJDmax = []
Mmin = []
Mmax = []
nps = []
Mmin.append(mjd[0])
k = 1
Nobs = 0
for i in range(0,len(mjd)):
Nobs += 1
dt = mjd[i] - mjd[i-1]
if (dt>0.05):
# new orbit...
Mmax.append(mjd[i-1])
nps.append(int(k/2))
k = 1
if (dt>100):
# and also a new season
MJDmin.append(Mmin)
MJDmax.append(Mmax)
NoPairs.append(nps)
Mmin = []
Mmax = []
nps = []
Mmin.append(mjd[i])
else:
# not new orbit, simply count the point
k += 1
if (i == (len(mjd)-1)):
# special case of the last point
Mmax.append(mjd[i])
MJDmin.append(Mmin)
MJDmax.append(Mmax)
nps.append(int(k/2))
NoPairs.append(nps)
return NoPairs, MJDmin, MJDmax
def getSPHERExSeasons(NoPairs, MJDmin, MJDmax, verbose=False):
Nseasons = len(MJDmin)
Nobs = 0
for i in range(0, Nseasons):
Norbits=len(MJDmin[i])
Nobs += 2*np.sum(NoPairs[i])
dt = []
if verbose:
for j in range(0,len(NoPairs[i])):
dMJD = int(60*24*(MJDmax[i][j] - MJDmin[i][j]))
dt.append(dMJD)
print('season', i, ' Norb:', Norbits, ' Nobs=', Nobs)
print(' NoPairs=', NoPairs[i])
print(' dt=', dt)
print('No. of observations:', Nobs)
return Nobs
## select observations from a single season (zero-indexed!)
def selectSeasonSED(season, waveObs, mjdObs, magRaw, magErr, dmOff, dmNoise):
NoPairs, MJDmin, MJDmax = getOrbitInfoFromBrendansMJDs(mjdObs)
Nseasons = len(MJDmin)
if (season > Nseasons):
print('there are only', Nseasons,' seasons, not', season)
return
Norbits=len(MJDmin[season])
mjdMinVal = MJDmin[season][0]
mjdMaxVal = MJDmax[season][Norbits-1]
wS = waveObs[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mjdS = mjdObs[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mRawS = magRaw[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mErrS = magErr[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
dmOffS = dmOff[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
dmNoiseS = dmNoise[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
return wS, mjdS, mRawS, mErrS, dmOffS, dmNoiseS
# given lambda from Brendan's file, return lambdaGrid which are
# the closest values in the standard SPHEREx wavelength grid
def getStandardLambda(waveBrandon, waveSPHEREx):
lambdaGrid = 0*waveBrandon
for i in range(0,len(waveBrandon)):
delLambda = np.abs(waveSPHEREx - waveBrandon[i])
lambdaGrid[i] = getClosest(delLambda, waveSPHEREx)[0][1]
return lambdaGrid
def getClosest(list1, list2):
zipped_pairs = zip(list1, list2)
return sorted(zipped_pairs)
def dumpSPHERExSED(MJD, wavelength, mag, magErr, dmVarOff, randNoise, filename):
np.savetxt(filename, (MJD, wavelength, mag, magErr, dmVarOff, randNoise))
return
def simSPHERExSpec(Dast, rAU, SEDfile, dataDIR, BusTaxi, LC, obsFile, ABrange='', outfilerootname=''):
## set defaults
if (ABrange==''):
ABrange = [15.0, 20.0]
if (outfilerootname==''):
| outfilerootname = './simSPHERExSpecDefault' | conditional_block |
|
analysisTools.py | 04-rgamma)*xval + rgamma*xval*xval)
# add systematic error for SPHEREx photometric extractions (in mag)
sysErr = 0.01 ## somewhat arbitrary, but realistic
return np.sqrt(err_rand**2 + sysErr**2)
# similar to getSPHERExSED, but already has noise-free static model
def getObsSED(wavSPH, magTrue, mjd0, Amp, Per, sysErr, dataDIR, BrendanFile):
# Brendan's file with SPHEREx observations
mjdObs, waveObs, m5Obs = getBrendanSpectrum(dataDIR, BrendanFile)
# interpolate true mags to observed wavelengths
magTrueObs = np.interp(waveObs, wavSPH, magTrue)
## photometric errors
magErr = getPhotomErrors(magTrueObs, m5Obs)
# draw from a Gaussian distribution
dmNoise = np.random.normal(0, magErr)
## generate light curve offsets
dmOff = Amp*np.sin(2*np.pi*(mjdObs-mjd0)/Per)
# raw magnitudes: true mags with added variability and photometric noise
magRaw = magTrueObs + dmNoise + dmOff
return waveObs, mjdObs, magRaw, magErr, dmOff, dmNoise
def getSPHERExSensitivity(dataDIR):
# read data from Olivier Dore's Point_Source_Sensitivity_v28_base_cbe.txt file
# wavelengthSPHEREx (in micron) is the first column
# m5SPHEREx (AB magnitude) is the second column
dataFile = dataDIR + 'Point_Source_Sensitivity_v28_base_cbe.txt'
SPHERExSensitivity = np.loadtxt(dataFile)
return SPHERExSensitivity.T[0], SPHERExSensitivity.T[1]
def getATMBusSED(SEDfile, waveSPH, BusTaxi, Dast, rAU, BusDIR):
# given ATM model, scale by Dast and interpolate to waveSPH
magTrue, eps, alb = getATMmodelMag(SEDfile, Dast, waveSPH)
# if requested, correct with Bus-DeMeo reflectivity curve
if (BusTaxi!=''):
# read reflectivity curve
file = BusDIR + "/" + "reflectivity" + BusTaxi + ".dat"
refldata = np.loadtxt(file, skiprows=1)
waveSPHrefl, reflectivity = refldata.T[0], refldata.T[1]
print('read in', file)
if (waveSPHrefl.size != waveSPH.size):
print('ERROR: different standard SPHEREx wavelength grids!')
# assumption is that emission is negligible below this wavelength
wavMinEm = 2.0 # micron, OK outside Earth's orbit
# compute and apply correction
magTrue += getBusAKARIMagCorr(waveSPH, reflectivity, magTrue, wavMinEm)
return magTrue
# no-noise version
def getATMmodelMag(SEDfile, Dast, waveSPH):
# 1) read wavelength (in m), flux (F_lambda in W/m2/m), emissivity and albedo
# 2) correct flux from the fiducial D=1km to Dast
# 3) given input wavelength array, compute AB magnitudes
# 4) return true AB magnitudes, epsilon, and albedo interpolated to waveSPH values
# 1) read data
|
def getBusAKARIMagCorr(wave, refl, magTot, wavMax):
## compute additive correction to magTot because of a different
## reflectivity curve affecting the scattered flux; the correction
## vanishes at the first wavelength in the SPHEREx standard grid
refl0 = refl/refl[0]
# part 1: emission negligible at short wavelengths
dmag1 = -2.5*np.log10(refl0)
# part 2: extrapolate the scattered component from short wavelengths
# compute fraction of the total flux due to scattered component
magTotATwavMax = np.interp(wavMax, wave, magTot)
ftotATwavMax = 10**(0.4*(magTot-magTotATwavMax))
# and extrapolate as Rayleigh-Jeans tail
fCorr = 1 - (1-refl0) * ftotATwavMax * (wavMax/wave)**2
dmag2 = -2.5*np.log10(fCorr)
return np.where(wave < wavMax, dmag1, dmag2)
# read MJD and wavelength from Brendan's file, and regrid wavelengths
# to the standard wavelength; return MJD and corresponding standard
# wavelength and 5-sigma SPHEREx depth
def getBrendanSpectrum(dataDIR, dataFile, singleSurvey=True):
b = np.loadtxt(dataFile, skiprows=1)
mjdBrendan = b.T[0]
wavBrendan = b.T[1]
waveSPH, m5SPH = getSPHERExSensitivity(dataDIR)
wavBrendanSPH = getStandardLambda(wavBrendan, waveSPH)
m5BrendanSPH = np.interp(wavBrendanSPH, waveSPH, m5SPH)
if (singleSurvey):
# since asteroids move, we need to increase errors by sqrt(4)
# because we don't have 4 surveys as for static sources, or
# the 5-sigma limiting depth is shallower by ~0.75 mag
m5BrendanSPH -= 1.25*np.log10(4)
return mjdBrendan, wavBrendanSPH, m5BrendanSPH
# wrapper around getOrbitInfoFromBrendanSpectrum since wav is not needed
def getOrbitInfoFromBrendansMJDs(mjd):
return getOrbitInfoFromBrendanSpectrum(mjd, mjd)
# given BrendanSpectrum (mjd, wavelength), for each SPHEREx season/survey
# (separated by >100d), find for all its orbits (<0.05d) how many pairs of
# fluxes per orbit; return as (j=0..Nseason; k=0..Norbits)
# NoPairs[j,k], MJDmin[j,k], MJDmax[j,k]
def getOrbitInfoFromBrendanSpectrum(mjd,wav):
Norbits = []
NoPairs = []
MJDmin = []
MJDmax = []
Mmin = []
Mmax = []
nps = []
Mmin.append(mjd[0])
k = 1
Nobs = 0
for i in range(0,len(mjd)):
Nobs += 1
dt = mjd[i] - mjd[i-1]
if (dt>0.05):
# new orbit...
Mmax.append(mjd[i-1])
nps.append(int(k/2))
k = 1
if (dt>100):
# and also a new season
MJDmin.append(Mmin)
MJDmax.append(Mmax)
NoPairs.append(nps)
Mmin = []
Mmax = []
nps = []
Mmin.append(mjd[i])
else:
# not new orbit, simply count the point
k += 1
if (i == (len(mjd)-1)):
# special case of the last point
Mmax.append(mjd[i])
MJDmin.append(Mmin)
MJDmax.append(Mmax)
nps.append(int(k/2))
NoPairs.append(nps)
return NoPairs, MJDmin, MJDmax
def getSPHERExSeasons(NoPairs, MJDmin, MJDmax, verbose=False):
Nseasons = len(MJDmin)
Nobs = 0
for i in range(0, Nseasons):
Norbits=len(MJDmin[i])
Nobs += 2*np.sum(NoPairs[i])
dt = []
if verbose:
for j in range(0,len(NoPairs[i])):
dMJD = int(60*24*(MJDmax[i][j] - MJDmin[i][j]))
dt.append | wavelength, flux, epsilon, albedo = np.loadtxt(SEDfile)
# 2) correct for Dast and translate flux to AB mags
magAB = getABmag(wavelength, flux*Dast**2)
# 3) interpolate magAB, epsilon and albedo to waveSPH
SPHmagAB = np.interp(waveSPH, 1.0e6*wavelength, magAB)
SPHeps = np.interp(waveSPH, 1.0e6*wavelength, epsilon)
SPHalb = np.interp(waveSPH, 1.0e6*wavelength, albedo)
return SPHmagAB, SPHeps, SPHalb | identifier_body |
analysisTools.py | Taxi!=''):
# read reflectivity curve
file = BusDIR + "/" + "reflectivity" + BusTaxi + ".dat"
refldata = np.loadtxt(file, skiprows=1)
waveSPHrefl, reflectivity = refldata.T[0], refldata.T[1]
print('read in', file)
if (waveSPHrefl.size != waveSPH.size):
print('ERROR: different standard SPHEREx wavelength grids!')
# assumption is that emission is negligible below this wavelength
wavMinEm = 2.0 # micron, OK outside Earth's orbit
# compute and apply correction
magTrue += getBusAKARIMagCorr(waveSPH, reflectivity, magTrue, wavMinEm)
return magTrue
# no-noise version
def getATMmodelMag(SEDfile, Dast, waveSPH):
# 1) read wavelength (in m), flux (F_lambda in W/m2/m), emissivity and albedo
# 2) correct flux from the fiducial D=1km to Dast
# 3) given input wavelength array, compute AB magnitudes
# 4) return true AB magnitudes, epsilon, and albedo interpolated to waveSPH values
# 1) read data
wavelength, flux, epsilon, albedo = np.loadtxt(SEDfile)
# 2) correct for Dast and translate flux to AB mags
magAB = getABmag(wavelength, flux*Dast**2)
# 3) interpolate magAB, epsilon and albedo to waveSPH
SPHmagAB = np.interp(waveSPH, 1.0e6*wavelength, magAB)
SPHeps = np.interp(waveSPH, 1.0e6*wavelength, epsilon)
SPHalb = np.interp(waveSPH, 1.0e6*wavelength, albedo)
return SPHmagAB, SPHeps, SPHalb
def getBusAKARIMagCorr(wave, refl, magTot, wavMax):
## compute additive correction to magTot because of a different
## reflectivity curve affecting the scattered flux; the correction
## vanishes at the first wavelength in the SPHEREx standard grid
refl0 = refl/refl[0]
# part 1: emission negligible at short wavelengths
dmag1 = -2.5*np.log10(refl0)
# part 2: extrapolate the scattered component from short wavelengths
# compute fraction of the total flux due to scattered component
magTotATwavMax = np.interp(wavMax, wave, magTot)
ftotATwavMax = 10**(0.4*(magTot-magTotATwavMax))
# and extrapolate as Rayleigh-Jeans tail
fCorr = 1 - (1-refl0) * ftotATwavMax * (wavMax/wave)**2
dmag2 = -2.5*np.log10(fCorr)
return np.where(wave < wavMax, dmag1, dmag2)
# read MJD and wavelength from Brendan's file, and regrid wavelengths
# to the standard wavelength; return MJD and corresponding standard
# wavelength and 5-sigma SPHEREx depth
def getBrendanSpectrum(dataDIR, dataFile, singleSurvey=True):
b = np.loadtxt(dataFile, skiprows=1)
mjdBrendan = b.T[0]
wavBrendan = b.T[1]
waveSPH, m5SPH = getSPHERExSensitivity(dataDIR)
wavBrendanSPH = getStandardLambda(wavBrendan, waveSPH)
m5BrendanSPH = np.interp(wavBrendanSPH, waveSPH, m5SPH)
if (singleSurvey):
# since asteroids move, we need to increase errors by sqrt(4)
# because we don't have 4 surveys as for static sources, or
# the 5-sigma limiting depth is shallower by ~0.75 mag
m5BrendanSPH -= 1.25*np.log10(4)
return mjdBrendan, wavBrendanSPH, m5BrendanSPH
# wrapper around getOrbitInfoFromBrendanSpectrum since wav is not needed
def getOrbitInfoFromBrendansMJDs(mjd):
return getOrbitInfoFromBrendanSpectrum(mjd, mjd)
# given BrendanSpectrum (mjd, wavelength), for each SPHEREx season/survey
# (separated by >100d), find for all its orbits (<0.05d) how many pairs of
# fluxes per orbit; return as (j=0..Nseason; k=0..Norbits)
# NoPairs[j,k], MJDmin[j,k], MJDmax[j,k]
def getOrbitInfoFromBrendanSpectrum(mjd,wav):
Norbits = []
NoPairs = []
MJDmin = []
MJDmax = []
Mmin = []
Mmax = []
nps = []
Mmin.append(mjd[0])
k = 1
Nobs = 0
for i in range(0,len(mjd)):
Nobs += 1
dt = mjd[i] - mjd[i-1]
if (dt>0.05):
# new orbit...
Mmax.append(mjd[i-1])
nps.append(int(k/2))
k = 1
if (dt>100):
# and also a new season
MJDmin.append(Mmin)
MJDmax.append(Mmax)
NoPairs.append(nps)
Mmin = []
Mmax = []
nps = []
Mmin.append(mjd[i])
else:
# not new orbit, simply count the point
k += 1
if (i == (len(mjd)-1)):
# special case of the last point
Mmax.append(mjd[i])
MJDmin.append(Mmin)
MJDmax.append(Mmax)
nps.append(int(k/2))
NoPairs.append(nps)
return NoPairs, MJDmin, MJDmax
def getSPHERExSeasons(NoPairs, MJDmin, MJDmax, verbose=False):
Nseasons = len(MJDmin)
Nobs = 0
for i in range(0, Nseasons):
Norbits=len(MJDmin[i])
Nobs += 2*np.sum(NoPairs[i])
dt = []
if verbose:
for j in range(0,len(NoPairs[i])):
dMJD = int(60*24*(MJDmax[i][j] - MJDmin[i][j]))
dt.append(dMJD)
print('season', i, ' Norb:', Norbits, ' Nobs=', Nobs)
print(' NoPairs=', NoPairs[i])
print(' dt=', dt)
print('No. of observations:', Nobs)
return Nobs
## select observations from a single season (zero-indexed!)
def selectSeasonSED(season, waveObs, mjdObs, magRaw, magErr, dmOff, dmNoise):
NoPairs, MJDmin, MJDmax = getOrbitInfoFromBrendansMJDs(mjdObs)
Nseasons = len(MJDmin)
if (season > Nseasons):
print('there are only', Nseasons,' seasons, not', season)
return
Norbits=len(MJDmin[season])
mjdMinVal = MJDmin[season][0]
mjdMaxVal = MJDmax[season][Norbits-1]
wS = waveObs[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mjdS = mjdObs[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mRawS = magRaw[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mErrS = magErr[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
dmOffS = dmOff[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
dmNoiseS = dmNoise[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
return wS, mjdS, mRawS, mErrS, dmOffS, dmNoiseS
# given lambda from Brendan's file, return lambdaGrid which are
# the closest values in the standard SPHEREx wavelength grid
def getStandardLambda(waveBrandon, waveSPHEREx):
lambdaGrid = 0*waveBrandon
for i in range(0,len(waveBrandon)):
delLambda = np.abs(waveSPHEREx - waveBrandon[i])
lambdaGrid[i] = getClosest(delLambda, waveSPHEREx)[0][1]
return lambdaGrid
def | getClosest | identifier_name |
|
timer.rs | <u32>,
pub task_stop: VolatileCell<u32>,
pub task_count: VolatileCell<u32>,
pub task_clear: VolatileCell<u32>,
pub task_shutdown: VolatileCell<u32>,
_reserved0: [VolatileCell<u32>; 11],
pub task_capture: [VolatileCell<u32>; 4], // 0x40
_reserved1: [VolatileCell<u32>; 60], // 0x140
pub event_compare: [VolatileCell<u32>; 4],
_reserved2: [VolatileCell<u32>; 44], // 0x150
pub shorts: VolatileCell<u32>, // 0x200
_reserved3: [VolatileCell<u32>; 64], // 0x204
pub intenset: VolatileCell<u32>, // 0x304
pub intenclr: VolatileCell<u32>, // 0x308
_reserved4: [VolatileCell<u32>; 126], // 0x30C
pub mode: VolatileCell<u32>, // 0x504
pub bitmode: VolatileCell<u32>, // 0x508
_reserved5: VolatileCell<u32>,
pub prescaler: VolatileCell<u32>, // 0x510
_reserved6: [VolatileCell<u32>; 11], // 0x514
pub cc: [VolatileCell<u32>; 4], // 0x540
}
const SIZE: usize = 0x1000;
const TIMER_BASE: usize = 0x40008000;
#[derive(Copy,Clone)]
pub enum Location {
TIMER0,
TIMER1,
TIMER2,
}
pub static mut TIMER0: Timer = Timer {
which: Location::TIMER0,
nvic: NvicIdx::TIMER0,
client: Cell::new(None),
};
pub static mut ALARM1: TimerAlarm = TimerAlarm {
which: Location::TIMER1,
nvic: NvicIdx::TIMER1,
client: Cell::new(None),
};
pub static mut TIMER2: Timer = Timer {
which: Location::TIMER2,
nvic: NvicIdx::TIMER2,
client: Cell::new(None),
};
#[allow(non_snake_case)]
fn TIMER(location: Location) -> &'static Registers {
let ptr = TIMER_BASE + (location as usize) * SIZE;
unsafe { mem::transmute(ptr) }
}
pub trait CompareClient {
/// Passes a bitmask of which of the 4 compares/captures fired (0x0-0xf).
fn compare(&self, bitmask: u8);
}
pub struct Timer {
which: Location,
nvic: NvicIdx,
client: Cell<Option<&'static CompareClient>>,
}
impl Timer {
fn timer(&self) -> &'static Registers {
TIMER(self.which)
}
pub const fn new(location: Location, nvic: NvicIdx) -> Timer {
Timer {
which: location,
nvic: nvic,
client: Cell::new(None),
}
}
pub fn set_client(&self, client: &'static CompareClient) {
self.client.set(Some(client));
}
pub fn start(&self) {
self.timer().task_start.set(1);
}
// Stops the timer and keeps the value
pub fn stop(&self) {
self.timer().task_stop.set(1);
}
// Stops the timer and clears the value
pub fn shutdown(&self) {
self.timer().task_shutdown.set(1);
}
// Clear the value
pub fn clear(&self) {
self.timer().task_clear.set(1);
}
/// Capture the current timer value into the CC register
/// specified by which, and return the value.
pub fn capture(&self, which: u8) -> u32 {
match which {
0 => {
self.timer().task_capture[0].set(1);
self.timer().cc[0].get()
}
1 => {
self.timer().task_capture[1].set(1);
self.timer().cc[1].get()
}
2 => {
self.timer().task_capture[2].set(1);
self.timer().cc[2].get()
}
_ => {
self.timer().task_capture[3].set(1);
self.timer().cc[3].get()
}
}
}
/// Capture the current value to the CC register specified by
/// which and do not return the value.
pub fn | (&self, which: u8) {
let _ = self.capture(which);
}
/// Shortcuts can automatically stop or clear the timer on a particular
/// compare event; refer to section 18.3 of the nRF reference manual
/// for details. Implementation currently provides shortcuts as the
/// raw bitmask.
pub fn get_shortcuts(&self) -> u32 {
self.timer().shorts.get()
}
pub fn set_shortcuts(&self, shortcut: u32) {
self.timer().shorts.set(shortcut);
}
pub fn get_cc0(&self) -> u32 {
self.timer().cc[0].get()
}
pub fn set_cc0(&self, val: u32) {
self.timer().cc[0].set(val);
}
pub fn get_cc1(&self) -> u32 {
self.timer().cc[1].get()
}
pub fn set_cc1(&self, val: u32) {
self.timer().cc[1].set(val);
}
pub fn get_cc2(&self) -> u32 {
self.timer().cc[2].get()
}
pub fn set_cc2(&self, val: u32) {
self.timer().cc[2].set(val);
}
pub fn get_cc3(&self) -> u32 {
self.timer().cc[3].get()
}
pub fn set_cc3(&self, val: u32) {
self.timer().cc[3].set(val);
}
pub fn enable_interrupts(&self, interrupts: u32) {
self.timer().intenset.set(interrupts << 16);
}
pub fn disable_interrupts(&self, interrupts: u32) {
self.timer().intenclr.set(interrupts << 16);
}
pub fn enable_nvic(&self) {
nvic::enable(self.nvic);
}
pub fn disable_nvic(&self) {
nvic::disable(self.nvic);
}
pub fn set_prescaler(&self, val: u8) {
// Only bottom 4 bits are valid, so mask them
// nRF51822 reference manual, page 102
self.timer().prescaler.set((val & 0xf) as u32);
}
pub fn get_prescaler(&self) -> u8 {
self.timer().prescaler.get() as u8
}
/// When an interrupt occurs, check if any of the 4 compares have
/// created an event, and if so, add it to the bitmask of triggered
/// events that is passed to the client.
pub fn handle_interrupt(&self) {
nvic::clear_pending(self.nvic);
self.client.get().map(|client| {
let mut val = 0;
// For each of 4 possible compare events, if it's happened,
// clear it and store its bit in val to pass in callback.
for i in 0..4 {
if self.timer().event_compare[i].get() != 0 {
val = val | 1 << i;
self.timer().event_compare[i].set(0);
self.disable_interrupts(1 << (i + 16));
}
}
client.compare(val as u8);
});
}
}
pub struct TimerAlarm {
which: Location,
nvic: NvicIdx,
client: Cell<Option<&'static hil::time::Client>>,
}
// CC0 is used for capture
// CC1 is used for compare/interrupts
const ALARM_CAPTURE: usize = 0;
const ALARM_COMPARE: usize = 1;
const ALARM_INTERRUPT_BIT: u32 = 1 << (16 + ALARM_COMPARE);
impl TimerAlarm {
fn timer(&self) -> &'static Registers {
TIMER(self.which)
}
pub const fn new(location: Location, nvic: NvicIdx) -> TimerAlarm {
TimerAlarm {
which: location,
nvic: nvic,
client: Cell::new(None),
}
}
pub fn clear(&self) {
self.clear_alarm();
self.timer().task_clear.set(1);
}
pub fn clear_alarm(&self) {
self.timer().event_compare[ALARM_COMPARE].set(0);
self.disable_interrupts();
nvic::clear_pending(self.nvic);
}
pub fn set_client(&self, client | capture_to | identifier_name |
timer.rs | <u32>,
pub task_stop: VolatileCell<u32>,
pub task_count: VolatileCell<u32>,
pub task_clear: VolatileCell<u32>,
pub task_shutdown: VolatileCell<u32>,
_reserved0: [VolatileCell<u32>; 11],
pub task_capture: [VolatileCell<u32>; 4], // 0x40
_reserved1: [VolatileCell<u32>; 60], // 0x140
pub event_compare: [VolatileCell<u32>; 4],
_reserved2: [VolatileCell<u32>; 44], // 0x150
pub shorts: VolatileCell<u32>, // 0x200
_reserved3: [VolatileCell<u32>; 64], // 0x204
pub intenset: VolatileCell<u32>, // 0x304
pub intenclr: VolatileCell<u32>, // 0x308
_reserved4: [VolatileCell<u32>; 126], // 0x30C
pub mode: VolatileCell<u32>, // 0x504
pub bitmode: VolatileCell<u32>, // 0x508
_reserved5: VolatileCell<u32>,
pub prescaler: VolatileCell<u32>, // 0x510
_reserved6: [VolatileCell<u32>; 11], // 0x514
pub cc: [VolatileCell<u32>; 4], // 0x540
}
const SIZE: usize = 0x1000;
const TIMER_BASE: usize = 0x40008000;
#[derive(Copy,Clone)]
pub enum Location {
TIMER0,
TIMER1,
TIMER2,
}
pub static mut TIMER0: Timer = Timer {
which: Location::TIMER0,
nvic: NvicIdx::TIMER0,
client: Cell::new(None),
};
pub static mut ALARM1: TimerAlarm = TimerAlarm {
which: Location::TIMER1,
nvic: NvicIdx::TIMER1,
client: Cell::new(None),
};
pub static mut TIMER2: Timer = Timer {
which: Location::TIMER2,
nvic: NvicIdx::TIMER2,
client: Cell::new(None),
};
#[allow(non_snake_case)]
fn TIMER(location: Location) -> &'static Registers {
let ptr = TIMER_BASE + (location as usize) * SIZE;
unsafe { mem::transmute(ptr) }
}
pub trait CompareClient {
/// Passes a bitmask of which of the 4 compares/captures fired (0x0-0xf).
fn compare(&self, bitmask: u8);
}
pub struct Timer {
which: Location,
nvic: NvicIdx,
client: Cell<Option<&'static CompareClient>>,
}
impl Timer {
fn timer(&self) -> &'static Registers {
TIMER(self.which)
}
pub const fn new(location: Location, nvic: NvicIdx) -> Timer {
Timer {
which: location,
nvic: nvic,
client: Cell::new(None),
}
}
pub fn set_client(&self, client: &'static CompareClient) {
self.client.set(Some(client));
}
pub fn start(&self) {
self.timer().task_start.set(1);
}
// Stops the timer and keeps the value
pub fn stop(&self) {
self.timer().task_stop.set(1);
}
// Stops the timer and clears the value
pub fn shutdown(&self) {
self.timer().task_shutdown.set(1);
}
// Clear the value
pub fn clear(&self) {
self.timer().task_clear.set(1);
}
/// Capture the current timer value into the CC register
/// specified by which, and return the value.
pub fn capture(&self, which: u8) -> u32 {
match which {
0 => {
self.timer().task_capture[0].set(1);
self.timer().cc[0].get()
}
1 => {
self.timer().task_capture[1].set(1);
self.timer().cc[1].get()
}
2 => |
_ => {
self.timer().task_capture[3].set(1);
self.timer().cc[3].get()
}
}
}
/// Capture the current value to the CC register specified by
/// which and do not return the value.
pub fn capture_to(&self, which: u8) {
let _ = self.capture(which);
}
/// Shortcuts can automatically stop or clear the timer on a particular
/// compare event; refer to section 18.3 of the nRF reference manual
/// for details. Implementation currently provides shortcuts as the
/// raw bitmask.
pub fn get_shortcuts(&self) -> u32 {
self.timer().shorts.get()
}
pub fn set_shortcuts(&self, shortcut: u32) {
self.timer().shorts.set(shortcut);
}
pub fn get_cc0(&self) -> u32 {
self.timer().cc[0].get()
}
pub fn set_cc0(&self, val: u32) {
self.timer().cc[0].set(val);
}
pub fn get_cc1(&self) -> u32 {
self.timer().cc[1].get()
}
pub fn set_cc1(&self, val: u32) {
self.timer().cc[1].set(val);
}
pub fn get_cc2(&self) -> u32 {
self.timer().cc[2].get()
}
pub fn set_cc2(&self, val: u32) {
self.timer().cc[2].set(val);
}
pub fn get_cc3(&self) -> u32 {
self.timer().cc[3].get()
}
pub fn set_cc3(&self, val: u32) {
self.timer().cc[3].set(val);
}
pub fn enable_interrupts(&self, interrupts: u32) {
self.timer().intenset.set(interrupts << 16);
}
pub fn disable_interrupts(&self, interrupts: u32) {
self.timer().intenclr.set(interrupts << 16);
}
pub fn enable_nvic(&self) {
nvic::enable(self.nvic);
}
pub fn disable_nvic(&self) {
nvic::disable(self.nvic);
}
pub fn set_prescaler(&self, val: u8) {
// Only bottom 4 bits are valid, so mask them
// nRF51822 reference manual, page 102
self.timer().prescaler.set((val & 0xf) as u32);
}
pub fn get_prescaler(&self) -> u8 {
self.timer().prescaler.get() as u8
}
/// When an interrupt occurs, check if any of the 4 compares have
/// created an event, and if so, add it to the bitmask of triggered
/// events that is passed to the client.
pub fn handle_interrupt(&self) {
nvic::clear_pending(self.nvic);
self.client.get().map(|client| {
let mut val = 0;
// For each of 4 possible compare events, if it's happened,
// clear it and store its bit in val to pass in callback.
for i in 0..4 {
if self.timer().event_compare[i].get() != 0 {
val = val | 1 << i;
self.timer().event_compare[i].set(0);
self.disable_interrupts(1 << (i + 16));
}
}
client.compare(val as u8);
});
}
}
pub struct TimerAlarm {
which: Location,
nvic: NvicIdx,
client: Cell<Option<&'static hil::time::Client>>,
}
// CC0 is used for capture
// CC1 is used for compare/interrupts
const ALARM_CAPTURE: usize = 0;
const ALARM_COMPARE: usize = 1;
const ALARM_INTERRUPT_BIT: u32 = 1 << (16 + ALARM_COMPARE);
impl TimerAlarm {
fn timer(&self) -> &'static Registers {
TIMER(self.which)
}
pub const fn new(location: Location, nvic: NvicIdx) -> TimerAlarm {
TimerAlarm {
which: location,
nvic: nvic,
client: Cell::new(None),
}
}
pub fn clear(&self) {
self.clear_alarm();
self.timer().task_clear.set(1);
}
pub fn clear_alarm(&self) {
self.timer().event_compare[ALARM_COMPARE].set(0);
self.disable_interrupts();
nvic::clear_pending(self.nvic);
}
pub fn set_client(&self, | {
self.timer().task_capture[2].set(1);
self.timer().cc[2].get()
} | conditional_block |
timer.rs | <u32>,
pub task_stop: VolatileCell<u32>,
pub task_count: VolatileCell<u32>,
pub task_clear: VolatileCell<u32>,
pub task_shutdown: VolatileCell<u32>,
_reserved0: [VolatileCell<u32>; 11],
pub task_capture: [VolatileCell<u32>; 4], // 0x40
_reserved1: [VolatileCell<u32>; 60], // 0x140
pub event_compare: [VolatileCell<u32>; 4],
_reserved2: [VolatileCell<u32>; 44], // 0x150
pub shorts: VolatileCell<u32>, // 0x200
_reserved3: [VolatileCell<u32>; 64], // 0x204
pub intenset: VolatileCell<u32>, // 0x304
pub intenclr: VolatileCell<u32>, // 0x308
_reserved4: [VolatileCell<u32>; 126], // 0x30C
pub mode: VolatileCell<u32>, // 0x504
pub bitmode: VolatileCell<u32>, // 0x508
_reserved5: VolatileCell<u32>,
pub prescaler: VolatileCell<u32>, // 0x510
_reserved6: [VolatileCell<u32>; 11], // 0x514
pub cc: [VolatileCell<u32>; 4], // 0x540
}
const SIZE: usize = 0x1000;
const TIMER_BASE: usize = 0x40008000;
#[derive(Copy,Clone)]
pub enum Location {
TIMER0,
TIMER1,
TIMER2,
}
pub static mut TIMER0: Timer = Timer {
which: Location::TIMER0,
nvic: NvicIdx::TIMER0,
client: Cell::new(None),
};
pub static mut ALARM1: TimerAlarm = TimerAlarm {
which: Location::TIMER1,
nvic: NvicIdx::TIMER1,
client: Cell::new(None),
};
pub static mut TIMER2: Timer = Timer {
which: Location::TIMER2,
nvic: NvicIdx::TIMER2,
client: Cell::new(None),
};
#[allow(non_snake_case)]
fn TIMER(location: Location) -> &'static Registers {
let ptr = TIMER_BASE + (location as usize) * SIZE;
unsafe { mem::transmute(ptr) }
}
pub trait CompareClient {
/// Passes a bitmask of which of the 4 compares/captures fired (0x0-0xf).
fn compare(&self, bitmask: u8);
}
pub struct Timer {
which: Location,
nvic: NvicIdx,
client: Cell<Option<&'static CompareClient>>,
}
impl Timer {
fn timer(&self) -> &'static Registers {
TIMER(self.which)
}
pub const fn new(location: Location, nvic: NvicIdx) -> Timer {
Timer {
which: location,
nvic: nvic,
client: Cell::new(None),
}
}
pub fn set_client(&self, client: &'static CompareClient) {
self.client.set(Some(client));
}
pub fn start(&self) {
self.timer().task_start.set(1);
}
// Stops the timer and keeps the value
pub fn stop(&self) {
self.timer().task_stop.set(1);
}
// Stops the timer and clears the value
pub fn shutdown(&self) {
self.timer().task_shutdown.set(1);
}
// Clear the value
pub fn clear(&self) {
self.timer().task_clear.set(1);
}
/// Capture the current timer value into the CC register
/// specified by which, and return the value.
pub fn capture(&self, which: u8) -> u32 {
match which {
0 => {
self.timer().task_capture[0].set(1);
self.timer().cc[0].get()
}
1 => {
self.timer().task_capture[1].set(1);
self.timer().cc[1].get()
}
2 => {
self.timer().task_capture[2].set(1);
self.timer().cc[2].get()
}
_ => {
self.timer().task_capture[3].set(1);
self.timer().cc[3].get()
}
}
}
/// Capture the current value to the CC register specified by
/// which and do not return the value.
pub fn capture_to(&self, which: u8) {
let _ = self.capture(which);
}
/// Shortcuts can automatically stop or clear the timer on a particular
/// compare event; refer to section 18.3 of the nRF reference manual
/// for details. Implementation currently provides shortcuts as the
/// raw bitmask.
pub fn get_shortcuts(&self) -> u32 |
pub fn set_shortcuts(&self, shortcut: u32) {
self.timer().shorts.set(shortcut);
}
pub fn get_cc0(&self) -> u32 {
self.timer().cc[0].get()
}
pub fn set_cc0(&self, val: u32) {
self.timer().cc[0].set(val);
}
pub fn get_cc1(&self) -> u32 {
self.timer().cc[1].get()
}
pub fn set_cc1(&self, val: u32) {
self.timer().cc[1].set(val);
}
pub fn get_cc2(&self) -> u32 {
self.timer().cc[2].get()
}
pub fn set_cc2(&self, val: u32) {
self.timer().cc[2].set(val);
}
pub fn get_cc3(&self) -> u32 {
self.timer().cc[3].get()
}
pub fn set_cc3(&self, val: u32) {
self.timer().cc[3].set(val);
}
pub fn enable_interrupts(&self, interrupts: u32) {
self.timer().intenset.set(interrupts << 16);
}
pub fn disable_interrupts(&self, interrupts: u32) {
self.timer().intenclr.set(interrupts << 16);
}
pub fn enable_nvic(&self) {
nvic::enable(self.nvic);
}
pub fn disable_nvic(&self) {
nvic::disable(self.nvic);
}
pub fn set_prescaler(&self, val: u8) {
// Only bottom 4 bits are valid, so mask them
// nRF51822 reference manual, page 102
self.timer().prescaler.set((val & 0xf) as u32);
}
pub fn get_prescaler(&self) -> u8 {
self.timer().prescaler.get() as u8
}
/// When an interrupt occurs, check if any of the 4 compares have
/// created an event, and if so, add it to the bitmask of triggered
/// events that is passed to the client.
pub fn handle_interrupt(&self) {
nvic::clear_pending(self.nvic);
self.client.get().map(|client| {
let mut val = 0;
// For each of 4 possible compare events, if it's happened,
// clear it and store its bit in val to pass in callback.
for i in 0..4 {
if self.timer().event_compare[i].get() != 0 {
val = val | 1 << i;
self.timer().event_compare[i].set(0);
self.disable_interrupts(1 << (i + 16));
}
}
client.compare(val as u8);
});
}
}
pub struct TimerAlarm {
which: Location,
nvic: NvicIdx,
client: Cell<Option<&'static hil::time::Client>>,
}
// CC0 is used for capture
// CC1 is used for compare/interrupts
const ALARM_CAPTURE: usize = 0;
const ALARM_COMPARE: usize = 1;
const ALARM_INTERRUPT_BIT: u32 = 1 << (16 + ALARM_COMPARE);
impl TimerAlarm {
fn timer(&self) -> &'static Registers {
TIMER(self.which)
}
pub const fn new(location: Location, nvic: NvicIdx) -> TimerAlarm {
TimerAlarm {
which: location,
nvic: nvic,
client: Cell::new(None),
}
}
pub fn clear(&self) {
self.clear_alarm();
self.timer().task_clear.set(1);
}
pub fn clear_alarm(&self) {
self.timer().event_compare[ALARM_COMPARE].set(0);
self.disable_interrupts();
nvic::clear_pending(self.nvic);
}
pub fn set_client(&self, | {
self.timer().shorts.get()
} | identifier_body |
timer.rs | //! This implementation provides a full-fledged Timer interface to
//! timers 0 and 2, and exposes Timer1 as an HIL Alarm, for a Tock
//! timer system. It may be that the Tock timer system should ultimately
//! be placed on top of the RTC (from the low frequency clock). It's currently
//! implemented this way as a demonstration that it can be, and because
//! the full RTC/clock interface hasn't been finalized yet.
//!
//! This approach should be rewritten, such that the timer system uses
//! the RTC from the low frequency clock (lower power) and the scheduler
//! uses the high frequency clock.
//!
//! Author: Philip Levis <[email protected]>
//! Date: August 18, 2016
use chip;
use core::cell::Cell;
use core::mem;
use kernel::common::VolatileCell;
use kernel::hil;
use nvic;
use peripheral_interrupts::NvicIdx;
#[repr(C, packed)]
struct Registers {
pub task_start: VolatileCell<u32>,
pub task_stop: VolatileCell<u32>,
pub task_count: VolatileCell<u32>,
pub task_clear: VolatileCell<u32>,
pub task_shutdown: VolatileCell<u32>,
_reserved0: [VolatileCell<u32>; 11],
pub task_capture: [VolatileCell<u32>; 4], // 0x40
_reserved1: [VolatileCell<u32>; 60], // 0x140
pub event_compare: [VolatileCell<u32>; 4],
_reserved2: [VolatileCell<u32>; 44], // 0x150
pub shorts: VolatileCell<u32>, // 0x200
_reserved3: [VolatileCell<u32>; 64], // 0x204
pub intenset: VolatileCell<u32>, // 0x304
pub intenclr: VolatileCell<u32>, // 0x308
_reserved4: [VolatileCell<u32>; 126], // 0x30C
pub mode: VolatileCell<u32>, // 0x504
pub bitmode: VolatileCell<u32>, // 0x508
_reserved5: VolatileCell<u32>,
pub prescaler: VolatileCell<u32>, // 0x510
_reserved6: [VolatileCell<u32>; 11], // 0x514
pub cc: [VolatileCell<u32>; 4], // 0x540
}
const SIZE: usize = 0x1000;
const TIMER_BASE: usize = 0x40008000;
#[derive(Copy,Clone)]
pub enum Location {
TIMER0,
TIMER1,
TIMER2,
}
pub static mut TIMER0: Timer = Timer {
which: Location::TIMER0,
nvic: NvicIdx::TIMER0,
client: Cell::new(None),
};
pub static mut ALARM1: TimerAlarm = TimerAlarm {
which: Location::TIMER1,
nvic: NvicIdx::TIMER1,
client: Cell::new(None),
};
pub static mut TIMER2: Timer = Timer {
which: Location::TIMER2,
nvic: NvicIdx::TIMER2,
client: Cell::new(None),
};
#[allow(non_snake_case)]
fn TIMER(location: Location) -> &'static Registers {
let ptr = TIMER_BASE + (location as usize) * SIZE;
unsafe { mem::transmute(ptr) }
}
pub trait CompareClient {
/// Passes a bitmask of which of the 4 compares/captures fired (0x0-0xf).
fn compare(&self, bitmask: u8);
}
pub struct Timer {
which: Location,
nvic: NvicIdx,
client: Cell<Option<&'static CompareClient>>,
}
impl Timer {
fn timer(&self) -> &'static Registers {
TIMER(self.which)
}
pub const fn new(location: Location, nvic: NvicIdx) -> Timer {
Timer {
which: location,
nvic: nvic,
client: Cell::new(None),
}
}
pub fn set_client(&self, client: &'static CompareClient) {
self.client.set(Some(client));
}
pub fn start(&self) {
self.timer().task_start.set(1);
}
// Stops the timer and keeps the value
pub fn stop(&self) {
self.timer().task_stop.set(1);
}
// Stops the timer and clears the value
pub fn shutdown(&self) {
self.timer().task_shutdown.set(1);
}
// Clear the value
pub fn clear(&self) {
self.timer().task_clear.set(1);
}
/// Capture the current timer value into the CC register
/// specified by which, and return the value.
pub fn capture(&self, which: u8) -> u32 {
match which {
0 => {
self.timer().task_capture[0].set(1);
self.timer().cc[0].get()
}
1 => {
self.timer().task_capture[1].set(1);
self.timer().cc[1].get()
}
2 => {
self.timer().task_capture[2].set(1);
self.timer().cc[2].get()
}
_ => {
self.timer().task_capture[3].set(1);
self.timer().cc[3].get()
}
}
}
/// Capture the current value to the CC register specified by
/// which and do not return the value.
pub fn capture_to(&self, which: u8) {
let _ = self.capture(which);
}
/// Shortcuts can automatically stop or clear the timer on a particular
/// compare event; refer to section 18.3 of the nRF reference manual
/// for details. Implementation currently provides shortcuts as the
/// raw bitmask.
pub fn get_shortcuts(&self) -> u32 {
self.timer().shorts.get()
}
pub fn set_shortcuts(&self, shortcut: u32) {
self.timer().shorts.set(shortcut);
}
pub fn get_cc0(&self) -> u32 {
self.timer().cc[0].get()
}
pub fn set_cc0(&self, val: u32) {
self.timer().cc[0].set(val);
}
pub fn get_cc1(&self) -> u32 {
self.timer().cc[1].get()
}
pub fn set_cc1(&self, val: u32) {
self.timer().cc[0].set(val);
}
pub fn get_cc2(&self) -> u32 {
self.timer().cc[2].get()
}
pub fn set_cc2(&self, val: u32) {
self.timer().cc[0].set(val);
}
pub fn get_cc3(&self) -> u32 {
self.timer().cc[3].get()
}
pub fn set_cc3(&self, val: u32) {
self.timer().cc[0].set(val);
}
pub fn enable_interrupts(&self, interrupts: u32) {
self.timer().intenset.set(interrupts << 16);
}
pub fn disable_interrupts(&self, interrupts: u32) {
self.timer().intenclr.set(interrupts << 16);
}
pub fn enable_nvic(&self) {
nvic::enable(self.nvic);
}
pub fn disable_nvic(&self) {
nvic::disable(self.nvic);
}
pub fn set_prescaler(&self, val: u8) {
// Only bottom 4 bits are valid, so mask them
// nRF51822 reference manual, page 102
self.timer().prescaler.set((val & 0xf) as u32);
}
pub fn get_prescaler(&self) -> u8 {
self.timer().prescaler.get() as u8
}
/// When an interrupt occurs, check if any of the 4 compares have
/// created an event, and if so, add it to the bitmask of triggered
/// events that is passed to the client.
pub fn handle_interrupt(&self) {
nvic::clear_pending(self.nvic);
self.client.get().map(|client| {
let mut val = 0;
// For each of 4 possible compare events, if it's happened,
// clear it and store its bit in val to pass in callback.
for i in 0..4 {
if self.timer().event_compare[i].get() != 0 {
val = val | 1 << i;
self.timer().event_compare[i].set(0);
self.disable_interrupts(1 << (i + 16));
}
}
client.compare(val as u8);
});
}
}
pub struct TimerAlarm {
which: Location,
nvic: NvicIdx | random_line_split |
||
session_data.rs | /// A breakpoint was requested using an instruction address, and usually a result of a user requesting a
/// breakpoint while in a 'disassembly' view.
InstructionBreakpoint,
/// A breakpoint that has a Source, and usually a result of a user requesting a breakpoint while in a 'source' view.
SourceBreakpoint {
source: Source,
location: SourceLocationScope,
},
}
/// Breakpoint requests will either be refer to a specific SourceLcoation, or unspecified, in which case it will refer to
/// all breakpoints for the Source.
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SourceLocationScope {
All,
Specific(SourceLocation),
}
/// Provide the storage and methods to handle various [`BreakpointType`]
#[derive(Clone, Debug)]
pub(crate) struct ActiveBreakpoint {
pub(crate) breakpoint_type: BreakpointType,
pub(crate) address: u64,
}
/// SessionData is designed to be similar to [probe_rs::Session], in as much that it provides handles to the [CoreHandle] instances for each of the available [probe_rs::Core] involved in the debug session.
/// To get access to the [CoreHandle] for a specific [probe_rs::Core], the
/// TODO: Adjust [SessionConfig] to allow multiple cores (and if appropriate, their binaries) to be specified.
pub(crate) struct SessionData {
pub(crate) session: Session,
/// [SessionData] will manage one [CoreData] per target core, that is also present in [SessionConfig::core_configs]
pub(crate) core_data: Vec<CoreData>,
/// Offset used for RTC timestamps
///
/// Getting the offset can fail, so it's better to store it.
timestamp_offset: UtcOffset,
}
impl SessionData {
pub(crate) fn new(
config: &mut configuration::SessionConfig,
timestamp_offset: UtcOffset,
) -> Result<Self, DebuggerError> {
// `SessionConfig` Probe/Session level configurations initialization.
let mut target_probe = match config.probe_selector.clone() {
Some(selector) => Probe::open(selector.clone()).map_err(|e| match e {
DebugProbeError::ProbeCouldNotBeCreated(ProbeCreationError::NotFound) => {
DebuggerError::Other(anyhow!(
"Could not find the probe_selector specified as {:04x}:{:04x}:{:?}",
selector.vendor_id,
selector.product_id,
selector.serial_number
))
}
other_error => DebuggerError::DebugProbe(other_error),
}),
None => {
// Only automatically select a probe if there is only a single probe detected.
let list = Probe::list_all();
if list.len() > 1 {
return Err(DebuggerError::Other(anyhow!(
"Found multiple ({}) probes",
list.len()
)));
}
if let Some(info) = list.first() {
Probe::open(info).map_err(DebuggerError::DebugProbe)
} else {
return Err(DebuggerError::Other(anyhow!(
"No probes found. Please check your USB connections."
)));
}
}
}?;
let target_selector = match &config.chip {
Some(identifier) => identifier.into(),
None => TargetSelector::Auto,
};
// Set the protocol, if the user explicitly selected a protocol. Otherwise, use the default protocol of the probe.
if let Some(wire_protocol) = config.wire_protocol {
target_probe.select_protocol(wire_protocol)?;
}
// Set the speed.
if let Some(speed) = config.speed {
let actual_speed = target_probe.set_speed(speed)?;
if actual_speed != speed {
tracing::warn!(
"Protocol speed {} kHz not supported, actual speed is {} kHz",
speed,
actual_speed
);
}
}
let mut permissions = Permissions::new();
if config.allow_erase_all {
permissions = permissions.allow_erase_all();
}
// Attach to the probe.
let target_session = if config.connect_under_reset {
target_probe.attach_under_reset(target_selector, permissions)?
} else {
target_probe
.attach(target_selector, permissions)
.map_err(|err| anyhow!("Error attaching to the probe: {:?}.", err))?
};
// Change the current working directory if `config.cwd` is `Some(T)`.
if let Some(new_cwd) = config.cwd.clone() {
set_current_dir(new_cwd.as_path()).map_err(|err| {
anyhow!(
"Failed to set current working directory to: {:?}, {:?}",
new_cwd,
err
)
})?;
};
// `FlashingConfig` probe level initialization.
// `CoreConfig` probe level initialization.
if config.core_configs.len() != 1 {
// TODO: For multi-core, allow > 1.
return Err(DebuggerError::Other(anyhow!("probe-rs-debugger requires that one, and only one, core be configured for debugging.")));
}
// Filter `CoreConfig` entries based on those that match an actual core on the target probe.
let valid_core_configs = config
.core_configs
.iter()
.filter(|&core_config| {
target_session
.list_cores()
.iter()
.any(|(target_core_index, _)| *target_core_index == core_config.core_index)
})
.cloned()
.collect::<Vec<CoreConfig>>();
let mut core_data_vec = vec![];
for core_configuration in &valid_core_configs {
core_data_vec.push(CoreData {
core_index: core_configuration.core_index,
last_known_status: CoreStatus::Unknown,
target_name: format!(
"{}-{}",
core_configuration.core_index,
target_session.target().name
),
debug_info: debug_info_from_binary(core_configuration)?,
core_peripherals: None,
stack_frames: Vec::<probe_rs::debug::stack_frame::StackFrame>::new(),
breakpoints: Vec::<ActiveBreakpoint>::new(),
rtt_connection: None,
})
}
Ok(SessionData {
session: target_session,
core_data: core_data_vec,
timestamp_offset,
})
}
/// Reload the a specific core's debug info from the binary file.
pub(crate) fn load_debug_info_for_core(
&mut self,
core_configuration: &CoreConfig,
) -> Result<(), DebuggerError> {
if let Some(core_data) = self
.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_configuration.core_index)
{
core_data.debug_info = debug_info_from_binary(core_configuration)?;
Ok(())
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// Do a 'light weight'(just get references to existing data structures) attach to the core and return relevant debug data.
pub(crate) fn attach_core(&mut self, core_index: usize) -> Result<CoreHandle, DebuggerError> {
if let (Ok(target_core), Some(core_data)) = (
self.session.core(core_index),
self.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_index),
) {
Ok(CoreHandle {
core: target_core,
core_data,
})
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// The target has no way of notifying the debug adapter when things changes, so we have to constantly poll it to determine:
/// - Whether the target cores are running, and what their actual status is.
/// - Whether the target cores have data in their RTT buffers that we need to read and pass to the client.
///
/// To optimize this polling process while also optimizing the reading of RTT data, we apply a couple of principles:
/// 1. Sleep (nap for a short duration) between polling each target core, but:
/// - Only sleep IF the core's status hasn't changed AND there was no RTT data in the last poll.
/// - Otherwise move on without delay, to keep things flowing as fast as possible.
/// - The justification is that any client side CPU used to keep polling is a small price to pay for maximum throughput of debug requests and RTT from the probe.
/// 2. Check all target cores to ensure they have a configured and initialized RTT connections and if they do, process the RTT data.
/// - To keep things efficient, the polling of RTT data is done only when we expect there to be data available.
/// - We check for RTT only when the core has an RTT connection configured, and one of the following is true:
/// - While the core is NOT halted, because core processing can generate new data at any time.
/// - The first time we have entered halted status, to ensure the buffers are drained. After that, for as long as we remain in halted state, we don't need to check RTT again.
///
/// Return a Vec of [`CoreStatus`] (one entry per core) after this process has completed, as well as a boolean indicating whether we should consider a short delay before the next poll.
pub(crate) fn poll_cores<P: ProtocolAdapter>(
&mut self,
session_config: &SessionConfig,
| /// The supported breakpoint types
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum BreakpointType { | random_line_split |
|
session_data.rs | to
/// all breakpoints for the Source.
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SourceLocationScope {
All,
Specific(SourceLocation),
}
/// Provide the storage and methods to handle various [`BreakpointType`]
#[derive(Clone, Debug)]
pub(crate) struct ActiveBreakpoint {
pub(crate) breakpoint_type: BreakpointType,
pub(crate) address: u64,
}
/// SessionData is designed to be similar to [probe_rs::Session], in as much that it provides handles to the [CoreHandle] instances for each of the available [probe_rs::Core] involved in the debug session.
/// To get access to the [CoreHandle] for a specific [probe_rs::Core], the
/// TODO: Adjust [SessionConfig] to allow multiple cores (and if appropriate, their binaries) to be specified.
pub(crate) struct SessionData {
pub(crate) session: Session,
/// [SessionData] will manage one [CoreData] per target core, that is also present in [SessionConfig::core_configs]
pub(crate) core_data: Vec<CoreData>,
/// Offset used for RTC timestamps
///
/// Getting the offset can fail, so it's better to store it.
timestamp_offset: UtcOffset,
}
impl SessionData {
pub(crate) fn new(
config: &mut configuration::SessionConfig,
timestamp_offset: UtcOffset,
) -> Result<Self, DebuggerError> {
// `SessionConfig` Probe/Session level configurations initialization.
let mut target_probe = match config.probe_selector.clone() {
Some(selector) => Probe::open(selector.clone()).map_err(|e| match e {
DebugProbeError::ProbeCouldNotBeCreated(ProbeCreationError::NotFound) => {
DebuggerError::Other(anyhow!(
"Could not find the probe_selector specified as {:04x}:{:04x}:{:?}",
selector.vendor_id,
selector.product_id,
selector.serial_number
))
}
other_error => DebuggerError::DebugProbe(other_error),
}),
None => {
// Only automatically select a probe if there is only a single probe detected.
let list = Probe::list_all();
if list.len() > 1 |
if let Some(info) = list.first() {
Probe::open(info).map_err(DebuggerError::DebugProbe)
} else {
return Err(DebuggerError::Other(anyhow!(
"No probes found. Please check your USB connections."
)));
}
}
}?;
let target_selector = match &config.chip {
Some(identifier) => identifier.into(),
None => TargetSelector::Auto,
};
// Set the protocol, if the user explicitly selected a protocol. Otherwise, use the default protocol of the probe.
if let Some(wire_protocol) = config.wire_protocol {
target_probe.select_protocol(wire_protocol)?;
}
// Set the speed.
if let Some(speed) = config.speed {
let actual_speed = target_probe.set_speed(speed)?;
if actual_speed != speed {
tracing::warn!(
"Protocol speed {} kHz not supported, actual speed is {} kHz",
speed,
actual_speed
);
}
}
let mut permissions = Permissions::new();
if config.allow_erase_all {
permissions = permissions.allow_erase_all();
}
// Attach to the probe.
let target_session = if config.connect_under_reset {
target_probe.attach_under_reset(target_selector, permissions)?
} else {
target_probe
.attach(target_selector, permissions)
.map_err(|err| anyhow!("Error attaching to the probe: {:?}.", err))?
};
// Change the current working directory if `config.cwd` is `Some(T)`.
if let Some(new_cwd) = config.cwd.clone() {
set_current_dir(new_cwd.as_path()).map_err(|err| {
anyhow!(
"Failed to set current working directory to: {:?}, {:?}",
new_cwd,
err
)
})?;
};
// `FlashingConfig` probe level initialization.
// `CoreConfig` probe level initialization.
if config.core_configs.len() != 1 {
// TODO: For multi-core, allow > 1.
return Err(DebuggerError::Other(anyhow!("probe-rs-debugger requires that one, and only one, core be configured for debugging.")));
}
// Filter `CoreConfig` entries based on those that match an actual core on the target probe.
let valid_core_configs = config
.core_configs
.iter()
.filter(|&core_config| {
target_session
.list_cores()
.iter()
.any(|(target_core_index, _)| *target_core_index == core_config.core_index)
})
.cloned()
.collect::<Vec<CoreConfig>>();
let mut core_data_vec = vec![];
for core_configuration in &valid_core_configs {
core_data_vec.push(CoreData {
core_index: core_configuration.core_index,
last_known_status: CoreStatus::Unknown,
target_name: format!(
"{}-{}",
core_configuration.core_index,
target_session.target().name
),
debug_info: debug_info_from_binary(core_configuration)?,
core_peripherals: None,
stack_frames: Vec::<probe_rs::debug::stack_frame::StackFrame>::new(),
breakpoints: Vec::<ActiveBreakpoint>::new(),
rtt_connection: None,
})
}
Ok(SessionData {
session: target_session,
core_data: core_data_vec,
timestamp_offset,
})
}
/// Reload the a specific core's debug info from the binary file.
pub(crate) fn load_debug_info_for_core(
&mut self,
core_configuration: &CoreConfig,
) -> Result<(), DebuggerError> {
if let Some(core_data) = self
.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_configuration.core_index)
{
core_data.debug_info = debug_info_from_binary(core_configuration)?;
Ok(())
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// Do a 'light weight'(just get references to existing data structures) attach to the core and return relevant debug data.
pub(crate) fn attach_core(&mut self, core_index: usize) -> Result<CoreHandle, DebuggerError> {
if let (Ok(target_core), Some(core_data)) = (
self.session.core(core_index),
self.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_index),
) {
Ok(CoreHandle {
core: target_core,
core_data,
})
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// The target has no way of notifying the debug adapter when things changes, so we have to constantly poll it to determine:
/// - Whether the target cores are running, and what their actual status is.
/// - Whether the target cores have data in their RTT buffers that we need to read and pass to the client.
///
/// To optimize this polling process while also optimizing the reading of RTT data, we apply a couple of principles:
/// 1. Sleep (nap for a short duration) between polling each target core, but:
/// - Only sleep IF the core's status hasn't changed AND there was no RTT data in the last poll.
/// - Otherwise move on without delay, to keep things flowing as fast as possible.
/// - The justification is that any client side CPU used to keep polling is a small price to pay for maximum throughput of debug requests and RTT from the probe.
/// 2. Check all target cores to ensure they have a configured and initialized RTT connections and if they do, process the RTT data.
/// - To keep things efficient, the polling of RTT data is done only when we expect there to be data available.
/// - We check for RTT only when the core has an RTT connection configured, and one of the following is true:
/// - While the core is NOT halted, because core processing can generate new data at any time.
/// - The first time we have entered halted status, to ensure the buffers are drained. After that, for as long as we remain in halted state, we don't need to check RTT again.
///
/// Return a Vec of [`CoreStatus`] (one entry per core) after this process has completed, as well as a boolean indicating whether we should consider a short delay before the next poll.
pub(crate) fn poll_cores<P: ProtocolAdapter>(
&mut self,
session_config: &SessionConfig,
debug_adapter: &mut DebugAdapter<P>,
) -> Result<(Vec<CoreStatus>, bool), DebuggerError> {
// By default, we will have a small delay between polls, and will disable it if we know the last poll returned data, on the assumption that there might be at least one more batch of data.
let mut suggest_delay_required = true;
let mut status_of_cores: Vec<CoreStatus> = vec![];
let target_memory_map = &self.session.target().memory_map.clone();
let timestamp_offset = self.timestamp_offset;
// Always set `all_cores_halted` to true, until | {
return Err(DebuggerError::Other(anyhow!(
"Found multiple ({}) probes",
list.len()
)));
} | conditional_block |
session_data.rs | Otherwise, use the default protocol of the probe.
if let Some(wire_protocol) = config.wire_protocol {
target_probe.select_protocol(wire_protocol)?;
}
// Set the speed.
if let Some(speed) = config.speed {
let actual_speed = target_probe.set_speed(speed)?;
if actual_speed != speed {
tracing::warn!(
"Protocol speed {} kHz not supported, actual speed is {} kHz",
speed,
actual_speed
);
}
}
let mut permissions = Permissions::new();
if config.allow_erase_all {
permissions = permissions.allow_erase_all();
}
// Attach to the probe.
let target_session = if config.connect_under_reset {
target_probe.attach_under_reset(target_selector, permissions)?
} else {
target_probe
.attach(target_selector, permissions)
.map_err(|err| anyhow!("Error attaching to the probe: {:?}.", err))?
};
// Change the current working directory if `config.cwd` is `Some(T)`.
if let Some(new_cwd) = config.cwd.clone() {
set_current_dir(new_cwd.as_path()).map_err(|err| {
anyhow!(
"Failed to set current working directory to: {:?}, {:?}",
new_cwd,
err
)
})?;
};
// `FlashingConfig` probe level initialization.
// `CoreConfig` probe level initialization.
if config.core_configs.len() != 1 {
// TODO: For multi-core, allow > 1.
return Err(DebuggerError::Other(anyhow!("probe-rs-debugger requires that one, and only one, core be configured for debugging.")));
}
// Filter `CoreConfig` entries based on those that match an actual core on the target probe.
let valid_core_configs = config
.core_configs
.iter()
.filter(|&core_config| {
target_session
.list_cores()
.iter()
.any(|(target_core_index, _)| *target_core_index == core_config.core_index)
})
.cloned()
.collect::<Vec<CoreConfig>>();
let mut core_data_vec = vec![];
for core_configuration in &valid_core_configs {
core_data_vec.push(CoreData {
core_index: core_configuration.core_index,
last_known_status: CoreStatus::Unknown,
target_name: format!(
"{}-{}",
core_configuration.core_index,
target_session.target().name
),
debug_info: debug_info_from_binary(core_configuration)?,
core_peripherals: None,
stack_frames: Vec::<probe_rs::debug::stack_frame::StackFrame>::new(),
breakpoints: Vec::<ActiveBreakpoint>::new(),
rtt_connection: None,
})
}
Ok(SessionData {
session: target_session,
core_data: core_data_vec,
timestamp_offset,
})
}
/// Reload the a specific core's debug info from the binary file.
pub(crate) fn load_debug_info_for_core(
&mut self,
core_configuration: &CoreConfig,
) -> Result<(), DebuggerError> {
if let Some(core_data) = self
.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_configuration.core_index)
{
core_data.debug_info = debug_info_from_binary(core_configuration)?;
Ok(())
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// Do a 'light weight'(just get references to existing data structures) attach to the core and return relevant debug data.
pub(crate) fn attach_core(&mut self, core_index: usize) -> Result<CoreHandle, DebuggerError> {
if let (Ok(target_core), Some(core_data)) = (
self.session.core(core_index),
self.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_index),
) {
Ok(CoreHandle {
core: target_core,
core_data,
})
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// The target has no way of notifying the debug adapter when things changes, so we have to constantly poll it to determine:
/// - Whether the target cores are running, and what their actual status is.
/// - Whether the target cores have data in their RTT buffers that we need to read and pass to the client.
///
/// To optimize this polling process while also optimizing the reading of RTT data, we apply a couple of principles:
/// 1. Sleep (nap for a short duration) between polling each target core, but:
/// - Only sleep IF the core's status hasn't changed AND there was no RTT data in the last poll.
/// - Otherwise move on without delay, to keep things flowing as fast as possible.
/// - The justification is that any client side CPU used to keep polling is a small price to pay for maximum throughput of debug requests and RTT from the probe.
/// 2. Check all target cores to ensure they have a configured and initialized RTT connections and if they do, process the RTT data.
/// - To keep things efficient, the polling of RTT data is done only when we expect there to be data available.
/// - We check for RTT only when the core has an RTT connection configured, and one of the following is true:
/// - While the core is NOT halted, because core processing can generate new data at any time.
/// - The first time we have entered halted status, to ensure the buffers are drained. After that, for as long as we remain in halted state, we don't need to check RTT again.
///
/// Return a Vec of [`CoreStatus`] (one entry per core) after this process has completed, as well as a boolean indicating whether we should consider a short delay before the next poll.
pub(crate) fn poll_cores<P: ProtocolAdapter>(
&mut self,
session_config: &SessionConfig,
debug_adapter: &mut DebugAdapter<P>,
) -> Result<(Vec<CoreStatus>, bool), DebuggerError> {
// By default, we will have a small delay between polls, and will disable it if we know the last poll returned data, on the assumption that there might be at least one more batch of data.
let mut suggest_delay_required = true;
let mut status_of_cores: Vec<CoreStatus> = vec![];
let target_memory_map = &self.session.target().memory_map.clone();
let timestamp_offset = self.timestamp_offset;
// Always set `all_cores_halted` to true, until one core is found to be running.
debug_adapter.all_cores_halted = true;
for core_config in session_config.core_configs.iter() {
let Ok(mut target_core) = self.attach_core(core_config.core_index) else {
tracing::debug!(
"Failed to attach to target core #{}. Cannot poll for RTT data.",
core_config.core_index
);
continue;
};
// We need to poll the core to determine its status.
let current_core_status = target_core.poll_core(debug_adapter).map_err(|error| {
let error = DebuggerError::ProbeRs(error);
let _ = debug_adapter.show_error_message(&error);
error
})?;
// If appropriate, check for RTT data.
if core_config.rtt_config.enabled {
if let Some(core_rtt) = &mut target_core.core_data.rtt_connection {
// We should poll the target for rtt data, and if any RTT data was processed, we clear the flag.
if core_rtt.process_rtt_data(debug_adapter, &mut target_core.core) {
suggest_delay_required = false;
}
} else if debug_adapter.configuration_is_done() {
// We have not yet reached the point in the target application where the RTT buffers are initialized,
// so, provided we have processed the MSDAP request for "configurationDone" , we should check again.
{
#[allow(clippy::unwrap_used)]
match target_core.attach_to_rtt(
debug_adapter,
target_memory_map,
core_config.program_binary.as_ref().unwrap(),
&core_config.rtt_config,
timestamp_offset,
) {
Ok(_) => {
// Nothing else to do.
}
Err(error) => {
debug_adapter
.show_error_message(&DebuggerError::Other(error))
.ok();
}
}
}
}
}
// If the core is running, we set the flag to indicate that at least one core is not halted.
// By setting it here, we ensure that RTT will be checked at least once after the core has halted.
if !current_core_status.is_halted() {
debug_adapter.all_cores_halted = false;
}
status_of_cores.push(current_core_status);
}
Ok((status_of_cores, suggest_delay_required))
}
}
pub(crate) fn debug_info_from_binary(
core_configuration: &CoreConfig,
) -> Result<DebugInfo, DebuggerError> | {
let debug_info = if let Some(binary_path) = &core_configuration.program_binary {
DebugInfo::from_file(binary_path).map_err(|error| DebuggerError::Other(anyhow!(error)))?
} else {
return Err(anyhow!(
"Please provide a valid `program_binary` for debug core: {:?}",
core_configuration.core_index
)
.into());
};
Ok(debug_info)
} | identifier_body |
|
session_data.rs | to
/// all breakpoints for the Source.
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SourceLocationScope {
All,
Specific(SourceLocation),
}
/// Provide the storage and methods to handle various [`BreakpointType`]
#[derive(Clone, Debug)]
pub(crate) struct | {
pub(crate) breakpoint_type: BreakpointType,
pub(crate) address: u64,
}
/// SessionData is designed to be similar to [probe_rs::Session], in as much that it provides handles to the [CoreHandle] instances for each of the available [probe_rs::Core] involved in the debug session.
/// To get access to the [CoreHandle] for a specific [probe_rs::Core], the
/// TODO: Adjust [SessionConfig] to allow multiple cores (and if appropriate, their binaries) to be specified.
pub(crate) struct SessionData {
pub(crate) session: Session,
/// [SessionData] will manage one [CoreData] per target core, that is also present in [SessionConfig::core_configs]
pub(crate) core_data: Vec<CoreData>,
/// Offset used for RTC timestamps
///
/// Getting the offset can fail, so it's better to store it.
timestamp_offset: UtcOffset,
}
impl SessionData {
pub(crate) fn new(
config: &mut configuration::SessionConfig,
timestamp_offset: UtcOffset,
) -> Result<Self, DebuggerError> {
// `SessionConfig` Probe/Session level configurations initialization.
let mut target_probe = match config.probe_selector.clone() {
Some(selector) => Probe::open(selector.clone()).map_err(|e| match e {
DebugProbeError::ProbeCouldNotBeCreated(ProbeCreationError::NotFound) => {
DebuggerError::Other(anyhow!(
"Could not find the probe_selector specified as {:04x}:{:04x}:{:?}",
selector.vendor_id,
selector.product_id,
selector.serial_number
))
}
other_error => DebuggerError::DebugProbe(other_error),
}),
None => {
// Only automatically select a probe if there is only a single probe detected.
let list = Probe::list_all();
if list.len() > 1 {
return Err(DebuggerError::Other(anyhow!(
"Found multiple ({}) probes",
list.len()
)));
}
if let Some(info) = list.first() {
Probe::open(info).map_err(DebuggerError::DebugProbe)
} else {
return Err(DebuggerError::Other(anyhow!(
"No probes found. Please check your USB connections."
)));
}
}
}?;
let target_selector = match &config.chip {
Some(identifier) => identifier.into(),
None => TargetSelector::Auto,
};
// Set the protocol, if the user explicitly selected a protocol. Otherwise, use the default protocol of the probe.
if let Some(wire_protocol) = config.wire_protocol {
target_probe.select_protocol(wire_protocol)?;
}
// Set the speed.
if let Some(speed) = config.speed {
let actual_speed = target_probe.set_speed(speed)?;
if actual_speed != speed {
tracing::warn!(
"Protocol speed {} kHz not supported, actual speed is {} kHz",
speed,
actual_speed
);
}
}
let mut permissions = Permissions::new();
if config.allow_erase_all {
permissions = permissions.allow_erase_all();
}
// Attach to the probe.
let target_session = if config.connect_under_reset {
target_probe.attach_under_reset(target_selector, permissions)?
} else {
target_probe
.attach(target_selector, permissions)
.map_err(|err| anyhow!("Error attaching to the probe: {:?}.", err))?
};
// Change the current working directory if `config.cwd` is `Some(T)`.
if let Some(new_cwd) = config.cwd.clone() {
set_current_dir(new_cwd.as_path()).map_err(|err| {
anyhow!(
"Failed to set current working directory to: {:?}, {:?}",
new_cwd,
err
)
})?;
};
// `FlashingConfig` probe level initialization.
// `CoreConfig` probe level initialization.
if config.core_configs.len() != 1 {
// TODO: For multi-core, allow > 1.
return Err(DebuggerError::Other(anyhow!("probe-rs-debugger requires that one, and only one, core be configured for debugging.")));
}
// Filter `CoreConfig` entries based on those that match an actual core on the target probe.
let valid_core_configs = config
.core_configs
.iter()
.filter(|&core_config| {
target_session
.list_cores()
.iter()
.any(|(target_core_index, _)| *target_core_index == core_config.core_index)
})
.cloned()
.collect::<Vec<CoreConfig>>();
let mut core_data_vec = vec![];
for core_configuration in &valid_core_configs {
core_data_vec.push(CoreData {
core_index: core_configuration.core_index,
last_known_status: CoreStatus::Unknown,
target_name: format!(
"{}-{}",
core_configuration.core_index,
target_session.target().name
),
debug_info: debug_info_from_binary(core_configuration)?,
core_peripherals: None,
stack_frames: Vec::<probe_rs::debug::stack_frame::StackFrame>::new(),
breakpoints: Vec::<ActiveBreakpoint>::new(),
rtt_connection: None,
})
}
Ok(SessionData {
session: target_session,
core_data: core_data_vec,
timestamp_offset,
})
}
/// Reload the a specific core's debug info from the binary file.
pub(crate) fn load_debug_info_for_core(
&mut self,
core_configuration: &CoreConfig,
) -> Result<(), DebuggerError> {
if let Some(core_data) = self
.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_configuration.core_index)
{
core_data.debug_info = debug_info_from_binary(core_configuration)?;
Ok(())
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// Do a 'light weight'(just get references to existing data structures) attach to the core and return relevant debug data.
pub(crate) fn attach_core(&mut self, core_index: usize) -> Result<CoreHandle, DebuggerError> {
if let (Ok(target_core), Some(core_data)) = (
self.session.core(core_index),
self.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_index),
) {
Ok(CoreHandle {
core: target_core,
core_data,
})
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// The target has no way of notifying the debug adapter when things changes, so we have to constantly poll it to determine:
/// - Whether the target cores are running, and what their actual status is.
/// - Whether the target cores have data in their RTT buffers that we need to read and pass to the client.
///
/// To optimize this polling process while also optimizing the reading of RTT data, we apply a couple of principles:
/// 1. Sleep (nap for a short duration) between polling each target core, but:
/// - Only sleep IF the core's status hasn't changed AND there was no RTT data in the last poll.
/// - Otherwise move on without delay, to keep things flowing as fast as possible.
/// - The justification is that any client side CPU used to keep polling is a small price to pay for maximum throughput of debug requests and RTT from the probe.
/// 2. Check all target cores to ensure they have a configured and initialized RTT connections and if they do, process the RTT data.
/// - To keep things efficient, the polling of RTT data is done only when we expect there to be data available.
/// - We check for RTT only when the core has an RTT connection configured, and one of the following is true:
/// - While the core is NOT halted, because core processing can generate new data at any time.
/// - The first time we have entered halted status, to ensure the buffers are drained. After that, for as long as we remain in halted state, we don't need to check RTT again.
///
/// Return a Vec of [`CoreStatus`] (one entry per core) after this process has completed, as well as a boolean indicating whether we should consider a short delay before the next poll.
pub(crate) fn poll_cores<P: ProtocolAdapter>(
&mut self,
session_config: &SessionConfig,
debug_adapter: &mut DebugAdapter<P>,
) -> Result<(Vec<CoreStatus>, bool), DebuggerError> {
// By default, we will have a small delay between polls, and will disable it if we know the last poll returned data, on the assumption that there might be at least one more batch of data.
let mut suggest_delay_required = true;
let mut status_of_cores: Vec<CoreStatus> = vec![];
let target_memory_map = &self.session.target().memory_map.clone();
let timestamp_offset = self.timestamp_offset;
// Always set `all_cores_halted` to true, until one | ActiveBreakpoint | identifier_name |
render.rs | </span>`
pub ins_end: String,
/// Html to insert before a span of deleted content
/// `<span class="...">`
pub del_start: String,
/// Html to insert after a span of deleted content
/// `</span>`
pub del_end: String,
}
impl Default for DiffStyle {
fn default() -> DiffStyle {
DiffStyle {
ins_start: r#"<span class="bg-green-100 text-gray-600">"#.to_string(),
ins_end: r#"</span>"#.to_string(),
del_start: r#"<span class="bg-red-100 text-gray-600 line-through">"#.to_string(),
del_end: r#"</span>"#.to_string(),
}
}
}
// these defaults can be overridden by the config file
/// Pairing of template name and contents
///
pub type Template<'template> = (&'template str, &'template str);
#[derive(Debug)]
pub struct RenderConfig<'render> {
/// Templates to be loaded for renderer. List of template name, data
pub templates: Vec<Template<'render>>,
/// Whether parser is in strict mode (e.g. if true, a variable used in template
/// that is undefined would raise an error; if false, it would evaluate to 'falsey'
pub strict_mode: bool,
}
impl<'render> Default for RenderConfig<'render> {
fn default() -> Self {
Self {
templates: Vec::new(),
strict_mode: false,
}
}
}
/// HBTemplate processor for HTML generation
pub struct Renderer<'gen> {
/// Handlebars processor
hb: Handlebars<'gen>,
/// Additional dictionary that supplements data passed to render() method
vars: TomlMap,
}
impl<'gen> Default for Renderer<'gen> {
fn default() -> Self {
// unwrap ok because only error condition occurs with templates, and default has none.
Self::init(&RenderConfig::default()).unwrap()
}
}
impl<'gen> Renderer<'gen> {
/// Initialize handlebars template processor.
pub fn init(config: &RenderConfig) -> Result<Self> {
let mut hb = Handlebars::new();
// don't use strict mode because docs may have different frontmatter vars
// and it's easier in templates to use if we allow undefined ~= false-y
hb.set_strict_mode(config.strict_mode);
hb.register_escape_fn(handlebars::no_escape); //html escaping is the default and cause issue0
add_base_helpers(&mut hb);
for t in &config.templates {
hb.register_template_string(t.0, t.1)?;
}
let renderer = Self {
hb,
vars: TomlMap::new(),
};
Ok(renderer)
}
/// Replace renderer dict.
/// Values in the renderer dict override any values passed to render()
pub fn set_vars(&mut self, vars: TomlMap) {
self.vars = vars
}
/// Sets all the vars from the hashap into the render dict
pub fn set_from<T: Into<toml::Value>>(&mut self, vars: HashMap<String, T>) {
for (k, v) in vars.into_iter() {
self.set(k, v);
}
}
/// Set a value in the renderer dict. If the key was previously set, it is replaced.
/// Values in the renderer dict override any values passed to render()
pub fn set<T: Into<TomlValue>>(&mut self, key: String, val: T) {
self.vars.insert(key, val.into());
}
/// Remove key if it was present
pub fn remove(&mut self, key: &str) {
self.vars.remove(key);
}
/// Adds template to internal dictionary
pub fn add_template(&mut self, template: Template) -> Result<()> {
self.hb.register_template_string(template.0, template.1)?;
Ok(())
}
/// Render a template with data.
pub fn render<W>(&self, template_name: &str, mut data: TomlMap, writer: &mut W) -> Result<()>
where
W: std::io::Write,
{
// add variables that extend/override passed data
data.extend(self.vars.clone().into_iter());
self.hb.render_to_write(template_name, &data, writer)?;
Ok(())
}
/// Convert markdown to html and generate html page,
/// using 'map' data as render vars
pub fn write_page_html<W: std::io::Write>(
&self,
mut map: TomlMap,
markdown: &str,
template_name: &str,
mut writer: &mut W,
) -> Result<()> {
let html = crate::md_parser::markdown_to_html(markdown)?;
map.insert("content".into(), TomlValue::from(html.content));
if let Some(toc) = html.toc {
map.insert("toc".into(), TomlValue::from(toc));
}
self.render(template_name, map, &mut writer)?;
Ok(())
}
}
/// Convert Value to string without adding quotes around strings
fn json_value_to_string(v: &JsonValue) -> String {
match v {
JsonValue::String(s) => s.clone(),
_ => v.to_string(),
}
}
/// Add template helpers functions
/// 'join-csv' turns array of values into comma-separate list
/// 'format-date' rewrites an ISO8601-formatted date into another format
fn add_base_helpers(hb: &mut Handlebars) | .iter()
.map(json_value_to_string)
.collect::<Vec<String>>()
.join(",");
out.write(&csv)?;
Ok(())
},
),
);
//
// format-date: strftime-like function to reformat date
hb.register_helper(
"format-date",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
// get first arg as string, an ISO8601-formatted date
let date = h
.param(0)
.ok_or_else(|| RenderError::new("expect first param as date"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// parse into DateTime
let date = DateTime::parse_from_rfc3339(date)
.map_err(|e| RenderError::from_error("date parse", e))?;
// get second arg - the format string
let format = h
.param(1)
.ok_or_else(|| RenderError::new("expect second param as format"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// print date in specified format
let formatted = date.format(format).to_string();
out.write(&formatted)?;
Ok(())
},
),
);
}
/// Generate diff between two text segments.
/// Enclose additions with <span class="add_style">...</span>
/// and deletions with <span class="del_style">
/// add_style, e.g., "bg-green 100 text-gray-500"
///
pub fn generate_diff(first: &str, second: &str, style: &DiffStyle) -> Result<String> {
use dissimilar::Chunk;
let chunks = dissimilar::diff(&first, &second);
// "<span class=\"bg-red-100 text-gray-600 line-through\">");
// <span class=\"bg-green-100 text-gray-600\">");
let mut diff_content = String::with_capacity(second.len() + 1048 + 30 * chunks.len());
for chunk in chunks.iter() {
match chunk {
Chunk::Equal(s) => {
diff_content.push_str(s);
}
Chunk::Delete(s) => {
diff_content.push_str(&style.del_start);
diff_content.push_str(s);
diff_content.push_str(&style.del_end);
}
Chunk::Insert(s) => {
diff_content.push_str(&style.ins_start);
diff_content.push_str(s);
diff_content.push_str(&style.ins_end);
}
}
}
Ok(diff_content)
}
#[test]
fn initializers() {
let mut r1 = Renderer::default();
r1.set("x".into(), toml::Value::from("xyz"));
assert!(true);
let mut r2 = Renderer::init(&RenderConfig::default()).expect("ok");
r2.set("x".into(), toml::Value | {
use handlebars::{Context, Helper, HelperResult, Output, RenderContext, RenderError};
// "join-csv" turns array of values into comma-separated list
// Converts each value using to_string()
hb.register_helper(
"join-csv",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
let csv = h
.param(0)
.ok_or_else(|| RenderError::new("param not found"))?
.value()
.as_array()
.ok_or_else(|| RenderError::new("expected array"))? | identifier_body |
render.rs | /// Html to insert before a span of deleted content
/// `<span class="...">`
pub del_start: String,
/// Html to insert after a span of deleted content
/// `</span>`
pub del_end: String,
}
impl Default for DiffStyle {
fn default() -> DiffStyle {
DiffStyle {
ins_start: r#"<span class="bg-green-100 text-gray-600">"#.to_string(),
ins_end: r#"</span>"#.to_string(),
del_start: r#"<span class="bg-red-100 text-gray-600 line-through">"#.to_string(),
del_end: r#"</span>"#.to_string(),
}
}
}
// these defaults can be overridden by the config file
/// Pairing of template name and contents
///
pub type Template<'template> = (&'template str, &'template str);
#[derive(Debug)]
pub struct RenderConfig<'render> {
/// Templates to be loaded for renderer. List of template name, data
pub templates: Vec<Template<'render>>,
/// Whether parser is in strict mode (e.g. if true, a variable used in template
/// that is undefined would raise an error; if false, it would evaluate to 'falsey'
pub strict_mode: bool,
}
impl<'render> Default for RenderConfig<'render> {
fn default() -> Self {
Self {
templates: Vec::new(),
strict_mode: false,
}
}
}
/// HBTemplate processor for HTML generation
pub struct Renderer<'gen> {
/// Handlebars processor
hb: Handlebars<'gen>,
/// Additional dictionary that supplements data passed to render() method
vars: TomlMap,
}
impl<'gen> Default for Renderer<'gen> {
fn default() -> Self {
// unwrap ok because only error condition occurs with templates, and default has none.
Self::init(&RenderConfig::default()).unwrap()
}
}
impl<'gen> Renderer<'gen> {
/// Initialize handlebars template processor.
pub fn init(config: &RenderConfig) -> Result<Self> {
let mut hb = Handlebars::new();
// don't use strict mode because docs may have different frontmatter vars
// and it's easier in templates to use if we allow undefined ~= false-y
hb.set_strict_mode(config.strict_mode);
hb.register_escape_fn(handlebars::no_escape); //html escaping is the default and cause issue0
add_base_helpers(&mut hb);
for t in &config.templates {
hb.register_template_string(t.0, t.1)?;
}
let renderer = Self {
hb,
vars: TomlMap::new(),
};
Ok(renderer)
}
/// Replace renderer dict.
/// Values in the renderer dict override any values passed to render()
pub fn set_vars(&mut self, vars: TomlMap) {
self.vars = vars
}
/// Sets all the vars from the hashap into the render dict
pub fn set_from<T: Into<toml::Value>>(&mut self, vars: HashMap<String, T>) {
for (k, v) in vars.into_iter() {
self.set(k, v);
}
}
/// Set a value in the renderer dict. If the key was previously set, it is replaced.
/// Values in the renderer dict override any values passed to render()
pub fn set<T: Into<TomlValue>>(&mut self, key: String, val: T) {
self.vars.insert(key, val.into());
}
/// Remove key if it was present
pub fn remove(&mut self, key: &str) {
self.vars.remove(key);
}
/// Adds template to internal dictionary
pub fn add_template(&mut self, template: Template) -> Result<()> {
self.hb.register_template_string(template.0, template.1)?;
Ok(())
}
/// Render a template with data.
pub fn render<W>(&self, template_name: &str, mut data: TomlMap, writer: &mut W) -> Result<()>
where
W: std::io::Write,
{
// add variables that extend/override passed data
data.extend(self.vars.clone().into_iter());
self.hb.render_to_write(template_name, &data, writer)?;
Ok(())
}
/// Convert markdown to html and generate html page,
/// using 'map' data as render vars
pub fn write_page_html<W: std::io::Write>(
&self,
mut map: TomlMap,
markdown: &str,
template_name: &str,
mut writer: &mut W,
) -> Result<()> {
let html = crate::md_parser::markdown_to_html(markdown)?;
map.insert("content".into(), TomlValue::from(html.content));
if let Some(toc) = html.toc {
map.insert("toc".into(), TomlValue::from(toc));
}
self.render(template_name, map, &mut writer)?;
Ok(())
}
}
/// Convert Value to string without adding quotes around strings
fn json_value_to_string(v: &JsonValue) -> String {
match v {
JsonValue::String(s) => s.clone(),
_ => v.to_string(),
}
}
/// Add template helpers functions
/// 'join-csv' turns array of values into comma-separate list
/// 'format-date' rewrites an ISO8601-formatted date into another format
fn add_base_helpers(hb: &mut Handlebars) {
use handlebars::{Context, Helper, HelperResult, Output, RenderContext, RenderError};
// "join-csv" turns array of values into comma-separated list
// Converts each value using to_string()
hb.register_helper(
"join-csv",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
let csv = h
.param(0)
.ok_or_else(|| RenderError::new("param not found"))?
.value()
.as_array()
.ok_or_else(|| RenderError::new("expected array"))?
.iter()
.map(json_value_to_string)
.collect::<Vec<String>>()
.join(",");
out.write(&csv)?;
Ok(())
},
),
);
//
// format-date: strftime-like function to reformat date
hb.register_helper(
"format-date",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
// get first arg as string, an ISO8601-formatted date
let date = h
.param(0)
.ok_or_else(|| RenderError::new("expect first param as date"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// parse into DateTime
let date = DateTime::parse_from_rfc3339(date)
.map_err(|e| RenderError::from_error("date parse", e))?;
// get second arg - the format string
let format = h
.param(1)
.ok_or_else(|| RenderError::new("expect second param as format"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// print date in specified format
let formatted = date.format(format).to_string();
out.write(&formatted)?;
Ok(())
},
),
);
}
/// Generate diff between two text segments.
/// Enclose additions with <span class="add_style">...</span>
/// and deletions with <span class="del_style">
/// add_style, e.g., "bg-green 100 text-gray-500"
///
pub fn generate_diff(first: &str, second: &str, style: &DiffStyle) -> Result<String> {
use dissimilar::Chunk;
let chunks = dissimilar::diff(&first, &second);
// "<span class=\"bg-red-100 text-gray-600 line-through\">");
// <span class=\"bg-green-100 text-gray-600\">");
let mut diff_content = String::with_capacity(second.len() + 1048 + 30 * chunks.len());
for chunk in chunks.iter() {
match chunk {
Chunk::Equal(s) => {
diff_content.push_str(s);
}
Chunk::Delete(s) => {
diff_content.push_str(&style.del_start);
diff_content.push_str(s);
diff_content.push_str(&style.del_end);
}
Chunk::Insert(s) => {
diff_content.push_str(&style.ins_start);
diff_content.push_str(s);
diff_content.push_str(&style.ins_end);
}
}
}
Ok(diff_content)
}
#[test]
fn initializers() {
let mut r1 = Renderer::default();
r1.set("x".into(), toml::Value::from("xyz"));
assert!(true);
| let mut r2 = Renderer::init(&RenderConfig::default()).expect("ok");
r2.set("x".into(), toml::Value::from("xyz"));
assert!(true);
}
| random_line_split |
|
render.rs | </span>`
pub ins_end: String,
/// Html to insert before a span of deleted content
/// `<span class="...">`
pub del_start: String,
/// Html to insert after a span of deleted content
/// `</span>`
pub del_end: String,
}
impl Default for DiffStyle {
fn default() -> DiffStyle {
DiffStyle {
ins_start: r#"<span class="bg-green-100 text-gray-600">"#.to_string(),
ins_end: r#"</span>"#.to_string(),
del_start: r#"<span class="bg-red-100 text-gray-600 line-through">"#.to_string(),
del_end: r#"</span>"#.to_string(),
}
}
}
// these defaults can be overridden by the config file
/// Pairing of template name and contents
///
pub type Template<'template> = (&'template str, &'template str);
#[derive(Debug)]
pub struct RenderConfig<'render> {
/// Templates to be loaded for renderer. List of template name, data
pub templates: Vec<Template<'render>>,
/// Whether parser is in strict mode (e.g. if true, a variable used in template
/// that is undefined would raise an error; if false, it would evaluate to 'falsey'
pub strict_mode: bool,
}
impl<'render> Default for RenderConfig<'render> {
fn default() -> Self {
Self {
templates: Vec::new(),
strict_mode: false,
}
}
}
/// HBTemplate processor for HTML generation
pub struct Renderer<'gen> {
/// Handlebars processor
hb: Handlebars<'gen>,
/// Additional dictionary that supplements data passed to render() method
vars: TomlMap,
}
impl<'gen> Default for Renderer<'gen> {
fn | () -> Self {
// unwrap ok because only error condition occurs with templates, and default has none.
Self::init(&RenderConfig::default()).unwrap()
}
}
impl<'gen> Renderer<'gen> {
/// Initialize handlebars template processor.
pub fn init(config: &RenderConfig) -> Result<Self> {
let mut hb = Handlebars::new();
// don't use strict mode because docs may have different frontmatter vars
// and it's easier in templates to use if we allow undefined ~= false-y
hb.set_strict_mode(config.strict_mode);
hb.register_escape_fn(handlebars::no_escape); //html escaping is the default and cause issue0
add_base_helpers(&mut hb);
for t in &config.templates {
hb.register_template_string(t.0, t.1)?;
}
let renderer = Self {
hb,
vars: TomlMap::new(),
};
Ok(renderer)
}
/// Replace renderer dict.
/// Values in the renderer dict override any values passed to render()
pub fn set_vars(&mut self, vars: TomlMap) {
self.vars = vars
}
/// Sets all the vars from the hashap into the render dict
pub fn set_from<T: Into<toml::Value>>(&mut self, vars: HashMap<String, T>) {
for (k, v) in vars.into_iter() {
self.set(k, v);
}
}
/// Set a value in the renderer dict. If the key was previously set, it is replaced.
/// Values in the renderer dict override any values passed to render()
pub fn set<T: Into<TomlValue>>(&mut self, key: String, val: T) {
self.vars.insert(key, val.into());
}
/// Remove key if it was present
pub fn remove(&mut self, key: &str) {
self.vars.remove(key);
}
/// Adds template to internal dictionary
pub fn add_template(&mut self, template: Template) -> Result<()> {
self.hb.register_template_string(template.0, template.1)?;
Ok(())
}
/// Render a template with data.
pub fn render<W>(&self, template_name: &str, mut data: TomlMap, writer: &mut W) -> Result<()>
where
W: std::io::Write,
{
// add variables that extend/override passed data
data.extend(self.vars.clone().into_iter());
self.hb.render_to_write(template_name, &data, writer)?;
Ok(())
}
/// Convert markdown to html and generate html page,
/// using 'map' data as render vars
pub fn write_page_html<W: std::io::Write>(
&self,
mut map: TomlMap,
markdown: &str,
template_name: &str,
mut writer: &mut W,
) -> Result<()> {
let html = crate::md_parser::markdown_to_html(markdown)?;
map.insert("content".into(), TomlValue::from(html.content));
if let Some(toc) = html.toc {
map.insert("toc".into(), TomlValue::from(toc));
}
self.render(template_name, map, &mut writer)?;
Ok(())
}
}
/// Convert Value to string without adding quotes around strings
fn json_value_to_string(v: &JsonValue) -> String {
match v {
JsonValue::String(s) => s.clone(),
_ => v.to_string(),
}
}
/// Add template helpers functions
/// 'join-csv' turns array of values into comma-separate list
/// 'format-date' rewrites an ISO8601-formatted date into another format
fn add_base_helpers(hb: &mut Handlebars) {
use handlebars::{Context, Helper, HelperResult, Output, RenderContext, RenderError};
// "join-csv" turns array of values into comma-separated list
// Converts each value using to_string()
hb.register_helper(
"join-csv",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
let csv = h
.param(0)
.ok_or_else(|| RenderError::new("param not found"))?
.value()
.as_array()
.ok_or_else(|| RenderError::new("expected array"))?
.iter()
.map(json_value_to_string)
.collect::<Vec<String>>()
.join(",");
out.write(&csv)?;
Ok(())
},
),
);
//
// format-date: strftime-like function to reformat date
hb.register_helper(
"format-date",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
// get first arg as string, an ISO8601-formatted date
let date = h
.param(0)
.ok_or_else(|| RenderError::new("expect first param as date"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// parse into DateTime
let date = DateTime::parse_from_rfc3339(date)
.map_err(|e| RenderError::from_error("date parse", e))?;
// get second arg - the format string
let format = h
.param(1)
.ok_or_else(|| RenderError::new("expect second param as format"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// print date in specified format
let formatted = date.format(format).to_string();
out.write(&formatted)?;
Ok(())
},
),
);
}
/// Generate diff between two text segments.
/// Enclose additions with <span class="add_style">...</span>
/// and deletions with <span class="del_style">
/// add_style, e.g., "bg-green 100 text-gray-500"
///
pub fn generate_diff(first: &str, second: &str, style: &DiffStyle) -> Result<String> {
use dissimilar::Chunk;
let chunks = dissimilar::diff(&first, &second);
// "<span class=\"bg-red-100 text-gray-600 line-through\">");
// <span class=\"bg-green-100 text-gray-600\">");
let mut diff_content = String::with_capacity(second.len() + 1048 + 30 * chunks.len());
for chunk in chunks.iter() {
match chunk {
Chunk::Equal(s) => {
diff_content.push_str(s);
}
Chunk::Delete(s) => {
diff_content.push_str(&style.del_start);
diff_content.push_str(s);
diff_content.push_str(&style.del_end);
}
Chunk::Insert(s) => {
diff_content.push_str(&style.ins_start);
diff_content.push_str(s);
diff_content.push_str(&style.ins_end);
}
}
}
Ok(diff_content)
}
#[test]
fn initializers() {
let mut r1 = Renderer::default();
r1.set("x".into(), toml::Value::from("xyz"));
assert!(true);
let mut r2 = Renderer::init(&RenderConfig::default()).expect("ok");
r2.set("x".into(), toml::Value | default | identifier_name |
quantity.go | return "" }
// OpenAPIV3OneOfTypes is used by the kube-openapi generator when constructing
// the OpenAPI v3 spec of this type.
func (Quantity) OpenAPIV3OneOfTypes() []string { return []string{"string", "number"} }
// CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity).
//
// Note about BinarySI:
// - If q.Format is set to BinarySI and q.Amount represents a non-zero value between
// -1 and +1, it will be emitted as if q.Format were DecimalSI.
// - Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be
// rounded up. (1.1i becomes 2i.)
func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
if q.IsZero() {
return zeroBytes, nil
}
var rounded CanonicalValue
format := q.Format
switch format {
case DecimalExponent, DecimalSI:
case BinarySI:
if q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 {
// This avoids rounding and hopefully confusion, too.
format = DecimalSI
} else {
var exact bool
if rounded, exact = q.AsScale(0); !exact {
// Don't lose precision-- show as DecimalSI
format = DecimalSI
}
}
default:
format = DecimalExponent
}
// TODO: If BinarySI formatting is requested but would cause rounding, upgrade to
// one of the other formats.
switch format {
case DecimalExponent, DecimalSI:
number, exponent := q.AsCanonicalBytes(out)
suffix, _ := quantitySuffixer.constructBytes(10, exponent, format)
return number, suffix
default:
// format must be BinarySI
number, exponent := rounded.AsCanonicalBase1024Bytes(out)
suffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format)
return number, suffix
}
}
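// Illustrative sketch (not part of the upstream file): what the canonicalization rules above
// look like through String(). The expected outputs follow the documented rules.
func exampleCanonicalForms() {
	binary, _ := ParseQuantity("1.5Gi") // fractional binary quantities are expressed in the next-smaller suffix
	decimal, _ := ParseQuantity("0.5")  // decimal values between -1 and 1 are expressed in milli-units
	fmt.Println(binary.String())  // expected: 1536Mi
	fmt.Println(decimal.String()) // expected: 500m
}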
// AsApproximateFloat64 returns a float64 representation of the quantity which may
// lose precision. If the value of the quantity is outside the range of a float64
// +Inf/-Inf will be returned.
func (q *Quantity) AsApproximateFloat64() float64 {
var base float64
var exponent int
if q.d.Dec != nil {
base, _ = big.NewFloat(0).SetInt(q.d.Dec.UnscaledBig()).Float64()
exponent = int(-q.d.Dec.Scale())
} else {
base = float64(q.i.value)
exponent = int(q.i.scale)
}
if exponent == 0 {
return base
}
return base * math.Pow10(exponent)
}
// AsInt64 returns a representation of the current value as an int64 if a fast conversion
// is possible. If false is returned, callers must use the inf.Dec form of this quantity.
func (q *Quantity) AsInt64() (int64, bool) {
if q.d.Dec != nil {
return 0, false
}
return q.i.AsInt64()
}
// ToDec promotes the quantity in place to use an inf.Dec representation and returns itself.
func (q *Quantity) ToDec() *Quantity {
if q.d.Dec == nil {
q.d.Dec = q.i.AsDec()
q.i = int64Amount{}
}
return q
}
// AsDec returns the quantity as represented by a scaled inf.Dec.
func (q *Quantity) AsDec() *inf.Dec {
if q.d.Dec != nil {
return q.d.Dec
}
q.d.Dec = q.i.AsDec()
q.i = int64Amount{}
return q.d.Dec
}
// AsCanonicalBytes returns the canonical byte representation of this quantity as a mantissa
// and base 10 exponent. The out byte slice may be passed to the method to avoid an extra
// allocation.
func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
if q.d.Dec != nil {
return q.d.AsCanonicalBytes(out)
}
return q.i.AsCanonicalBytes(out)
}
// IsZero returns true if the quantity is equal to zero.
func (q *Quantity) IsZero() bool {
if q.d.Dec != nil {
return q.d.Dec.Sign() == 0
}
return q.i.value == 0
}
// Sign returns 0 if the quantity is zero, -1 if the quantity is less than zero, or 1 if the
// quantity is greater than zero.
func (q *Quantity) Sign() int {
if q.d.Dec != nil {
return q.d.Dec.Sign()
}
return q.i.Sign()
}
// AsScale returns the current value, rounded up to the provided scale, and returns
// false if the scale resulted in a loss of precision.
func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) {
if q.d.Dec != nil {
return q.d.AsScale(scale)
}
return q.i.AsScale(scale)
}
// RoundUp updates the quantity to the provided scale, ensuring that the value is at
// least 1. False is returned if the rounding operation resulted in a loss of precision.
// Negative numbers are rounded away from zero (-9 scale 1 rounds to -10).
func (q *Quantity) RoundUp(scale Scale) bool {
if q.d.Dec != nil {
q.s = ""
d, exact := q.d.AsScale(scale)
q.d = d
return exact
}
// avoid clearing the string value if we have already calculated it
if q.i.scale >= scale {
return true
}
q.s = ""
i, exact := q.i.AsScale(scale)
q.i = i
return exact
}
// Add adds the provided quantity y to the current value. If the current value is zero,
// the format of the quantity will be updated to the format of y.
func (q *Quantity) Add(y Quantity) {
q.s = ""
if q.d.Dec == nil && y.d.Dec == nil {
if q.i.value == 0 {
q.Format = y.Format
}
if q.i.Add(y.i) {
return
}
} else if q.IsZero() {
q.Format = y.Format
}
q.ToDec().d.Dec.Add(q.d.Dec, y.AsDec())
}
// Sub subtracts the provided quantity from the current value in place. If the current
// value is zero, the format of the quantity will be updated to the format of y.
func (q *Quantity) Sub(y Quantity) {
q.s = ""
if q.IsZero() {
q.Format = y.Format
}
if q.d.Dec == nil && y.d.Dec == nil && q.i.Sub(y.i) {
return
}
q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec())
}
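// Illustrative sketch (not part of the upstream file): Add/Sub mutate the receiver in place
// and stay on the fast int64 path while both operands fit in it.
func exampleArithmetic() {
	total, _ := ParseQuantity("500m")
	step, _ := ParseQuantity("250m")
	total.Add(step) // 750m
	total.Sub(step) // back to 500m
	if total.Cmp(step) > 0 {
		fmt.Println(total.String()) // expected: 500m
	}
}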
// Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
// quantity is greater than y.
func (q *Quantity) Cmp(y Quantity) int {
if q.d.Dec == nil && y.d.Dec == nil {
return q.i.Cmp(y.i)
}
return q.AsDec().Cmp(y.AsDec())
}
// CmpInt64 returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
// quantity is greater than y.
func (q *Quantity) CmpInt64(y int64) int {
if q.d.Dec != nil {
return q.d.Dec.Cmp(inf.NewDec(y, inf.Scale(0)))
}
return q.i.Cmp(int64Amount{value: y})
}
// Neg sets quantity to be the negative value of itself.
func (q *Quantity) Neg() {
q.s = ""
if q.d.Dec == nil {
q.i.value = -q.i.value
return
}
q.d.Dec.Neg(q.d.Dec)
}
// Equal checks equality of two Quantities. This is useful for testing with
// cmp.Equal.
func (q Quantity) Equal(v Quantity) bool {
return q.Cmp(v) == 0
}
// int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation
// of most Quantity values.
const int64QuantityExpectedBytes = 18
// String formats the Quantity as a string, caching the result if not calculated.
// String is an expensive operation and caching this result significantly reduces the cost of
// normal parse / marshal operations on Quantity.
func (q *Quantity) String() string {
if q == nil {
return "<nil>"
}
if len(q.s) == 0 {
result := make([]byte, 0, int64QuantityExpectedBytes)
number, suffix := q.CanonicalizeBytes(result)
number = append(number, suffix...)
q.s = string(number)
}
return q.s
}
// MarshalJSON implements the json.Marshaller interface.
func (q Quantity) MarshalJSON() ([]byte, error) {
if len(q.s) > 0 {
out := make([]byte, len(q.s)+2)
out[0], out[len(out)-1] = '"', '"'
copy(out[1:], q.s)
return out, nil
}
|
quantity.go

if _, ok := amount.SetString(value); !ok {
return Quantity{}, ErrNumeric
}
// So that no one but us has to think about suffixes, remove it.
if base == 10 {
amount.SetScale(amount.Scale() + Scale(exponent).infScale())
} else if base == 2 {
// numericSuffix = 2 ** exponent
numericSuffix := big.NewInt(1).Lsh(bigOne, uint(exponent))
ub := amount.UnscaledBig()
amount.SetUnscaledBig(ub.Mul(ub, numericSuffix))
}
// Cap at min/max bounds.
sign := amount.Sign()
if sign == -1 {
amount.Neg(amount)
}
// This rounds non-zero values up to the minimum representable value, under the theory that
// if you want some resources, you should get some resources, even if you asked for way too small
// of an amount. Arguably, this should be inf.RoundHalfUp (normal rounding), but that would have
// the side effect of rounding values < .5n to zero.
if v, ok := amount.Unscaled(); v != int64(0) || !ok {
amount.Round(amount, Nano.infScale(), inf.RoundUp)
}
// The max is just a simple cap.
// TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster
if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 {
amount.Set(maxAllowed.Dec)
}
if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 {
// This avoids rounding and hopefully confusion, too.
format = DecimalSI
}
if sign == -1 {
amount.Neg(amount)
}
return Quantity{d: infDecAmount{amount}, Format: format}, nil
}
// DeepCopy returns a deep-copy of the Quantity value. Note that the method
// receiver is a value, so we can mutate it in-place and return it.
func (q Quantity) DeepCopy() Quantity {
if q.d.Dec != nil {
tmp := &inf.Dec{}
q.d.Dec = tmp.Set(q.d.Dec)
}
return q
}
// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
func (_ Quantity) OpenAPISchemaType() []string { return []string{"string"} }
// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
func (_ Quantity) OpenAPISchemaFormat() string { return "" }
// OpenAPIV3OneOfTypes is used by the kube-openapi generator when constructing
// the OpenAPI v3 spec of this type.
func (Quantity) OpenAPIV3OneOfTypes() []string { return []string{"string", "number"} }
// CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity).
//
// Note about BinarySI:
// - If q.Format is set to BinarySI and q.Amount represents a non-zero value between
// -1 and +1, it will be emitted as if q.Format were DecimalSI.
// - Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be
// rounded up. (1.1i becomes 2i.)
func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
if q.IsZero() {
return zeroBytes, nil
}
var rounded CanonicalValue
format := q.Format
switch format {
case DecimalExponent, DecimalSI:
case BinarySI:
if q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 {
// This avoids rounding and hopefully confusion, too.
format = DecimalSI
} else {
var exact bool
if rounded, exact = q.AsScale(0); !exact {
// Don't lose precision-- show as DecimalSI
format = DecimalSI
}
}
default:
format = DecimalExponent
}
// TODO: If BinarySI formatting is requested but would cause rounding, upgrade to
// one of the other formats.
switch format {
case DecimalExponent, DecimalSI:
number, exponent := q.AsCanonicalBytes(out)
suffix, _ := quantitySuffixer.constructBytes(10, exponent, format)
return number, suffix
default:
// format must be BinarySI
number, exponent := rounded.AsCanonicalBase1024Bytes(out)
suffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format)
return number, suffix
}
}
// AsApproximateFloat64 returns a float64 representation of the quantity which may
// lose precision. If the value of the quantity is outside the range of a float64
// +Inf/-Inf will be returned.
func (q *Quantity) AsApproximateFloat64() float64 {
var base float64
var exponent int
if q.d.Dec != nil {
base, _ = big.NewFloat(0).SetInt(q.d.Dec.UnscaledBig()).Float64()
exponent = int(-q.d.Dec.Scale())
} else {
base = float64(q.i.value)
exponent = int(q.i.scale)
}
if exponent == 0 {
return base
}
return base * math.Pow10(exponent)
}
// AsInt64 returns a representation of the current value as an int64 if a fast conversion
// is possible. If false is returned, callers must use the inf.Dec form of this quantity.
func (q *Quantity) AsInt64() (int64, bool) {
if q.d.Dec != nil {
return 0, false
}
return q.i.AsInt64()
}
// ToDec promotes the quantity in place to use an inf.Dec representation and returns itself.
func (q *Quantity) ToDec() *Quantity {
if q.d.Dec == nil {
q.d.Dec = q.i.AsDec()
q.i = int64Amount{}
}
return q
}
// AsDec returns the quantity as represented by a scaled inf.Dec.
func (q *Quantity) AsDec() *inf.Dec {
if q.d.Dec != nil {
return q.d.Dec
}
q.d.Dec = q.i.AsDec()
q.i = int64Amount{}
return q.d.Dec
}
// AsCanonicalBytes returns the canonical byte representation of this quantity as a mantissa
// and base 10 exponent. The out byte slice may be passed to the method to avoid an extra
// allocation.
func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
if q.d.Dec != nil {
return q.d.AsCanonicalBytes(out)
}
return q.i.AsCanonicalBytes(out)
}
// IsZero returns true if the quantity is equal to zero.
func (q *Quantity) IsZero() bool {
if q.d.Dec != nil {
return q.d.Dec.Sign() == 0
}
return q.i.value == 0
}
// Sign returns 0 if the quantity is zero, -1 if the quantity is less than zero, or 1 if the
// quantity is greater than zero.
func (q *Quantity) Sign() int {
if q.d.Dec != nil {
return q.d.Dec.Sign()
}
return q.i.Sign()
}
// AsScale returns the current value, rounded up to the provided scale, and returns
// false if the scale resulted in a loss of precision.
func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) {
if q.d.Dec != nil {
return q.d.AsScale(scale)
}
return q.i.AsScale(scale)
}
// RoundUp updates the quantity to the provided scale, ensuring that the value is at
// least 1. False is returned if the rounding operation resulted in a loss of precision.
// Negative numbers are rounded away from zero (-9 scale 1 rounds to -10).
func (q *Quantity) RoundUp(scale Scale) bool {
if q.d.Dec != nil {
q.s = ""
d, exact := q.d.AsScale(scale)
q.d = d
return exact
}
// avoid clearing the string value if we have already calculated it
if q.i.scale >= scale {
return true
}
q.s = ""
i, exact := q.i.AsScale(scale)
q.i = i
return exact
}
// Add adds the provided quantity y to the current value. If the current value is zero,
// the format of the quantity will be updated to the format of y.
func (q *Quantity) Add(y Quantity) {
q.s = ""
if q.d.Dec == nil && y.d.Dec == nil {
if q.i.value == 0 {
q.Format = y.Format
}
if q.i.Add(y.i) {
return
}
} else if q.IsZero() {
q.Format = y.Format
}
q.ToDec().d.Dec.Add(q.d.Dec, y.AsDec())
}
// Sub subtracts the provided quantity from the current value in place. If the current
// value is zero, the format of the quantity will be updated to the format of y.
func (q *Quantity) Sub(y Quantity) {
|
quantity.go
const (
// splitREString is used to separate a number from its suffix; as such,
// this is overly permissive, but that's OK-- it will be checked later.
splitREString = "^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
)
var (
// Errors that could happen while parsing a string.
ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'")
ErrNumeric = errors.New("unable to parse numeric part of quantity")
ErrSuffix = errors.New("unable to parse quantity's suffix")
)
// parseQuantityString is a fast scanner for quantity values.
func parseQuantityString(str string) (positive bool, value, num, denom, suffix string, err error) {
positive = true
pos := 0
end := len(str)
// handle leading sign
if pos < end {
switch str[0] {
case '-':
positive = false
pos++
case '+':
pos++
}
}
// strip leading zeros
Zeroes:
for i := pos; ; i++ {
if i >= end {
num = "0"
value = num
return
}
switch str[i] {
case '0':
pos++
default:
break Zeroes
}
}
// extract the numerator
Num:
for i := pos; ; i++ {
if i >= end {
num = str[pos:end]
value = str[0:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
num = str[pos:i]
pos = i
break Num
}
}
// if we stripped all numerator positions, always return 0
if len(num) == 0 {
num = "0"
}
// handle a denominator
if pos < end && str[pos] == '.' {
pos++
Denom:
for i := pos; ; i++ {
if i >= end {
denom = str[pos:end]
value = str[0:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
denom = str[pos:i]
pos = i
break Denom
}
}
// TODO: we currently allow 1.G, but we may not want to in the future.
// if len(denom) == 0 {
// err = ErrFormatWrong
// return
// }
}
value = str[0:pos]
// grab the elements of the suffix
suffixStart := pos
for i := pos; ; i++ {
if i >= end {
suffix = str[suffixStart:end]
return
}
if !strings.ContainsAny(str[i:i+1], "eEinumkKMGTP") {
pos = i
break
}
}
if pos < end {
switch str[pos] {
case '-', '+':
pos++
}
}
Suffix:
for i := pos; ; i++ {
if i >= end {
suffix = str[suffixStart:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
break Suffix
}
}
// we encountered a non-decimal character in the Suffix loop, but the last character
// was not a valid exponent
err = ErrFormatWrong
return
}
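// Illustrative sketch (not part of the upstream file): what the scanner above returns for a
// typical signed, fractional, binary-suffixed input.
func exampleParseQuantityString() {
	positive, value, num, denom, suffix, err := parseQuantityString("-1.5Gi")
	// positive=false value="-1.5" num="1" denom="5" suffix="Gi" err=nil
	fmt.Println(positive, value, num, denom, suffix, err)
}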
// ParseQuantity turns str into a Quantity, or returns an error.
func ParseQuantity(str string) (Quantity, error) {
if len(str) == 0 {
return Quantity{}, ErrFormatWrong
}
if str == "0" {
return Quantity{Format: DecimalSI, s: str}, nil
}
positive, value, num, denom, suf, err := parseQuantityString(str)
if err != nil {
return Quantity{}, err
}
base, exponent, format, ok := quantitySuffixer.interpret(suffix(suf))
if !ok {
return Quantity{}, ErrSuffix
}
precision := int32(0)
scale := int32(0)
mantissa := int64(1)
switch format {
case DecimalExponent, DecimalSI:
scale = exponent
precision = maxInt64Factors - int32(len(num)+len(denom))
case BinarySI:
scale = 0
switch {
case exponent >= 0 && len(denom) == 0:
// only handle positive binary numbers with the fast path
mantissa = int64(int64(mantissa) << uint64(exponent))
// 1Mi (2^20) has ~6 digits of decimal precision, so exponent*3/10 -1 is roughly the precision
precision = 15 - int32(len(num)) - int32(float32(exponent)*3/10) - 1
default:
precision = -1
}
}
if precision >= 0 {
// if we have a denominator, shift the entire value to the left by the number of places in the
// denominator
scale -= int32(len(denom))
if scale >= int32(Nano) {
shifted := num + denom
var value int64
value, err := strconv.ParseInt(shifted, 10, 64)
if err != nil {
return Quantity{}, ErrNumeric
}
if result, ok := int64Multiply(value, int64(mantissa)); ok {
if !positive {
result = -result
}
// if the number is in canonical form, reuse the string
switch format {
case BinarySI:
if exponent%10 == 0 && (value&0x07 != 0) {
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
}
default:
if scale%3 == 0 && !strings.HasSuffix(shifted, "000") && shifted[0] != '0' {
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
}
}
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format}, nil
}
}
}
amount := new(inf.Dec)
if _, ok := amount.SetString(value); !ok {
return Quantity{}, ErrNumeric
}
// So that no one but us has to think about suffixes, remove it.
if base == 10 {
amount.SetScale(amount.Scale() + Scale(exponent).infScale())
} else if base == 2 {
// numericSuffix = 2 ** exponent
numericSuffix := big.NewInt(1).Lsh(bigOne, uint(exponent))
ub := amount.UnscaledBig()
amount.SetUnscaledBig(ub.Mul(ub, numericSuffix))
}
// Cap at min/max bounds.
sign := amount.Sign()
if sign == -1 {
amount.Neg(amount)
}
// This rounds non-zero values up to the minimum representable value, under the theory that
// if you want some resources, you should get some resources, even if you asked for way too small
// of an amount. Arguably, this should be inf.RoundHalfUp (normal rounding), but that would have
// the side effect of rounding values < .5n to zero.
if v, ok := amount.Unscaled(); v != int64(0) || !ok {
amount.Round(amount, Nano.infScale(), inf.RoundUp)
}
// The max is just a simple cap.
// TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster
if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 {
amount.Set(maxAllowed.Dec)
}
if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 {
// This avoids rounding and hopefully confusion, too.
format = DecimalSI
}
if sign == -1 {
amount.Neg(amount)
}
return Quantity{d: infDecAmount{amount}, Format: format}, nil
}
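// Illustrative sketch (not part of the upstream file): round-tripping a few representative
// inputs through ParseQuantity.
func exampleParseQuantity() {
	for _, s := range []string{"128974848", "9Gi", "100m", "1.5"} {
		q, err := ParseQuantity(s)
		if err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		// "1.5" is canonicalized to "1500m"; the other strings are already canonical and are kept as-is.
		fmt.Println(s, "->", q.String())
	}
}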
// DeepCopy returns a deep-copy of the Quantity value. Note that the method
// receiver is a value, so we can mutate it in-place and return it.
func (q Quantity) DeepCopy() Quantity {
if q.d.Dec != nil {
tmp := &inf.Dec{}
q.d.Dec = tmp.Set(q.d.Dec)
}
return q
}
// MustParse turns the given string into a Quantity or panics if it cannot be parsed.
func MustParse(str string) Quantity {
q, err := ParseQuantity(str)
if err != nil {
panic(fmt.Errorf("cannot parse '%v': %v", str, err))
}
return q
}
|
quantity.go

const (
splitREString = "^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
)
var (
// Errors that could happen while parsing a string.
ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'")
ErrNumeric = errors.New("unable to parse numeric part of quantity")
ErrSuffix = errors.New("unable to parse quantity's suffix")
)
|
// handle leading sign
if pos < end {
switch str[0] {
case '-':
positive = false
pos++
case '+':
pos++
}
}
// strip leading zeros
Zeroes:
for i := pos; ; i++ {
if i >= end {
num = "0"
value = num
return
}
switch str[i] {
case '0':
pos++
default:
break Zeroes
}
}
// extract the numerator
Num:
for i := pos; ; i++ {
if i >= end {
num = str[pos:end]
value = str[0:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
num = str[pos:i]
pos = i
break Num
}
}
// if we stripped all numerator positions, always return 0
if len(num) == 0 {
num = "0"
}
// handle a denominator
if pos < end && str[pos] == '.' {
pos++
Denom:
for i := pos; ; i++ {
if i >= end {
denom = str[pos:end]
value = str[0:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
denom = str[pos:i]
pos = i
break Denom
}
}
// TODO: we currently allow 1.G, but we may not want to in the future.
// if len(denom) == 0 {
// err = ErrFormatWrong
// return
// }
}
value = str[0:pos]
// grab the elements of the suffix
suffixStart := pos
for i := pos; ; i++ {
if i >= end {
suffix = str[suffixStart:end]
return
}
if !strings.ContainsAny(str[i:i+1], "eEinumkKMGTP") {
pos = i
break
}
}
if pos < end {
switch str[pos] {
case '-', '+':
pos++
}
}
Suffix:
for i := pos; ; i++ {
if i >= end {
suffix = str[suffixStart:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
break Suffix
}
}
// we encountered a non-decimal character in the Suffix loop, but the last character
// was not a valid exponent
err = ErrFormatWrong
return
}
// ParseQuantity turns str into a Quantity, or returns an error.
func ParseQuantity(str string) (Quantity, error) {
if len(str) == 0 {
return Quantity{}, ErrFormatWrong
}
if str == "0" {
return Quantity{Format: DecimalSI, s: str}, nil
}
positive, value, num, denom, suf, err := parseQuantityString(str)
if err != nil {
return Quantity{}, err
}
base, exponent, format, ok := quantitySuffixer.interpret(suffix(suf))
if !ok {
return Quantity{}, ErrSuffix
}
precision := int32(0)
scale := int32(0)
mantissa := int64(1)
switch format {
case DecimalExponent, DecimalSI:
scale = exponent
precision = maxInt64Factors - int32(len(num)+len(denom))
case BinarySI:
scale = 0
switch {
case exponent >= 0 && len(denom) == 0:
// only handle positive binary numbers with the fast path
mantissa = int64(int64(mantissa) << uint64(exponent))
// 1Mi (2^20) has ~6 digits of decimal precision, so exponent*3/10 -1 is roughly the precision
precision = 15 - int32(len(num)) - int32(float32(exponent)*3/10) - 1
default:
precision = -1
}
}
if precision >= 0 {
// if we have a denominator, shift the entire value to the left by the number of places in the
// denominator
scale -= int32(len(denom))
if scale >= int32(Nano) {
shifted := num + denom
var value int64
value, err := strconv.ParseInt(shifted, 10, 64)
if err != nil {
return Quantity{}, ErrNumeric
}
if result, ok := int64Multiply(value, int64(mantissa)); ok {
if !positive {
result = -result
}
// if the number is in canonical form, reuse the string
switch format {
case BinarySI:
if exponent%10 == 0 && (value&0x07 != 0) {
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
}
default:
if scale%3 == 0 && !strings.HasSuffix(shifted, "000") && shifted[0] != '0' {
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
}
}
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format}, nil
}
}
}
amount := new(inf.Dec)
if _, ok := amount.SetString(value); !ok {
return Quantity{}, ErrNumeric
}
// So that no one but us has to think about suffixes, remove it.
if base == 10 {
amount.SetScale(amount.Scale() + Scale(exponent).infScale())
} else if base == 2 {
// numericSuffix = 2 ** exponent
numericSuffix := big.NewInt(1).Lsh(bigOne, uint(exponent))
ub := amount.UnscaledBig()
amount.SetUnscaledBig(ub.Mul(ub, numericSuffix))
}
// Cap at min/max bounds.
sign := amount.Sign()
if sign == -1 {
amount.Neg(amount)
}
// This rounds non-zero values up to the minimum representable value, under the theory that
// if you want some resources, you should get some resources, even if you asked for way too small
// of an amount. Arguably, this should be inf.RoundHalfUp (normal rounding), but that would have
// the side effect of rounding values < .5n to zero.
if v, ok := amount.Unscaled(); v != int64(0) || !ok {
amount.Round(amount, Nano.infScale(), inf.RoundUp)
}
// The max is just a simple cap.
// TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster
if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 {
amount.Set(maxAllowed.Dec)
}
if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 {
// This avoids rounding and hopefully confusion, too.
format = DecimalSI
}
if sign == -1 {
amount.Neg(amount)
}
return Quantity{d: infDecAmount{amount}, Format: format}, nil
}
// DeepCopy returns a deep-copy of the Quantity value. Note that the method
// receiver is a value, so we can mutate it in-place and return it.
func (q Quantity) DeepCopy() Quantity {
if q.d.Dec != nil {
tmp := &inf.Dec{}
q.d.Dec = tmp.Set(q.d.Dec)
}
return q
}
// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
func (_ Quantity) OpenAPISchemaType() []string { return []string{"string"} }
// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
func (_ Quantity) OpenAPISchemaFormat() string { return "" }
// parseQuantityString is a fast scanner for quantity values.
func parseQuantityString(str string) (positive bool, value, num, denom, suffix string, err error) {
positive = true
pos := 0
end := len(str)
generate_tiles.py

read_data_rasterio
def get_files_from_folder(input_dir, extensions=None):
"""Method to get files from a folder with optional filter on extensions
Args:
input_dir: input folder
extensions (list or tuple): List of extensions to filter files (Default value = None)
Returns:
List of filepaths
"""
output = []
if extensions is None:
extensions = [""]
elif not isinstance(extensions, (tuple, list)):
extensions = [extensions]
for ext in extensions:
files = Path(input_dir).rglob("*{}".format(ext))
output.extend([f.as_posix() for f in files if f.is_file()])
return output
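# Illustrative sketch (not part of the original script): collecting GeoTIFF paths from a folder.
# The folder path below is a placeholder.
def _example_collect_tiffs():
    tiff_files = get_files_from_folder("/data/rasters", extensions=["tif", "tiff"])
    # Each entry is a POSIX-style path string; order follows pathlib's rglob traversal.
    for filepath in tiff_files[:5]:
        print(filepath)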
@click.group()
def cli():
pass
def _parse_options(options):
# this import is needed to correctly run `eval`
from rasterio.enums import Resampling
assert isinstance(options, str), "Options should be a string"
output = {}
if len(options) == 0:
return output
options = options.split(';')
for opt in options:
assert "=" in opt, "Option '{}' should contain '='".format(opt)
k, v = opt.split('=')
output[k] = eval(v)
return output
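# Illustrative sketch (not part of the original script): the options string is a ';'-separated
# list of key=value pairs whose values are eval'ed, so enum members like Resampling.nearest work.
def _example_parse_options():
    parsed = _parse_options("resampling=Resampling.nearest;nodata_value=-1")
    # -> {'resampling': Resampling.nearest, 'nodata_value': -1}
    print(parsed)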
def run_task(filepath, output_dir, get_tiles_fn, output_extension, options):
try:
src = rio.open(filepath)
except rio.errors.RasterioIOError as e:
raise RuntimeError("Failed to open file: '%s'. Check if it exists or has supported format." % filepath +
"\nRasterio error message: {}".format(e))
output_tiles_dir = Path(output_dir) / "{}_tiles".format(Path(filepath).stem)
output_tiles_dir.mkdir(exist_ok=True)
tiles = get_tiles_fn((src.width, src.height))
output_extension = Path(filepath).suffix[1:] if output_extension is None else output_extension
kwargs = {}
for extent, out_size in tiles:
x, y, w, h = extent
# get data
tile = read_data_rasterio(src, src_rect=[x, y, w, h],
dst_width=out_size[0],
dst_height=out_size[1],
**_parse_options(options))
kwargs['crs'] = src.crs
if src.transform is not None:
kwargs['transform'] = src.window_transform(Window(x, y, w, h))
output_tile_filepath = output_tiles_dir / ("tile_%i_%i.%s" % (x, y, output_extension))
imwrite_rasterio(output_tile_filepath.as_posix(), tile, **kwargs)
src.close()
def _run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str):
if not quiet:
click.echo(conf_str)
if Path(input_dir_or_file).is_dir():
if extensions is not None:
extensions = extensions.split(",")
files = get_files_from_folder(input_dir_or_file, extensions)
assert len(files) > 0, "No files with extensions '{}' found at '{}'".format(extensions, input_dir_or_file)
else:
files = [input_dir_or_file]
if not Path(output_dir).exists():
if not quiet:
click.echo("Create output folder: %s" % output_dir)
Path(output_dir).mkdir(parents=True)
if not without_log_file:
cmd = sys.argv
now = datetime.now()
log_filepath = Path(output_dir) / ("%s.log" % now.strftime("%Y%m%d_%H%M%S"))
with log_filepath.open('w') as handler:
handler.write("Command:\n")
cmd_str = " ".join(cmd)
handler.write(cmd_str + "\n\n")
handler.write(conf_str + "\n")
func = partial(run_task,
output_dir=output_dir,
get_tiles_fn=get_tiles_fn,
output_extension=output_extension,
options=options)
progressbar = click.progressbar if not quiet else EmptyBar
chunk_size = 10
if n_workers > 1 and len(files) > chunk_size // 2:
with Pool(n_workers) as pool:
with progressbar(length=len(files)) as bar:
for i in range(0, len(files), chunk_size):
chunk_files = files[i: i + chunk_size]
pool.map(func, chunk_files)
bar.update(chunk_size)
else:
with progressbar(files, label='Run tile generator on files') as bar:
for f in bar:
func(f)
@click.command()
@click.argument('input_dir_or_file', type=click.Path(exists=True, file_okay=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.argument('tile_size', nargs=2, type=int)
@click.argument('stride', nargs=2, type=int)
@click.option('--origin', nargs=2, type=int, default=(0, 0),
help="Point in pixels in the original image from where to start the tiling. " +
"Values can be positive or negative")
@click.option('--scale', type=float, default=1.0,
help="Scaling applied to the input image parameters before extracting tile's extent." +
"For example, scale of 0.75 corresponds to a zoom out")
@click.option('--without_nodata', type=bool, is_flag=True,
help="Do not include nodata. Default, nodata is included. If nodata is included then tile extents " +
"have all the same size, otherwise tiles at boundaries will be reduced.")
@click.option('--extensions', type=str, default=None,
help="String of file extensions to select (if input is a directory), e.g. 'jpg,png,tif'")
@click.option('--output_extension', type=str, default=None, help="Output tile file extension. " +
"By default, input file extension is taken")
@click.option('--n_workers', default=4, type=int, help="Number of workers in the processing pool [default=4]")
@click.option('--options', type=str, default="", help="Options to pass when reading data with rasterio. " +
"Example --options='resampling=Resampling.nearest;" +
"dtype=np.float32;nodata_value=-1'")
@click.option('-q', '--quiet', is_flag=True, help='Disable verbose mode')
@click.option('--without_log_file', type=bool, is_flag=True,
help="Do not write a log file in the output folder")
def run_const_stride_tiler(input_dir_or_file, output_dir, tile_size, stride, origin, scale, without_nodata, extensions,
output_extension, n_workers, options, quiet, without_log_file):
conf_str = """
input: {}
output: {}
tile size: {}
stride: {}
origin: {}
scale: {}
without_nodata: {}
extensions: {}
output_ext: {}
n_workers: {}
options: {}
without_log_file: {}
""".format(input_dir_or_file, output_dir, tile_size, stride, origin, scale, without_nodata, extensions,
output_extension, n_workers, options, without_log_file)
get_tiles_fn = partial(ConstStrideTiles, tile_size=tile_size, stride=stride, scale=scale, origin=origin,
include_nodata=not without_nodata)
_run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str)
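# Example invocation (illustrative; assumes the module is run so that `cli()` executes,
# e.g. via an entry point or an `if __name__ == "__main__": cli()` guard not shown here):
#
#   python generate_tiles.py const_stride ./rasters ./tiles 512 512 256 256 \
#       --extensions=tif --n_workers=8 --output_extension=png
#
# This cuts every raster found under ./rasters into 512x512 tiles placed every 256 pixels.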
@click.command()
@click.argument('input_dir_or_file', type=click.Path(exists=True, file_okay=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.argument('tile_size', nargs=2, type=int)
@click.argument('min_overlapping', type=int)
@click.option('--scale', type=float, default=1.0,
help="Scaling applied to the input image parameters before extracting tile's extent." +
"For example, scale of 0.75 corresponds to a zoom out")
@click.option('--extensions', type=str, default=None,
help="String of file extensions to select (if input is a directory), e.g. 'jpg,png,tif'")
@click.option('--output_extension', type=str, default=None, help="Output tile file extension. " +
"By default, input file extension is taken")
@click.option('--n_workers', default=4, type=int, help="Number of workers in the processing pool [default=4]")
@click.option('--options', type=str, default="", help="Options to pass when reading data with rasterio. " +
"Example --options='resampling=Resampling.nearest;" +
"dtype=np.float32;nodata_value=-1'")
@click.option('-q', '--quiet', is_flag=True, help='Disable verbose mode')
@click.option('--without_log_file', type=bool, is_flag=True,
help="Do not write a log file in the output folder")
def run_const_size_tiler(input_dir_or_file, output_dir, tile_size, min_overlapping, scale, extensions,
output_extension, n_workers, options, quiet, without_log_file):
conf_str = """
input: {}
output: {}
tile size: {}
min_overlapping: {}
scale: {}
extensions: {}
output_ext: {}
n_workers: {}
options: {}
without_log_file: {}
""".format(input_dir_or_file, output_dir, tile_size, min_overlapping, scale, extensions,
output_extension, n_workers, options, without_log_file)
generate_tiles.py
def _parse_options(options):
# this import is needed to correctly run `eval`
from rasterio.enums import Resampling
assert isinstance(options, str), "Options should be a string"
output = {}
if len(options) == 0:
return output
options = options.split(';')
for opt in options:
assert "=" in opt, "Option '{}' should contain '='".format(opt)
k, v = opt.split('=')
output[k] = eval(v)
return output
def run_task(filepath, output_dir, get_tiles_fn, output_extension, options):
try:
src = rio.open(filepath)
except rio.errors.RasterioIOError as e:
raise RuntimeError("Failed to open file: '%s'. Check if it exists or has supported format." % filepath +
"\nRasterio error message: {}".format(e))
output_tiles_dir = Path(output_dir) / "{}_tiles".format(Path(filepath).stem)
output_tiles_dir.mkdir(exist_ok=True)
tiles = get_tiles_fn((src.width, src.height))
output_extension = Path(filepath).suffix[1:] if output_extension is None else output_extension
kwargs = {}
for extent, out_size in tiles:
x, y, w, h = extent
# get data
tile = read_data_rasterio(src, src_rect=[x, y, w, h],
dst_width=out_size[0],
dst_height=out_size[1],
**_parse_options(options))
kwargs['crs'] = src.crs
if src.transform is not None:
kwargs['transform'] = src.window_transform(Window(x, y, w, h))
output_tile_filepath = output_tiles_dir / ("tile_%i_%i.%s" % (x, y, output_extension))
imwrite_rasterio(output_tile_filepath.as_posix(), tile, **kwargs)
src.close()
def _run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str):
if not quiet:
click.echo(conf_str)
if Path(input_dir_or_file).is_dir():
if extensions is not None:
extensions = extensions.split(",")
files = get_files_from_folder(input_dir_or_file, extensions)
assert len(files) > 0, "No files with extensions '{}' found at '{}'".format(extensions, input_dir_or_file)
else:
files = [input_dir_or_file]
if not Path(output_dir).exists():
if not quiet:
click.echo("Create output folder: %s" % output_dir)
Path(output_dir).mkdir(parents=True)
if not without_log_file:
cmd = sys.argv
now = datetime.now()
log_filepath = Path(output_dir) / ("%s.log" % now.strftime("%Y%m%d_%H%M%S"))
with log_filepath.open('w') as handler:
handler.write("Command:\n")
cmd_str = " ".join(cmd)
handler.write(cmd_str + "\n\n")
handler.write(conf_str + "\n")
func = partial(run_task,
output_dir=output_dir,
get_tiles_fn=get_tiles_fn,
output_extension=output_extension,
options=options)
progressbar = click.progressbar if not quiet else EmptyBar
chunk_size = 10
if n_workers > 1 and len(files) > chunk_size // 2:
with Pool(n_workers) as pool:
with progressbar(length=len(files)) as bar:
for i in range(0, len(files), chunk_size):
chunk_files = files[i: i + chunk_size]
pool.map(func, chunk_files)
bar.update(chunk_size)
else:
with progressbar(files, label='Run tile generator on files') as bar:
for f in bar:
func(f)
@click.command()
@click.argument('input_dir_or_file', type=click.Path(exists=True, file_okay=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.argument('tile_size', nargs=2, type=int)
@click.argument('stride', nargs=2, type=int)
@click.option('--origin', nargs=2, type=int, default=(0, 0),
help="Point in pixels in the original image from where to start the tiling. " +
"Values can be positive or negative")
@click.option('--scale', type=float, default=1.0,
help="Scaling applied to the input image parameters before extracting tile's extent." +
"For example, scale of 0.75 corresponds to a zoom out")
@click.option('--without_nodata', type=bool, is_flag=True,
help="Do not include nodata. Default, nodata is included. If nodata is included then tile extents " +
"have all the same size, otherwise tiles at boundaries will be reduced.")
@click.option('--extensions', type=str, default=None,
help="String of file extensions to select (if input is a directory), e.g. 'jpg,png,tif'")
@click.option('--output_extension', type=str, default=None, help="Output tile file extension. " +
"By default, input file extension is taken")
@click.option('--n_workers', default=4, type=int, help="Number of workers in the processing pool [default=4]")
@click.option('--options', type=str, default="", help="Options to pass when reading data with rasterio. " +
"Example --options='resampling=Resampling.nearest;" +
"dtype=np.float32;nodata_value=-1'")
@click.option('-q', '--quiet', is_flag=True, help='Disable verbose mode')
@click.option('--without_log_file', type=bool, is_flag=True,
help="Do not write a log file in the output folder")
def run_const_stride_tiler(input_dir_or_file, output_dir, tile_size, stride, origin, scale, without_nodata, extensions,
output_extension, n_workers, options, quiet, without_log_file):
conf_str = """
input: {}
output: {}
tile size: {}
stride: {}
origin: {}
scale: {}
without_nodata: {}
extensions: {}
output_ext: {}
n_workers: {}
options: {}
without_log_file: {}
""".format(input_dir_or_file, output_dir, tile_size, stride, origin, scale, without_nodata, extensions,
output_extension, n_workers, options, without_log_file)
get_tiles_fn = partial(ConstStrideTiles, tile_size=tile_size, stride=stride, scale=scale, origin=origin,
include_nodata=not without_nodata)
_run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str)
@click.command()
@click.argument('input_dir_or_file', type=click.Path(exists=True, file_okay=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.argument('tile_size', nargs=2, type=int)
@click.argument('min_overlapping', type=int)
@click.option('--scale', type=float, default=1.0,
help="Scaling applied to the input image parameters before extracting tile's extent." +
"For example, scale of 0.75 corresponds to a zoom out")
@click.option('--extensions', type=str, default=None,
help="String of file extensions to select (if input is a directory), e.g. 'jpg,png,tif'")
@click.option('--output_extension', type=str, default=None, help="Output tile file extension. " +
"By default, input file extension is taken")
@click.option('--n_workers', default=4, type=int, help="Number of workers in the processing pool [default=4]")
@click.option('--options', type=str, default="", help="Options to pass when reading data with rasterio. " +
"Example --options='resampling=Resampling.nearest;" +
"dtype=np.float32;nodata_value=-1'")
@click.option('-q', '--quiet', is_flag=True, help='Disable verbose mode')
@click.option('--without_log_file', type=bool, is_flag=True,
help="Do not write a log file in the output folder")
def run_const_size_tiler(input_dir_or_file, output_dir, tile_size, min_overlapping, scale, extensions,
output_extension, n_workers, options, quiet, without_log_file):
conf_str = """
input: {}
output: {}
tile size: {}
min_overlapping: {}
scale: {}
extensions: {}
output_ext: {}
n_workers: {}
options: {}
without_log_file: {}
""".format(input_dir_or_file, output_dir, tile_size, min_overlapping, scale, extensions,
output_extension, n_workers, options, without_log_file)
get_tiles_fn = partial(ConstSizeTiles, tile_size=tile_size, min_overlapping=min_overlapping, scale=scale)
_run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str)
class EmptyBar(object):
def __init__(self, iterable=None, **kwargs):
self.iterable = iterable
def __enter__(self):
return self.iterable
def __exit__(self, *args, **kwargs):
pass
def update(self, n_steps):
pass
cli.add_command(run_const_stride_tiler, name="const_stride") | cli.add_command(run_const_size_tiler, name="const_size")
| random_line_split |
|
generate_tiles.py

read_data_rasterio
def get_files_from_folder(input_dir, extensions=None):
|
@click.group()
def cli():
pass
def _parse_options(options):
# this import is needed to correctly run `eval`
from rasterio.enums import Resampling
assert isinstance(options, str), "Options should be a string"
output = {}
if len(options) == 0:
return output
options = options.split(';')
for opt in options:
assert "=" in opt, "Option '{}' should contain '='".format(opt)
k, v = opt.split('=')
output[k] = eval(v)
return output
def run_task(filepath, output_dir, get_tiles_fn, output_extension, options):
try:
src = rio.open(filepath)
except rio.errors.RasterioIOError as e:
raise RuntimeError("Failed to open file: '%s'. Check if it exists or has supported format." % filepath +
"\nRasterio error message: {}".format(e))
output_tiles_dir = Path(output_dir) / "{}_tiles".format(Path(filepath).stem)
output_tiles_dir.mkdir(exist_ok=True)
tiles = get_tiles_fn((src.width, src.height))
output_extension = Path(filepath).suffix[1:] if output_extension is None else output_extension
kwargs = {}
for extent, out_size in tiles:
x, y, w, h = extent
# get data
tile = read_data_rasterio(src, src_rect=[x, y, w, h],
dst_width=out_size[0],
dst_height=out_size[1],
**_parse_options(options))
kwargs['crs'] = src.crs
if src.transform is not None:
kwargs['transform'] = src.window_transform(Window(x, y, w, h))
output_tile_filepath = output_tiles_dir / ("tile_%i_%i.%s" % (x, y, output_extension))
imwrite_rasterio(output_tile_filepath.as_posix(), tile, **kwargs)
src.close()
def _run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str):
if not quiet:
click.echo(conf_str)
if Path(input_dir_or_file).is_dir():
if extensions is not None:
extensions = extensions.split(",")
files = get_files_from_folder(input_dir_or_file, extensions)
assert len(files) > 0, "No files with extensions '{}' found at '{}'".format(extensions, input_dir_or_file)
else:
files = [input_dir_or_file]
if not Path(output_dir).exists():
if not quiet:
click.echo("Create output folder: %s" % output_dir)
Path(output_dir).mkdir(parents=True)
if not without_log_file:
cmd = sys.argv
now = datetime.now()
log_filepath = Path(output_dir) / ("%s.log" % now.strftime("%Y%m%d_%H%M%S"))
with log_filepath.open('w') as handler:
handler.write("Command:\n")
cmd_str = " ".join(cmd)
handler.write(cmd_str + "\n\n")
handler.write(conf_str + "\n")
func = partial(run_task,
output_dir=output_dir,
get_tiles_fn=get_tiles_fn,
output_extension=output_extension,
options=options)
progressbar = click.progressbar if not quiet else EmptyBar
chunk_size = 10
if n_workers > 1 and len(files) > chunk_size // 2:
with Pool(n_workers) as pool:
with progressbar(length=len(files)) as bar:
for i in range(0, len(files), chunk_size):
chunk_files = files[i: i + chunk_size]
pool.map(func, chunk_files)
bar.update(chunk_size)
else:
with progressbar(files, label='Run tile generator on files') as bar:
for f in bar:
func(f)
@click.command()
@click.argument('input_dir_or_file', type=click.Path(exists=True, file_okay=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.argument('tile_size', nargs=2, type=int)
@click.argument('stride', nargs=2, type=int)
@click.option('--origin', nargs=2, type=int, default=(0, 0),
help="Point in pixels in the original image from where to start the tiling. " +
"Values can be positive or negative")
@click.option('--scale', type=float, default=1.0,
help="Scaling applied to the input image parameters before extracting tile's extent." +
"For example, scale of 0.75 corresponds to a zoom out")
@click.option('--without_nodata', type=bool, is_flag=True,
help="Do not include nodata. Default, nodata is included. If nodata is included then tile extents " +
"have all the same size, otherwise tiles at boundaries will be reduced.")
@click.option('--extensions', type=str, default=None,
help="String of file extensions to select (if input is a directory), e.g. 'jpg,png,tif'")
@click.option('--output_extension', type=str, default=None, help="Output tile file extension. " +
"By default, input file extension is taken")
@click.option('--n_workers', default=4, type=int, help="Number of workers in the processing pool [default=4]")
@click.option('--options', type=str, default="", help="Options to pass when reading data with rasterio. " +
"Example --options='resampling=Resampling.nearest;" +
"dtype=np.float32;nodata_value=-1'")
@click.option('-q', '--quiet', is_flag=True, help='Disable verbose mode')
@click.option('--without_log_file', type=bool, is_flag=True,
help="Do not write a log file in the output folder")
def run_const_stride_tiler(input_dir_or_file, output_dir, tile_size, stride, origin, scale, without_nodata, extensions,
output_extension, n_workers, options, quiet, without_log_file):
conf_str = """
input: {}
output: {}
tile size: {}
stride: {}
origin: {}
scale: {}
without_nodata: {}
extensions: {}
output_ext: {}
n_workers: {}
options: {}
without_log_file: {}
""".format(input_dir_or_file, output_dir, tile_size, stride, origin, scale, without_nodata, extensions,
output_extension, n_workers, options, without_log_file)
get_tiles_fn = partial(ConstStrideTiles, tile_size=tile_size, stride=stride, scale=scale, origin=origin,
include_nodata=not without_nodata)
_run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str)
@click.command()
@click.argument('input_dir_or_file', type=click.Path(exists=True, file_okay=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.argument('tile_size', nargs=2, type=int)
@click.argument('min_overlapping', type=int)
@click.option('--scale', type=float, default=1.0,
help="Scaling applied to the input image parameters before extracting tile's extent." +
"For example, scale of 0.75 corresponds to a zoom out")
@click.option('--extensions', type=str, default=None,
help="String of file extensions to select (if input is a directory), e.g. 'jpg,png,tif'")
@click.option('--output_extension', type=str, default=None, help="Output tile file extension. " +
"By default, input file extension is taken")
@click.option('--n_workers', default=4, type=int, help="Number of workers in the processing pool [default=4]")
@click.option('--options', type=str, default="", help="Options to pass when reading data with rasterio. " +
"Example --options='resampling=Resampling.nearest;" +
"dtype=np.float32;nodata_value=-1'")
@click.option('-q', '--quiet', is_flag=True, help='Disable verbose mode')
@click.option('--without_log_file', type=bool, is_flag=True,
help="Do not write a log file in the output folder")
def run_const_size_tiler(input_dir_or_file, output_dir, tile_size, min_overlapping, scale, extensions,
output_extension, n_workers, options, quiet, without_log_file):
conf_str = """
input: {}
output: {}
tile size: {}
min_overlapping: {}
scale: {}
extensions: {}
output_ext: {}
n_workers: {}
options: {}
without_log_file: {}
""".format(input_dir_or_file, output_dir, tile_size, min_overlapping, scale, extensions,
output_extension, n_workers, options, without_log_file)
| """Method to get files from a folder with optional filter on extensions
Args:
input_dir: input folder
extensions (list or tuple): List of extensions to filter files (Default value = None)
Returns:
List of filepaths
"""
output = []
if extensions is None:
extensions = [""]
elif not isinstance(extensions, (tuple, list)):
extensions = [extensions]
for ext in extensions:
files = Path(input_dir).rglob("*{}".format(ext))
output.extend([f.as_posix() for f in files if f.is_file()])
return output | identifier_body |
generate_tiles.py

read_data_rasterio
def get_files_from_folder(input_dir, extensions=None):
"""Method to get files from a folder with optional filter on extensions
Args:
input_dir: input folder
extensions (list or tuple): List of extensions to filter files (Default value = None)
Returns:
List of filepaths
"""
output = []
if extensions is None:
extensions = [""]
elif not isinstance(extensions, (tuple, list)):
extensions = [extensions]
for ext in extensions:
files = Path(input_dir).rglob("*{}".format(ext))
output.extend([f.as_posix() for f in files if f.is_file()])
return output
@click.group()
def cli():
pass
def _parse_options(options):
# this import is needed to correctly run `eval`
from rasterio.enums import Resampling
assert isinstance(options, str), "Options should be a string"
output = {}
if len(options) == 0:
return output
options = options.split(';')
for opt in options:
assert "=" in opt, "Option '{}' should contain '='".format(opt)
k, v = opt.split('=')
output[k] = eval(v)
return output
def run_task(filepath, output_dir, get_tiles_fn, output_extension, options):
try:
src = rio.open(filepath)
except rio.errors.RasterioIOError as e:
raise RuntimeError("Failed to open file: '%s'. Check if it exists or has supported format." % filepath +
"\nRasterio error message: {}".format(e))
output_tiles_dir = Path(output_dir) / "{}_tiles".format(Path(filepath).stem)
output_tiles_dir.mkdir(exist_ok=True)
tiles = get_tiles_fn((src.width, src.height))
output_extension = Path(filepath).suffix[1:] if output_extension is None else output_extension
kwargs = {}
for extent, out_size in tiles:
x, y, w, h = extent
# get data
tile = read_data_rasterio(src, src_rect=[x, y, w, h],
dst_width=out_size[0],
dst_height=out_size[1],
**_parse_options(options))
kwargs['crs'] = src.crs
if src.transform is not None:
kwargs['transform'] = src.window_transform(Window(x, y, w, h))
output_tile_filepath = output_tiles_dir / ("tile_%i_%i.%s" % (x, y, output_extension))
imwrite_rasterio(output_tile_filepath.as_posix(), tile, **kwargs)
src.close()
def _run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str):
if not quiet:
click.echo(conf_str)
if Path(input_dir_or_file).is_dir():
if extensions is not None:
extensions = extensions.split(",")
files = get_files_from_folder(input_dir_or_file, extensions)
assert len(files) > 0, "No files with extensions '{}' found at '{}'".format(extensions, input_dir_or_file)
else:
files = [input_dir_or_file]
if not Path(output_dir).exists():
if not quiet:
click.echo("Create output folder: %s" % output_dir)
Path(output_dir).mkdir(parents=True)
if not without_log_file:
cmd = sys.argv
now = datetime.now()
log_filepath = Path(output_dir) / ("%s.log" % now.strftime("%Y%m%d_%H%M%S"))
with log_filepath.open('w') as handler:
handler.write("Command:\n")
cmd_str = " ".join(cmd)
handler.write(cmd_str + "\n\n")
handler.write(conf_str + "\n")
func = partial(run_task,
output_dir=output_dir,
get_tiles_fn=get_tiles_fn,
output_extension=output_extension,
options=options)
progressbar = click.progressbar if not quiet else EmptyBar
chunk_size = 10
if n_workers > 1 and len(files) > chunk_size // 2:
with Pool(n_workers) as pool:
with progressbar(length=len(files)) as bar:
for i in range(0, len(files), chunk_size):
chunk_files = files[i: i + chunk_size]
pool.map(func, chunk_files)
bar.update(chunk_size)
else:
with progressbar(files, label='Run tile generator on files') as bar:
for f in bar:
func(f)
@click.command()
@click.argument('input_dir_or_file', type=click.Path(exists=True, file_okay=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.argument('tile_size', nargs=2, type=int)
@click.argument('stride', nargs=2, type=int)
@click.option('--origin', nargs=2, type=int, default=(0, 0),
help="Point in pixels in the original image from where to start the tiling. " +
"Values can be positive or negative")
@click.option('--scale', type=float, default=1.0,
help="Scaling applied to the input image parameters before extracting tile's extent." +
"For example, scale of 0.75 corresponds to a zoom out")
@click.option('--without_nodata', type=bool, is_flag=True,
help="Do not include nodata. Default, nodata is included. If nodata is included then tile extents " +
"have all the same size, otherwise tiles at boundaries will be reduced.")
@click.option('--extensions', type=str, default=None,
help="String of file extensions to select (if input is a directory), e.g. 'jpg,png,tif'")
@click.option('--output_extension', type=str, default=None, help="Output tile file extension. " +
"By default, input file extension is taken")
@click.option('--n_workers', default=4, type=int, help="Number of workers in the processing pool [default=4]")
@click.option('--options', type=str, default="", help="Options to pass when reading data with rasterio. " +
"Example --options='resampling=Resampling.nearest;" +
"dtype=np.float32;nodata_value=-1'")
@click.option('-q', '--quiet', is_flag=True, help='Disable verbose mode')
@click.option('--without_log_file', type=bool, is_flag=True,
help="Do not write a log file in the output folder")
def run_const_stride_tiler(input_dir_or_file, output_dir, tile_size, stride, origin, scale, without_nodata, extensions,
output_extension, n_workers, options, quiet, without_log_file):
conf_str = """
input: {}
output: {}
tile size: {}
stride: {}
origin: {}
scale: {}
without_nodata: {}
extensions: {}
output_ext: {}
n_workers: {}
options: {}
without_log_file: {}
""".format(input_dir_or_file, output_dir, tile_size, stride, origin, scale, without_nodata, extensions,
output_extension, n_workers, options, without_log_file)
get_tiles_fn = partial(ConstStrideTiles, tile_size=tile_size, stride=stride, scale=scale, origin=origin,
include_nodata=not without_nodata)
_run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str)
@click.command()
@click.argument('input_dir_or_file', type=click.Path(exists=True, file_okay=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.argument('tile_size', nargs=2, type=int)
@click.argument('min_overlapping', type=int)
@click.option('--scale', type=float, default=1.0,
help="Scaling applied to the input image parameters before extracting tile's extent." +
"For example, scale of 0.75 corresponds to a zoom out")
@click.option('--extensions', type=str, default=None,
help="String of file extensions to select (if input is a directory), e.g. 'jpg,png,tif'")
@click.option('--output_extension', type=str, default=None, help="Output tile file extension. " +
"By default, input file extension is taken")
@click.option('--n_workers', default=4, type=int, help="Number of workers in the processing pool [default=4]")
@click.option('--options', type=str, default="", help="Options to pass when reading data with rasterio. " +
"Example --options='resampling=Resampling.nearest;" +
"dtype=np.float32;nodata_value=-1'")
@click.option('-q', '--quiet', is_flag=True, help='Disable verbose mode')
@click.option('--without_log_file', type=bool, is_flag=True,
help="Do not write a log file in the output folder")
def run_const_size_tiler(input_dir_or_file, output_dir, tile_size, min_overlapping, scale, extensions,
output_extension, n_workers, options, quiet, without_log_file):
conf_str = """
input: {}
output: {}
tile size: {}
min_overlapping: {}
scale: {}
extensions: {}
output_ext: {}
n_workers: {}
options: {}
without_log_file: {}
""".format(input_dir_or_file, output_dir, tile_size, min_overlapping, scale, extensions,
output_extension, n_workers, options, without_log_file)
gridsummary.js
v.afterMethod('render', this.refreshSummary, this);
v.afterMethod('refresh', this.refreshSummary, this);
v.afterMethod('onColumnWidthUpdated', this.doWidth, this);
v.afterMethod('onAllColumnWidthsUpdated', this.doAllWidths, this);
v.afterMethod('onColumnHiddenUpdated', this.doHidden, this);
grid.on('columnresize', this.refreshSummary, this);
grid.on('columnmove', this.refreshSummary, this);
grid.getColumnModel().on('hiddenchange', this.refreshSummary, this);
grid.on('resize', this.refreshSummary, this);
if (Ext.isGecko || Ext.isOpera) {
// restore gridview's horizontal scroll position when store data is changed
//
// TODO -- when sorting a column in Opera, the summary row's horizontal scroll position is
// synced with the gridview, but is displaced 1 vertical scrollbar width to the right
v.afterMethod('onDataChange', this.restoreGridHScroll, this);
}
grid.on({
bodyscroll : this.syncSummaryScroll,
beforedestroy : this.beforeDestroy,
scope : this
});
// update summary row on store's add/remove/clear/update events
grid.store.on({
add : this.refreshSummary,
remove : this.refreshSummary,
clear : this.refreshSummary,
update : this.refreshSummary,
scope : this
});
if (!this.rowTpl) {
this.rowTpl = new Ext.Template(
'<div class="x-grid3-summary-row x-grid3-gridsummary-row-offset">',
'<table class="x-grid3-summary-table" border="0" cellspacing="0" cellpadding="0" style="{tstyle}">',
'<tbody><tr>{cells}</tr></tbody>',
'</table>',
'</div>'
);
this.rowTpl.disableFormats = true;
}
this.rowTpl.compile();
if (!this.cellTpl) {
this.cellTpl = new Ext.Template(
'<td class="x-grid3-col x-grid3-cell x-grid3-td-{id} {css}" style="{style}">',
'<div class="x-grid3-cell-inner x-grid3-col-{id}" unselectable="on" {attr}>{value}</div>',
"</td>"
);
this.cellTpl.disableFormats = true;
}
this.cellTpl.compile();
},
// private
calculate : function(rs, cm) {
var data = {},
cfg = cm.config,
i, len, cf, cname, j, jlen, r;
for (i = 0, len = cfg.length; i < len; i++) { // loop through all columns in ColumnModel
cf = cfg[i]; // get column's configuration
cname = cf.dataIndex; // get column dataIndex
// initialise grid summary row data for
// the current column being worked on
data[cname] = 0;
if (cf.summaryType) {
for (j = 0, jlen = rs.length; j < jlen; j++) {
r = rs[j]; // get a single Record
data[cname] = Ext.ux.grid.GridSummary.Calculations[cf.summaryType](r.get(cname), r, cname, data, j);
}
}
}
return data;
},
// private
onLayout : function(vw, vh) { // note: this method is scoped to the GridView
if (typeof(vh) != 'number') { // handles grid's height:'auto' config
return;
}
if (!this.grid.getGridEl().hasClass('x-grid3-hide-gridsummary')) {
// readjust gridview's height only if grid summary row is visible
this.scroller.setHeight(vh - this.summaryWrap.getHeight());
}
},
// private
syncScroll : function(refEl, scrollEl, currX, currY) {
currX = currX || refEl.scrollLeft;
currY = currY || refEl.scrollTop;
if (this.oldX != currX) { // only adjust horizontal scroll when horizontal scroll is detected
scrollEl.scrollLeft = currX;
scrollEl.scrollLeft = currX; // second time for IE (1/2 the time first call fails. other browsers simply ignore repeated calls)
}
// remember current scroll position
this.oldX = currX;
this.oldY = currY;
},
// private
syncSummaryScroll : function(currX, currY) {
var v = this.view,
y = this.oldY;
if (
// workaround for Gecko's horizontal-scroll reset bug
// (see unresolved mozilla bug: https://bugzilla.mozilla.org/show_bug.cgi?id=386444
// "using vertical scrollbar changes horizontal scroll position with overflow-x:hidden and overflow-y:scroll")
Ext.isGecko && // 1) <div>s with overflow-x:hidden have their DOM.scrollLeft property set to 0 when scrolling vertically
currX === 0 && // 2) current x-ordinate is now zero
this.oldX > 0 && // 3) gridview is not at x=0 ordinate
(y !== currY || y === 0) // 4) vertical scroll detected / vertical scrollbar is moved rapidly all the way to the top
) {
this.restoreGridHScroll();
} else {
this.syncScroll(v.scroller.dom, v.summaryWrap.dom, currX, currY);
}
},
// private
restoreGridHScroll : function() {
// restore gridview's original x-ordinate
// (note: this causes an unavoidable flicker in the gridview)
this.view.scroller.dom.scrollLeft = this.oldX || 0;
},
// private
syncGridHScroll : function() {
var v = this.view;
this.syncScroll(v.summaryWrap.dom, v.scroller.dom);
},
// private
doWidth : function(col, w, tw) {
var s = this.getSummaryNode(),
fc = s.dom.firstChild;
fc.style.width = tw;
fc.rows[0].childNodes[col].style.width = w;
this.updateSummaryWidth();
},
// private
doAllWidths : function(ws, tw) {
var s = this.getSummaryNode(),
fc = s.dom.firstChild,
cells = fc.rows[0].childNodes,
wlen = ws.length,
j;
fc.style.width = tw;
for (j = 0; j < wlen; j++) {
cells[j].style.width = ws[j];
}
this.updateSummaryWidth();
},
// private
doHidden : function(col, hidden, tw) {
var s = this.getSummaryNode(),
fc = s.dom.firstChild,
display = hidden ? 'none' : '';
fc.style.width = tw;
fc.rows[0].childNodes[col].style.display = display;
this.updateSummaryWidth();
},
// private
getGridHeader : function() {
if (!this.gridHeader) {
this.gridHeader = this.view.mainHd.child('.x-grid3-header-offset');
}
return this.gridHeader;
},
// private
updateSummaryWidth : function() {
// all browsers add a 1 pixel space between the edges of the vert. and hori. scrollbars,
// so subtract one from the grid header width before setting the summary row's width
//kirov this.getSummaryNode().setWidth(this.getGridHeader().getWidth() - 1);
if (this.getSummaryNode()) {
this.getSummaryNode().setWidth(this.view.getTotalWidth()); //kirov
}
// kirov
if (Ext.isIE) {
var elWidth = this.grid.getGridEl().getSize().width;
if (this.grid.getColumnModel().getTotalWidth()+this.view.getScrollOffset() > elWidth){
//console.log('scroll');
//debugger;
this.view.summaryWrap.dom.style['overflow-y'] = 'hidden';
this.view.summaryWrap.setHeight(((Ext.getScrollBarWidth ? Ext.getScrollBarWidth() : this.scrollBarWidth) + 18 /* 18 = row-expander height */));
} else {
this.view.summaryWrap.dom.style['overflow-y'] = 'visible';
this.view.summaryWrap.setHeight((Ext.getScrollBarWidth ? Ext.getScrollBarWidth() : this.scrollBarWidth));
}
}
},
// private
renderSummary : function(o, cs, cm) {
cs = cs || this.view.getColumnData();
var cfg = cm.config,
buf = [],
last = cs.length - 1,
i, len, c, cf, p;
for (i = 0, len = cs.length; i < len; i++) {
c = cs[i];
cf = cfg[i];
p = {};
p.id = c.id;
p.style = c.style;
p.css = i === 0 ? 'x-grid3-cell-first ' : (i == last ? 'x-grid3-cell-last ' : '');
if (cf.summaryType || cf.summaryRenderer) {
p.value = (cf.summaryRenderer || | this.toggleGridHScroll(false);
}, this, { delay: 10 });
} else {
v.afterMethod('render', this.toggleGridHScroll, this);
} | random_line_split |
|
gridsummary.js | ().on('hiddenchange', this.refreshSummary, this);
grid.on('resize', this.refreshSummary, this);
if (Ext.isGecko || Ext.isOpera) {
// restore gridview's horizontal scroll position when store data is changed
//
// TODO -- when sorting a column in Opera, the summary row's horizontal scroll position is
// synced with the gridview, but is displaced 1 vertical scrollbar width to the right
v.afterMethod('onDataChange', this.restoreGridHScroll, this);
}
grid.on({
bodyscroll : this.syncSummaryScroll,
beforedestroy : this.beforeDestroy,
scope : this
});
// update summary row on store's add/remove/clear/update events
grid.store.on({
add : this.refreshSummary,
remove : this.refreshSummary,
clear : this.refreshSummary,
update : this.refreshSummary,
scope : this
});
if (!this.rowTpl) {
this.rowTpl = new Ext.Template(
'<div class="x-grid3-summary-row x-grid3-gridsummary-row-offset">',
'<table class="x-grid3-summary-table" border="0" cellspacing="0" cellpadding="0" style="{tstyle}">',
'<tbody><tr>{cells}</tr></tbody>',
'</table>',
'</div>'
);
this.rowTpl.disableFormats = true;
}
this.rowTpl.compile();
if (!this.cellTpl) {
this.cellTpl = new Ext.Template(
'<td class="x-grid3-col x-grid3-cell x-grid3-td-{id} {css}" style="{style}">',
'<div class="x-grid3-cell-inner x-grid3-col-{id}" unselectable="on" {attr}>{value}</div>',
"</td>"
);
this.cellTpl.disableFormats = true;
}
this.cellTpl.compile();
},
// private
calculate : function(rs, cm) {
var data = {},
cfg = cm.config,
i, len, cf, cname, j, jlen, r;
for (i = 0, len = cfg.length; i < len; i++) { // loop through all columns in ColumnModel
cf = cfg[i]; // get column's configuration
cname = cf.dataIndex; // get column dataIndex
// initialise grid summary row data for
// the current column being worked on
data[cname] = 0;
if (cf.summaryType) {
for (j = 0, jlen = rs.length; j < jlen; j++) {
r = rs[j]; // get a single Record
data[cname] = Ext.ux.grid.GridSummary.Calculations[cf.summaryType](r.get(cname), r, cname, data, j);
}
}
}
return data;
},
// private
onLayout : function(vw, vh) { // note: this method is scoped to the GridView
if (typeof(vh) != 'number') |
if (!this.grid.getGridEl().hasClass('x-grid3-hide-gridsummary')) {
// readjust gridview's height only if grid summary row is visible
this.scroller.setHeight(vh - this.summaryWrap.getHeight());
}
},
// private
syncScroll : function(refEl, scrollEl, currX, currY) {
currX = currX || refEl.scrollLeft;
currY = currY || refEl.scrollTop;
if (this.oldX != currX) { // only adjust horizontal scroll when horizontal scroll is detected
scrollEl.scrollLeft = currX;
scrollEl.scrollLeft = currX; // second time for IE (1/2 the time first call fails. other browsers simply ignore repeated calls)
}
// remember current scroll position
this.oldX = currX;
this.oldY = currY;
},
// private
syncSummaryScroll : function(currX, currY) {
var v = this.view,
y = this.oldY;
if (
// workaround for Gecko's horizontal-scroll reset bug
// (see unresolved mozilla bug: https://bugzilla.mozilla.org/show_bug.cgi?id=386444
// "using vertical scrollbar changes horizontal scroll position with overflow-x:hidden and overflow-y:scroll")
Ext.isGecko && // 1) <div>s with overflow-x:hidden have their DOM.scrollLeft property set to 0 when scrolling vertically
currX === 0 && // 2) current x-ordinate is now zero
this.oldX > 0 && // 3) gridview is not at x=0 ordinate
(y !== currY || y === 0) // 4) vertical scroll detected / vertical scrollbar is moved rapidly all the way to the top
) {
this.restoreGridHScroll();
} else {
this.syncScroll(v.scroller.dom, v.summaryWrap.dom, currX, currY);
}
},
// private
restoreGridHScroll : function() {
// restore gridview's original x-ordinate
// (note: this causes an unavoidable flicker in the gridview)
this.view.scroller.dom.scrollLeft = this.oldX || 0;
},
// private
syncGridHScroll : function() {
var v = this.view;
this.syncScroll(v.summaryWrap.dom, v.scroller.dom);
},
// private
doWidth : function(col, w, tw) {
var s = this.getSummaryNode(),
fc = s.dom.firstChild;
fc.style.width = tw;
fc.rows[0].childNodes[col].style.width = w;
this.updateSummaryWidth();
},
// private
doAllWidths : function(ws, tw) {
var s = this.getSummaryNode(),
fc = s.dom.firstChild,
cells = fc.rows[0].childNodes,
wlen = ws.length,
j;
fc.style.width = tw;
for (j = 0; j < wlen; j++) {
cells[j].style.width = ws[j];
}
this.updateSummaryWidth();
},
// private
doHidden : function(col, hidden, tw) {
var s = this.getSummaryNode(),
fc = s.dom.firstChild,
display = hidden ? 'none' : '';
fc.style.width = tw;
fc.rows[0].childNodes[col].style.display = display;
this.updateSummaryWidth();
},
// private
getGridHeader : function() {
if (!this.gridHeader) {
this.gridHeader = this.view.mainHd.child('.x-grid3-header-offset');
}
return this.gridHeader;
},
// private
updateSummaryWidth : function() {
// all browsers add a 1 pixel space between the edges of the vert. and hori. scrollbars,
// so subtract one from the grid header width before setting the summary row's width
//kirov this.getSummaryNode().setWidth(this.getGridHeader().getWidth() - 1);
if (this.getSummaryNode()) {
this.getSummaryNode().setWidth(this.view.getTotalWidth()); //kirov
}
// kirov
if (Ext.isIE) {
var elWidth = this.grid.getGridEl().getSize().width;
if (this.grid.getColumnModel().getTotalWidth()+this.view.getScrollOffset() > elWidth){
//console.log('scroll');
//debugger;
this.view.summaryWrap.dom.style['overflow-y'] = 'hidden';
this.view.summaryWrap.setHeight(((Ext.getScrollBarWidth ? Ext.getScrollBarWidth() : this.scrollBarWidth) + 18 /* 18 = row-expander height */));
} else {
this.view.summaryWrap.dom.style['overflow-y'] = 'visible';
this.view.summaryWrap.setHeight((Ext.getScrollBarWidth ? Ext.getScrollBarWidth() : this.scrollBarWidth));
}
}
},
// private
renderSummary : function(o, cs, cm) {
cs = cs || this.view.getColumnData();
var cfg = cm.config,
buf = [],
last = cs.length - 1,
i, len, c, cf, p;
for (i = 0, len = cs.length; i < len; i++) {
c = cs[i];
cf = cfg[i];
p = {};
p.id = c.id;
p.style = c.style;
p.css = i === 0 ? 'x-grid3-cell-first ' : (i == last ? 'x-grid3-cell-last ' : '');
if (cf.summaryType || cf.summaryRenderer) {
p.value = (cf.summaryRenderer || c.renderer)(o.data[c.name], p, o);
} else {
p.value = '';
}
if (p.value === undefined || p.value === "") {
p.value = " ";
}
buf[buf.length] = this.cellTpl.apply(p);
}
return this.rowTpl.apply({
tstyle: 'width:' + this.view.getTotalWidth() + ';',
cells: buf.join('')
});
},
// private
refreshSummary : function() {
var g = this.grid,
ds = g.store,
cs = this.view.getColumnData(),
cm = g.getColumnModel(),
rs = ds.getRange(),
| { // handles grid's height:'auto' config
return;
} | conditional_block |
output.rs | Tee, Tee};
use crate::common::Port;
/// Describing how dataset tags will be changed when output from an output port.
///
/// # Please note!
/// Since we have just a few built-in operators that manipulate dataset tags, for
/// simplicity `OutputDelta` is defined per output port and applies to all inputs.
/// For example, to generate an output dataset, if a binary operator advances the
/// dataset tag of one input, it must do the same to the other input.
#[derive(Clone, Copy, Debug)]
pub enum OutputDelta {
/// Dataset tag won't be changed.
None,
/// Advance the current counter of the tag; usually the loop body output
/// of LoopController.
Advance,
/// Add a new dimension to the tag; usually the EnterScope operator.
ToChild,
/// Remove the current dimension of the tag; usually the LeaveScope operator.
ToParent,
}
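// Illustrative sketch (added for clarity; not part of the original source): assuming a
// `Tag` behaves like a hierarchical counter such as [i, j], the transformations applied
// by `match_output` below would look roughly like:
//   None     : [i, j] -> [i, j]        (unchanged)
//   Advance  : [i, j] -> tag.advance() (e.g. bump the innermost counter to [i, j + 1])
//   ToChild  : [i, j] -> [i, j, 0]     (Tag::from(tag, 0) appends a new dimension)
//   ToParent : [i, j] -> [i]           (tag.to_parent() drops the innermost dimension)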
impl OutputDelta {
pub fn matcher_of(&self, tag: &Tag) -> TagMatcher {
match self {
OutputDelta::None => TagMatcher::Equals(tag.clone()),
OutputDelta::Advance => TagMatcher::Equals(tag.retreat()),
OutputDelta::ToChild => TagMatcher::Equals(tag.to_parent()),
OutputDelta::ToParent => TagMatcher::Prefix(tag.clone()),
}
}
}
pub struct OutputBuilder<D> {
pub batch_size : usize,
pub worker: WorkerId,
pub port: Port,
shared: Rc<RefCell<Vec<(Box<dyn Push<DataSet<D>>>, ChannelId, bool)>>>,
events_buf: EventsBuffer
}
impl<D: Data> OutputBuilder<D> {
pub fn new(batch: usize, worker: WorkerId, port: Port, events_buf: &EventsBuffer) -> Self {
OutputBuilder {
batch_size: batch,
worker,
port,
shared: Rc::new(RefCell::new(Vec::new())),
events_buf: events_buf.clone()
}
}
pub fn add_push<P>(&self, ch_id: ChannelId, local: bool, push: P) where P: Push<DataSet<D>> + 'static {
self.shared.borrow_mut().push((Box::new(push), ch_id, local));
}
pub fn build_tee(self) -> WrappedTee<DataSet<D>> {
let mut pushes = Vec::new();
let mut ch_ids = Vec::new();
{
let mut shared = self.shared.borrow_mut();
for (p, c, l) in shared.drain(..) {
pushes.push(p);
ch_ids.push((c, l));
}
}
let tee = Tee::<DataSet<D>>::from(pushes);
WrappedTee::new(self.worker, tee, ch_ids, &self.events_buf)
}
}
impl<D: Data> Clone for OutputBuilder<D> {
fn clone(&self) -> Self {
OutputBuilder {
batch_size: self.batch_size,
worker: self.worker,
port: self.port,
shared: self.shared.clone(),
events_buf: self.events_buf.clone()
}
}
}
pub trait TaggedOutput: AsAny + Send {
fn set_output_capacity(&mut self, capacity: usize);
fn has_capacity(&self) -> bool;
fn clear_capacity(&mut self);
fn transmit_end(&mut self, tag: Tag) -> IOResult<()>;
fn delta(&self) -> &OutputDelta;
fn close(&mut self) -> IOResult<()>;
fn is_closed(&self) -> bool;
}
pub trait TaggedOutputBuilder {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput>;
}
pub struct OutputHandle<D: Data> {
pub port: Port,
pub delta: OutputDelta,
inner: WrappedTee<DataSet<D>>,
capacity: Option<usize>,
batch_size: usize,
poisoned: bool
}
impl<D: Data> AsAny for OutputHandle<D> {
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
fn as_any_ref(&self) -> &dyn Any {
self
}
}
impl<D: Data> TaggedOutput for OutputHandle<D> {
#[inline]
fn set_output_capacity(&mut self, capacity: usize) {
self.capacity.replace(capacity);
}
#[inline]
fn has_capacity(&self) -> bool {
if let Some(left) = self.capacity.as_ref() {
*left > 0
} else {
true
}
}
#[inline]
fn clear_capacity(&mut self) {
self.capacity = None;
}
#[inline]
fn transmit_end(&mut self, tag: Tag) -> IOResult<()> {
//let matched = self.match_output(&tag).unwrap_or(tag);
self.inner.transmit_end(tag)
}
#[inline]
fn delta(&self) -> &OutputDelta {
&self.delta
}
#[inline]
fn close(&mut self) -> IOResult<()> {
if !self.poisoned {
trace!("Worker[{}], output[{:?}] is closing ...", self.inner.worker, self.port);
self.poisoned = true;
self.inner.close()?;
}
Ok(())
}
#[inline]
fn is_closed(&self) -> bool {
self.poisoned
}
}
impl<D: Data> OutputHandle<D> {
pub fn new(output: WrappedTee<DataSet<D>>, batch: usize, port: Port, delta: OutputDelta) -> Self {
OutputHandle {
port,
delta,
inner: output,
capacity: None,
batch_size: batch,
poisoned: false
}
}
pub fn downcast(origin: &mut Box<dyn TaggedOutput>) -> &mut Self {
// TODO: handle downcast failure
origin.as_any_mut().downcast_mut::<Self>().expect("Downcast to OutputHandle failure")
}
#[inline]
fn match_output(&self, tag: &Tag) -> Option<Tag> {
match self.delta {
OutputDelta::None => None,
OutputDelta::Advance => Some(tag.advance()),
OutputDelta::ToParent => Some(tag.to_parent()),
OutputDelta::ToChild => Some(Tag::from(tag, 0))
}
}
}
impl<D: Data> TaggedOutputBuilder for OutputBuilder<D> {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput> {
let batch_size = self.batch_size;
let port = self.port;
let tee = self.build_tee();
let output = OutputHandle::new(tee, batch_size, port, delta);
Box::new(output) as Box<dyn TaggedOutput>
}
}
pub struct Session<'a, D: Data> {
output: &'a mut WrappedTee<DataSet<D>>,
capacity: Option<&'a mut usize>,
batch_size: usize,
tag: Tag,
buffer: Vec<D>
}
impl<'a, D: Data> Session<'a, D> {
pub fn new(output: &'a mut WrappedTee<DataSet<D>>, tag: Tag, batch: usize, capacity: Option<&'a mut usize>) -> Self {
Session {
output,
capacity,
batch_size: batch,
tag,
buffer: Vec::with_capacity(batch),
}
}
/// Output one message; on success, the returned boolean indicates whether output capacity is still available.
pub fn give(&mut self, msg: D) -> IOResult<bool> {
self.push(msg)?;
Ok(self.update_capacity(1))
}
pub fn give_iterator<I: Iterator<Item = D>>(&mut self, iter: &mut I) -> IOResult<bool> {
if let Some(capacity) = self.capacity.as_ref().map(|c| **c) {
let mut count = 0;
while count < capacity {
if let Some(item) = iter.next() {
self.push(item)?;
} else {
break
}
count += 1;
}
Ok(self.update_capacity(count))
} else |
}
pub fn give_entire_iterator<I: IntoIterator<Item = D>>(&mut self, iter: I) -> IOResult<bool> {
let mut count = 0;
for datum in iter.into_iter() {
count += 1;
self.push(datum)?;
}
Ok(self.update_capacity(count))
}
///
pub fn give_batch(&mut self, batch: Vec<D>) -> IOResult<bool> {
self.flush()?;
let size = batch.len();
self.output.push(DataSet::new(self.tag.clone(), batch))?;
self.output.flush()?;
Ok(self.update_capacity(size))
}
#[inline]
pub fn transmit_end(mut self) -> IOResult<()> {
self.flush()?;
self.output.transmit_end(self.tag.clone())?;
Ok(())
}
#[inline]
pub fn has_capacity(&self) -> bool {
self.check_capacity()
}
pub fn flush(&mut self) -> IOResult<()> {
if !self.buffer.is_empty() {
let size = self.buffer.len();
let msgs = ::std::mem::replace(&mut self.buffer,
Vec::with_capacity(size));
self.output.push(DataSet::new(self.tag.clone(), msgs))?;
self.output.flush()?;
| {
for item in iter {
self.push(item)?;
}
Ok(true)
} | conditional_block |
output.rs | new(Vec::new())),
events_buf: events_buf.clone()
}
}
pub fn add_push<P>(&self, ch_id: ChannelId, local: bool, push: P) where P: Push<DataSet<D>> + 'static {
self.shared.borrow_mut().push((Box::new(push), ch_id, local));
}
pub fn build_tee(self) -> WrappedTee<DataSet<D>> {
let mut pushes = Vec::new();
let mut ch_ids = Vec::new();
{
let mut shared = self.shared.borrow_mut();
for (p, c, l) in shared.drain(..) {
pushes.push(p);
ch_ids.push((c, l));
}
}
let tee = Tee::<DataSet<D>>::from(pushes);
WrappedTee::new(self.worker, tee, ch_ids, &self.events_buf)
}
}
impl<D: Data> Clone for OutputBuilder<D> {
fn clone(&self) -> Self {
OutputBuilder {
batch_size: self.batch_size,
worker: self.worker,
port: self.port,
shared: self.shared.clone(),
events_buf: self.events_buf.clone()
}
}
}
pub trait TaggedOutput: AsAny + Send {
fn set_output_capacity(&mut self, capacity: usize);
fn has_capacity(&self) -> bool;
fn clear_capacity(&mut self);
fn transmit_end(&mut self, tag: Tag) -> IOResult<()>;
fn delta(&self) -> &OutputDelta;
fn close(&mut self) -> IOResult<()>;
fn is_closed(&self) -> bool;
}
pub trait TaggedOutputBuilder {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput>;
}
pub struct OutputHandle<D: Data> {
pub port: Port,
pub delta: OutputDelta,
inner: WrappedTee<DataSet<D>>,
capacity: Option<usize>,
batch_size: usize,
poisoned: bool
}
impl<D: Data> AsAny for OutputHandle<D> {
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
fn as_any_ref(&self) -> &dyn Any {
self
}
}
impl<D: Data> TaggedOutput for OutputHandle<D> {
#[inline]
fn set_output_capacity(&mut self, capacity: usize) {
self.capacity.replace(capacity);
}
#[inline]
fn has_capacity(&self) -> bool {
if let Some(left) = self.capacity.as_ref() {
*left > 0
} else {
true
}
}
#[inline]
fn clear_capacity(&mut self) {
self.capacity = None;
}
#[inline]
fn transmit_end(&mut self, tag: Tag) -> IOResult<()> {
//let matched = self.match_output(&tag).unwrap_or(tag);
self.inner.transmit_end(tag)
}
#[inline]
fn delta(&self) -> &OutputDelta {
&self.delta
}
#[inline]
fn close(&mut self) -> IOResult<()> {
if !self.poisoned {
trace!("Worker[{}], output[{:?}] is closing ...", self.inner.worker, self.port);
self.poisoned = true;
self.inner.close()?;
}
Ok(())
}
#[inline]
fn is_closed(&self) -> bool {
self.poisoned
}
}
impl<D: Data> OutputHandle<D> {
pub fn new(output: WrappedTee<DataSet<D>>, batch: usize, port: Port, delta: OutputDelta) -> Self {
OutputHandle {
port,
delta,
inner: output,
capacity: None,
batch_size: batch,
poisoned: false
}
}
pub fn downcast(origin: &mut Box<dyn TaggedOutput>) -> &mut Self {
// TODO: handle downcast failure
origin.as_any_mut().downcast_mut::<Self>().expect("Downcast to OutputHandle failure")
}
#[inline]
fn match_output(&self, tag: &Tag) -> Option<Tag> {
match self.delta {
OutputDelta::None => None,
OutputDelta::Advance => Some(tag.advance()),
OutputDelta::ToParent => Some(tag.to_parent()),
OutputDelta::ToChild => Some(Tag::from(tag, 0))
}
}
}
impl<D: Data> TaggedOutputBuilder for OutputBuilder<D> {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput> {
let batch_size = self.batch_size;
let port = self.port;
let tee = self.build_tee();
let output = OutputHandle::new(tee, batch_size, port, delta);
Box::new(output) as Box<dyn TaggedOutput>
}
}
pub struct Session<'a, D: Data> {
output: &'a mut WrappedTee<DataSet<D>>,
capacity: Option<&'a mut usize>,
batch_size: usize,
tag: Tag,
buffer: Vec<D>
}
impl<'a, D: Data> Session<'a, D> {
pub fn new(output: &'a mut WrappedTee<DataSet<D>>, tag: Tag, batch: usize, capacity: Option<&'a mut usize>) -> Self {
Session {
output,
capacity,
batch_size: batch,
tag,
buffer: Vec::with_capacity(batch),
}
}
/// Output one message; on success, the returned boolean indicates whether output capacity is still available.
pub fn give(&mut self, msg: D) -> IOResult<bool> {
self.push(msg)?;
Ok(self.update_capacity(1))
}
pub fn give_iterator<I: Iterator<Item = D>>(&mut self, iter: &mut I) -> IOResult<bool> {
if let Some(capacity) = self.capacity.as_ref().map(|c| **c) {
let mut count = 0;
while count < capacity {
if let Some(item) = iter.next() {
self.push(item)?;
} else {
break
}
count += 1;
}
Ok(self.update_capacity(count))
} else {
for item in iter {
self.push(item)?;
}
Ok(true)
}
}
pub fn give_entire_iterator<I: IntoIterator<Item = D>>(&mut self, iter: I) -> IOResult<bool> {
let mut count = 0;
for datum in iter.into_iter() {
count += 1;
self.push(datum)?;
}
Ok(self.update_capacity(count))
}
///
pub fn give_batch(&mut self, batch: Vec<D>) -> IOResult<bool> {
self.flush()?;
let size = batch.len();
self.output.push(DataSet::new(self.tag.clone(), batch))?;
self.output.flush()?;
Ok(self.update_capacity(size))
}
#[inline]
pub fn transmit_end(mut self) -> IOResult<()> {
self.flush()?;
self.output.transmit_end(self.tag.clone())?;
Ok(())
}
#[inline]
pub fn has_capacity(&self) -> bool {
self.check_capacity()
}
pub fn flush(&mut self) -> IOResult<()> {
if !self.buffer.is_empty() {
let size = self.buffer.len();
let msgs = ::std::mem::replace(&mut self.buffer,
Vec::with_capacity(size));
self.output.push(DataSet::new(self.tag.clone(), msgs))?;
self.output.flush()?;
}
Ok(())
}
#[inline]
fn push(&mut self, msg: D) -> IOResult<()> {
self.buffer.push(msg);
if self.buffer.len() == self.batch_size {
self.flush()?;
}
Ok(())
}
/// Update the output channel's remaining capacity; returns whether any capacity is left.
#[inline]
fn update_capacity(&mut self, decr: usize) -> bool {
if let Some(ref mut ca) = self.capacity {
if **ca <= decr {
**ca = 0;
false
} else {
**ca -= decr;
true
}
} else {
true
}
}
/// Return `true` if there is capacity left;
#[inline]
fn check_capacity(&self) -> bool {
self.capacity.as_ref().map(|ca| **ca > 0).unwrap_or(true)
}
}
impl<'a, D: Data> Drop for Session<'a, D> {
fn drop(&mut self) {
match self.flush() {
Ok(_) => (),
Err(e) => {
error!("Session flush failed, caused by {:?}", e);
}
}
}
}
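// Hypothetical usage sketch (not taken from the original source): a producer could stop
// emitting once the session reports that downstream capacity is exhausted.
//
//   let mut session = output.session(&tag);
//   for item in items {
//       if !session.give(item)? {
//           break; // no output capacity left; resume later
//       }
//   }
//   session.transmit_end()?;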
impl<D: Data> OutputHandle<D> {
#[inline]
pub fn session(&mut self, tag: &Tag) -> Session<D> {
let matched = self.match_output(tag).unwrap_or(tag.clone());
let ca = self.capacity.as_mut();
Session::new(&mut self.inner, matched, self.batch_size, ca)
}
#[inline]
pub fn session_of<T: Into<Tag>>(&mut self, tag: T) -> Session<D> {
let t = tag.into();
let matched = self.match_output(&t).unwrap_or(t);
let ca = self.capacity.as_mut(); | Session::new(&mut self.inner, matched, self.batch_size, ca)
} | random_line_split |
|
output.rs | Tee, Tee};
use crate::common::Port;
/// Describing how dataset tags will be changed when output from an output port.
///
/// # Please note!
/// Since we have just a few built-in operators that manipulate dataset tags, for
/// simplicity `OutputDelta` is defined per output port and applies to all inputs.
/// For example, to generate an output dataset, if a binary operator advances the
/// dataset tag of one input, it must do the same to the other input.
#[derive(Clone, Copy, Debug)]
pub enum OutputDelta {
/// Dataset tag won't be changed.
None,
/// Advance the current counter of the tag; usually the loop body output
/// of LoopController.
Advance,
/// Add a new dimension to the tag; usually the EnterScope operator.
ToChild,
/// Remove the current dimension of the tag; usually the LeaveScope operator.
ToParent,
}
impl OutputDelta {
pub fn matcher_of(&self, tag: &Tag) -> TagMatcher {
match self {
OutputDelta::None => TagMatcher::Equals(tag.clone()),
OutputDelta::Advance => TagMatcher::Equals(tag.retreat()),
OutputDelta::ToChild => TagMatcher::Equals(tag.to_parent()),
OutputDelta::ToParent => TagMatcher::Prefix(tag.clone()),
}
}
}
pub struct OutputBuilder<D> {
pub batch_size : usize,
pub worker: WorkerId,
pub port: Port,
shared: Rc<RefCell<Vec<(Box<dyn Push<DataSet<D>>>, ChannelId, bool)>>>,
events_buf: EventsBuffer
}
impl<D: Data> OutputBuilder<D> {
pub fn new(batch: usize, worker: WorkerId, port: Port, events_buf: &EventsBuffer) -> Self {
OutputBuilder {
batch_size: batch,
worker,
port,
shared: Rc::new(RefCell::new(Vec::new())),
events_buf: events_buf.clone()
}
}
pub fn add_push<P>(&self, ch_id: ChannelId, local: bool, push: P) where P: Push<DataSet<D>> + 'static {
self.shared.borrow_mut().push((Box::new(push), ch_id, local));
}
pub fn build_tee(self) -> WrappedTee<DataSet<D>> {
let mut pushes = Vec::new();
let mut ch_ids = Vec::new();
{
let mut shared = self.shared.borrow_mut();
for (p, c, l) in shared.drain(..) {
pushes.push(p);
ch_ids.push((c, l));
}
}
let tee = Tee::<DataSet<D>>::from(pushes);
WrappedTee::new(self.worker, tee, ch_ids, &self.events_buf)
}
}
impl<D: Data> Clone for OutputBuilder<D> {
fn clone(&self) -> Self {
OutputBuilder {
batch_size: self.batch_size,
worker: self.worker,
port: self.port,
shared: self.shared.clone(),
events_buf: self.events_buf.clone()
}
}
}
pub trait TaggedOutput: AsAny + Send {
fn set_output_capacity(&mut self, capacity: usize);
fn has_capacity(&self) -> bool;
fn clear_capacity(&mut self);
fn transmit_end(&mut self, tag: Tag) -> IOResult<()>;
fn delta(&self) -> &OutputDelta;
fn close(&mut self) -> IOResult<()>;
fn is_closed(&self) -> bool;
}
pub trait TaggedOutputBuilder {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput>;
}
pub struct OutputHandle<D: Data> {
pub port: Port,
pub delta: OutputDelta,
inner: WrappedTee<DataSet<D>>,
capacity: Option<usize>,
batch_size: usize,
poisoned: bool
}
impl<D: Data> AsAny for OutputHandle<D> {
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
fn as_any_ref(&self) -> &dyn Any {
self
}
}
impl<D: Data> TaggedOutput for OutputHandle<D> {
#[inline]
fn set_output_capacity(&mut self, capacity: usize) {
self.capacity.replace(capacity);
}
#[inline]
fn has_capacity(&self) -> bool {
if let Some(left) = self.capacity.as_ref() {
*left > 0
} else {
true
}
}
#[inline]
fn clear_capacity(&mut self) {
self.capacity = None;
}
#[inline]
fn transmit_end(&mut self, tag: Tag) -> IOResult<()> {
//let matched = self.match_output(&tag).unwrap_or(tag);
self.inner.transmit_end(tag)
}
#[inline]
fn delta(&self) -> &OutputDelta {
&self.delta
}
#[inline]
fn close(&mut self) -> IOResult<()> |
#[inline]
fn is_closed(&self) -> bool {
self.poisoned
}
}
impl<D: Data> OutputHandle<D> {
pub fn new(output: WrappedTee<DataSet<D>>, batch: usize, port: Port, delta: OutputDelta) -> Self {
OutputHandle {
port,
delta,
inner: output,
capacity: None,
batch_size: batch,
poisoned: false
}
}
pub fn downcast(origin: &mut Box<dyn TaggedOutput>) -> &mut Self {
// TODO: handle downcast failure
origin.as_any_mut().downcast_mut::<Self>().expect("Downcast to OutputHandle failure")
}
#[inline]
fn match_output(&self, tag: &Tag) -> Option<Tag> {
match self.delta {
OutputDelta::None => None,
OutputDelta::Advance => Some(tag.advance()),
OutputDelta::ToParent => Some(tag.to_parent()),
OutputDelta::ToChild => Some(Tag::from(tag, 0))
}
}
}
impl<D: Data> TaggedOutputBuilder for OutputBuilder<D> {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput> {
let batch_size = self.batch_size;
let port = self.port;
let tee = self.build_tee();
let output = OutputHandle::new(tee, batch_size, port, delta);
Box::new(output) as Box<dyn TaggedOutput>
}
}
pub struct Session<'a, D: Data> {
output: &'a mut WrappedTee<DataSet<D>>,
capacity: Option<&'a mut usize>,
batch_size: usize,
tag: Tag,
buffer: Vec<D>
}
impl<'a, D: Data> Session<'a, D> {
pub fn new(output: &'a mut WrappedTee<DataSet<D>>, tag: Tag, batch: usize, capacity: Option<&'a mut usize>) -> Self {
Session {
output,
capacity,
batch_size: batch,
tag,
buffer: Vec::with_capacity(batch),
}
}
/// Output one message; on success, the returned boolean indicates whether output capacity is still available.
pub fn give(&mut self, msg: D) -> IOResult<bool> {
self.push(msg)?;
Ok(self.update_capacity(1))
}
pub fn give_iterator<I: Iterator<Item = D>>(&mut self, iter: &mut I) -> IOResult<bool> {
if let Some(capacity) = self.capacity.as_ref().map(|c| **c) {
let mut count = 0;
while count < capacity {
if let Some(item) = iter.next() {
self.push(item)?;
} else {
break
}
count += 1;
}
Ok(self.update_capacity(count))
} else {
for item in iter {
self.push(item)?;
}
Ok(true)
}
}
pub fn give_entire_iterator<I: IntoIterator<Item = D>>(&mut self, iter: I) -> IOResult<bool> {
let mut count = 0;
for datum in iter.into_iter() {
count += 1;
self.push(datum)?;
}
Ok(self.update_capacity(count))
}
///
pub fn give_batch(&mut self, batch: Vec<D>) -> IOResult<bool> {
self.flush()?;
let size = batch.len();
self.output.push(DataSet::new(self.tag.clone(), batch))?;
self.output.flush()?;
Ok(self.update_capacity(size))
}
#[inline]
pub fn transmit_end(mut self) -> IOResult<()> {
self.flush()?;
self.output.transmit_end(self.tag.clone())?;
Ok(())
}
#[inline]
pub fn has_capacity(&self) -> bool {
self.check_capacity()
}
pub fn flush(&mut self) -> IOResult<()> {
if !self.buffer.is_empty() {
let size = self.buffer.len();
let msgs = ::std::mem::replace(&mut self.buffer,
Vec::with_capacity(size));
self.output.push(DataSet::new(self.tag.clone(), msgs))?;
self.output.flush()?;
| {
if !self.poisoned {
trace!("Worker[{}], output[{:?}] is closing ...", self.inner.worker, self.port);
self.poisoned = true;
self.inner.close()?;
}
Ok(())
} | identifier_body |
output.rs | Tee, Tee};
use crate::common::Port;
/// Describing how dataset tags will be changed when output from an output port.
///
/// # Please note!
/// Since we have just a few built-in operators that manipulate dataset tags, for
/// simplicity `OutputDelta` is defined per output port and applies to all inputs.
/// For example, to generate an output dataset, if a binary operator advances the
/// dataset tag of one input, it must do the same to the other input.
#[derive(Clone, Copy, Debug)]
pub enum OutputDelta {
/// Dataset tag won't be changed.
None,
/// Advance the current counter of the tag; usually the loop body output
/// of LoopController.
Advance,
/// Add a new dimension to the tag; usually the EnterScope operator.
ToChild,
/// Remove the current dimension of the tag; usually the LeaveScope operator.
ToParent,
}
impl OutputDelta {
pub fn matcher_of(&self, tag: &Tag) -> TagMatcher {
match self {
OutputDelta::None => TagMatcher::Equals(tag.clone()),
OutputDelta::Advance => TagMatcher::Equals(tag.retreat()),
OutputDelta::ToChild => TagMatcher::Equals(tag.to_parent()),
OutputDelta::ToParent => TagMatcher::Prefix(tag.clone()),
}
}
}
pub struct OutputBuilder<D> {
pub batch_size : usize,
pub worker: WorkerId,
pub port: Port,
shared: Rc<RefCell<Vec<(Box<dyn Push<DataSet<D>>>, ChannelId, bool)>>>,
events_buf: EventsBuffer
}
impl<D: Data> OutputBuilder<D> {
pub fn new(batch: usize, worker: WorkerId, port: Port, events_buf: &EventsBuffer) -> Self {
OutputBuilder {
batch_size: batch,
worker,
port,
shared: Rc::new(RefCell::new(Vec::new())),
events_buf: events_buf.clone()
}
}
pub fn add_push<P>(&self, ch_id: ChannelId, local: bool, push: P) where P: Push<DataSet<D>> + 'static {
self.shared.borrow_mut().push((Box::new(push), ch_id, local));
}
pub fn build_tee(self) -> WrappedTee<DataSet<D>> {
let mut pushes = Vec::new();
let mut ch_ids = Vec::new();
{
let mut shared = self.shared.borrow_mut();
for (p, c, l) in shared.drain(..) {
pushes.push(p);
ch_ids.push((c, l));
}
}
let tee = Tee::<DataSet<D>>::from(pushes);
WrappedTee::new(self.worker, tee, ch_ids, &self.events_buf)
}
}
impl<D: Data> Clone for OutputBuilder<D> {
fn clone(&self) -> Self {
OutputBuilder {
batch_size: self.batch_size,
worker: self.worker,
port: self.port,
shared: self.shared.clone(),
events_buf: self.events_buf.clone()
}
}
}
pub trait TaggedOutput: AsAny + Send {
fn set_output_capacity(&mut self, capacity: usize);
fn has_capacity(&self) -> bool;
fn clear_capacity(&mut self);
fn transmit_end(&mut self, tag: Tag) -> IOResult<()>;
fn delta(&self) -> &OutputDelta;
fn close(&mut self) -> IOResult<()>;
fn is_closed(&self) -> bool;
}
pub trait TaggedOutputBuilder {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput>;
}
pub struct OutputHandle<D: Data> {
pub port: Port,
pub delta: OutputDelta,
inner: WrappedTee<DataSet<D>>,
capacity: Option<usize>,
batch_size: usize,
poisoned: bool
}
impl<D: Data> AsAny for OutputHandle<D> {
fn | (&mut self) -> &mut dyn Any {
self
}
fn as_any_ref(&self) -> &dyn Any {
self
}
}
impl<D: Data> TaggedOutput for OutputHandle<D> {
#[inline]
fn set_output_capacity(&mut self, capacity: usize) {
self.capacity.replace(capacity);
}
#[inline]
fn has_capacity(&self) -> bool {
if let Some(left) = self.capacity.as_ref() {
*left > 0
} else {
true
}
}
#[inline]
fn clear_capacity(&mut self) {
self.capacity = None;
}
#[inline]
fn transmit_end(&mut self, tag: Tag) -> IOResult<()> {
//let matched = self.match_output(&tag).unwrap_or(tag);
self.inner.transmit_end(tag)
}
#[inline]
fn delta(&self) -> &OutputDelta {
&self.delta
}
#[inline]
fn close(&mut self) -> IOResult<()> {
if !self.poisoned {
trace!("Worker[{}], output[{:?}] is closing ...", self.inner.worker, self.port);
self.poisoned = true;
self.inner.close()?;
}
Ok(())
}
#[inline]
fn is_closed(&self) -> bool {
self.poisoned
}
}
impl<D: Data> OutputHandle<D> {
pub fn new(output: WrappedTee<DataSet<D>>, batch: usize, port: Port, delta: OutputDelta) -> Self {
OutputHandle {
port,
delta,
inner: output,
capacity: None,
batch_size: batch,
poisoned: false
}
}
pub fn downcast(origin: &mut Box<dyn TaggedOutput>) -> &mut Self {
// TODO: handle downcast failure
origin.as_any_mut().downcast_mut::<Self>().expect("Downcast to OutputHandle failure")
}
#[inline]
fn match_output(&self, tag: &Tag) -> Option<Tag> {
match self.delta {
OutputDelta::None => None,
OutputDelta::Advance => Some(tag.advance()),
OutputDelta::ToParent => Some(tag.to_parent()),
OutputDelta::ToChild => Some(Tag::from(tag, 0))
}
}
}
impl<D: Data> TaggedOutputBuilder for OutputBuilder<D> {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput> {
let batch_size = self.batch_size;
let port = self.port;
let tee = self.build_tee();
let output = OutputHandle::new(tee, batch_size, port, delta);
Box::new(output) as Box<dyn TaggedOutput>
}
}
pub struct Session<'a, D: Data> {
output: &'a mut WrappedTee<DataSet<D>>,
capacity: Option<&'a mut usize>,
batch_size: usize,
tag: Tag,
buffer: Vec<D>
}
impl<'a, D: Data> Session<'a, D> {
pub fn new(output: &'a mut WrappedTee<DataSet<D>>, tag: Tag, batch: usize, capacity: Option<&'a mut usize>) -> Self {
Session {
output,
capacity,
batch_size: batch,
tag,
buffer: Vec::with_capacity(batch),
}
}
/// Output one message; on success, the returned boolean indicates whether output capacity is still available.
pub fn give(&mut self, msg: D) -> IOResult<bool> {
self.push(msg)?;
Ok(self.update_capacity(1))
}
pub fn give_iterator<I: Iterator<Item = D>>(&mut self, iter: &mut I) -> IOResult<bool> {
if let Some(capacity) = self.capacity.as_ref().map(|c| **c) {
let mut count = 0;
while count < capacity {
if let Some(item) = iter.next() {
self.push(item)?;
} else {
break
}
count += 1;
}
Ok(self.update_capacity(count))
} else {
for item in iter {
self.push(item)?;
}
Ok(true)
}
}
pub fn give_entire_iterator<I: IntoIterator<Item = D>>(&mut self, iter: I) -> IOResult<bool> {
let mut count = 0;
for datum in iter.into_iter() {
count += 1;
self.push(datum)?;
}
Ok(self.update_capacity(count))
}
///
pub fn give_batch(&mut self, batch: Vec<D>) -> IOResult<bool> {
self.flush()?;
let size = batch.len();
self.output.push(DataSet::new(self.tag.clone(), batch))?;
self.output.flush()?;
Ok(self.update_capacity(size))
}
#[inline]
pub fn transmit_end(mut self) -> IOResult<()> {
self.flush()?;
self.output.transmit_end(self.tag.clone())?;
Ok(())
}
#[inline]
pub fn has_capacity(&self) -> bool {
self.check_capacity()
}
pub fn flush(&mut self) -> IOResult<()> {
if !self.buffer.is_empty() {
let size = self.buffer.len();
let msgs = ::std::mem::replace(&mut self.buffer,
Vec::with_capacity(size));
self.output.push(DataSet::new(self.tag.clone(), msgs))?;
self.output.flush()?;
}
| as_any_mut | identifier_name |
main.rs | 2) -> f32 {
let h = (0.5 - 0.5 * (other + self) / k).max(0.0).min(1.0);
mix(*self, -other, h) + k * h * (1.0 - h)
}
fn smooth_union(&self, k: f32, other: f32) -> f32 {
let h = (0.5 + 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) - k * h * (1.0 - h)
}
}
fn mix(a: f32, b: f32, x: f32) -> f32 {
(1. - x) * a + x * b
}
struct Shape {
seed: f64,
}
impl Source for Shape {
fn sample(&self, x: f32, y: f32, z: f32) -> f32 {
let p = Vector3::new(x, y, z);
let mut s = 999.;
let mut rng = rng_from_seed(self.seed);
let count = rng.gen_range(5, 80);
let max_size = 0.2;
let v = rng.gen_range(0.1, 0.9);
for _i in 0..count {
let a = Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
);
let b = if rng.gen_bool(v) {
a
} else {
Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
)
};
s = s.smooth_union(
rng.gen_range(0.0, 0.1),
sd_capsule(p, a, b, max_size * rng.gen_range(0.2, 1.0)),
);
}
s
}
}
fn make_triangles_from_vertices_indices(
vert: &Vec<f32>,
idx: &Vec<u32>,
) -> Vec<Tri> {
let mut triangles = vec![];
for face in idx.chunks(3) {
let i1 = face[0] as usize;
let i2 = face[1] as usize;
let i3 = face[2] as usize;
let v1 = Point3::new(vert[i1 * 3], vert[i1 * 3 + 1], vert[i1 * 3 + 2]);
let v2 = Point3::new(vert[i2 * 3], vert[i2 * 3 + 1], vert[i2 * 3 + 2]);
let v3 = Point3::new(vert[i3 * 3], vert[i3 * 3 + 1], vert[i3 * 3 + 2]);
triangles.push(Tri::new(v3, v2, v1));
}
triangles
}
#[derive(Debug, Clone)]
struct Tri {
v1: Point3<f32>,
v2: Point3<f32>,
v3: Point3<f32>,
}
impl Sub<Vector3<f32>> for Tri {
type Output = Tri;
fn sub(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 - v,
v2: self.v2 - v,
v3: self.v3 - v,
}
} | fn add(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 + v,
v2: self.v2 + v,
v3: self.v3 + v,
}
}
}
impl Mul<Tri> for f32 {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Mul<Tri> for Rotation3<f32> {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Tri {
fn new(v1: Point3<f32>, v2: Point3<f32>, v3: Point3<f32>) -> Self {
Tri { v1, v2, v3 }
}
}
struct Camera {
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn new(aspect: f32, fovy: f32, znear: f32, zfar: f32) -> Self {
Camera {
aspect,
fovy,
znear,
zfar,
}
}
fn project(&self, tri: &Tri) -> Tri {
let proj = Perspective3::new(self.aspect, self.fovy, self.znear, self.zfar);
Tri {
v1: proj.project_point(&tri.v1),
v2: proj.project_point(&tri.v2),
v3: proj.project_point(&tri.v3),
}
}
}
fn art(opts: &Opts) -> Vec<Group> {
let width = opts.width;
let height = opts.height;
let mut rng = rng_from_seed(opts.seed);
let grid_size = rng.gen_range(8, 32);
let mut vertices = vec![];
let mut indices = vec![];
let source = Shape { seed: opts.seed };
let mut marching = MarchingCubes::new(grid_size);
marching.extract(&source, &mut vertices, &mut indices);
let triangles = make_triangles_from_vertices_indices(&vertices, &indices);
// project triangles to 2D with a camera
let dist = 1.0;
let cam = Camera::new((width / height) as f32, 2.2, 0.0, 8.0);
let rot =
Rotation3::from_axis_angle(&Vector3::z_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::y_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::x_axis(), rng.gen_range(-PI, PI));
let mut projected = triangles
.iter()
.map(|tri| {
let t = tri.clone() + Vector3::new(-0.5, -0.5, -0.5);
let t = rot * t;
let t = t + Vector3::new(0., 0., -dist);
cam.project(&t)
})
.collect::<Vec<_>>();
// sort by z-index
let mut data = projected
.iter()
.map(|tri| {
let z = tri.v1.z + tri.v2.z + tri.v3.z;
(tri.clone(), z)
})
.collect::<Vec<(Tri, f32)>>();
data.sort_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap());
projected = data.iter().map(|(tri, _)| tri.clone()).collect::<Vec<_>>();
let mut passage = Passage::new(0.5, width, height);
let mut routes: Vec<Vec<(f64, f64)>> = vec![];
let mut polygons = vec![];
let translate = 0.5;
for tri in projected {
let points: Vec<(f64, f64)> = vec![tri.v1, tri.v2, tri.v3]
.iter()
.map(|p| {
(
(p.x as f64 + translate) * width,
(p.y as f64 + translate) * height,
)
})
.collect();
// quick hack: triangles are small enough to ignore cases where they partially overlap
let center = centroid(&points);
let hidden = is_inside_polygons(center, &polygons);
if hidden {
continue;
}
if passage.count(center) > 10 {
continue;
}
let dr = rng.gen_range(0.3, 0.4);
// stretch the spiral based on the polygon shape
routes.push(spiral_optimized_in_triangle(&points, dr));
// routes.push(points.clone());
polygons.push(points);
}
// TESTS
/*
routes = vec![];
for x in 0..3 {
for y in 0..5 {
// rng in cell
let points = vec![
((x as f64+rng.gen_range(0.0, 0.8)) * width / 3.0, (y as f6 | }
impl Add<Vector3<f32>> for Tri {
type Output = Tri;
| random_line_split |
main.rs | 0, 8.0);
let rot =
Rotation3::from_axis_angle(&Vector3::z_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::y_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::x_axis(), rng.gen_range(-PI, PI));
let mut projected = triangles
.iter()
.map(|tri| {
let t = tri.clone() + Vector3::new(-0.5, -0.5, -0.5);
let t = rot * t;
let t = t + Vector3::new(0., 0., -dist);
cam.project(&t)
})
.collect::<Vec<_>>();
// sort by z-index
let mut data = projected
.iter()
.map(|tri| {
let z = tri.v1.z + tri.v2.z + tri.v3.z;
(tri.clone(), z)
})
.collect::<Vec<(Tri, f32)>>();
data.sort_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap());
projected = data.iter().map(|(tri, _)| tri.clone()).collect::<Vec<_>>();
let mut passage = Passage::new(0.5, width, height);
let mut routes: Vec<Vec<(f64, f64)>> = vec![];
let mut polygons = vec![];
let translate = 0.5;
for tri in projected {
let points: Vec<(f64, f64)> = vec![tri.v1, tri.v2, tri.v3]
.iter()
.map(|p| {
(
(p.x as f64 + translate) * width,
(p.y as f64 + translate) * height,
)
})
.collect();
// quick hack. triangles are small enough to ignore cases where it partially overlaps
let center = centroid(&points);
let hidden = is_inside_polygons(center, &polygons);
if hidden {
continue;
}
if passage.count(center) > 10 {
continue;
}
let dr = rng.gen_range(0.3, 0.4);
// stretch the spiral based on the polygon shape
routes.push(spiral_optimized_in_triangle(&points, dr));
// routes.push(points.clone());
polygons.push(points);
}
// TESTS
/*
routes = vec![];
for x in 0..3 {
for y in 0..5 {
// rng in cell
let points = vec![
((x as f64+rng.gen_range(0.0, 0.8)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.2, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 0.8)) * height / 5.0),
];
routes.push(
vec![points.clone(), vec![points[0]]].concat());
routes.push(
spiral_optimized_in_triangle(&points, 1.0, 0.1));
}
}
*/
vec![("black", routes)]
.iter()
.enumerate()
.map(|(i, (color, routes))| {
let mut data = Data::new();
for route in routes.clone() {
data = render_route(data, route);
}
let mut l = layer(format!("{} {}", i, String::from(*color)).as_str());
l = l.add(base_path(color, 0.3, data));
l
})
.collect()
}
fn is_inside_a_polygon(p: (f64, f64), polygon: &Vec<(f64, f64)>) -> bool {
let mut inside = false;
let mut j = polygon.len() - 1;
for i in 0..polygon.len() {
let pi = polygon[i];
let pj = polygon[j];
if (pi.1 > p.1) != (pj.1 > p.1)
&& p.0 < (pj.0 - pi.0) * (p.1 - pi.1) / (pj.1 - pi.1) + pi.0
{
inside = !inside;
}
j = i;
}
inside
}
fn is_inside_polygons(p: (f64, f64), polygons: &Vec<Vec<(f64, f64)>>) -> bool {
for polygon in polygons {
if is_inside_a_polygon(p, polygon) {
return true;
}
}
false
}
fn main() {
let opts: Opts = Opts::parse();
let groups = art(&opts);
let mut document = base_document("yellow", opts.width, opts.height);
for g in groups {
document = document.add(g);
}
svg::save(opts.file, &document).unwrap();
}
fn centroid(points: &Vec<(f64, f64)>) -> (f64, f64) {
let mut x = 0.0;
let mut y = 0.0;
for (x_, y_) in points {
x += x_;
y += y_;
}
(x / points.len() as f64, y / points.len() as f64)
}
#[derive(Clone)]
struct Passage {
precision: f64,
width: f64,
height: f64,
counters: Vec<usize>,
}
impl Passage {
pub fn new(precision: f64, width: f64, height: f64) -> Self {
let wi = (width / precision).ceil() as usize;
let hi = (height / precision).ceil() as usize;
let counters = vec![0; wi * hi];
Passage {
precision,
width,
height,
counters,
}
}
fn index(self: &Self, (x, y): (f64, f64)) -> usize {
let wi = (self.width / self.precision).ceil() as usize;
let hi = (self.height / self.precision).ceil() as usize;
let xi = ((x / self.precision).round() as usize).max(0).min(wi - 1);
let yi = ((y / self.precision).round() as usize).max(0).min(hi - 1);
yi * wi + xi
}
pub fn count(self: &mut Self, p: (f64, f64)) -> usize {
let i = self.index(p);
let v = self.counters[i] + 1;
self.counters[i] = v;
v
}
}
fn spiral_optimized_in_triangle(
points: &Vec<(f64, f64)>,
dr: f64,
) -> Vec<(f64, f64)> {
let mut pts = vec![];
for i in 0..3 {
let a = points[i];
let b = points[(i + 1) % 3];
pts.push(((a.0 + b.0) * 0.5, (a.1 + b.1) * 0.5));
}
let center = centroid(&pts);
let d = pts
.iter()
.map(|&p| euclidian_dist(p, center))
.min_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap();
if d < 3.0 * dr {
return vec![];
}
let radius = 0.9 * d;
let (x, y) = center;
spiral_optimized(x, y, radius, dr)
}
fn spiral_optimized(x: f64, y: f64, radius: f64, dr: f64) -> Vec<(f64, f64)> {
let approx = 0.05;
let extra = 0.5;
let two_pi = 2.0 * std::f64::consts::PI;
let mut route = Vec::new();
let mut r = radius + extra;
let mut a = 0f64;
loop {
let mr = r.min(radius);
let p = round_point((x + mr * a.cos(), y + mr * a.sin()), 0.01);
let l = route.len();
if l == 0 || euclidian_dist(route[l - 1], p) > approx {
route.push(p);
}
let da = 1.0 / (r + 8.0); // the bigger the radius, the more angle iterations we need
a = (a + da) % two_pi;
r -= dr * da / two_pi;
if r < approx | {
break;
} | conditional_block |
|
main.rs | _range(0.0, 0.1),
sd_capsule(p, a, b, max_size * rng.gen_range(0.2, 1.0)),
);
}
s
}
}
fn make_triangles_from_vertices_indices(
vert: &Vec<f32>,
idx: &Vec<u32>,
) -> Vec<Tri> {
let mut triangles = vec![];
for face in idx.chunks(3) {
let i1 = face[0] as usize;
let i2 = face[1] as usize;
let i3 = face[2] as usize;
let v1 = Point3::new(vert[i1 * 3], vert[i1 * 3 + 1], vert[i1 * 3 + 2]);
let v2 = Point3::new(vert[i2 * 3], vert[i2 * 3 + 1], vert[i2 * 3 + 2]);
let v3 = Point3::new(vert[i3 * 3], vert[i3 * 3 + 1], vert[i3 * 3 + 2]);
triangles.push(Tri::new(v3, v2, v1));
}
triangles
}
#[derive(Debug, Clone)]
struct Tri {
v1: Point3<f32>,
v2: Point3<f32>,
v3: Point3<f32>,
}
impl Sub<Vector3<f32>> for Tri {
type Output = Tri;
fn sub(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 - v,
v2: self.v2 - v,
v3: self.v3 - v,
}
}
}
impl Add<Vector3<f32>> for Tri {
type Output = Tri;
fn add(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 + v,
v2: self.v2 + v,
v3: self.v3 + v,
}
}
}
impl Mul<Tri> for f32 {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Mul<Tri> for Rotation3<f32> {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Tri {
fn new(v1: Point3<f32>, v2: Point3<f32>, v3: Point3<f32>) -> Self {
Tri { v1, v2, v3 }
}
}
struct Camera {
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn new(aspect: f32, fovy: f32, znear: f32, zfar: f32) -> Self {
Camera {
aspect,
fovy,
znear,
zfar,
}
}
fn project(&self, tri: &Tri) -> Tri {
let proj = Perspective3::new(self.aspect, self.fovy, self.znear, self.zfar);
Tri {
v1: proj.project_point(&tri.v1),
v2: proj.project_point(&tri.v2),
v3: proj.project_point(&tri.v3),
}
}
}
fn art(opts: &Opts) -> Vec<Group> {
let width = opts.width;
let height = opts.height;
let mut rng = rng_from_seed(opts.seed);
let grid_size = rng.gen_range(8, 32);
let mut vertices = vec![];
let mut indices = vec![];
let source = Shape { seed: opts.seed };
let mut marching = MarchingCubes::new(grid_size);
marching.extract(&source, &mut vertices, &mut indices);
let triangles = make_triangles_from_vertices_indices(&vertices, &indices);
// project triangles to 2D with a camera
let dist = 1.0;
let cam = Camera::new((width / height) as f32, 2.2, 0.0, 8.0);
let rot =
Rotation3::from_axis_angle(&Vector3::z_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::y_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::x_axis(), rng.gen_range(-PI, PI));
let mut projected = triangles
.iter()
.map(|tri| {
let t = tri.clone() + Vector3::new(-0.5, -0.5, -0.5);
let t = rot * t;
let t = t + Vector3::new(0., 0., -dist);
cam.project(&t)
})
.collect::<Vec<_>>();
// sort by z-index
let mut data = projected
.iter()
.map(|tri| {
let z = tri.v1.z + tri.v2.z + tri.v3.z;
(tri.clone(), z)
})
.collect::<Vec<(Tri, f32)>>();
data.sort_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap());
projected = data.iter().map(|(tri, _)| tri.clone()).collect::<Vec<_>>();
let mut passage = Passage::new(0.5, width, height);
let mut routes: Vec<Vec<(f64, f64)>> = vec![];
let mut polygons = vec![];
let translate = 0.5;
for tri in projected {
let points: Vec<(f64, f64)> = vec![tri.v1, tri.v2, tri.v3]
.iter()
.map(|p| {
(
(p.x as f64 + translate) * width,
(p.y as f64 + translate) * height,
)
})
.collect();
// quick hack: triangles are small enough to ignore cases where they partially overlap
let center = centroid(&points);
let hidden = is_inside_polygons(center, &polygons);
if hidden {
continue;
}
if passage.count(center) > 10 {
continue;
}
let dr = rng.gen_range(0.3, 0.4);
// stretch the spiral based on the polygon shape
routes.push(spiral_optimized_in_triangle(&points, dr));
// routes.push(points.clone());
polygons.push(points);
}
// TESTS
/*
routes = vec![];
for x in 0..3 {
for y in 0..5 {
// rng in cell
let points = vec![
((x as f64+rng.gen_range(0.0, 0.8)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.2, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 0.8)) * height / 5.0),
];
routes.push(
vec![points.clone(), vec![points[0]]].concat());
routes.push(
spiral_optimized_in_triangle(&points, 1.0, 0.1));
}
}
*/
vec![("black", routes)]
.iter()
.enumerate()
.map(|(i, (color, routes))| {
let mut data = Data::new();
for route in routes.clone() {
data = render_route(data, route);
}
let mut l = layer(format!("{} {}", i, String::from(*color)).as_str());
l = l.add(base_path(color, 0.3, data));
l
})
.collect()
}
fn is_inside_a_polygon(p: (f64, f64), polygon: &Vec<(f64, f64)>) -> bool | {
let mut inside = false;
let mut j = polygon.len() - 1;
for i in 0..polygon.len() {
let pi = polygon[i];
let pj = polygon[j];
if (pi.1 > p.1) != (pj.1 > p.1)
&& p.0 < (pj.0 - pi.0) * (p.1 - pi.1) / (pj.1 - pi.1) + pi.0
{
inside = !inside;
}
j = i;
}
inside
} | identifier_body |
|
main.rs | triangles
}
#[derive(Debug, Clone)]
struct Tri {
v1: Point3<f32>,
v2: Point3<f32>,
v3: Point3<f32>,
}
impl Sub<Vector3<f32>> for Tri {
type Output = Tri;
fn sub(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 - v,
v2: self.v2 - v,
v3: self.v3 - v,
}
}
}
impl Add<Vector3<f32>> for Tri {
type Output = Tri;
fn add(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 + v,
v2: self.v2 + v,
v3: self.v3 + v,
}
}
}
impl Mul<Tri> for f32 {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Mul<Tri> for Rotation3<f32> {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Tri {
fn new(v1: Point3<f32>, v2: Point3<f32>, v3: Point3<f32>) -> Self {
Tri { v1, v2, v3 }
}
}
struct Camera {
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn new(aspect: f32, fovy: f32, znear: f32, zfar: f32) -> Self {
Camera {
aspect,
fovy,
znear,
zfar,
}
}
fn project(&self, tri: &Tri) -> Tri {
let proj = Perspective3::new(self.aspect, self.fovy, self.znear, self.zfar);
Tri {
v1: proj.project_point(&tri.v1),
v2: proj.project_point(&tri.v2),
v3: proj.project_point(&tri.v3),
}
}
}
fn art(opts: &Opts) -> Vec<Group> {
let width = opts.width;
let height = opts.height;
let mut rng = rng_from_seed(opts.seed);
let grid_size = rng.gen_range(8, 32);
let mut vertices = vec![];
let mut indices = vec![];
let source = Shape { seed: opts.seed };
let mut marching = MarchingCubes::new(grid_size);
marching.extract(&source, &mut vertices, &mut indices);
let triangles = make_triangles_from_vertices_indices(&vertices, &indices);
// project triangles to 2D with a camera
let dist = 1.0;
let cam = Camera::new((width / height) as f32, 2.2, 0.0, 8.0);
let rot =
Rotation3::from_axis_angle(&Vector3::z_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::y_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::x_axis(), rng.gen_range(-PI, PI));
let mut projected = triangles
.iter()
.map(|tri| {
let t = tri.clone() + Vector3::new(-0.5, -0.5, -0.5);
let t = rot * t;
let t = t + Vector3::new(0., 0., -dist);
cam.project(&t)
})
.collect::<Vec<_>>();
// sort by z-index
let mut data = projected
.iter()
.map(|tri| {
let z = tri.v1.z + tri.v2.z + tri.v3.z;
(tri.clone(), z)
})
.collect::<Vec<(Tri, f32)>>();
data.sort_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap());
projected = data.iter().map(|(tri, _)| tri.clone()).collect::<Vec<_>>();
let mut passage = Passage::new(0.5, width, height);
let mut routes: Vec<Vec<(f64, f64)>> = vec![];
let mut polygons = vec![];
let translate = 0.5;
for tri in projected {
let points: Vec<(f64, f64)> = vec![tri.v1, tri.v2, tri.v3]
.iter()
.map(|p| {
(
(p.x as f64 + translate) * width,
(p.y as f64 + translate) * height,
)
})
.collect();
// quick hack: the triangles are small enough that we can ignore cases where one only partially overlaps
let center = centroid(&points);
let hidden = is_inside_polygons(center, &polygons);
if hidden {
continue;
}
if passage.count(center) > 10 {
continue;
}
let dr = rng.gen_range(0.3, 0.4);
// stretch the spiral based on the polygon shape
routes.push(spiral_optimized_in_triangle(&points, dr));
// routes.push(points.clone());
polygons.push(points);
}
// TESTS
/*
routes = vec![];
for x in 0..3 {
for y in 0..5 {
// rng in cell
let points = vec![
((x as f64+rng.gen_range(0.0, 0.8)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.2, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 0.8)) * height / 5.0),
];
routes.push(
vec![points.clone(), vec![points[0]]].concat());
routes.push(
spiral_optimized_in_triangle(&points, 1.0, 0.1));
}
}
*/
vec![("black", routes)]
.iter()
.enumerate()
.map(|(i, (color, routes))| {
let mut data = Data::new();
for route in routes.clone() {
data = render_route(data, route);
}
let mut l = layer(format!("{} {}", i, String::from(*color)).as_str());
l = l.add(base_path(color, 0.3, data));
l
})
.collect()
}
fn is_inside_a_polygon(p: (f64, f64), polygon: &Vec<(f64, f64)>) -> bool {
let mut inside = false;
let mut j = polygon.len() - 1;
for i in 0..polygon.len() {
let pi = polygon[i];
let pj = polygon[j];
if (pi.1 > p.1) != (pj.1 > p.1)
&& p.0 < (pj.0 - pi.0) * (p.1 - pi.1) / (pj.1 - pi.1) + pi.0
{
inside = !inside;
}
j = i;
}
inside
}
fn is_inside_polygons(p: (f64, f64), polygons: &Vec<Vec<(f64, f64)>>) -> bool {
for polygon in polygons {
if is_inside_a_polygon(p, polygon) {
return true;
}
}
false
}
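// Illustrative usage of the even-odd ray-casting test above (hypothetical
// values, not from the original sketch):
// let square = vec![(0.0, 0.0), (10.0, 0.0), (10.0, 10.0), (0.0, 10.0)];
// assert!(is_inside_a_polygon((5.0, 5.0), &square));
// assert!(!is_inside_a_polygon((15.0, 5.0), &square));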
fn main() {
let opts: Opts = Opts::parse();
let groups = art(&opts);
let mut document = base_document("yellow", opts.width, opts.height);
for g in groups {
document = document.add(g);
}
svg::save(opts.file, &document).unwrap();
}
fn centroid(points: &Vec<(f64, f64)>) -> (f64, f64) {
let mut x = 0.0;
let mut y = 0.0;
for (x_, y_) in points {
x += x_;
y += y_;
}
(x / points.len() as f64, y / points.len() as f64)
}
#[derive(Clone)]
struct Passage {
precision: f64,
width: f64,
height: f64,
counters: Vec<usize>,
}
impl Passage {
pub fn | new | identifier_name |
|
gossip.rs | pair: Arc<Keypair>,
exit: Arc<AtomicBool>,
bank_forks: Arc<RwLock<BankForks>>,
) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let mut test_node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
node_keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
Some(bank_forks),
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
/// Test that the network converges.
/// Run until every node in the network has a full ContactInfo set.
/// Check that nodes stop sending updates after all the ContactInfo has been shared.
/// Tests that actually use this function are below.
fn run_gossip_topo<F>(num: usize, topo: F)
where
F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>),
{
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
topo(&listen);
let mut done = true;
for i in 0..(num * 32) {
done = true;
let total: usize = listen.iter().map(|v| v.0.gossip_peers().len()).sum();
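        // Added note: the check below treats the mesh as converged once the total
        // known-peer count reaches roughly 90% of the full n*n mesh (num * num).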
if (total + num) * 10 > num * num * 9 {
done = true;
break;
} else |
sleep(Duration::from_secs(1));
}
exit.store(true, Ordering::Relaxed);
for (_, dr, _) in listen {
dr.join().unwrap();
}
assert!(done);
}
/// retransmit messages to a list of nodes
fn retransmit_to(
peers: &[&ContactInfo],
data: &[u8],
socket: &UdpSocket,
forwarded: bool,
socket_addr_space: &SocketAddrSpace,
) {
trace!("retransmit orders {}", peers.len());
let dests: Vec<_> = if forwarded {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
} else {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
};
if let Err(SendPktsError::IoError(ioerr, num_failed)) = multi_target_send(socket, data, &dests)
{
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
dests.len(),
);
}
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn gossip_ring() {
solana_logger::setup();
run_gossip_topo(50, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// ring a -> b -> c -> d -> e -> a
#[test]
#[ignore]
fn gossip_ring_large() {
solana_logger::setup();
run_gossip_topo(600, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// star a -> (b,c,d,e)
#[test]
fn gossip_star() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
for n in 0..(num - 1) {
let x = 0;
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut yd = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
yd.set_wallclock(timestamp());
let xv = &listen[x].0;
xv.insert_legacy_info(yd);
trace!("star leader {}", &xv.id());
}
});
}
/// rstar a <- (b,c,d,e)
#[test]
fn gossip_rstar() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
let xd = {
let xv = &listen[0].0;
xv.lookup_contact_info(&xv.id(), |ci| ci.clone()).unwrap()
};
trace!("rstar leader {}", xd.pubkey());
for n in 0..(num - 1) {
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
yv.insert_legacy_info(xd.clone());
trace!("rstar insert {} into {}", xd.pubkey(), yv.id());
}
});
}
#[test]
pub fn cluster_info_retransmit() {
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
trace!("c1:");
let (c1, dr1, tn1) = test_node(exit.clone());
trace!("c2:");
let (c2, dr2, tn2) = test_node(exit.clone());
trace!("c3:");
let (c3, dr3, tn3) = test_node(exit.clone());
let c1_contact_info = c1.my_contact_info();
c2.insert_info(c1_contact_info.clone());
c3.insert_info(c1_contact_info);
let num = 3;
//wait to converge
trace!("waiting to converge:");
let mut done = false;
for _ in 0..30 {
done = c1.gossip_peers().len() == num - 1
&& c2.gossip_peers().len() == num - 1
&& c3.gossip_peers().len() == num - 1;
if done {
break;
}
sleep(Duration::from_secs(1));
}
assert!(done);
let mut p = Packet::default();
p.meta_mut().size = 10;
let peers = c1.tvu_peers();
let retransmit_peers: Vec<_> = peers.iter().collect();
retransmit_to(
&retransmit_peers,
p.data(..).unwrap(),
&tn1,
false,
&SocketAddrSpace::Unspecified,
);
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {
let mut p = Packet::default();
s.set_read_timeout(Some(Duration::from_secs(1))).unwrap();
let res = s.recv_from(p.buffer_mut());
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
    //true if it failed to receive the retransmit packet; r2 and r3 should succeed
//r1 was the sender, so it should fail to receive the packet
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
dr1.join().unwrap();
dr2.join().unwrap();
dr3.join().unwrap();
}
#[test]
#[ignore]
pub fn cluster_info_scale() {
use {
solana_measure::measure::Measure,
solana_perf::test_tx::test_tx,
solana_runtime::{
bank::Bank,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
},
};
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
let num_nodes: usize = std::env::var("NUM_NODES")
.unwrap_or_else(|_| "10".to_string())
.parse()
.expect("could not parse NUM_NODES as a number");
let vote_keypairs: Vec<_> = (0..num_nodes)
.map(|_| ValidatorVoteKeypairs::new_rand())
.collect();
let genesis_config_info = create_genesis_config_with_vote_accounts(
10_000,
&vote_keypairs,
vec![100; vote_keypairs.len()],
);
let bank0 = Bank::new_for_tests | {
trace!("not converged {} {} {}", i, total + num, num * num);
} | conditional_block |
gossip.rs | pair: Arc<Keypair>,
exit: Arc<AtomicBool>,
bank_forks: Arc<RwLock<BankForks>>,
) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let mut test_node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
node_keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
Some(bank_forks),
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
/// Test that the network converges.
/// Run until every node in the network has a full ContactInfo set.
/// Check that nodes stop sending updates after all the ContactInfo has been shared.
/// Tests that actually use this function are below.
fn run_gossip_topo<F>(num: usize, topo: F)
where
F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>),
{
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
topo(&listen);
let mut done = true;
for i in 0..(num * 32) {
done = true;
let total: usize = listen.iter().map(|v| v.0.gossip_peers().len()).sum();
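        // Added note: the check below treats the mesh as converged once the total
        // known-peer count reaches roughly 90% of the full n*n mesh (num * num).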
if (total + num) * 10 > num * num * 9 {
done = true;
break;
} else {
trace!("not converged {} {} {}", i, total + num, num * num);
}
sleep(Duration::from_secs(1));
}
exit.store(true, Ordering::Relaxed);
for (_, dr, _) in listen {
dr.join().unwrap();
}
assert!(done);
}
/// retransmit messages to a list of nodes
fn retransmit_to(
peers: &[&ContactInfo],
data: &[u8],
socket: &UdpSocket,
forwarded: bool,
socket_addr_space: &SocketAddrSpace,
) {
trace!("retransmit orders {}", peers.len());
let dests: Vec<_> = if forwarded {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
} else {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
};
if let Err(SendPktsError::IoError(ioerr, num_failed)) = multi_target_send(socket, data, &dests)
{
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
dests.len(),
);
}
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn gossip_ring() {
solana_logger::setup();
run_gossip_topo(50, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// ring a -> b -> c -> d -> e -> a
#[test]
#[ignore]
fn gossip_ring_large() {
solana_logger::setup();
run_gossip_topo(600, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// star a -> (b,c,d,e)
#[test]
fn gossip_star() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
for n in 0..(num - 1) {
let x = 0;
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut yd = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
yd.set_wallclock(timestamp());
let xv = &listen[x].0;
xv.insert_legacy_info(yd);
trace!("star leader {}", &xv.id());
}
});
}
/// rstar a <- (b,c,d,e)
#[test]
fn gossip_rstar() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
let xd = {
let xv = &listen[0].0;
xv.lookup_contact_info(&xv.id(), |ci| ci.clone()).unwrap()
};
trace!("rstar leader {}", xd.pubkey());
for n in 0..(num - 1) {
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
yv.insert_legacy_info(xd.clone());
trace!("rstar insert {} into {}", xd.pubkey(), yv.id());
}
});
}
#[test]
pub fn cluster_info_retransmit() {
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
trace!("c1:");
let (c1, dr1, tn1) = test_node(exit.clone());
trace!("c2:");
let (c2, dr2, tn2) = test_node(exit.clone());
trace!("c3:");
let (c3, dr3, tn3) = test_node(exit.clone());
let c1_contact_info = c1.my_contact_info();
c2.insert_info(c1_contact_info.clone());
c3.insert_info(c1_contact_info);
let num = 3;
//wait to converge
trace!("waiting to converge:");
let mut done = false;
for _ in 0..30 {
done = c1.gossip_peers().len() == num - 1
&& c2.gossip_peers().len() == num - 1
&& c3.gossip_peers().len() == num - 1;
if done {
break;
}
sleep(Duration::from_secs(1));
}
assert!(done);
let mut p = Packet::default();
p.meta_mut().size = 10;
let peers = c1.tvu_peers();
let retransmit_peers: Vec<_> = peers.iter().collect();
retransmit_to(
&retransmit_peers,
p.data(..).unwrap(),
&tn1,
false,
&SocketAddrSpace::Unspecified,
);
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {
let mut p = Packet::default();
s.set_read_timeout(Some(Duration::from_secs(1))).unwrap();
let res = s.recv_from(p.buffer_mut());
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
    //true if it failed to receive the retransmit packet; r2 and r3 should succeed
//r1 was the sender, so it should fail to receive the packet
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
dr1.join().unwrap();
dr2.join().unwrap();
dr3.join().unwrap();
}
#[test]
#[ignore]
pub fn cluster_info_scale() {
use {
solana_measure::measure::Measure,
solana_perf::test_tx::test_tx,
solana_runtime::{
bank::Bank,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
},
};
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
let num_nodes: usize = std::env::var("NUM_NODES")
.unwrap_or_else(|_| "10".to_string())
.parse()
.expect("could not parse NUM_NODES as a number"); | 10_000,
&vote_keypairs,
vec![100; vote_keypairs.len()],
);
let bank0 = Bank::new_for_tests(& |
let vote_keypairs: Vec<_> = (0..num_nodes)
.map(|_| ValidatorVoteKeypairs::new_rand())
.collect();
let genesis_config_info = create_genesis_config_with_vote_accounts( | random_line_split |
gossip.rs | pair: Arc<Keypair>,
exit: Arc<AtomicBool>,
bank_forks: Arc<RwLock<BankForks>>,
) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let mut test_node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
node_keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
Some(bank_forks),
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
/// Test that the network converges.
/// Run until every node in the network has a full ContactInfo set.
/// Check that nodes stop sending updates after all the ContactInfo has been shared.
/// Tests that actually use this function are below.
fn run_gossip_topo<F>(num: usize, topo: F)
where
F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>),
{
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
topo(&listen);
let mut done = true;
for i in 0..(num * 32) {
done = true;
let total: usize = listen.iter().map(|v| v.0.gossip_peers().len()).sum();
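        // Added note: the check below treats the mesh as converged once the total
        // known-peer count reaches roughly 90% of the full n*n mesh (num * num).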
if (total + num) * 10 > num * num * 9 {
done = true;
break;
} else {
trace!("not converged {} {} {}", i, total + num, num * num);
}
sleep(Duration::from_secs(1));
}
exit.store(true, Ordering::Relaxed);
for (_, dr, _) in listen {
dr.join().unwrap();
}
assert!(done);
}
/// retransmit messages to a list of nodes
fn retransmit_to(
peers: &[&ContactInfo],
data: &[u8],
socket: &UdpSocket,
forwarded: bool,
socket_addr_space: &SocketAddrSpace,
) {
trace!("retransmit orders {}", peers.len());
let dests: Vec<_> = if forwarded {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
} else {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
};
if let Err(SendPktsError::IoError(ioerr, num_failed)) = multi_target_send(socket, data, &dests)
{
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
dests.len(),
);
}
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn | () {
solana_logger::setup();
run_gossip_topo(50, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// ring a -> b -> c -> d -> e -> a
#[test]
#[ignore]
fn gossip_ring_large() {
solana_logger::setup();
run_gossip_topo(600, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// star a -> (b,c,d,e)
#[test]
fn gossip_star() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
for n in 0..(num - 1) {
let x = 0;
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut yd = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
yd.set_wallclock(timestamp());
let xv = &listen[x].0;
xv.insert_legacy_info(yd);
trace!("star leader {}", &xv.id());
}
});
}
/// rstar a <- (b,c,d,e)
#[test]
fn gossip_rstar() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
let xd = {
let xv = &listen[0].0;
xv.lookup_contact_info(&xv.id(), |ci| ci.clone()).unwrap()
};
trace!("rstar leader {}", xd.pubkey());
for n in 0..(num - 1) {
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
yv.insert_legacy_info(xd.clone());
trace!("rstar insert {} into {}", xd.pubkey(), yv.id());
}
});
}
#[test]
pub fn cluster_info_retransmit() {
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
trace!("c1:");
let (c1, dr1, tn1) = test_node(exit.clone());
trace!("c2:");
let (c2, dr2, tn2) = test_node(exit.clone());
trace!("c3:");
let (c3, dr3, tn3) = test_node(exit.clone());
let c1_contact_info = c1.my_contact_info();
c2.insert_info(c1_contact_info.clone());
c3.insert_info(c1_contact_info);
let num = 3;
//wait to converge
trace!("waiting to converge:");
let mut done = false;
for _ in 0..30 {
done = c1.gossip_peers().len() == num - 1
&& c2.gossip_peers().len() == num - 1
&& c3.gossip_peers().len() == num - 1;
if done {
break;
}
sleep(Duration::from_secs(1));
}
assert!(done);
let mut p = Packet::default();
p.meta_mut().size = 10;
let peers = c1.tvu_peers();
let retransmit_peers: Vec<_> = peers.iter().collect();
retransmit_to(
&retransmit_peers,
p.data(..).unwrap(),
&tn1,
false,
&SocketAddrSpace::Unspecified,
);
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {
let mut p = Packet::default();
s.set_read_timeout(Some(Duration::from_secs(1))).unwrap();
let res = s.recv_from(p.buffer_mut());
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
    //true if it failed to receive the retransmit packet; r2 and r3 should succeed
//r1 was the sender, so it should fail to receive the packet
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
dr1.join().unwrap();
dr2.join().unwrap();
dr3.join().unwrap();
}
#[test]
#[ignore]
pub fn cluster_info_scale() {
use {
solana_measure::measure::Measure,
solana_perf::test_tx::test_tx,
solana_runtime::{
bank::Bank,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
},
};
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
let num_nodes: usize = std::env::var("NUM_NODES")
.unwrap_or_else(|_| "10".to_string())
.parse()
.expect("could not parse NUM_NODES as a number");
let vote_keypairs: Vec<_> = (0..num_nodes)
.map(|_| ValidatorVoteKeypairs::new_rand())
.collect();
let genesis_config_info = create_genesis_config_with_vote_accounts(
10_000,
&vote_keypairs,
vec![100; vote_keypairs.len()],
);
let bank0 = Bank::new_for_tests | gossip_ring | identifier_name |
gossip.rs | pair: Arc<Keypair>,
exit: Arc<AtomicBool>,
bank_forks: Arc<RwLock<BankForks>>,
) -> (Arc<ClusterInfo>, GossipService, UdpSocket) | test_node.sockets.tvu.pop().unwrap(),
)
}
/// Test that the network converges.
/// Run until every node in the network has a full ContactInfo set.
/// Check that nodes stop sending updates after all the ContactInfo has been shared.
/// Tests that actually use this function are below.
fn run_gossip_topo<F>(num: usize, topo: F)
where
F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>),
{
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
topo(&listen);
let mut done = true;
for i in 0..(num * 32) {
done = true;
let total: usize = listen.iter().map(|v| v.0.gossip_peers().len()).sum();
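        // Added note: the check below treats the mesh as converged once the total
        // known-peer count reaches roughly 90% of the full n*n mesh (num * num).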
if (total + num) * 10 > num * num * 9 {
done = true;
break;
} else {
trace!("not converged {} {} {}", i, total + num, num * num);
}
sleep(Duration::from_secs(1));
}
exit.store(true, Ordering::Relaxed);
for (_, dr, _) in listen {
dr.join().unwrap();
}
assert!(done);
}
/// retransmit messages to a list of nodes
fn retransmit_to(
peers: &[&ContactInfo],
data: &[u8],
socket: &UdpSocket,
forwarded: bool,
socket_addr_space: &SocketAddrSpace,
) {
trace!("retransmit orders {}", peers.len());
let dests: Vec<_> = if forwarded {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
} else {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
};
if let Err(SendPktsError::IoError(ioerr, num_failed)) = multi_target_send(socket, data, &dests)
{
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
dests.len(),
);
}
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn gossip_ring() {
solana_logger::setup();
run_gossip_topo(50, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// ring a -> b -> c -> d -> e -> a
#[test]
#[ignore]
fn gossip_ring_large() {
solana_logger::setup();
run_gossip_topo(600, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// star a -> (b,c,d,e)
#[test]
fn gossip_star() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
for n in 0..(num - 1) {
let x = 0;
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut yd = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
yd.set_wallclock(timestamp());
let xv = &listen[x].0;
xv.insert_legacy_info(yd);
trace!("star leader {}", &xv.id());
}
});
}
/// rstar a <- (b,c,d,e)
#[test]
fn gossip_rstar() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
let xd = {
let xv = &listen[0].0;
xv.lookup_contact_info(&xv.id(), |ci| ci.clone()).unwrap()
};
trace!("rstar leader {}", xd.pubkey());
for n in 0..(num - 1) {
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
yv.insert_legacy_info(xd.clone());
trace!("rstar insert {} into {}", xd.pubkey(), yv.id());
}
});
}
#[test]
pub fn cluster_info_retransmit() {
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
trace!("c1:");
let (c1, dr1, tn1) = test_node(exit.clone());
trace!("c2:");
let (c2, dr2, tn2) = test_node(exit.clone());
trace!("c3:");
let (c3, dr3, tn3) = test_node(exit.clone());
let c1_contact_info = c1.my_contact_info();
c2.insert_info(c1_contact_info.clone());
c3.insert_info(c1_contact_info);
let num = 3;
//wait to converge
trace!("waiting to converge:");
let mut done = false;
for _ in 0..30 {
done = c1.gossip_peers().len() == num - 1
&& c2.gossip_peers().len() == num - 1
&& c3.gossip_peers().len() == num - 1;
if done {
break;
}
sleep(Duration::from_secs(1));
}
assert!(done);
let mut p = Packet::default();
p.meta_mut().size = 10;
let peers = c1.tvu_peers();
let retransmit_peers: Vec<_> = peers.iter().collect();
retransmit_to(
&retransmit_peers,
p.data(..).unwrap(),
&tn1,
false,
&SocketAddrSpace::Unspecified,
);
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {
let mut p = Packet::default();
s.set_read_timeout(Some(Duration::from_secs(1))).unwrap();
let res = s.recv_from(p.buffer_mut());
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
    //true if it failed to receive the retransmit packet; r2 and r3 should succeed
//r1 was the sender, so it should fail to receive the packet
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
dr1.join().unwrap();
dr2.join().unwrap();
dr3.join().unwrap();
}
#[test]
#[ignore]
pub fn cluster_info_scale() {
use {
solana_measure::measure::Measure,
solana_perf::test_tx::test_tx,
solana_runtime::{
bank::Bank,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
},
};
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
let num_nodes: usize = std::env::var("NUM_NODES")
.unwrap_or_else(|_| "10".to_string())
.parse()
.expect("could not parse NUM_NODES as a number");
let vote_keypairs: Vec<_> = (0..num_nodes)
.map(|_| ValidatorVoteKeypairs::new_rand())
.collect();
let genesis_config_info = create_genesis_config_with_vote_accounts(
10_000,
&vote_keypairs,
vec![100; vote_keypairs.len()],
);
let bank0 = Bank::new_for_tests(& | {
let mut test_node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
node_keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
Some(bank_forks),
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service, | identifier_body |
decrypt.py | _decoder.grid(row=2, column=1)
resultat=tk.Entry(racine,width = 50, font = ("helvetica", "20"))
resultat.grid(row=3,column=0)
label_res=tk.Label(racine,font = ("helvetica", "20"), text="Résultat ici.")
label_res.grid(row = 3, column=1)
# print("La clef est : chr", brute_force_cesar("kd"))
# The key found is chr 255 -> ÿ (possibly an issue with the original code?)
texte1_decode = "le prochain fichier aura un code par substitution alphabetique: chaque lettre est remplacee par une autre. utiliser la frequence des lettres pour decoder le message."
alphabet_francais = [[7.11, 'a'], [1.14, 'b'], [3.18, 'c'], [3.67, 'd'], [12.10, 'e'], [1.11, 'f'], [1.23, 'g'], [1.11, 'h'], [6.59, 'i'], [0.34, 'j'], [0.29, 'k'], [4.96, 'l'], [2.62, 'm'], [6.39, 'n'], [5.02, 'o'], [2.49, 'p'], [0.65, 'q'], [6.07, 'r'], [6.51, 's'], [5.92, 't'], [4.49, 'u'], [1.11, 'v'], [0.17, 'w'], [0.38, 'x'], [0.46, 'y'], [0.15, 'z']]
def str_convert(liste):
"""Renvoie un texte depuis une liste qui contient un texte découpé"""
texte_str = ""
for a in range(len(liste)):
texte_str += str(liste[a])
return texte_str
def trouver_frequence_lettre(lettre, texte):
"""Trouve le nombre d'itérations d'une lettre dans un texte"""
# Oui le nom porte à confusion
compteur = 0
for i in texte:
if i == lettre:
compteur += 1
return compteur
def trouver_frequence_texte(texte):
"""Applique la fonction précédante pour toutes les lettres"""
# On obtient vraiment une fréquence cette fois
alphabet_francais_texte = [0 for i in range(26)]
for i in range(26):
alphabet_francais_texte[i] = [alphabet_francais_texte[i], chr(i + 97)]
for i in range(26):
alphabet_francais_texte[i][0] = round((trouver_frequence_lettre(chr(i + 97), texte) * 100) / len(texte), 3)
alphabet_francais_texte.sort(reverse=True)
return alphabet_francais_texte
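# Illustrative output shape (hypothetical input, not from the original file):
# trouver_frequence_texte("eeea") would return [frequency, letter] pairs sorted by
# decreasing frequency, e.g. [[75.0, 'e'], [25.0, 'a'], [0.0, 'z'], ...].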
def substituer(texte): # Gives a rough idea but not effective, poorly coded
	"""Replace each letter according to its frequency, based on the
	average frequency at which letters appear in the French alphabet."""
alphabet = "abcdefghijklmnopqrstuvwxyz"
texte_lettre_only = []
for car in texte:
if car in alphabet:
texte_lettre_only.append(car)
nouveau_texte = list(texte)
j = 0
alphabet_francais_texte = trouver_frequence_texte(texte_lettre_only)
alphabet_francais.sort(reverse=True)
for lettre in texte_lettre_only:
a = False
i = 0
if nouveau_texte[j] == " " or nouveau_texte[j] == ":" or nouveau_texte[j] == "," or nouveau_texte[j] == "?" or nouveau_texte[j] == "." or nouveau_texte[j] == "2" or nouveau_texte[j] == "6":
j += 1
else:
while a == False:
if lettre == alphabet_francais_texte[i][1]:
nouveau_texte[j] = alphabet_francais[i][1]
a = True
else:
i += 1
if i == 26:
i = 0
j += 1
texte_str = str_convert(nouveau_texte)
return texte_str
# print(substituer(texte2))
def substituer_lettre(texte, lettre_initiale, lettre_finale):
nouveau_texte = list(texte)
i = 0
for lettre in texte:
if lettre == lettre_initiale:
nouveau_texte[i] = lettre_finale
i += 1
nouveau_texte = str_convert(nouveau_texte)
return nouveau_texte
# print(alphabet_francais)
# print(trouver_frequence_texte(texte2))
# print(texte2)
alphabet_decode = ['z', 'b', 'd', 'n', 'e', 'm', 'l', 'h', 's', 'j', 'i', 'h', 'g', 'a', 'r', 'p', 'p', 'r', 'o', 't', 't', 'c', 'f', 'e', 'u', 'y']
# Obtained by trial and error (by testing the substituer_lettre function in a loop)
def decode_substitution(texte, alphabet):
"""Effectue une substitution par rapport à un alphabet donné."""
nouveau_texte = []
alphabet_francais = [[7.11, 'a'], [1.14, 'b'], [3.18, 'c'], [3.67, 'd'], [12.10, 'e'], [1.11, 'f'], [1.23, 'g'], [1.11, 'h'], [6.59, 'i'], [0.34, 'j'], [0.29, 'k'], [4.96, 'l'], [2.62, 'm'], [6.39, 'n'], [5.02, 'o'], [2.49, 'p'], [0.65, 'q'], [6.07, 'r'], [6.51, 's'], [5.92, 't'], [4.49, 'u'], [1.11, 'v'], [0.17, 'w'], [0.38, 'x'], [0.46, 'y'], [0.15, 'z']]
for lettre in texte:
a = False
i = 0
if lettre == " " or lettre == ":" or lettre == "," or lettre == "?" or lettre == "." or lettre == "2" or lettre == "6" or lettre == "'":
nouveau_texte.append(lettre)
else:
while a == False:
if lettre == alphabet_francais[i][1]:
nouveau_texte.append(alphabet[i])
a = True
else:
i += 1
if i == 26:
i = 0
texte_sub = str_convert(nouveau_texte)
return texte_sub
texte2_decode = "le prochain fichier est code par un mot de passe de taille inconnu et contient l'indice. les lettres du mot de passe permettent de décaler les lettres du message original modulo 26. seules les lettres de a a z sont chiffrees."
# print(decode_substitution(texte2, alphabet_decode))
def position_lettre(lettre):
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(alphabet_liste)):
if lettre == alphabet_liste[i]:
return i
def decaler_les_lettres(texte, clef):
liste_texte = list(texte)
liste_clef = list(clef)
a = 0
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(liste_texte)):
if liste_texte[i] in alphabet:
if position_lettre(liste_texte[i])+position_lettre(liste_clef[a]) < 0:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])+position_lettre(liste_clef[a])]
else:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])-position_lettre(liste_clef[a])]
a += 1
if a == len(clef):
a = 0
elif liste_texte[i] == " ":
a = 0
else:
a += 1
if a == len(clef):
a = 0
return str_convert(liste_texte)
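# Added note: the list index used above, position_lettre(text letter) - position_lettre(key letter),
# can be negative; Python's negative indexing then wraps around alphabet_liste, which is what gives
# the "modulo 26" behaviour even though no explicit % is written. The `+ ... < 0` branch can never
# be taken, since letter positions are always >= 0.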
def decaler_les_lettres_sans_espace(texte, clef):
| liste_texte = list(texte)
| identifier_name |
|
decrypt.py | _clef] # On ajoute notre alphabet à a
for y in range(i):
a = [x + j for j in possibilite_clef for x in a]
return a
def brute_force_cesar(texte_a_trouver):
"""Trouve une clé longue de 1 et une suite de caractères qui
correspondent au texte à trouver. Pas sûr de l'idée."""
alphabet = "abcdefghijklmnopqrstuvwxyz :,?.0123456789'"
	# All possible characters / characters seen in the texts to decode
liste_car = []
	# Empty list that will hold the possible character combinations
texte_test = ""
	# Encoded text to compare with the initial text
	l = 0 # Index into liste_car
	m = 0 # Index into the key
	t = 1 # Key length
clef = creer_liste_clef(t)
for i in range(len(texte_a_trouver)):
		# Build a list of all possible combinations
		a = [j for j in alphabet] # Add our alphabet to a
for y in range(i):
a = [x + j for j in | ar + a # On ajoute ce qu'on a trouvé à notre liste
while texte_test != texte_a_trouver:
		# As long as we do not encode the same thing we are looking for
texte_test = chiffre_deux(str(liste_car[l]), clef)
		# Test the encoding with the current text and key
		l += 1 # Look at the next character
		if l >= len(liste_car): # Do not go out of range
l = 0
			m += 1 # Change the key
if m == 256:
t += 1
clef = creer_liste_clef(t)
m += -1
entree_cle.insert(0, clef[m])
return ord(clef[m])
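# Added note: liste_car grows to roughly 42 + 42**2 + ... + 42**len(texte_a_trouver)
# candidate strings (the alphabet above has 42 characters), so this brute force is
# only practical for very short targets such as "kd".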
racine=tk.Tk()
racine.title("Cryptographie")
entree_texte = tk.Entry(racine, width = 50, font = ("helvetica", "20"))
entree_texte.grid(row = 0, column = 0)
entree_cle = tk.Entry(racine, width = 50, font = ("helvetica", "20"))
entree_cle.grid(row = 1, column = 0)
label_texte = tk.Label(racine,font = ("helvetica", "20"), text = "Entrer le message ici.")
label_texte.grid(row = 0, column = 1)
label_cle = tk.Label(racine,font = ("helvetica", "20"), text = "Entrer la clé ici.")
label_cle.grid(row = 1, column = 1)
bouton_coder=tk.Button(racine, text="Chiffrer texte",fg="black", width=15, command=chiffre)
bouton_coder.grid(row=2, column=0)
bouton_decoder=tk.Button(racine,text="Déchiffrer texte",fg="black", width=15,command=dechiffre)
bouton_decoder.grid(row=2, column=1)
resultat=tk.Entry(racine,width = 50, font = ("helvetica", "20"))
resultat.grid(row=3,column=0)
label_res=tk.Label(racine,font = ("helvetica", "20"), text="Résultat ici.")
label_res.grid(row = 3, column=1)
# print("La clef est : chr", brute_force_cesar("kd"))
# The key found is chr 255 -> ÿ (possibly an issue with the original code?)
texte1_decode = "le prochain fichier aura un code par substitution alphabetique: chaque lettre est remplacee par une autre. utiliser la frequence des lettres pour decoder le message."
alphabet_francais = [[7.11, 'a'], [1.14, 'b'], [3.18, 'c'], [3.67, 'd'], [12.10, 'e'], [1.11, 'f'], [1.23, 'g'], [1.11, 'h'], [6.59, 'i'], [0.34, 'j'], [0.29, 'k'], [4.96, 'l'], [2.62, 'm'], [6.39, 'n'], [5.02, 'o'], [2.49, 'p'], [0.65, 'q'], [6.07, 'r'], [6.51, 's'], [5.92, 't'], [4.49, 'u'], [1.11, 'v'], [0.17, 'w'], [0.38, 'x'], [0.46, 'y'], [0.15, 'z']]
def str_convert(liste):
"""Renvoie un texte depuis une liste qui contient un texte découpé"""
texte_str = ""
for a in range(len(liste)):
texte_str += str(liste[a])
return texte_str
def trouver_frequence_lettre(lettre, texte):
"""Trouve le nombre d'itérations d'une lettre dans un texte"""
# Oui le nom porte à confusion
compteur = 0
for i in texte:
if i == lettre:
compteur += 1
return compteur
def trouver_frequence_texte(texte):
"""Applique la fonction précédante pour toutes les lettres"""
# On obtient vraiment une fréquence cette fois
alphabet_francais_texte = [0 for i in range(26)]
for i in range(26):
alphabet_francais_texte[i] = [alphabet_francais_texte[i], chr(i + 97)]
for i in range(26):
alphabet_francais_texte[i][0] = round((trouver_frequence_lettre(chr(i + 97), texte) * 100) / len(texte), 3)
alphabet_francais_texte.sort(reverse=True)
return alphabet_francais_texte
def substituer(texte): # Gives a rough idea but not effective, poorly coded
	"""Replace each letter according to its frequency, based on the
	average frequency at which letters appear in the French alphabet."""
alphabet = "abcdefghijklmnopqrstuvwxyz"
texte_lettre_only = []
for car in texte:
if car in alphabet:
texte_lettre_only.append(car)
nouveau_texte = list(texte)
j = 0
alphabet_francais_texte = trouver_frequence_texte(texte_lettre_only)
alphabet_francais.sort(reverse=True)
for lettre in texte_lettre_only:
a = False
i = 0
if nouveau_texte[j] == " " or nouveau_texte[j] == ":" or nouveau_texte[j] == "," or nouveau_texte[j] == "?" or nouveau_texte[j] == "." or nouveau_texte[j] == "2" or nouveau_texte[j] == "6":
j += 1
else:
while a == False:
if lettre == alphabet_francais_texte[i][1]:
nouveau_texte[j] = alphabet_francais[i][1]
a = True
else:
i += 1
if i == 26:
i = 0
j += 1
texte_str = str_convert(nouveau_texte)
return texte_str
# print(substituer(texte2))
def substituer_lettre(texte, lettre_initiale, lettre_finale):
nouveau_texte = list(texte)
i = 0
for lettre in texte:
if lettre == lettre_initiale:
nouveau_texte[i] = lettre_finale
i += 1
nouveau_texte = str_convert(nouveau_texte)
return nouveau_texte
# print(alphabet_francais)
# print(trouver_frequence_texte(texte2))
# print(texte2)
alphabet_decode = ['z', 'b', 'd', 'n', 'e', 'm', 'l', 'h', 's', 'j', 'i', 'h', 'g', 'a', 'r', 'p', 'p', 'r', 'o', 't', 't', 'c', 'f', 'e', 'u', 'y']
# Obtained by trial and error (by testing the substituer_lettre function in a loop)
def decode_substitution(texte, alphabet):
"""Effectue une substitution par rapport à un alphabet donné."""
nouveau_texte = []
| alphabet for x in a]
# Append every character to every character
# (not sure about that sentence -_-)
liste_car = liste_c | conditional_block |
decrypt.py | _clef] # On ajoute notre alphabet à a
for y in range(i):
a = [x + j for j in possibilite_clef for x in a]
return a
def brute_force_cesar(texte_a_trouver):
"""Trouve une clé longue de 1 et une suite de caractères qui
correspondent au texte à trouver. Pas sûr de l'idée."""
alphabet = "abcdefghijklmnopqrstuvwxyz :,?.0123456789'"
	# All possible characters / characters seen in the texts to decode
liste_car = []
	# Empty list that will hold the possible character combinations
texte_test = ""
	# Encoded text to compare with the initial text
	l = 0 # Index into liste_car
	m = 0 # Index into the key
	t = 1 # Key length
clef = creer_liste_clef(t)
for i in range(len(texte_a_trouver)):
		# Build a list of all possible combinations
		a = [j for j in alphabet] # Add our alphabet to a
for y in range(i):
a = [x + j for j in alphabet for x in a]
			# Append every character to every character
			# (not sure about that sentence -_-)
		liste_car = liste_car + a # Add what we found to our list
while texte_test != texte_a_trouver:
		# As long as we do not encode the same thing we are looking for
texte_test = chiffre_deux(str(liste_car[l]), clef)
		# Test the encoding with the current text and key
		l += 1 # Look at the next character
		if l >= len(liste_car): # Do not go out of range
l = 0
			m += 1 # Change the key
if m == 256:
t += 1
clef = creer_liste_clef(t)
m += -1
entree_cle.insert(0, clef[m])
return ord(clef[m])
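# Added note: liste_car grows to roughly 42 + 42**2 + ... + 42**len(texte_a_trouver)
# candidate strings (the alphabet above has 42 characters), so this brute force is
# only practical for very short targets such as "kd".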
racine=tk.Tk()
racine.title("Cryptographie")
entree_texte = tk.Entry(racine, width = 50, font = ("helvetica", "20"))
entree_texte.grid(row = 0, column = 0)
entree_cle = tk.Entry(racine, width = 50, font = ("helvetica", "20"))
entree_cle.grid(row = 1, column = 0)
label_texte = tk.Label(racine,font = ("helvetica", "20"), text = "Entrer le message ici.")
label_texte.grid(row = 0, column = 1)
label_cle = tk.Label(racine,font = ("helvetica", "20"), text = "Entrer la clé ici.")
label_cle.grid(row = 1, column = 1)
bouton_coder=tk.Button(racine, text="Chiffrer texte",fg="black", width=15, command=chiffre)
bouton_coder.grid(row=2, column=0)
bouton_decoder=tk.Button(racine,text="Déchiffrer texte",fg="black", width=15,command=dechiffre)
bouton_decoder.grid(row=2, column=1)
resultat=tk.Entry(racine,width = 50, font = ("helvetica", "20"))
resultat.grid(row=3,column=0)
label_res=tk.Label(racine,font = ("helvetica", "20"), text="Résultat ici.")
label_res.grid(row = 3, column=1)
# print("La clef est : chr", brute_force_cesar("kd"))
# The key found is chr 255 -> ÿ (possibly an issue with the original code?)
texte1_decode = "le prochain fichier aura un code par substitution alphabetique: chaque lettre est remplacee par une autre. utiliser la frequence des lettres pour decoder le message."
alphabet_francais = [[7.11, 'a'], [1.14, 'b'], [3.18, 'c'], [3.67, 'd'], [12.10, 'e'], [1.11, 'f'], [1.23, 'g'], [1.11, 'h'], [6.59, 'i'], [0.34, 'j'], [0.29, 'k'], [4.96, 'l'], [2.62, 'm'], [6.39, 'n'], [5.02, 'o'], [2.49, 'p'], [0.65, 'q'], [6.07, 'r'], [6.51, 's'], [5.92, 't'], [4.49, 'u'], [1.11, 'v'], [0.17, 'w'], [0.38, 'x'], [0.46, 'y'], [0.15, 'z']]
def str_convert(liste):
"""Renvoie un texte depuis une liste qui contient un texte découpé"""
texte_str = ""
for a in range(len(liste)):
texte_str += str(liste[a])
return texte_str
def trouver_frequence_lettre(lettre, texte):
"""Trouve le nombre d'itérations d'une lettre dans un texte"""
# Oui le nom porte à confusion
compteur = 0
for i in texte:
if i == lettre:
compteur += 1
return compteur
def trouver_frequence_texte(texte):
"""Applique la fonction précédante pour toutes les lettres"""
# On obtient vraiment une fréquence cette fois
alphabet_francais_texte = [0 for i in range(26)]
for i in range(26):
alphabet_francais_texte[i] = [alphabet_francais_texte[i], chr(i + 97)]
for i in range(26):
alphabet_francais_texte[i][0] = round((trouver_frequence_lettre(chr(i + 97), texte) * 100) / len(texte), 3)
alphabet_francais_texte.sort(reverse=True)
return alphabet_francais_texte
def substituer(texte): # Gives a rough idea but not effective, poorly coded
	"""Replace each letter according to its frequency, based on the
	average frequency at which letters appear in the French alphabet."""
alphabet = "abcdefghijklmnopqrstuvwxyz"
texte_lettre_only = []
for car in texte:
if car in alphabet:
texte_lettre_only.append(car)
nouveau_texte = list(texte)
j = 0
alphabet_francais_texte = trouver_frequence_texte(texte_lettre_only)
alphabet_francais.sort(reverse=True)
for lettre in texte_lettre_only:
a = False
i = 0
if nouveau_texte[j] == " " or nouveau_texte[j] == ":" or nouveau_texte[j] == "," or nouveau_texte[j] == "?" or nouveau_texte[j] == "." or nouveau_texte[j] == "2" or nouveau_texte[j] == "6":
j += 1
else:
while a == False:
if lettre == alphabet_francais_texte[i][1]:
nouveau_texte[j] = alphabet_francais[i][1]
a = True
else:
i += 1
if i == 26:
i = 0
j += 1
texte_str = str_convert(nouveau_texte)
|
def substituer_lettre(texte, lettre_initiale, lettre_finale):
nouveau_texte = list(texte)
i = 0
for lettre in texte:
if lettre == lettre_initiale:
nouveau_texte[i] = lettre_finale
i += 1
nouveau_texte = str_convert(nouveau_texte)
return nouveau_texte
# print(alphabet_francais)
# print(trouver_frequence_texte(texte2))
# print(texte2)
alphabet_decode = ['z', 'b', 'd', 'n', 'e', 'm', 'l', 'h', 's', 'j', 'i', 'h', 'g', 'a', 'r', 'p', 'p', 'r', 'o', 't', 't', 'c', 'f', 'e', 'u', 'y']
# Obtenu par essai et erreur (en testant la fonction substituer_lettre en boucle)
def decode_substitution(texte, alphabet):
"""Effectue une substitution par rapport à un alphabet donné."""
nouveau_texte = []
| return texte_str
# print(substituer(texte2))
| random_line_split |
decrypt.py | _clef] # On ajoute notre alphabet à a
for y in range(i):
a = [x + j for j in possibilite_clef for x in a]
return a
def brute_force_cesar(texte_a_trouver):
"""Trouve une clé longue de 1 et une suite de caractères qui
correspondent au texte à trouver. Pas sûr de l'idée."""
alphabet = "abcdefghijklmnopqrstuvwxyz :,?.0123456789'"
	# All possible characters / characters seen in the texts to decode
liste_car = []
	# Empty list that will hold the possible character combinations
texte_test = ""
	# Encoded text to compare with the initial text
	l = 0 # Index into liste_car
	m = 0 # Index into the key
	t = 1 # Key length
clef = creer_liste_clef(t)
for i in range(len(texte_a_trouver)):
		# Build a list of all possible combinations
		a = [j for j in alphabet] # Add our alphabet to a
for y in range(i):
a = [x + j for j in alphabet for x in a]
			# Append every character to every character
			# (not sure about that sentence -_-)
		liste_car = liste_car + a # Add what we found to our list
while texte_test != texte_a_trouver:
		# As long as we do not encode the same thing we are looking for
texte_test = chiffre_deux(str(liste_car[l]), clef)
		# Test the encoding with the current text and key
		l += 1 # Look at the next character
		if l >= len(liste_car): # Do not go out of range
l = 0
			m += 1 # Change the key
if m == 256:
t += 1
clef = creer_liste_clef(t)
m += -1
entree_cle.insert(0, clef[m])
return ord(clef[m])
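# Added note: liste_car grows to roughly 42 + 42**2 + ... + 42**len(texte_a_trouver)
# candidate strings (the alphabet above has 42 characters), so this brute force is
# only practical for very short targets such as "kd".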
racine=tk.Tk()
racine.title("Cryptographie")
entree_texte = tk.Entry(racine, width = 50, font = ("helvetica", "20"))
entree_texte.grid(row = 0, column = 0)
entree_cle = tk.Entry(racine, width = 50, font = ("helvetica", "20"))
entree_cle.grid(row = 1, column = 0)
label_texte = tk.Label(racine,font = ("helvetica", "20"), text = "Entrer le message ici.")
label_texte.grid(row = 0, column = 1)
label_cle = tk.Label(racine,font = ("helvetica", "20"), text = "Entrer la clé ici.")
label_cle.grid(row = 1, column = 1)
bouton_coder=tk.Button(racine, text="Chiffrer texte",fg="black", width=15, command=chiffre)
bouton_coder.grid(row=2, column=0)
bouton_decoder=tk.Button(racine,text="Déchiffrer texte",fg="black", width=15,command=dechiffre)
bouton_decoder.grid(row=2, column=1)
resultat=tk.Entry(racine,width = 50, font = ("helvetica", "20"))
resultat.grid(row=3,column=0)
label_res=tk.Label(racine,font = ("helvetica", "20"), text="Résultat ici.")
label_res.grid(row = 3, column=1)
# print("La clef est : chr", brute_force_cesar("kd"))
# The key found is chr 255 -> ÿ (possibly an issue with the original code?)
texte1_decode = "le prochain fichier aura un code par substitution alphabetique: chaque lettre est remplacee par une autre. utiliser la frequence des lettres pour decoder le message."
alphabet_francais = [[7.11, 'a'], [1.14, 'b'], [3.18, 'c'], [3.67, 'd'], [12.10, 'e'], [1.11, 'f'], [1.23, 'g'], [1.11, 'h'], [6.59, 'i'], [0.34, 'j'], [0.29, 'k'], [4.96, 'l'], [2.62, 'm'], [6.39, 'n'], [5.02, 'o'], [2.49, 'p'], [0.65, 'q'], [6.07, 'r'], [6.51, 's'], [5.92, 't'], [4.49, 'u'], [1.11, 'v'], [0.17, 'w'], [0.38, 'x'], [0.46, 'y'], [0.15, 'z']]
def str_convert(liste):
"""Renvoie un texte depuis une lis | ettre, texte):
"""Trouve le nombre d'itérations d'une lettre dans un texte"""
# Oui le nom porte à confusion
compteur = 0
for i in texte:
if i == lettre:
compteur += 1
return compteur
def trouver_frequence_texte(texte):
"""Applique la fonction précédante pour toutes les lettres"""
# On obtient vraiment une fréquence cette fois
alphabet_francais_texte = [0 for i in range(26)]
for i in range(26):
alphabet_francais_texte[i] = [alphabet_francais_texte[i], chr(i + 97)]
for i in range(26):
alphabet_francais_texte[i][0] = round((trouver_frequence_lettre(chr(i + 97), texte) * 100) / len(texte), 3)
alphabet_francais_texte.sort(reverse=True)
return alphabet_francais_texte
def substituer(texte): # Gives a rough idea but not effective, poorly coded
	"""Replace each letter according to its frequency, based on the
	average frequency at which letters appear in the French alphabet."""
alphabet = "abcdefghijklmnopqrstuvwxyz"
texte_lettre_only = []
for car in texte:
if car in alphabet:
texte_lettre_only.append(car)
nouveau_texte = list(texte)
j = 0
alphabet_francais_texte = trouver_frequence_texte(texte_lettre_only)
alphabet_francais.sort(reverse=True)
for lettre in texte_lettre_only:
a = False
i = 0
if nouveau_texte[j] == " " or nouveau_texte[j] == ":" or nouveau_texte[j] == "," or nouveau_texte[j] == "?" or nouveau_texte[j] == "." or nouveau_texte[j] == "2" or nouveau_texte[j] == "6":
j += 1
else:
while a == False:
if lettre == alphabet_francais_texte[i][1]:
nouveau_texte[j] = alphabet_francais[i][1]
a = True
else:
i += 1
if i == 26:
i = 0
j += 1
texte_str = str_convert(nouveau_texte)
return texte_str
# print(substituer(texte2))
def substituer_lettre(texte, lettre_initiale, lettre_finale):
nouveau_texte = list(texte)
i = 0
for lettre in texte:
if lettre == lettre_initiale:
nouveau_texte[i] = lettre_finale
i += 1
nouveau_texte = str_convert(nouveau_texte)
return nouveau_texte
# print(alphabet_francais)
# print(trouver_frequence_texte(texte2))
# print(texte2)
alphabet_decode = ['z', 'b', 'd', 'n', 'e', 'm', 'l', 'h', 's', 'j', 'i', 'h', 'g', 'a', 'r', 'p', 'p', 'r', 'o', 't', 't', 'c', 'f', 'e', 'u', 'y']
# Obtained by trial and error (by testing the substituer_lettre function in a loop)
def decode_substitution(texte, alphabet):
"""Effectue une substitution par rapport à un alphabet donné."""
nouveau_texte = []
| te qui contient un texte découpé"""
texte_str = ""
for a in range(len(liste)):
texte_str += str(liste[a])
return texte_str
def trouver_frequence_lettre(l | identifier_body |
dashboard.go | forceYes); err != nil {
return err
}
warnIfNotLoopback(metabaseListenAddress)
if err := disclaimer(&forceYes); err != nil {
return err
}
dockerGroup, err := checkGroups(&forceYes)
if err != nil {
return err
}
mb, err := metabase.SetupMetabase(csConfig.API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDbPath, dockerGroup.Gid, metabaseContainerID, metabaseImage)
if err != nil {
return err
}
if err := mb.DumpConfig(metabaseConfigPath); err != nil {
return err
}
log.Infof("Metabase is ready")
fmt.Println()
fmt.Printf("\tURL : '%s'\n", mb.Config.ListenURL)
fmt.Printf("\tusername : '%s'\n", mb.Config.Username)
fmt.Printf("\tpassword : '%s'\n", mb.Config.Password)
return nil
},
}
cmdDashSetup.Flags().BoolVarP(&force, "force", "f", false, "Force setup : override existing files")
cmdDashSetup.Flags().StringVarP(&metabaseDbPath, "dir", "d", "", "Shared directory with metabase container")
cmdDashSetup.Flags().StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container")
cmdDashSetup.Flags().StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use")
cmdDashSetup.Flags().StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container")
cmdDashSetup.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
//cmdDashSetup.Flags().StringVarP(&metabaseUser, "user", "u", "[email protected]", "metabase user")
cmdDashSetup.Flags().StringVar(&metabasePassword, "password", "", "metabase password")
return cmdDashSetup
}
func NewDashboardStartCmd() *cobra.Command {
var cmdDashStart = &cobra.Command{
Use: "start",
Short: "Start the metabase container.",
		Long:              `Starts the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
mb, err := metabase.NewMetabase(metabaseConfigPath, metabaseContainerID)
if err != nil {
return err
}
warnIfNotLoopback(mb.Config.ListenAddr)
if err := disclaimer(&forceYes); err != nil {
return err
}
if err := mb.Container.Start(); err != nil {
return fmt.Errorf("failed to start metabase container : %s", err)
}
log.Infof("Started metabase")
log.Infof("url : http://%s:%s", mb.Config.ListenAddr, mb.Config.ListenPort)
return nil
},
}
cmdDashStart.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
return cmdDashStart
}
func NewDashboardStopCmd() *cobra.Command {
var cmdDashStop = &cobra.Command{
Use: "stop",
Short: "Stops the metabase container.",
Long: `Stops the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := metabase.StopContainer(metabaseContainerID); err != nil {
return fmt.Errorf("unable to stop container '%s': %s", metabaseContainerID, err)
}
return nil
},
}
return cmdDashStop
}
func NewDashboardShowPasswordCmd() *cobra.Command {
var cmdDashShowPassword = &cobra.Command{Use: "show-password",
Short: "displays password of metabase.",
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
m := metabase.Metabase{}
if err := m.LoadConfig(metabaseConfigPath); err != nil {
return err
}
log.Printf("'%s'", m.Config.Password)
return nil
},
}
return cmdDashShowPassword
}
func NewDashboardRemoveCmd() *cobra.Command {
var force bool
var cmdDashRemove = &cobra.Command{
Use: "remove",
Short: "removes the metabase container.",
Long: `removes the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
Example: `
cscli dashboard remove
cscli dashboard remove --force
`,
RunE: func(cmd *cobra.Command, args []string) error {
if !forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "Do you really want to remove crowdsec dashboard? (all your changes will be lost)",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask to force: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to continue")
}
}
if metabase.IsContainerExist(metabaseContainerID) {
log.Debugf("Stopping container %s", metabaseContainerID)
if err := metabase.StopContainer(metabaseContainerID); err != nil {
log.Warningf("unable to stop container '%s': %s", metabaseContainerID, err)
}
dockerGroup, err := user.LookupGroup(crowdsecGroup)
if err == nil { // if group exist, remove it
groupDelCmd, err := exec.LookPath("groupdel")
if err != nil {
return fmt.Errorf("unable to find 'groupdel' command, can't continue")
}
groupDel := &exec.Cmd{Path: groupDelCmd, Args: []string{groupDelCmd, crowdsecGroup}}
if err := groupDel.Run(); err != nil {
log.Warnf("unable to delete group '%s': %s", dockerGroup, err)
}
}
log.Debugf("Removing container %s", metabaseContainerID)
if err := metabase.RemoveContainer(metabaseContainerID); err != nil {
log.Warnf("unable to remove container '%s': %s", metabaseContainerID, err)
}
log.Infof("container %s stopped & removed", metabaseContainerID)
}
log.Debugf("Removing metabase db %s", csConfig.ConfigPaths.DataDir)
if err := metabase.RemoveDatabase(csConfig.ConfigPaths.DataDir); err != nil {
log.Warnf("failed to remove metabase internal db : %s", err)
}
if force {
m := metabase.Metabase{}
if err := m.LoadConfig(metabaseConfigPath); err != nil {
return err
}
if err := metabase.RemoveImageContainer(m.Config.Image); err != nil {
if !strings.Contains(err.Error(), "No such image") {
return fmt.Errorf("removing docker image: %s", err)
}
}
}
return nil
},
}
cmdDashRemove.Flags().BoolVarP(&force, "force", "f", false, "Remove also the metabase image")
cmdDashRemove.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
return cmdDashRemove
}
func passwordIsValid(password string) bool {
hasDigit := false
for _, j := range password {
if unicode.IsDigit(j) {
hasDigit = true
break
}
}
if !hasDigit || len(password) < 6 {
return false
}
return true
}
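// Illustrative behaviour of the rule above (not part of the original file):
// passwordIsValid("abc123") -> true, passwordIsValid("abcdef") -> false,
// since a password needs at least one digit and at least 6 characters.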
func checkSystemMemory(forceYes *bool) error {
totMem := memory.TotalMemory()
if totMem >= uint64(math.Pow(2, 30)) {
return nil
}
if !*forceYes {
var answer bool
prompt := &survey.Confirm{
			Message: "Metabase requires 1-2GB of RAM, your system is below this requirement, continue?",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask about RAM check: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to continue")
}
return nil
}
log.Warn("Metabase requires 1-2GB of RAM, your system is below this requirement")
return nil
}
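// Added note: memory.TotalMemory() returns bytes, so the math.Pow(2, 30) threshold
// above flags any machine with less than 1 GiB of RAM as under-provisioned for Metabase.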
func warnIfNotLoopback(addr string) {
if addr == "127.0.0.1" || addr == "::1" | {
return
} | conditional_block |
|
dashboard.go | metabaseConfigFolderPath := filepath.Join(csConfig.ConfigPaths.ConfigDir, metabaseConfigFolder)
metabaseConfigPath = filepath.Join(metabaseConfigFolderPath, metabaseConfigFile)
if err := os.MkdirAll(metabaseConfigFolderPath, os.ModePerm); err != nil {
return err
}
if err := require.DB(csConfig); err != nil {
return err
}
/*
Old container name was "/crowdsec-metabase" but podman doesn't
allow '/' in container name. We do this check to not break
existing dashboard setup.
*/
if !metabase.IsContainerExist(metabaseContainerID) {
oldContainerID := fmt.Sprintf("/%s", metabaseContainerID)
if metabase.IsContainerExist(oldContainerID) {
metabaseContainerID = oldContainerID
}
}
return nil
},
}
cmdDashboard.AddCommand(NewDashboardSetupCmd())
cmdDashboard.AddCommand(NewDashboardStartCmd())
cmdDashboard.AddCommand(NewDashboardStopCmd())
cmdDashboard.AddCommand(NewDashboardShowPasswordCmd())
cmdDashboard.AddCommand(NewDashboardRemoveCmd())
return cmdDashboard
}
func NewDashboardSetupCmd() *cobra.Command {
var force bool
var cmdDashSetup = &cobra.Command{
Use: "setup",
Short: "Setup a metabase container.",
Long: `Perform a metabase docker setup, download standard dashboards, create a fresh user and start the container`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
Example: `
cscli dashboard setup
cscli dashboard setup --listen 0.0.0.0
cscli dashboard setup -l 0.0.0.0 -p 443 --password <password>
`,
RunE: func(cmd *cobra.Command, args []string) error {
if metabaseDbPath == "" {
metabaseDbPath = csConfig.ConfigPaths.DataDir
}
if metabasePassword == "" {
isValid := passwordIsValid(metabasePassword)
for !isValid {
metabasePassword = generatePassword(16)
isValid = passwordIsValid(metabasePassword)
}
}
if err := checkSystemMemory(&forceYes); err != nil {
return err
}
warnIfNotLoopback(metabaseListenAddress)
if err := disclaimer(&forceYes); err != nil {
return err
}
dockerGroup, err := checkGroups(&forceYes)
if err != nil {
return err
}
mb, err := metabase.SetupMetabase(csConfig.API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDbPath, dockerGroup.Gid, metabaseContainerID, metabaseImage)
if err != nil {
return err
}
if err := mb.DumpConfig(metabaseConfigPath); err != nil {
return err
}
log.Infof("Metabase is ready")
fmt.Println()
fmt.Printf("\tURL : '%s'\n", mb.Config.ListenURL)
fmt.Printf("\tusername : '%s'\n", mb.Config.Username)
fmt.Printf("\tpassword : '%s'\n", mb.Config.Password)
return nil
},
}
cmdDashSetup.Flags().BoolVarP(&force, "force", "f", false, "Force setup : override existing files")
cmdDashSetup.Flags().StringVarP(&metabaseDbPath, "dir", "d", "", "Shared directory with metabase container")
cmdDashSetup.Flags().StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container")
cmdDashSetup.Flags().StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use")
cmdDashSetup.Flags().StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container")
cmdDashSetup.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
//cmdDashSetup.Flags().StringVarP(&metabaseUser, "user", "u", "[email protected]", "metabase user")
cmdDashSetup.Flags().StringVar(&metabasePassword, "password", "", "metabase password")
return cmdDashSetup
}
func NewDashboardStartCmd() *cobra.Command {
var cmdDashStart = &cobra.Command{
Use: "start",
Short: "Start the metabase container.",
Long: `Starts the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
mb, err := metabase.NewMetabase(metabaseConfigPath, metabaseContainerID)
if err != nil {
return err
}
warnIfNotLoopback(mb.Config.ListenAddr)
if err := disclaimer(&forceYes); err != nil {
return err
}
if err := mb.Container.Start(); err != nil {
return fmt.Errorf("failed to start metabase container : %s", err)
}
log.Infof("Started metabase")
log.Infof("url : http://%s:%s", mb.Config.ListenAddr, mb.Config.ListenPort)
return nil
},
}
cmdDashStart.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
return cmdDashStart
}
func NewDashboardStopCmd() *cobra.Command {
var cmdDashStop = &cobra.Command{
Use: "stop",
Short: "Stops the metabase container.",
Long: `Stops the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := metabase.StopContainer(metabaseContainerID); err != nil {
return fmt.Errorf("unable to stop container '%s': %s", metabaseContainerID, err)
}
return nil
},
}
return cmdDashStop
}
func NewDashboardShowPasswordCmd() *cobra.Command {
var cmdDashShowPassword = &cobra.Command{Use: "show-password",
Short: "displays password of metabase.",
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
m := metabase.Metabase{}
if err := m.LoadConfig(metabaseConfigPath); err != nil {
return err
}
log.Printf("'%s'", m.Config.Password)
return nil
},
}
return cmdDashShowPassword
}
func | () *cobra.Command {
var force bool
var cmdDashRemove = &cobra.Command{
Use: "remove",
Short: "removes the metabase container.",
Long: `removes the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
Example: `
cscli dashboard remove
cscli dashboard remove --force
`,
RunE: func(cmd *cobra.Command, args []string) error {
if !forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "Do you really want to remove crowdsec dashboard? (all your changes will be lost)",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask to force: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to continue")
}
}
if metabase.IsContainerExist(metabaseContainerID) {
log.Debugf("Stopping container %s", metabaseContainerID)
if err := metabase.StopContainer(metabaseContainerID); err != nil {
log.Warningf("unable to stop container '%s': %s", metabaseContainerID, err)
}
dockerGroup, err := user.LookupGroup(crowdsecGroup)
if err == nil { // if group exists, remove it
groupDelCmd, err := exec.LookPath("groupdel")
if err != nil {
return fmt.Errorf("unable to find 'groupdel' command, can't continue")
}
groupDel := &exec.Cmd{Path: groupDelCmd, Args: []string{groupDelCmd, crowdsecGroup}}
if err := groupDel.Run(); err != nil {
log.Warnf("unable to delete group '%s': %s", dockerGroup, err)
}
}
log.Debugf("Removing container %s", metabaseContainerID)
if err := metabase.RemoveContainer(metabaseContainerID); err != nil {
log.Warnf("unable to remove container '%s': %s", metabaseContainerID, err)
}
log.Infof("container %s stopped | NewDashboardRemoveCmd | identifier_name |
dashboard.go | DashboardStartCmd() *cobra.Command {
var cmdDashStart = &cobra.Command{
Use: "start",
Short: "Start the metabase container.",
Long: `Starts the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
mb, err := metabase.NewMetabase(metabaseConfigPath, metabaseContainerID)
if err != nil {
return err
}
warnIfNotLoopback(mb.Config.ListenAddr)
if err := disclaimer(&forceYes); err != nil {
return err
}
if err := mb.Container.Start(); err != nil {
return fmt.Errorf("failed to start metabase container : %s", err)
}
log.Infof("Started metabase")
log.Infof("url : http://%s:%s", mb.Config.ListenAddr, mb.Config.ListenPort)
return nil
},
}
cmdDashStart.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
return cmdDashStart
}
func NewDashboardStopCmd() *cobra.Command {
var cmdDashStop = &cobra.Command{
Use: "stop",
Short: "Stops the metabase container.",
Long: `Stops the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := metabase.StopContainer(metabaseContainerID); err != nil {
return fmt.Errorf("unable to stop container '%s': %s", metabaseContainerID, err)
}
return nil
},
}
return cmdDashStop
}
func NewDashboardShowPasswordCmd() *cobra.Command {
var cmdDashShowPassword = &cobra.Command{Use: "show-password",
Short: "displays password of metabase.",
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
m := metabase.Metabase{}
if err := m.LoadConfig(metabaseConfigPath); err != nil {
return err
}
log.Printf("'%s'", m.Config.Password)
return nil
},
}
return cmdDashShowPassword
}
func NewDashboardRemoveCmd() *cobra.Command {
var force bool
var cmdDashRemove = &cobra.Command{
Use: "remove",
Short: "removes the metabase container.",
Long: `removes the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
Example: `
cscli dashboard remove
cscli dashboard remove --force
`,
RunE: func(cmd *cobra.Command, args []string) error {
if !forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "Do you really want to remove crowdsec dashboard? (all your changes will be lost)",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask to force: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to continue")
}
}
if metabase.IsContainerExist(metabaseContainerID) {
log.Debugf("Stopping container %s", metabaseContainerID)
if err := metabase.StopContainer(metabaseContainerID); err != nil {
log.Warningf("unable to stop container '%s': %s", metabaseContainerID, err)
}
dockerGroup, err := user.LookupGroup(crowdsecGroup)
if err == nil { // if group exists, remove it
groupDelCmd, err := exec.LookPath("groupdel")
if err != nil {
return fmt.Errorf("unable to find 'groupdel' command, can't continue")
}
groupDel := &exec.Cmd{Path: groupDelCmd, Args: []string{groupDelCmd, crowdsecGroup}}
if err := groupDel.Run(); err != nil {
log.Warnf("unable to delete group '%s': %s", dockerGroup, err)
}
}
log.Debugf("Removing container %s", metabaseContainerID)
if err := metabase.RemoveContainer(metabaseContainerID); err != nil {
log.Warnf("unable to remove container '%s': %s", metabaseContainerID, err)
}
log.Infof("container %s stopped & removed", metabaseContainerID)
}
log.Debugf("Removing metabase db %s", csConfig.ConfigPaths.DataDir)
if err := metabase.RemoveDatabase(csConfig.ConfigPaths.DataDir); err != nil {
log.Warnf("failed to remove metabase internal db : %s", err)
}
if force {
m := metabase.Metabase{}
if err := m.LoadConfig(metabaseConfigPath); err != nil {
return err
}
if err := metabase.RemoveImageContainer(m.Config.Image); err != nil {
if !strings.Contains(err.Error(), "No such image") {
return fmt.Errorf("removing docker image: %s", err)
}
}
}
return nil
},
}
cmdDashRemove.Flags().BoolVarP(&force, "force", "f", false, "Remove also the metabase image")
cmdDashRemove.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
return cmdDashRemove
}
func passwordIsValid(password string) bool {
hasDigit := false
for _, j := range password {
if unicode.IsDigit(j) {
hasDigit = true
break
}
}
if !hasDigit || len(password) < 6 {
return false
}
return true
}
func checkSystemMemory(forceYes *bool) error {
totMem := memory.TotalMemory()
if totMem >= uint64(math.Pow(2, 30)) {
return nil
}
if !*forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "Metabase requires 1-2GB of RAM, your system is below this requirement continue ?",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask about RAM check: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to continue")
}
return nil
}
log.Warn("Metabase requires 1-2GB of RAM, your system is below this requirement")
return nil
}
func warnIfNotLoopback(addr string) {
if addr == "127.0.0.1" || addr == "::1" {
return
}
log.Warnf("You are potentially exposing your metabase port to the internet (addr: %s), please consider using a reverse proxy", addr)
}
func disclaimer(forceYes *bool) error {
if !*forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "CrowdSec takes no responsibility for the security of your metabase instance. Do you accept these responsibilities ?",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask to question: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to responsibilities")
}
return nil
}
log.Warn("CrowdSec takes no responsibility for the security of your metabase instance. You used force yes, so you accept this disclaimer")
return nil
}
func checkGroups(forceYes *bool) (*user.Group, error) {
groupExist := false
dockerGroup, err := user.LookupGroup(crowdsecGroup)
if err == nil {
groupExist = true
}
if !groupExist {
if !*forceYes {
var answer bool
prompt := &survey.Confirm{
Message: fmt.Sprintf("For metabase docker to be able to access SQLite file we need to add a new group called '%s' to the system, is it ok for you ?", crowdsecGroup),
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return dockerGroup, fmt.Errorf("unable to ask to question: %s", err)
}
if !answer {
return dockerGroup, fmt.Errorf("unable to continue without creating '%s' group", crowdsecGroup)
}
}
groupAddCmd, err := exec.LookPath("groupadd")
if err != nil {
return dockerGroup, fmt.Errorf("unable to find 'groupadd' command, can't continue")
} |
groupAdd := &exec.Cmd{Path: groupAddCmd, Args: []string{groupAddCmd, crowdsecGroup}}
if err := groupAdd.Run(); err != nil {
return dockerGroup, fmt.Errorf("unable to add group '%s': %s", dockerGroup, err) | random_line_split |
|
dashboard.go | metabaseConfigFolderPath := filepath.Join(csConfig.ConfigPaths.ConfigDir, metabaseConfigFolder)
metabaseConfigPath = filepath.Join(metabaseConfigFolderPath, metabaseConfigFile)
if err := os.MkdirAll(metabaseConfigFolderPath, os.ModePerm); err != nil {
return err
}
if err := require.DB(csConfig); err != nil {
return err
}
/*
Old container name was "/crowdsec-metabase" but podman doesn't
allow '/' in container name. We do this check to not break
existing dashboard setup.
*/
if !metabase.IsContainerExist(metabaseContainerID) {
oldContainerID := fmt.Sprintf("/%s", metabaseContainerID)
if metabase.IsContainerExist(oldContainerID) {
metabaseContainerID = oldContainerID
}
}
return nil
},
}
cmdDashboard.AddCommand(NewDashboardSetupCmd())
cmdDashboard.AddCommand(NewDashboardStartCmd())
cmdDashboard.AddCommand(NewDashboardStopCmd())
cmdDashboard.AddCommand(NewDashboardShowPasswordCmd())
cmdDashboard.AddCommand(NewDashboardRemoveCmd())
return cmdDashboard
}
func NewDashboardSetupCmd() *cobra.Command {
var force bool
var cmdDashSetup = &cobra.Command{
Use: "setup",
Short: "Setup a metabase container.",
Long: `Perform a metabase docker setup, download standard dashboards, create a fresh user and start the container`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
Example: `
cscli dashboard setup
cscli dashboard setup --listen 0.0.0.0
cscli dashboard setup -l 0.0.0.0 -p 443 --password <password>
`,
RunE: func(cmd *cobra.Command, args []string) error {
if metabaseDbPath == "" {
metabaseDbPath = csConfig.ConfigPaths.DataDir
}
if metabasePassword == "" {
isValid := passwordIsValid(metabasePassword)
for !isValid {
metabasePassword = generatePassword(16)
isValid = passwordIsValid(metabasePassword)
}
}
if err := checkSystemMemory(&forceYes); err != nil {
return err
}
warnIfNotLoopback(metabaseListenAddress)
if err := disclaimer(&forceYes); err != nil {
return err
}
dockerGroup, err := checkGroups(&forceYes)
if err != nil {
return err
}
mb, err := metabase.SetupMetabase(csConfig.API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDbPath, dockerGroup.Gid, metabaseContainerID, metabaseImage)
if err != nil {
return err
}
if err := mb.DumpConfig(metabaseConfigPath); err != nil {
return err
}
log.Infof("Metabase is ready")
fmt.Println()
fmt.Printf("\tURL : '%s'\n", mb.Config.ListenURL)
fmt.Printf("\tusername : '%s'\n", mb.Config.Username)
fmt.Printf("\tpassword : '%s'\n", mb.Config.Password)
return nil
},
}
cmdDashSetup.Flags().BoolVarP(&force, "force", "f", false, "Force setup : override existing files")
cmdDashSetup.Flags().StringVarP(&metabaseDbPath, "dir", "d", "", "Shared directory with metabase container")
cmdDashSetup.Flags().StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container")
cmdDashSetup.Flags().StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use")
cmdDashSetup.Flags().StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container")
cmdDashSetup.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
//cmdDashSetup.Flags().StringVarP(&metabaseUser, "user", "u", "[email protected]", "metabase user")
cmdDashSetup.Flags().StringVar(&metabasePassword, "password", "", "metabase password")
return cmdDashSetup
}
func NewDashboardStartCmd() *cobra.Command | log.Infof("url : http://%s:%s", mb.Config.ListenAddr, mb.Config.ListenPort)
return nil
},
}
cmdDashStart.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
return cmdDashStart
}
func NewDashboardStopCmd() *cobra.Command {
var cmdDashStop = &cobra.Command{
Use: "stop",
Short: "Stops the metabase container.",
Long: `Stops the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := metabase.StopContainer(metabaseContainerID); err != nil {
return fmt.Errorf("unable to stop container '%s': %s", metabaseContainerID, err)
}
return nil
},
}
return cmdDashStop
}
func NewDashboardShowPasswordCmd() *cobra.Command {
var cmdDashShowPassword = &cobra.Command{Use: "show-password",
Short: "displays password of metabase.",
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
m := metabase.Metabase{}
if err := m.LoadConfig(metabaseConfigPath); err != nil {
return err
}
log.Printf("'%s'", m.Config.Password)
return nil
},
}
return cmdDashShowPassword
}
func NewDashboardRemoveCmd() *cobra.Command {
var force bool
var cmdDashRemove = &cobra.Command{
Use: "remove",
Short: "removes the metabase container.",
Long: `removes the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
Example: `
cscli dashboard remove
cscli dashboard remove --force
`,
RunE: func(cmd *cobra.Command, args []string) error {
if !forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "Do you really want to remove crowdsec dashboard? (all your changes will be lost)",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask to force: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to continue")
}
}
if metabase.IsContainerExist(metabaseContainerID) {
log.Debugf("Stopping container %s", metabaseContainerID)
if err := metabase.StopContainer(metabaseContainerID); err != nil {
log.Warningf("unable to stop container '%s': %s", metabaseContainerID, err)
}
dockerGroup, err := user.LookupGroup(crowdsecGroup)
if err == nil { // if group exists, remove it
groupDelCmd, err := exec.LookPath("groupdel")
if err != nil {
return fmt.Errorf("unable to find 'groupdel' command, can't continue")
}
groupDel := &exec.Cmd{Path: groupDelCmd, Args: []string{groupDelCmd, crowdsecGroup}}
if err := groupDel.Run(); err != nil {
log.Warnf("unable to delete group '%s': %s", dockerGroup, err)
}
}
log.Debugf("Removing container %s", metabaseContainerID)
if err := metabase.RemoveContainer(metabaseContainerID); err != nil {
log.Warnf("unable to remove container '%s': %s", metabaseContainerID, err)
}
log.Infof("container %s stopped | {
var cmdDashStart = &cobra.Command{
Use: "start",
Short: "Start the metabase container.",
Long: `Starts the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
mb, err := metabase.NewMetabase(metabaseConfigPath, metabaseContainerID)
if err != nil {
return err
}
warnIfNotLoopback(mb.Config.ListenAddr)
if err := disclaimer(&forceYes); err != nil {
return err
}
if err := mb.Container.Start(); err != nil {
return fmt.Errorf("failed to start metabase container : %s", err)
}
log.Infof("Started metabase") | identifier_body |
main.go | pprofile/v1alpha1"
secprofnodestatusv1alpha1 "sigs.k8s.io/security-profiles-operator/api/secprofnodestatus/v1alpha1"
selinuxprofilev1alpha1 "sigs.k8s.io/security-profiles-operator/api/selinuxprofile/v1alpha1"
"sigs.k8s.io/security-profiles-operator/internal/pkg/config"
"sigs.k8s.io/security-profiles-operator/internal/pkg/controller"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/enricher"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/metrics"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/profilerecorder"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/seccompprofile"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/selinuxprofile"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/server"
nodestatus "sigs.k8s.io/security-profiles-operator/internal/pkg/manager/nodestatus"
"sigs.k8s.io/security-profiles-operator/internal/pkg/manager/spod"
"sigs.k8s.io/security-profiles-operator/internal/pkg/manager/workloadannotator"
"sigs.k8s.io/security-profiles-operator/internal/pkg/nonrootenabler"
"sigs.k8s.io/security-profiles-operator/internal/pkg/version"
"sigs.k8s.io/security-profiles-operator/internal/pkg/webhooks/binding"
"sigs.k8s.io/security-profiles-operator/internal/pkg/webhooks/recording"
)
const (
jsonFlag string = "json"
selinuxFlag string = "with-selinux"
defaultWebhookPort int = 9443
)
var (
sync = time.Second * 30
setupLog = ctrl.Log.WithName("setup")
)
func main() {
ctrl.SetLogger(klogr.New())
app := cli.NewApp() | "to manage their seccomp or AppArmor profiles and apply them to Kubernetes' workloads."
app.Version = version.Get().Version
app.Commands = cli.Commands{
&cli.Command{
Name: "version",
Aliases: []string{"v"},
Usage: "display detailed version information",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: jsonFlag,
Aliases: []string{"j"},
Usage: "print JSON instead of text",
},
},
Action: func(c *cli.Context) error {
v := version.Get()
res := v.String()
if c.Bool(jsonFlag) {
j, err := v.JSONString()
if err != nil {
return errors.Wrap(err, "unable to generate JSON from version info")
}
res = j
}
print(res)
return nil
},
},
&cli.Command{
Name: "manager",
Aliases: []string{"m"},
Usage: "run the manager",
Action: runManager,
},
&cli.Command{
Name: "daemon",
Aliases: []string{"d"},
Usage: "run the daemon",
Action: runDaemon,
Flags: []cli.Flag{
&cli.BoolFlag{
Name: selinuxFlag,
Usage: "Listen for SELinux API resources",
Value: false,
},
},
},
&cli.Command{
Name: "webhook",
Aliases: []string{"w"},
Usage: "run the webhook",
Action: runWebhook,
Flags: []cli.Flag{
&cli.IntFlag{
Name: "port",
Aliases: []string{"p"},
Value: defaultWebhookPort,
Usage: "the port on which to expose the webhook service (default 9443)",
},
},
},
&cli.Command{
Name: "non-root-enabler",
Usage: "run the non root enabler",
Action: runNonRootEnabler,
},
&cli.Command{
Name: "log-enricher",
Aliases: []string{"l"},
Usage: "run the audit's log enricher",
Action: runLogEnricher,
},
}
if err := app.Run(os.Args); err != nil {
setupLog.Error(err, "running security-profiles-operator")
os.Exit(1)
}
}
func printInfo(component string) {
setupLog.Info(
fmt.Sprintf("starting component: %s", component),
version.Get().AsKeyValues()...,
)
}
func runManager(ctx *cli.Context) error {
printInfo("security-profiles-operator")
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
sigHandler := ctrl.SetupSignalHandler()
ctrlOpts := manager.Options{
SyncPeriod: &sync,
LeaderElection: true,
LeaderElectionID: "security-profiles-operator-lock",
}
setControllerOptionsForNamespaces(&ctrlOpts)
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create cluster manager")
}
if err := profilebindingv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add profilebinding API to scheme")
}
if err := seccompprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add seccompprofile API to scheme")
}
if err := selinuxprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add selinuxprofile API to scheme")
}
if err := monitoringv1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add ServiceMonitor API to scheme")
}
if err := setupEnabledControllers(ctx.Context, []controller.Controller{
nodestatus.NewController(),
spod.NewController(),
workloadannotator.NewController(),
}, mgr, nil); err != nil {
return errors.Wrap(err, "enable controllers")
}
setupLog.Info("starting manager")
if err := mgr.Start(sigHandler); err != nil {
return errors.Wrap(err, "controller manager error")
}
setupLog.Info("ending manager")
return nil
}
func setControllerOptionsForNamespaces(opts *ctrl.Options) {
namespace := os.Getenv(config.RestrictNamespaceEnvKey)
// listen globally
if namespace == "" {
opts.Namespace = namespace
return
}
// ensure we listen to our own namespace
if !strings.Contains(namespace, config.GetOperatorNamespace()) {
namespace = namespace + "," + config.GetOperatorNamespace()
}
namespaceList := strings.Split(namespace, ",")
// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2)
// Note that this is not intended to be used for excluding namespaces, this is better done via a Predicate
// Also note that you may face performance issues when using this with a high number of namespaces.
// More Info: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder
// Adding "" adds cluster namespaced resources
if strings.Contains(namespace, ",") {
opts.NewCache = cache.MultiNamespacedCacheBuilder(namespaceList)
} else {
// listen to a specific namespace only
opts.Namespace = namespace
}
}
func getEnabledControllers(ctx *cli.Context) []controller.Controller {
controllers := []controller.Controller{
seccompprofile.NewController(),
profilerecorder.NewController(),
}
if ctx.Bool(selinuxFlag) {
controllers = append(controllers, selinuxprofile.NewController())
}
return controllers
}
func runDaemon(ctx *cli.Context) error {
// security-profiles-operator-daemon
printInfo("spod")
enabledControllers := getEnabledControllers(ctx)
if len(enabledControllers) == 0 {
return errors.New("no controllers enabled")
}
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
sigHandler := ctrl.SetupSignalHandler()
ctrlOpts := ctrl.Options{
SyncPeriod: &sync,
HealthProbeBindAddress: fmt.Sprintf(":%d", config.HealthProbePort),
}
setControllerOptionsForNamespaces(&ctrlOpts)
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create manager")
}
// Setup metrics
met := metrics.New()
if err := met.Register(); err != nil {
return errors.Wrap(err, "register metrics")
}
if err := mgr.AddMetricsExtraHandler(metrics.HandlerPath, met.Handler()); err != nil {
return errors.Wrap(err, "add metrics extra handler")
}
// Setup the GRPC server
if err := server.New(ctrl.Log.WithName("server"), met).Start(); err != nil {
return errors.Wrap(err, "start GRPC server")
}
// This API provides status which is used by both seccomp and selinux
if err | app.Name = config.OperatorName
app.Usage = "Kubernetes Security Profiles Operator"
app.Description = "The Security Profiles Operator makes it easier for cluster admins " + | random_line_split |
main.go | pprofile/v1alpha1"
secprofnodestatusv1alpha1 "sigs.k8s.io/security-profiles-operator/api/secprofnodestatus/v1alpha1"
selinuxprofilev1alpha1 "sigs.k8s.io/security-profiles-operator/api/selinuxprofile/v1alpha1"
"sigs.k8s.io/security-profiles-operator/internal/pkg/config"
"sigs.k8s.io/security-profiles-operator/internal/pkg/controller"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/enricher"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/metrics"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/profilerecorder"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/seccompprofile"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/selinuxprofile"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/server"
nodestatus "sigs.k8s.io/security-profiles-operator/internal/pkg/manager/nodestatus"
"sigs.k8s.io/security-profiles-operator/internal/pkg/manager/spod"
"sigs.k8s.io/security-profiles-operator/internal/pkg/manager/workloadannotator"
"sigs.k8s.io/security-profiles-operator/internal/pkg/nonrootenabler"
"sigs.k8s.io/security-profiles-operator/internal/pkg/version"
"sigs.k8s.io/security-profiles-operator/internal/pkg/webhooks/binding"
"sigs.k8s.io/security-profiles-operator/internal/pkg/webhooks/recording"
)
const (
jsonFlag string = "json"
selinuxFlag string = "with-selinux"
defaultWebhookPort int = 9443
)
var (
sync = time.Second * 30
setupLog = ctrl.Log.WithName("setup")
)
func main() {
ctrl.SetLogger(klogr.New())
app := cli.NewApp()
app.Name = config.OperatorName
app.Usage = "Kubernetes Security Profiles Operator"
app.Description = "The Security Profiles Operator makes it easier for cluster admins " +
"to manage their seccomp or AppArmor profiles and apply them to Kubernetes' workloads."
app.Version = version.Get().Version
app.Commands = cli.Commands{
&cli.Command{
Name: "version",
Aliases: []string{"v"},
Usage: "display detailed version information",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: jsonFlag,
Aliases: []string{"j"},
Usage: "print JSON instead of text",
},
},
Action: func(c *cli.Context) error {
v := version.Get()
res := v.String()
if c.Bool(jsonFlag) {
j, err := v.JSONString()
if err != nil {
return errors.Wrap(err, "unable to generate JSON from version info")
}
res = j
}
print(res)
return nil
},
},
&cli.Command{
Name: "manager",
Aliases: []string{"m"},
Usage: "run the manager",
Action: runManager,
},
&cli.Command{
Name: "daemon",
Aliases: []string{"d"},
Usage: "run the daemon",
Action: runDaemon,
Flags: []cli.Flag{
&cli.BoolFlag{
Name: selinuxFlag,
Usage: "Listen for SELinux API resources",
Value: false,
},
},
},
&cli.Command{
Name: "webhook",
Aliases: []string{"w"},
Usage: "run the webhook",
Action: runWebhook,
Flags: []cli.Flag{
&cli.IntFlag{
Name: "port",
Aliases: []string{"p"},
Value: defaultWebhookPort,
Usage: "the port on which to expose the webhook service (default 9443)",
},
},
},
&cli.Command{
Name: "non-root-enabler",
Usage: "run the non root enabler",
Action: runNonRootEnabler,
},
&cli.Command{
Name: "log-enricher",
Aliases: []string{"l"},
Usage: "run the audit's log enricher",
Action: runLogEnricher,
},
}
if err := app.Run(os.Args); err != nil {
setupLog.Error(err, "running security-profiles-operator")
os.Exit(1)
}
}
func printInfo(component string) {
setupLog.Info(
fmt.Sprintf("starting component: %s", component),
version.Get().AsKeyValues()...,
)
}
func runManager(ctx *cli.Context) error {
printInfo("security-profiles-operator")
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
sigHandler := ctrl.SetupSignalHandler()
ctrlOpts := manager.Options{
SyncPeriod: &sync,
LeaderElection: true,
LeaderElectionID: "security-profiles-operator-lock",
}
setControllerOptionsForNamespaces(&ctrlOpts)
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create cluster manager")
}
if err := profilebindingv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add profilebinding API to scheme")
}
if err := seccompprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add seccompprofile API to scheme")
}
if err := selinuxprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add selinuxprofile API to scheme")
}
if err := monitoringv1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add ServiceMonitor API to scheme")
}
if err := setupEnabledControllers(ctx.Context, []controller.Controller{
nodestatus.NewController(),
spod.NewController(),
workloadannotator.NewController(),
}, mgr, nil); err != nil {
return errors.Wrap(err, "enable controllers")
}
setupLog.Info("starting manager")
if err := mgr.Start(sigHandler); err != nil {
return errors.Wrap(err, "controller manager error")
}
setupLog.Info("ending manager")
return nil
}
func setControllerOptionsForNamespaces(opts *ctrl.Options) {
namespace := os.Getenv(config.RestrictNamespaceEnvKey)
// listen globally
if namespace == "" {
opts.Namespace = namespace
return
}
// ensure we listen to our own namespace
if !strings.Contains(namespace, config.GetOperatorNamespace()) {
namespace = namespace + "," + config.GetOperatorNamespace()
}
namespaceList := strings.Split(namespace, ",")
// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2)
// Note that this is not intended to be used for excluding namespaces, this is better done via a Predicate
// Also note that you may face performance issues when using this with a high number of namespaces.
// More Info: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder
// Adding "" adds cluster namespaced resources
if strings.Contains(namespace, ",") {
opts.NewCache = cache.MultiNamespacedCacheBuilder(namespaceList)
} else {
// listen to a specific namespace only
opts.Namespace = namespace
}
}
func | (ctx *cli.Context) []controller.Controller {
controllers := []controller.Controller{
seccompprofile.NewController(),
profilerecorder.NewController(),
}
if ctx.Bool(selinuxFlag) {
controllers = append(controllers, selinuxprofile.NewController())
}
return controllers
}
func runDaemon(ctx *cli.Context) error {
// security-profiles-operator-daemon
printInfo("spod")
enabledControllers := getEnabledControllers(ctx)
if len(enabledControllers) == 0 {
return errors.New("no controllers enabled")
}
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
sigHandler := ctrl.SetupSignalHandler()
ctrlOpts := ctrl.Options{
SyncPeriod: &sync,
HealthProbeBindAddress: fmt.Sprintf(":%d", config.HealthProbePort),
}
setControllerOptionsForNamespaces(&ctrlOpts)
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create manager")
}
// Setup metrics
met := metrics.New()
if err := met.Register(); err != nil {
return errors.Wrap(err, "register metrics")
}
if err := mgr.AddMetricsExtraHandler(metrics.HandlerPath, met.Handler()); err != nil {
return errors.Wrap(err, "add metrics extra handler")
}
// Setup the GRPC server
if err := server.New(ctrl.Log.WithName("server"), met).Start(); err != nil {
return errors.Wrap(err, "start GRPC server")
}
// This API provides status which is used by both seccomp and selinux
if | getEnabledControllers | identifier_name |
main.go | pprofile/v1alpha1"
secprofnodestatusv1alpha1 "sigs.k8s.io/security-profiles-operator/api/secprofnodestatus/v1alpha1"
selinuxprofilev1alpha1 "sigs.k8s.io/security-profiles-operator/api/selinuxprofile/v1alpha1"
"sigs.k8s.io/security-profiles-operator/internal/pkg/config"
"sigs.k8s.io/security-profiles-operator/internal/pkg/controller"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/enricher"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/metrics"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/profilerecorder"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/seccompprofile"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/selinuxprofile"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/server"
nodestatus "sigs.k8s.io/security-profiles-operator/internal/pkg/manager/nodestatus"
"sigs.k8s.io/security-profiles-operator/internal/pkg/manager/spod"
"sigs.k8s.io/security-profiles-operator/internal/pkg/manager/workloadannotator"
"sigs.k8s.io/security-profiles-operator/internal/pkg/nonrootenabler"
"sigs.k8s.io/security-profiles-operator/internal/pkg/version"
"sigs.k8s.io/security-profiles-operator/internal/pkg/webhooks/binding"
"sigs.k8s.io/security-profiles-operator/internal/pkg/webhooks/recording"
)
const (
jsonFlag string = "json"
selinuxFlag string = "with-selinux"
defaultWebhookPort int = 9443
)
var (
sync = time.Second * 30
setupLog = ctrl.Log.WithName("setup")
)
func main() {
ctrl.SetLogger(klogr.New())
app := cli.NewApp()
app.Name = config.OperatorName
app.Usage = "Kubernetes Security Profiles Operator"
app.Description = "The Security Profiles Operator makes it easier for cluster admins " +
"to manage their seccomp or AppArmor profiles and apply them to Kubernetes' workloads."
app.Version = version.Get().Version
app.Commands = cli.Commands{
&cli.Command{
Name: "version",
Aliases: []string{"v"},
Usage: "display detailed version information",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: jsonFlag,
Aliases: []string{"j"},
Usage: "print JSON instead of text",
},
},
Action: func(c *cli.Context) error {
v := version.Get()
res := v.String()
if c.Bool(jsonFlag) {
j, err := v.JSONString()
if err != nil {
return errors.Wrap(err, "unable to generate JSON from version info")
}
res = j
}
print(res)
return nil
},
},
&cli.Command{
Name: "manager",
Aliases: []string{"m"},
Usage: "run the manager",
Action: runManager,
},
&cli.Command{
Name: "daemon",
Aliases: []string{"d"},
Usage: "run the daemon",
Action: runDaemon,
Flags: []cli.Flag{
&cli.BoolFlag{
Name: selinuxFlag,
Usage: "Listen for SELinux API resources",
Value: false,
},
},
},
&cli.Command{
Name: "webhook",
Aliases: []string{"w"},
Usage: "run the webhook",
Action: runWebhook,
Flags: []cli.Flag{
&cli.IntFlag{
Name: "port",
Aliases: []string{"p"},
Value: defaultWebhookPort,
Usage: "the port on which to expose the webhook service (default 9443)",
},
},
},
&cli.Command{
Name: "non-root-enabler",
Usage: "run the non root enabler",
Action: runNonRootEnabler,
},
&cli.Command{
Name: "log-enricher",
Aliases: []string{"l"},
Usage: "run the audit's log enricher",
Action: runLogEnricher,
},
}
if err := app.Run(os.Args); err != nil {
setupLog.Error(err, "running security-profiles-operator")
os.Exit(1)
}
}
func printInfo(component string) {
setupLog.Info(
fmt.Sprintf("starting component: %s", component),
version.Get().AsKeyValues()...,
)
}
func runManager(ctx *cli.Context) error {
printInfo("security-profiles-operator")
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
sigHandler := ctrl.SetupSignalHandler()
ctrlOpts := manager.Options{
SyncPeriod: &sync,
LeaderElection: true,
LeaderElectionID: "security-profiles-operator-lock",
}
setControllerOptionsForNamespaces(&ctrlOpts)
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create cluster manager")
}
if err := profilebindingv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add profilebinding API to scheme")
}
if err := seccompprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add seccompprofile API to scheme")
}
if err := selinuxprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add selinuxprofile API to scheme")
}
if err := monitoringv1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add ServiceMonitor API to scheme")
}
if err := setupEnabledControllers(ctx.Context, []controller.Controller{
nodestatus.NewController(),
spod.NewController(),
workloadannotator.NewController(),
}, mgr, nil); err != nil {
return errors.Wrap(err, "enable controllers")
}
setupLog.Info("starting manager")
if err := mgr.Start(sigHandler); err != nil {
return errors.Wrap(err, "controller manager error")
}
setupLog.Info("ending manager")
return nil
}
func setControllerOptionsForNamespaces(opts *ctrl.Options) | if strings.Contains(namespace, ",") {
opts.NewCache = cache.MultiNamespacedCacheBuilder(namespaceList)
} else {
// listen to a specific namespace only
opts.Namespace = namespace
}
}
func getEnabledControllers(ctx *cli.Context) []controller.Controller {
controllers := []controller.Controller{
seccompprofile.NewController(),
profilerecorder.NewController(),
}
if ctx.Bool(selinuxFlag) {
controllers = append(controllers, selinuxprofile.NewController())
}
return controllers
}
func runDaemon(ctx *cli.Context) error {
// security-profiles-operator-daemon
printInfo("spod")
enabledControllers := getEnabledControllers(ctx)
if len(enabledControllers) == 0 {
return errors.New("no controllers enabled")
}
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
sigHandler := ctrl.SetupSignalHandler()
ctrlOpts := ctrl.Options{
SyncPeriod: &sync,
HealthProbeBindAddress: fmt.Sprintf(":%d", config.HealthProbePort),
}
setControllerOptionsForNamespaces(&ctrlOpts)
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create manager")
}
// Setup metrics
met := metrics.New()
if err := met.Register(); err != nil {
return errors.Wrap(err, "register metrics")
}
if err := mgr.AddMetricsExtraHandler(metrics.HandlerPath, met.Handler()); err != nil {
return errors.Wrap(err, "add metrics extra handler")
}
// Setup the GRPC server
if err := server.New(ctrl.Log.WithName("server"), met).Start(); err != nil {
return errors.Wrap(err, "start GRPC server")
}
// This API provides status which is used by both seccomp and selinux
if err | {
namespace := os.Getenv(config.RestrictNamespaceEnvKey)
// listen globally
if namespace == "" {
opts.Namespace = namespace
return
}
// ensure we listen to our own namespace
if !strings.Contains(namespace, config.GetOperatorNamespace()) {
namespace = namespace + "," + config.GetOperatorNamespace()
}
namespaceList := strings.Split(namespace, ",")
// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2)
// Note that this is not intended to be used for excluding namespaces, this is better done via a Predicate
// Also note that you may face performance issues when using this with a high number of namespaces.
// More Info: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder
// Adding "" adds cluster namespaced resources | identifier_body |
main.go | .io/security-profiles-operator/internal/pkg/daemon/enricher"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/metrics"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/profilerecorder"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/seccompprofile"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/selinuxprofile"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/server"
nodestatus "sigs.k8s.io/security-profiles-operator/internal/pkg/manager/nodestatus"
"sigs.k8s.io/security-profiles-operator/internal/pkg/manager/spod"
"sigs.k8s.io/security-profiles-operator/internal/pkg/manager/workloadannotator"
"sigs.k8s.io/security-profiles-operator/internal/pkg/nonrootenabler"
"sigs.k8s.io/security-profiles-operator/internal/pkg/version"
"sigs.k8s.io/security-profiles-operator/internal/pkg/webhooks/binding"
"sigs.k8s.io/security-profiles-operator/internal/pkg/webhooks/recording"
)
const (
jsonFlag string = "json"
selinuxFlag string = "with-selinux"
defaultWebhookPort int = 9443
)
var (
sync = time.Second * 30
setupLog = ctrl.Log.WithName("setup")
)
func main() {
ctrl.SetLogger(klogr.New())
app := cli.NewApp()
app.Name = config.OperatorName
app.Usage = "Kubernetes Security Profiles Operator"
app.Description = "The Security Profiles Operator makes it easier for cluster admins " +
"to manage their seccomp or AppArmor profiles and apply them to Kubernetes' workloads."
app.Version = version.Get().Version
app.Commands = cli.Commands{
&cli.Command{
Name: "version",
Aliases: []string{"v"},
Usage: "display detailed version information",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: jsonFlag,
Aliases: []string{"j"},
Usage: "print JSON instead of text",
},
},
Action: func(c *cli.Context) error {
v := version.Get()
res := v.String()
if c.Bool(jsonFlag) {
j, err := v.JSONString()
if err != nil {
return errors.Wrap(err, "unable to generate JSON from version info")
}
res = j
}
print(res)
return nil
},
},
&cli.Command{
Name: "manager",
Aliases: []string{"m"},
Usage: "run the manager",
Action: runManager,
},
&cli.Command{
Name: "daemon",
Aliases: []string{"d"},
Usage: "run the daemon",
Action: runDaemon,
Flags: []cli.Flag{
&cli.BoolFlag{
Name: selinuxFlag,
Usage: "Listen for SELinux API resources",
Value: false,
},
},
},
&cli.Command{
Name: "webhook",
Aliases: []string{"w"},
Usage: "run the webhook",
Action: runWebhook,
Flags: []cli.Flag{
&cli.IntFlag{
Name: "port",
Aliases: []string{"p"},
Value: defaultWebhookPort,
Usage: "the port on which to expose the webhook service (default 9443)",
},
},
},
&cli.Command{
Name: "non-root-enabler",
Usage: "run the non root enabler",
Action: runNonRootEnabler,
},
&cli.Command{
Name: "log-enricher",
Aliases: []string{"l"},
Usage: "run the audit's log enricher",
Action: runLogEnricher,
},
}
if err := app.Run(os.Args); err != nil {
setupLog.Error(err, "running security-profiles-operator")
os.Exit(1)
}
}
func printInfo(component string) {
setupLog.Info(
fmt.Sprintf("starting component: %s", component),
version.Get().AsKeyValues()...,
)
}
func runManager(ctx *cli.Context) error {
printInfo("security-profiles-operator")
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
sigHandler := ctrl.SetupSignalHandler()
ctrlOpts := manager.Options{
SyncPeriod: &sync,
LeaderElection: true,
LeaderElectionID: "security-profiles-operator-lock",
}
setControllerOptionsForNamespaces(&ctrlOpts)
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create cluster manager")
}
if err := profilebindingv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add profilebinding API to scheme")
}
if err := seccompprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add seccompprofile API to scheme")
}
if err := selinuxprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add selinuxprofile API to scheme")
}
if err := monitoringv1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add ServiceMonitor API to scheme")
}
if err := setupEnabledControllers(ctx.Context, []controller.Controller{
nodestatus.NewController(),
spod.NewController(),
workloadannotator.NewController(),
}, mgr, nil); err != nil {
return errors.Wrap(err, "enable controllers")
}
setupLog.Info("starting manager")
if err := mgr.Start(sigHandler); err != nil {
return errors.Wrap(err, "controller manager error")
}
setupLog.Info("ending manager")
return nil
}
func setControllerOptionsForNamespaces(opts *ctrl.Options) {
namespace := os.Getenv(config.RestrictNamespaceEnvKey)
// listen globally
if namespace == "" {
opts.Namespace = namespace
return
}
// ensure we listen to our own namespace
if !strings.Contains(namespace, config.GetOperatorNamespace()) {
namespace = namespace + "," + config.GetOperatorNamespace()
}
namespaceList := strings.Split(namespace, ",")
// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2)
// Note that this is not intended to be used for excluding namespaces, this is better done via a Predicate
// Also note that you may face performance issues when using this with a high number of namespaces.
// More Info: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder
// Adding "" adds cluster namespaced resources
if strings.Contains(namespace, ",") {
opts.NewCache = cache.MultiNamespacedCacheBuilder(namespaceList)
} else {
// listen to a specific namespace only
opts.Namespace = namespace
}
}
func getEnabledControllers(ctx *cli.Context) []controller.Controller {
controllers := []controller.Controller{
seccompprofile.NewController(),
profilerecorder.NewController(),
}
if ctx.Bool(selinuxFlag) {
controllers = append(controllers, selinuxprofile.NewController())
}
return controllers
}
func runDaemon(ctx *cli.Context) error {
// security-profiles-operator-daemon
printInfo("spod")
enabledControllers := getEnabledControllers(ctx)
if len(enabledControllers) == 0 {
return errors.New("no controllers enabled")
}
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
sigHandler := ctrl.SetupSignalHandler()
ctrlOpts := ctrl.Options{
SyncPeriod: &sync,
HealthProbeBindAddress: fmt.Sprintf(":%d", config.HealthProbePort),
}
setControllerOptionsForNamespaces(&ctrlOpts)
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create manager")
}
// Setup metrics
met := metrics.New()
if err := met.Register(); err != nil {
return errors.Wrap(err, "register metrics")
}
if err := mgr.AddMetricsExtraHandler(metrics.HandlerPath, met.Handler()); err != nil {
return errors.Wrap(err, "add metrics extra handler")
}
// Setup the GRPC server
if err := server.New(ctrl.Log.WithName("server"), met).Start(); err != nil {
return errors.Wrap(err, "start GRPC server")
}
// This API provides status which is used by both seccomp and selinux
if err := secprofnodestatusv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add per-node Status API to scheme")
}
if err := setupEnabledControllers(ctx.Context, enabledControllers, mgr, met); err != nil {
return errors.Wrap(err, "enable controllers")
}
setupLog.Info("starting daemon")
if err := mgr.Start(sigHandler); err != nil | {
return errors.Wrap(err, "SPOd error")
} | conditional_block |