code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
from aqopa.model.parser.lex_yacc import LexYaccParserExtension
from aqopa.model import Function
class Builder():
    """
    Builder for creating function objects
    """

    def build_function(self, token):
        """
        Build a Function model object from one of the grammar productions:

        FUN IDENTIFIER function_params SEMICOLON
        FUN IDENTIFIER function_params function_comment SEMICOLON
        FUN IDENTIFIER function_params function_qopml_params SEMICOLON
        FUN IDENTIFIER function_params function_qopml_params function_comment SEMICOLON
        """
        # BUGFIX: the original checked ``isinstance(token[4], unicode)``,
        # which raises NameError on Python 3; build a 2/3-compatible tuple.
        try:
            string_types = (str, unicode)  # noqa: F821 -- Python 2
        except NameError:
            string_types = (str,)  # Python 3

        f = Function(token[2], params=token[3])
        if len(token) == 6:
            # The fourth element is either the qopml parameter list or
            # the free-text function comment.
            if isinstance(token[4], list):
                f.qop_params = token[4]
            elif isinstance(token[4], string_types):
                f.comment = token[4]
        if len(token) == 7:
            # Both qopml params and the comment are present.
            f.qop_params = token[4]
            f.comment = token[5]
        return f
class ModelParserExtension(LexYaccParserExtension):
    """
    Extension for parsing functions
    """

    def __init__(self):
        LexYaccParserExtension.__init__(self)

        # Nesting depth of '{' ... '}' while inside the 'functions' lexer
        # state; reaching zero again pops the state.
        self.open_blocks_cnt = 0
        # Number of '(' seen since the last 'fun' keyword; the second
        # parenthesis pair of a declaration opens the free-text comment.
        self.fun_left_brackets_cnt = 0

        self.builder = Builder()

    ##########################################
    #             RESERVED WORDS
    ##########################################

    def word_functions_specification(self, t):
        # Enter the lexer state for the "functions { ... }" section.
        t.lexer.push_state('functions')
        return t

    def word_fun(self, t):
        # A new function declaration starts: reset the '(' counter so the
        # second pair (the optional comment) is detected in token_lparan().
        self.fun_left_brackets_cnt = 0
        return t

    ##########################################
    #                 TOKENS
    ##########################################

    def token_block_open(self, t):
        r'{'
        self.open_blocks_cnt += 1
        return t

    def token_block_close(self, t):
        r'}'
        self.open_blocks_cnt -= 1
        if self.open_blocks_cnt == 0:
            # Closed the outermost block: leave the 'functions' state.
            t.lexer.pop_state()
        return t

    def token_lparan(self, t):
        r'\('
        self.fun_left_brackets_cnt += 1
        if self.fun_left_brackets_cnt == 2:
            # The second '(' of a declaration starts the comment text.
            t.lexer.push_state('functioncomment')
        return t

    # Function Comment State

    def token_funcomment_error(self, t):
        self.parser.t_error(t)

    def token_funcomment_rparan(self, t):
        r'\)'
        # ')' ends the comment: return to the previous lexer state.
        t.lexer.pop_state()
        return t

    def t_newline(self, t):
        r'\n+'
        # Keep line numbers accurate inside the comment state.
        t.lexer.lineno += t.value.count("\n")

    ##########################################
    #                 RULES
    ##########################################
    # NOTE: the docstrings below are the yacc grammar productions and
    # must not be reworded.

    def functions_specification(self, t):
        """
        specification : FUNCTIONS_SPECIFICATION BLOCKOPEN functions_list BLOCKCLOSE
        """
        pass

    def functions_list(self, t):
        """
        functions_list : function
                    | functions_list function
        """
        pass

    def function(self, t):
        """
        function : FUN IDENTIFIER function_params SEMICOLON
                    | FUN IDENTIFIER function_params function_comment SEMICOLON
                    | FUN IDENTIFIER function_params function_qopml_params SEMICOLON
                    | FUN IDENTIFIER function_params function_qopml_params function_comment SEMICOLON
        """
        self.parser.store.functions.append(self.builder.build_function(t))

    def function_comment(self, t):
        """
        function_comment : LPARAN COMMENT RPARAN
        """
        t[0] = t[2]

    def function_params(self, t):
        """
        function_params : LPARAN RPARAN
                    | LPARAN identifiers_list RPARAN
        """
        if len(t) > 3:
            t[0] = t[2]
        else:
            # Empty parameter list: '(' ')'.
            t[0] = []

    def function_qopml_params(self, t):
        """
        function_qopml_params : SQLPARAN function_qopml_params_list SQRPARAN
        """
        t[0] = t[2]

    def function_qopml_params_list(self, t):
        """
        function_qopml_params_list : function_qop_param
                    | function_qopml_params_list SEMICOLON function_qop_param
        """
        if len(t) > 2:
            t[0] = t[1]
            # BUGFIX: append the parsed parameter t[3]; the original code
            # appended t[2], which is the SEMICOLON separator token.
            t[0].append(t[3])
        else:
            t[0] = [t[1]]

    def function_qop_param(self, t):
        """
        function_qop_param : IDENTIFIER COLON identifiers_list
        """
        t[0] = (t[1], t[3])

    def _extend(self):
        self.parser.add_state('functions', 'inclusive')
        self.parser.add_state('functioncomment', 'exclusive')

        self.parser.add_reserved_word('functions', 'FUNCTIONS_SPECIFICATION', func=self.word_functions_specification,)
        self.parser.add_reserved_word('fun', 'FUN', func=self.word_fun, state='functions')

        self.parser.add_token('BLOCKOPEN', func=self.token_block_open, states=['functions'])
        self.parser.add_token('BLOCKCLOSE', func=self.token_block_close, states=['functions'])
        self.parser.add_token('LPARAN', func=self.token_lparan, states=['functions'])
        self.parser.add_token('RPARAN', regex=r'\)', states=['functions'])

        # Function comment state: everything between the second pair of
        # parentheses is consumed as a single COMMENT token.
        self.parser.add_token('error', func=self.token_funcomment_error, states=['functioncomment'], include_in_tokens=False)
        self.parser.add_token('ignore', "\t", states=['functioncomment'], include_in_tokens=False)
        self.parser.add_token('newline', func=self.t_newline, states=['functioncomment'], include_in_tokens=False)
        self.parser.add_token('COMMENT', r'[-_A-Za-z0-9 ]+', states=['functioncomment'])
        self.parser.add_token('RPARAN', func=self.token_funcomment_rparan, states=['functioncomment'])

        self.parser.add_rule(self.functions_specification)
        self.parser.add_rule(self.functions_list)
        self.parser.add_rule(self.function)
        self.parser.add_rule(self.function_comment)
        self.parser.add_rule(self.function_params)
        self.parser.add_rule(self.function_qopml_params)
        self.parser.add_rule(self.function_qopml_params_list)
        self.parser.add_rule(self.function_qop_param)
from aqopa.model.parser.lex_yacc import LexYaccParserExtension
from aqopa.model import Channel, TopologyRuleHost, TopologyRule,\
AlgWhile, AlgCallFunction, AlgIf, AlgReturn, AlgAssignment
class Builder():
    """
    Builder for store objects
    """
    # NOTE(review): intentionally(?) empty -- no build methods are defined;
    # the parser extensions below instantiate it only for symmetry with the
    # other grammar modules. Confirm it is meant to stay a placeholder.
class ModelParserExtension(LexYaccParserExtension):
    """
    Extension for parsing functions
    """

    def __init__(self):
        LexYaccParserExtension.__init__(self)

        self.builder = Builder()
        # Nesting depth of '{' ... '}' while inside the 'communication'
        # lexer state; reaching zero pops the state.
        self.open_blocks_cnt = 0

    ##########################################
    #             RESERVED WORDS
    ##########################################

    def word_communication_specification(self, t):
        # Enter the lexer state for the "communication { ... }" section.
        t.lexer.push_state('communication')
        return t

    ##########################################
    #                 TOKENS
    ##########################################

    def token_block_open(self, t):
        r'{'
        self.open_blocks_cnt += 1
        return t

    def token_block_close(self, t):
        r'}'
        self.open_blocks_cnt -= 1
        if self.open_blocks_cnt == 0:
            # Outermost block closed: leave the 'communication' state.
            t.lexer.pop_state()
        return t

    ##########################################
    #                 RULES
    ##########################################
    # NOTE: the docstrings below are the yacc grammar productions and
    # must not be reworded.

    def communication_specification(self, t):
        """
        specification : COMMUNICATION_SPECIFICATION BLOCKOPEN comm_specifications BLOCKCLOSE
        """
        pass

    def comm_specifications(self, t):
        """
        comm_specifications : comm_specification
                    | comm_specifications comm_specification
        """
        pass

    # Medium

    def medium_specification(self, t):
        """
        comm_specification : MEDIUM_SPECIFICATION SQLPARAN IDENTIFIER SQRPARAN BLOCKOPEN medium_elements BLOCKCLOSE
        """
        # Register the medium under its name (t[3]) in the model store.
        self.parser.store.mediums[t[3]] = t[6]

    def medium_elements(self, t):
        """
        medium_elements : medium_default_parameters medium_topology
        """
        t[0] = {
            'topology': t[2],
            'default_parameters': t[1],
        }

    def medium_default_parameters(self, t):
        """
        medium_default_parameters : medium_default_parameter SEMICOLON
                    | medium_default_parameters medium_default_parameter SEMICOLON
        """
        t[0] = t[1]
        if len(t) == 4:
            # Merge the new parameter dict into the accumulated dict.
            t[0].update(t[2])

    def quality_default_parameter(self, t):
        """
        medium_default_parameter : QUALITY_DEFAULT_PARAMETER EQUAL number
        """
        t[0] = {'default_q': t[3]}

    # Topology

    def medium_topology(self, t):
        """
        medium_topology : TOPOLOGY_SPECIFICATION BLOCKOPEN topology_rules_list BLOCKCLOSE
        """
        t[0] = {'rules': t[3]}

    def topology_rules_list(self, t):
        """
        topology_rules_list : topology_rule
                    | topology_rules_list topology_rule
        """
        if len(t) == 3:
            # Recursive case: extend the already-built list.
            t[0] = t[1]
            t[0].append(t[2])
        else:
            # Base case: start a fresh list with the first rule.
            t[0] = []
            t[0].append(t[1])

    def topology_rule_point_to_point(self, t):
        """
        topology_rule : IDENTIFIER topology_arrow IDENTIFIER SEMICOLON
                    | IDENTIFIER topology_arrow IDENTIFIER COLON topology_rule_parameters SEMICOLON
        """
        parameters = {}
        if len(t) == 7:
            # Variant with an explicit parameter list after the colon.
            parameters = t[5]
        l = TopologyRuleHost(t[1])
        r = TopologyRuleHost(t[3])
        t[0] = TopologyRule(l, t[2], r, parameters=parameters)

    def topology_rule_broadcast(self, t):
        """
        topology_rule : IDENTIFIER ARROWRIGHT STAR COLON topology_rule_parameters SEMICOLON
        """
        parameters = {}
        if len(t) == 7:
            # Always true for this single 6-symbol production.
            parameters = t[5]
        l = TopologyRuleHost(t[1])
        # NOTE(review): the right side becomes a host literally named '*';
        # the version-config parser represents broadcast with None instead.
        # Confirm which representation downstream code expects.
        r = TopologyRuleHost(t[3])
        t[0] = TopologyRule(l, t[2], r, parameters=parameters)

    def topology_rule_parameters(self, t):
        """
        topology_rule_parameters : topology_rule_parameter
                    | topology_rule_parameters COMMA topology_rule_parameter
        """
        t[0] = t[1]
        if len(t) == 4:
            t[0].update(t[3])

    def topology_rule_quality_parameter(self, t):
        """
        topology_rule_parameter : Q_PARAMETER EQUAL number
        """
        t[0] = {'q': t[3]}

    def topology_arrow(self, t):
        """
        topology_arrow : ARROWRIGHT
                    | ARROWLEFT
                    | ARROWBOTH
        """
        t[0] = t[1]

    def _extend(self):
        self.parser.add_state('communication', 'inclusive')

        self.parser.add_reserved_word('communication', 'COMMUNICATION_SPECIFICATION',
                                      func=self.word_communication_specification)
        self.parser.add_reserved_word('medium', 'MEDIUM_SPECIFICATION', state='communication',)
        self.parser.add_reserved_word('default_q', 'QUALITY_DEFAULT_PARAMETER', state='communication',)
        self.parser.add_reserved_word('q', 'Q_PARAMETER', state='communication',)
        self.parser.add_reserved_word('topology', 'TOPOLOGY_SPECIFICATION', state='communication',)

        self.parser.add_token('BLOCKOPEN', func=self.token_block_open, states=['communication'])
        self.parser.add_token('BLOCKCLOSE', func=self.token_block_close, states=['communication'])
        # NOTE(review): ARROWBOTH ('<->') is registered after ARROWLEFT
        # ('<-'); if add_token preserves registration order when matching,
        # '<->' may never win -- confirm add_token's ordering semantics.
        self.parser.add_token('ARROWRIGHT', r'\-\>', states=['communication'])
        self.parser.add_token('ARROWLEFT', r'\<\-', states=['communication'])
        self.parser.add_token('ARROWBOTH', r'\<\-\>', states=['communication'])

        self.parser.add_rule(self.communication_specification)
        self.parser.add_rule(self.comm_specifications)
        self.parser.add_rule(self.medium_specification)
        self.parser.add_rule(self.medium_elements)
        self.parser.add_rule(self.medium_default_parameters)
        self.parser.add_rule(self.quality_default_parameter)
        self.parser.add_rule(self.medium_topology)
        self.parser.add_rule(self.topology_rules_list)
        self.parser.add_rule(self.topology_rule_point_to_point)
        self.parser.add_rule(self.topology_rule_broadcast)
        self.parser.add_rule(self.topology_rule_parameters)
        self.parser.add_rule(self.topology_rule_quality_parameter)
        self.parser.add_rule(self.topology_arrow)
class ConfigParserExtension(LexYaccParserExtension):
    """
    Extension for parsing the communication section of a version
    configuration.
    """

    # Python 2/3 compatible string types; the original code used the
    # Python-2-only ``basestring``, which raises NameError on Python 3.
    try:
        _STRING_TYPES = (str, unicode)  # noqa: F821 -- Python 2
    except NameError:
        _STRING_TYPES = (str,)  # Python 3

    def __init__(self):
        LexYaccParserExtension.__init__(self)

        self.builder = Builder()
        # Nesting depth of '{' ... '}' inside 'versioncommunication';
        # reaching zero pops the lexer state.
        self.open_blocks_cnt = 0

    ##########################################
    #             RESERVED WORDS
    ##########################################

    def word_communication_specification(self, t):
        # Enter the lexer state for the version "communication { ... }"
        # section.
        t.lexer.push_state('versioncommunication')
        return t

    ##########################################
    #                 TOKENS
    ##########################################

    def token_block_open(self, t):
        r'{'
        self.open_blocks_cnt += 1
        return t

    def token_block_close(self, t):
        r'}'
        self.open_blocks_cnt -= 1
        if self.open_blocks_cnt == 0:
            t.lexer.pop_state()
        return t

    ##########################################
    #                 RULES
    ##########################################
    # NOTE: the docstrings below are the yacc grammar productions and
    # must not be reworded.

    def version_communication(self, t):
        """
        version_communication : COMMUNICATION_SPECIFICATION BLOCKOPEN version_comm_specifications BLOCKCLOSE
        """
        t[0] = {
            'mediums': t[3]
        }

    def version_comm_specifications(self, t):
        """
        version_comm_specifications : version_comm_specification
                    | version_comm_specifications version_comm_specification
        """
        t[0] = t[1]
        if len(t) == 3:
            t[0].update(t[2])

    # Medium

    def version_medium_specification(self, t):
        """
        version_comm_specification : MEDIUM_SPECIFICATION SQLPARAN IDENTIFIER SQRPARAN BLOCKOPEN version_medium_elements BLOCKCLOSE
        """
        # Map medium name -> its elements dict.
        t[0] = {
            t[3]: t[6]
        }

    def version_medium_elements(self, t):
        """
        version_medium_elements : version_medium_default_parameters version_medium_topology
        """
        t[0] = {
            'topology': t[2],
            'default_parameters': t[1]
        }

    def version_medium_default_parameters(self, t):
        """
        version_medium_default_parameters : version_medium_default_parameter SEMICOLON
                    | version_medium_default_parameters version_medium_default_parameter SEMICOLON
        """
        t[0] = t[1]
        if len(t) == 4:
            # Merge the new parameter dict into the accumulated dict.
            t[0].update(t[2])

    def version_quality_default_parameter(self, t):
        """
        version_medium_default_parameter : QUALITY_DEFAULT_PARAMETER EQUAL number
        """
        t[0] = {'default_q': t[3]}

    def version_medium_topology(self, t):
        """
        version_medium_topology : TOPOLOGY_SPECIFICATION BLOCKOPEN version_topology_rules_list BLOCKCLOSE
        """
        t[0] = {'rules': t[3]}

    def version_topology_rules_list(self, t):
        """
        version_topology_rules_list : version_topology_rule
                    | version_topology_rules_list version_topology_rule
        """
        if len(t) == 3:
            t[0] = t[1]
            t[0].append(t[2])
        else:
            t[0] = []
            t[0].append(t[1])

    def version_topology_rule_point_to_point(self, t):
        """
        version_topology_rule : version_topology_rule_left_hosts version_topology_arrow version_topology_rule_right_hosts SEMICOLON
                    | version_topology_rule_left_hosts version_topology_arrow version_topology_rule_right_hosts COLON version_topology_rule_parameters SEMICOLON
        """
        parameters = {}
        if len(t) == 7:
            # Variant with an explicit parameter list after the colon.
            parameters = t[5]
        t[0] = TopologyRule(t[1], t[2], t[3], parameters=parameters)

    def version_topology_rule_broadcast(self, t):
        """
        version_topology_rule : version_topology_rule_left_hosts ARROWRIGHT STAR COLON version_topology_rule_parameters SEMICOLON
        """
        parameters = {}
        if len(t) == 7:
            parameters = t[5]
        # Broadcast: the right-hand side is None (any host).
        t[0] = TopologyRule(t[1], t[2], None, parameters=parameters)

    # Backwards-compatible alias for the original (misspelled) method name.
    version_topology_rule_boradcast = version_topology_rule_broadcast

    def version_topology_rule_left_hosts(self, t):
        """
        version_topology_rule_left_hosts : IDENTIFIER
                    | version_topology_host_with_indicies
        """
        # A plain identifier becomes a host without index range; otherwise
        # the sub-rule has already built a TopologyRuleHost.
        if isinstance(t[1], self._STRING_TYPES):
            t[0] = TopologyRuleHost(t[1])
        else:
            t[0] = t[1]

    def version_topology_rule_right_hosts(self, t):
        """
        version_topology_rule_right_hosts : IDENTIFIER
                    | STAR
                    | version_topology_host_with_indicies
                    | version_topology_host_with_i_index
        """
        if isinstance(t[1], self._STRING_TYPES):
            if t[1] == u"*":
                # '*' means broadcast / any host.
                t[0] = None
            else:
                t[0] = TopologyRuleHost(t[1])
        else:
            t[0] = t[1]

    def version_topology_host_with_indicies(self, t):
        """
        version_topology_host_with_indicies : IDENTIFIER SQLPARAN INTEGER SQRPARAN
                    | IDENTIFIER SQLPARAN INTEGER COLON SQRPARAN
                    | IDENTIFIER SQLPARAN COLON INTEGER SQRPARAN
                    | IDENTIFIER SQLPARAN INTEGER COLON INTEGER SQRPARAN
        """
        index_range = None
        if len(t) == 5:
            # host[N] -> exactly index N.
            index_range = (t[3], t[3])
        elif len(t) == 6:
            if t[3] == ':':
                # host[:N] -> everything up to N.
                index_range = (None, t[4])
            else:
                # host[N:] -> everything from N.
                index_range = (t[3], None)
        elif len(t) == 7:
            # host[N:M].
            index_range = (t[3], t[5])
        t[0] = TopologyRuleHost(t[1], index_range=index_range)

    def version_topology_host_with_i_index(self, t):
        """
        version_topology_host_with_i_index : IDENTIFIER SQLPARAN I_INDEX SQRPARAN
                    | IDENTIFIER SQLPARAN I_INDEX COMM_PLUS INTEGER SQRPARAN
                    | IDENTIFIER SQLPARAN I_INDEX COMM_MINUS INTEGER SQRPARAN
        """
        i_shift = None
        if len(t) == 5:
            # host[i] -> same index as the left-hand host.
            i_shift = 0
        elif len(t) == 7:
            # host[i+N] / host[i-N].
            if t[4] == '-':
                i_shift = - t[5]
            else:
                i_shift = t[5]
        t[0] = TopologyRuleHost(t[1], i_shift=i_shift)

    def version_topology_rule_parameters(self, t):
        """
        version_topology_rule_parameters : version_topology_rule_parameter
                    | version_topology_rule_parameters COMMA version_topology_rule_parameter
        """
        t[0] = t[1]
        if len(t) == 4:
            t[0].update(t[3])

    def version_topology_rule_quality_parameter(self, t):
        """
        version_topology_rule_parameter : Q_PARAMETER EQUAL number
        """
        t[0] = {'q': t[3]}

    def version_topology_arrow(self, t):
        """
        version_topology_arrow : ARROWRIGHT
                    | ARROWLEFT
                    | ARROWBOTH
        """
        t[0] = t[1]

    def _extend(self):
        self.parser.add_state('versioncommunication', 'inclusive')

        self.parser.add_reserved_word('communication', 'COMMUNICATION_SPECIFICATION',
                                      func=self.word_communication_specification)
        self.parser.add_reserved_word('medium', 'MEDIUM_SPECIFICATION', state='versioncommunication',)
        self.parser.add_reserved_word('default_q', 'QUALITY_DEFAULT_PARAMETER', state='versioncommunication',)
        self.parser.add_reserved_word('topology', 'TOPOLOGY_SPECIFICATION', state='versioncommunication',)
        self.parser.add_reserved_word('i', 'I_INDEX', state='versioncommunication')
        self.parser.add_reserved_word('q', 'Q_PARAMETER', state='versioncommunication',)

        self.parser.add_token('BLOCKOPEN', func=self.token_block_open, states=['versioncommunication'])
        self.parser.add_token('BLOCKCLOSE', func=self.token_block_close, states=['versioncommunication'])
        # NOTE(review): ARROWBOTH ('<->') is registered after ARROWLEFT
        # ('<-') and COMM_MINUS ('-') after ARROWRIGHT ('->'); confirm
        # add_token orders patterns so the longer operators match first.
        self.parser.add_token('ARROWRIGHT', r'\-\>', states=['versioncommunication'])
        self.parser.add_token('ARROWLEFT', r'\<\-', states=['versioncommunication'])
        self.parser.add_token('ARROWBOTH', r'\<\-\>', states=['versioncommunication'])
        self.parser.add_token('COMM_PLUS', r'\+', states=['versioncommunication'])
        self.parser.add_token('COMM_MINUS', r'\-', states=['versioncommunication'])

        self.parser.add_rule(self.version_communication)
        self.parser.add_rule(self.version_comm_specifications)
        self.parser.add_rule(self.version_medium_specification)
        self.parser.add_rule(self.version_medium_elements)
        self.parser.add_rule(self.version_medium_default_parameters)
        self.parser.add_rule(self.version_quality_default_parameter)
        self.parser.add_rule(self.version_medium_topology)
        self.parser.add_rule(self.version_topology_rules_list)
        self.parser.add_rule(self.version_topology_rule_point_to_point)
        self.parser.add_rule(self.version_topology_rule_broadcast)
        self.parser.add_rule(self.version_topology_rule_left_hosts)
        self.parser.add_rule(self.version_topology_rule_right_hosts)
        self.parser.add_rule(self.version_topology_rule_parameters)
        self.parser.add_rule(self.version_topology_rule_quality_parameter)
        self.parser.add_rule(self.version_topology_host_with_indicies)
        self.parser.add_rule(self.version_topology_host_with_i_index)
        self.parser.add_rule(self.version_topology_arrow)
from functools import wraps
from aquery.database import Database
from aquery.util.logger import logger
class IQuery(Database):
    """Query pipeline base class.

    Each stage (param -> sql -> data -> cursor) is pluggable either via a
    class attribute or a per-call keyword argument.
    """

    # Handler applied to the decorated function's return value before it
    # becomes the query data.
    PARAM_HANDLER = None
    # Handler applied to the SQL template before execution.
    SQL_HANDLER = None
    # Handler applied to the data before execution.
    DATA_HANDLER = None
    # Handler applied to the cursor after execution.
    CURSOR_HANDLER = None
    # Database exceptions that are logged instead of raised.
    IGNORE_ERRORS = ()

    @classmethod
    def query(cls, sql, data=None, **outer_kwargs):
        """Run *sql* after piping it and *data* through the handlers."""
        prepared_sql = cls._prepare_sql(sql, data, **outer_kwargs)
        prepared_data = cls._prepare_data(data, **outer_kwargs)

        logger.debug("[query] %s %s", prepared_sql, prepared_data)

        errors = cls.IGNORE_ERRORS + outer_kwargs.get('ignore_errors', ())
        cur = cls.execute(prepared_sql, prepared_data, ignore_errors=errors)
        return cls._prepare_cursor(cur, **outer_kwargs)

    @classmethod
    def query_wrapper(cls, sql, **outer_kwargs):
        """Decorator factory: the wrapped function returns the query data."""
        def outer_wrapper(func):
            @wraps(func)
            def inner_wrapper(*inner_args, **inner_kwargs):
                raw = func(*inner_args, **inner_kwargs)
                prepared = cls._prepare_param(raw, outer_kwargs, *inner_args, **inner_kwargs)
                return cls.query(sql, prepared, **outer_kwargs)
            return inner_wrapper
        return outer_wrapper

    @classmethod
    def _prepare_param(cls, data, outer_kwargs, *inner_args, **inner_kwargs):
        handler = outer_kwargs.get("param_handler", cls.PARAM_HANDLER)
        return handler.handle(data, outer_kwargs, *inner_args, **inner_kwargs) if handler else data

    @classmethod
    def _prepare_sql(cls, sql, data=None, **outer_kwargs):
        handler = outer_kwargs.get("sql_handler", cls.SQL_HANDLER)
        return handler.handle(sql, data, **outer_kwargs) if handler else sql

    @classmethod
    def _prepare_data(cls, data, **outer_kwargs):
        handler = outer_kwargs.get("data_handler", cls.DATA_HANDLER)
        return handler.handle(data, **outer_kwargs) if handler else data

    @classmethod
    def _prepare_cursor(cls, cursor, **outer_kwargs):
        handler = outer_kwargs.get("cursor_handler", cls.CURSOR_HANDLER)
        # Unlike the other stages, the cursor handler is a plain callable
        # (the configured handlers are bare functions), so call it directly.
        return handler(cursor) if handler else cursor
import mysql.connector
from aquery.util.errors import TypeErrorException
from aquery.util.logger import logger
class Database(object):
    """
    Thin wrapper around a DB-API connection/cursor pair.

    Connector configuration docs:
    https://dev.mysql.com/doc/connector-python/en/connector-python-connectargs.html

    NOTE: the connection and cursor are stored on the *class*, so every
    instance and every classmethod caller shares one connection.
    """

    # Connection parameters passed to DATABASE_CONNECTOR.
    DATABASE_CONFIG = {
        "database": "database",
        "user": "root",
        "password": "123456",
        "host": "127.0.0.1",
        "port": 3306,
        "autocommit": True,
    }

    # Keyword arguments for connection.cursor(); dictionary=True makes
    # fetched rows come back as dicts.
    CURSOR_CONFIG = {
        "dictionary": True
    }

    # Driver/connector factory.
    DATABASE_CONNECTOR = mysql.connector.Connect

    connect = None
    cursor = None

    def __enter__(self):
        # Context manager yields the (shared, class-level) cursor.
        self.open()
        return self.cursor

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    @classmethod
    def open(cls):
        logger.debug("open database")
        cls.connect = cls.DATABASE_CONNECTOR(**cls.DATABASE_CONFIG)
        cls.cursor = cls.connect.cursor(**cls.CURSOR_CONFIG)

    @classmethod
    def close(cls):
        logger.debug("close database")
        cls.cursor.close()
        cls.connect.close()
        # BUGFIX: drop the references so a later execute() reopens the
        # connection instead of reusing the closed one.
        cls.cursor = None
        cls.connect = None

    @classmethod
    def execute(cls, sql, data=None, ignore_errors=()):
        """
        Execute *sql* with *data*.

        :param sql: str
        :param data: dict for a single statement, list[dict] for
                     executemany, or None
        :param ignore_errors: tuple of exception classes to log instead of
                              propagate
        :return: cursor
        :raises TypeErrorException: if *data* has an unsupported type
        """
        is_many = False

        # Validate the data shape before touching the connection.
        if isinstance(data, list):
            is_many = True
            for item in data:
                if not isinstance(item, dict):
                    raise TypeErrorException()
        elif isinstance(data, dict):
            pass
        elif data is None:
            pass
        else:
            raise TypeErrorException()

        # Lazily (re)open the shared connection.
        if cls.connect is None:
            cls.open()

        try:
            if is_many:
                cls.cursor.executemany(sql, data)
            else:
                cls.cursor.execute(sql, data)
        except ignore_errors as e:
            logger.error(e)
        return cls.cursor
from aquery.Iquery import IQuery
from aquery.handler.cursor_handler import CursorHandler
from aquery.handler.param_handler import ParamHandler
from aquery.handler.sql_handler import SqlDataHandler
class Query(IQuery):
    """Concrete query facade wiring in the default aquery handlers."""

    # Handler applied to decorated-function return values.
    PARAM_HANDLER = ParamHandler
    # Handler applied to the SQL template before execution.
    SQL_HANDLER = SqlDataHandler
    # No default data handler.
    DATA_HANDLER = None
    # No default cursor handler; each helper below supplies its own.
    CURSOR_HANDLER = None

    @staticmethod
    def _apply_defaults(kwargs, handler, many=False):
        """Fill in the cursor handler (and is_many flag) for one call."""
        if many:
            kwargs['is_many'] = True
        kwargs.setdefault('cursor_handler', handler)
        return kwargs

    # ======================================
    # Direct queries
    # ======================================

    @classmethod
    def query_insert(cls, sql, data=None, **kwargs):
        return cls.query(sql, data, **cls._apply_defaults(kwargs, CursorHandler.insert_cursor_handle))

    @classmethod
    def query_insert_many(cls, sql, data=None, **kwargs):
        return cls.query(sql, data, **cls._apply_defaults(kwargs, CursorHandler.insert_many_cursor_handle, many=True))

    @classmethod
    def query_update(cls, sql, data=None, **kwargs):
        return cls.query(sql, data, **cls._apply_defaults(kwargs, CursorHandler.update_cursor_handle))

    @classmethod
    def query_delete(cls, sql, data=None, **kwargs):
        return cls.query(sql, data, **cls._apply_defaults(kwargs, CursorHandler.delete_cursor_handle))

    @classmethod
    def query_select(cls, sql, data=None, **kwargs):
        return cls.query(sql, data, **cls._apply_defaults(kwargs, CursorHandler.select_cursor_handle))

    @classmethod
    def query_select_one(cls, sql, data=None, **kwargs):
        return cls.query(sql, data, **cls._apply_defaults(kwargs, CursorHandler.select_one_cursor_handle))

    # ======================================
    # Decorator-style queries
    # ======================================

    @classmethod
    def select_one(cls, sql, **kwargs):
        return cls.query_wrapper(sql, **cls._apply_defaults(kwargs, CursorHandler.select_one_cursor_handle))

    @classmethod
    def select(cls, sql, **kwargs):
        return cls.query_wrapper(sql, **cls._apply_defaults(kwargs, CursorHandler.select_cursor_handle))

    @classmethod
    def insert(cls, sql, **kwargs):
        return cls.query_wrapper(sql, **cls._apply_defaults(kwargs, CursorHandler.insert_cursor_handle))

    @classmethod
    def insert_many(cls, sql, **kwargs):
        return cls.query_wrapper(sql, **cls._apply_defaults(kwargs, CursorHandler.insert_many_cursor_handle, many=True))

    @classmethod
    def update(cls, sql, **kwargs):
        return cls.query_wrapper(sql, **cls._apply_defaults(kwargs, CursorHandler.update_cursor_handle))

    @classmethod
    def delete(cls, sql, **kwargs):
        return cls.query_wrapper(sql, **cls._apply_defaults(kwargs, CursorHandler.delete_cursor_handle))
from aquery.handler.abs_handler import SqlHandlerAbstract
from aquery.util.errors import RawDataNotFindException, TypeErrorException, AutoDataNotFindException
from aquery.util.logger import logger
from aquery.util.sql_util import SqlUtil
class SqlDataHandler(SqlHandlerAbstract):
    """Pre-process an SQL template before execution.

    Placeholder syntax handled here:
      ${key}   -- raw (verbatim) substitution
      ({key})  -- expansion of a list value into an IN-clause
      @{key}   -- auto-generated fragments (fields / values / data)
      #{key}   -- compiled into DB-API %(key)s pyformat markers
    """

    # Default generators for @{...} placeholders.
    AUTO_DATA_GETTER = {
        "fields": SqlUtil.get_data_field_str,
        "values": SqlUtil.get_data_value_str,
        "data": SqlUtil.get_auto_data_field_value_str
    }

    @classmethod
    def handle(cls, sql, data, **kwargs):
        # Order matters: raw and list substitution pop entries from *data*
        # before the auto placeholders inspect what remains.
        sql = cls.raw_data_handle(sql, data, **kwargs)
        sql = cls.list_data_handler(sql, data, **kwargs)
        sql = cls.auto_data_handler(sql, data, **kwargs)
        sql = cls.sql_compile_handler(sql, data, **kwargs)
        return sql

    @classmethod
    def raw_data_handle(cls, sql, data, **kwargs):
        """
        Verbatim substitution of ${key} placeholders.

        Lookup priority: data > class_object.
        """
        raw_keys = SqlUtil.get_raw_keys(sql)

        # Nothing to replace.
        if not raw_keys:
            return sql

        logger.debug("[raw_keys] %s", raw_keys)

        raw_data = {}

        # Collect candidates from class_object attributes first.
        class_object = kwargs.get("class_object", None)
        if class_object:
            for key in raw_keys:
                if hasattr(class_object, key):
                    raw_data[key] = getattr(class_object, key)

        # Values from *data* win; they are popped so they are not also
        # sent along as query parameters.
        if isinstance(data, dict):
            for key in raw_keys:
                if key in data:
                    raw_data[key] = data.pop(key)

        # Every placeholder must have been resolved.
        for key in raw_keys:
            if key not in raw_data.keys():
                raise RawDataNotFindException(key)

        # Verbatim replacement.
        return SqlUtil.raw_data_replace(sql, raw_data)

    @classmethod
    def auto_data_handler(cls, sql, data, **kwargs):
        """
        Expand @{key} placeholders via AUTO_DATA_GETTER.
        """
        auto_keys = SqlUtil.get_auto_keys(sql)

        # Nothing to replace.
        if not auto_keys:
            return sql

        logger.debug("[auto_keys] %s", auto_keys)

        auto_data = {}
        for key in auto_keys:
            value = cls.AUTO_DATA_GETTER.get(key)
            if callable(value):
                value = value(data, sql)
            if value:
                auto_data[key] = value
            else:
                # Unknown key, or the generator produced nothing.
                raise AutoDataNotFindException(key)

        return SqlUtil.auto_data_replace(sql, auto_data)

    @classmethod
    def list_data_handler(cls, sql, data, **kwargs):
        """
        Expand ({list}) into (#{list-1}, #{list-2}...) placeholders.
        """
        list_keys = SqlUtil.get_list_key(sql)
        if not list_keys:
            return sql

        logger.debug("[list_keys] %s", list_keys)

        for key in list_keys:
            value = data.pop(key)
            if isinstance(value, list):
                in_data = SqlUtil.get_list_data(key, value)
                # Only merge when the generated indexed keys do not collide
                # with existing ones, otherwise data would be corrupted.
                if not data.keys().isdisjoint(in_data.keys()):
                    raise Exception("有重复数据,不能进行添加")
                data.update(in_data)

                value = SqlUtil.get_value_str(in_data.keys())
                sql = sql.replace(f"({{{key}}})", value)
            else:
                raise TypeErrorException()
        return sql

    @classmethod
    def sql_compile_handler(cls, sql, data, **kwargs):
        """
        Compile #{key} placeholders into %(key)s pyformat markers.
        """
        return SqlUtil.compile_sql(sql)
import re
from aquery.util.errors import DataEmptyException
class SqlUtil(object):
    """Helpers that expand template placeholders into SQL fragments."""

    @staticmethod
    def get_field_str(keys):
        """
        [name, age] -> (`name`, `age`)
        """
        quoted = ["`%s`" % key for key in keys]
        return "(%s)" % ", ".join(quoted)

    @staticmethod
    def get_value_str(keys):
        """
        [name, age] -> (#{name}, #{age})
        """
        holders = ["#{%s}" % key for key in keys]
        return "(%s)" % ", ".join(holders)

    @staticmethod
    def get_field_value_str(keys):
        """
        ['name', 'age'] -> `name` = #{name}, `age` = #{age}
        """
        pairs = ["`%s` = #{%s}" % (key, key) for key in keys]
        return ", ".join(pairs)

    @staticmethod
    def get_list_data(key, values):
        """Zip generated indexed keys ('key-0', ...) with *values*."""
        indexed = SqlUtil.get_key_list(key, len(values))
        return dict(zip(indexed, values))

    @classmethod
    def get_data_value_str(cls, data, sql):
        source = data[0] if isinstance(data, list) else data
        return cls.get_value_str(source)

    @classmethod
    def get_data_field_str(cls, data, sql):
        source = data[0] if isinstance(data, list) else data
        return cls.get_field_str(source)

    @classmethod
    def get_auto_data_field_value_str(cls, data, sql):
        """Build a SET-style clause from keys not already used in *sql*."""
        used = cls.get_variable_keys(sql)
        remaining = {}
        for key, value in data.items():
            if key not in used:
                remaining[key] = value
        if not remaining:
            raise DataEmptyException()
        return cls.get_field_value_str(remaining)

    @classmethod
    def get_data_field_value_str(cls, data, sql):
        source = data[0] if isinstance(data, list) else data
        return cls.get_field_value_str(source)

    @classmethod
    def get_key_list(cls, key, count):
        """
        'name', 5 -> ['name-0', 'name-1', 'name-2', 'name-3', 'name-4']
        """
        return ["%s-%d" % (key, index) for index in range(count)]

    @staticmethod
    def compile_sql(sql):
        """
        Turn #{key} placeholders into DB-API %(key)s pyformat markers.

        "INSERT INTO ${table} #{fields} VALUES #{values}"
        -> "INSERT INTO ${table} %(fields)s VALUES %(values)s"
        http://www.mamicode.com/info-detail-2327645.html
        """
        return re.sub(r"#\{(?P<key>.*?)\}", r"%(\g<key>)s", sql)

    @staticmethod
    def raw_data_replace(sql, data):
        """Substitute ${key} placeholders verbatim."""
        for key in data:
            sql = sql.replace("${%s}" % key, data[key])
        return sql

    @staticmethod
    def auto_data_replace(sql, data):
        """Substitute @{key} placeholders verbatim."""
        for key in data:
            sql = sql.replace("@{%s}" % key, data[key])
        return sql

    @staticmethod
    def get_variable_keys(sql):
        # Keys of #{...} placeholders.
        return re.findall(r"#\{(.*?)\}", sql)

    @staticmethod
    def get_raw_keys(sql):
        # Keys of ${...} placeholders.
        return re.findall(r"\$\{(.*?)\}", sql)

    @staticmethod
    def get_auto_keys(sql):
        # Keys of @{...} placeholders.
        return re.findall(r"@\{(.*?)\}", sql)

    @classmethod
    def get_list_key(cls, sql):
        # Keys of ({...}) placeholders.
        return re.findall(r"\(\{(.*?)\}\)", sql)
if __name__ == '__main__':
    # Ad-hoc smoke test: print sample placeholder expansions for manual
    # inspection when the module is run directly.
    print(SqlUtil.get_field_str(['name', 'age']))
    print(SqlUtil.get_value_str((['name', 'age'])))
    print(SqlUtil.get_field_value_str((['name', 'age'])))
    print(SqlUtil.get_key_list('name', 10))
    print(SqlUtil.get_list_key('select * from ${table} where id in ({uid_list}) limit 1'))
import pandas as pd
import re
import string
import argparse
from camel_tools.utils.normalize import normalize_alef_maksura_ar
from camel_tools.utils.normalize import normalize_alef_ar
from camel_tools.utils.normalize import normalize_teh_marbuta_ar
from camel_tools.utils.dediac import dediac_ar
from camel_tools.utils.normalize import normalize_unicode
from stop_words import get_stop_words
from sklearn.feature_extraction.text import TfidfVectorizer
import random
from sklearn.decomposition import NMF
from sklearn.decomposition import LatentDirichletAllocation
class Topic_modling:
    """Topic modelling (NMF / LDA) over an Arabic text column of a CSV file.

    :param path: path of the CSV file loaded with pandas
    :param column: name of the text column to clean and model
    """

    def __init__(self, path, column):
        # Arabic + English stop word lists for the vectorizers.
        self.stop_words = get_stop_words('arabic') + get_stop_words('en')
        self.reviews_datasets = pd.read_csv(path)
        self.column = column

    def DataCleaning(self):
        """Normalize the text column in place.

        Unifies Arabic letter variants, strips diacritics and punctuation,
        collapses repeated characters, then drops rows with missing values.
        """
        arabic_punctuations = '''`÷×؛<>_()*&^%][ـ،/:"؟.,'{}~¦+|!”…“–ـ'''
        english_punctuations = string.punctuation
        punctuations_list = arabic_punctuations + english_punctuations

        arabic_diacritics = re.compile("""
                                 ّ    | # Tashdid
                                 َ    | # Fatha
                                 ً    | # Tanwin Fath
                                 ُ    | # Damma
                                 ٌ    | # Tanwin Damm
                                 ِ    | # Kasra
                                 ٍ    | # Tanwin Kasr
                                 ْ    | # Sukun
                                 ـ     # Tatwil/Kashida
                             """, re.VERBOSE)

        def normalize_arabic(text):
            # Unify alef / yeh / hamza / teh-marbuta / kaf letter variants.
            text = re.sub("[إأآا]", "ا", text)
            text = re.sub("ى", "ي", text)
            text = re.sub("ؤ", "ء", text)
            text = re.sub("ئ", "ء", text)
            text = re.sub("ة", "ه", text)
            text = re.sub("گ", "ك", text)
            return text

        def remove_diacritics(text):
            return re.sub(arabic_diacritics, '', text)

        def remove_punctuations(text):
            translator = str.maketrans('', '', punctuations_list)
            return text.translate(translator)

        def remove_repeating_char(text):
            # Collapse runs of the same character to a single occurrence.
            return re.sub(r'(.)\1+', r'\1', text)

        cleaned = []
        for raw in self.reviews_datasets[self.column]:
            text = str(raw)
            text = normalize_arabic(text)
            text = remove_diacritics(text)
            text = remove_punctuations(text)
            text = remove_repeating_char(text)
            cleaned.append(text)

        self.reviews_datasets[self.column] = cleaned
        # BUGFIX: dropna() returns a new frame; the original code discarded
        # the result, so rows with missing values were never removed.
        self.reviews_datasets = self.reviews_datasets.dropna()

    def TopicModleingNMF(self, num):
        """Fit an NMF topic model and assign a 'Topic' column to the dataset.

        :param num: requested number of topics. NOTE(review): the model is
            fitted with num + 1 components while the LDA variant uses num
            directly -- confirm this asymmetry is intended.
        """
        tfidf_vect = TfidfVectorizer(max_df=0.8, min_df=2, stop_words=self.stop_words)
        doc_term_matrix = tfidf_vect.fit_transform(self.reviews_datasets[self.column].values.astype('U'))

        nmf = NMF(n_components=num + 1, random_state=42)
        nmf.fit(doc_term_matrix)

        feature_names = tfidf_vect.get_feature_names()

        # Print a few random vocabulary entries as a sanity check.
        for _ in range(10):
            # BUGFIX: randint's upper bound is inclusive, so using
            # len(feature_names) could raise IndexError.
            random_id = random.randint(0, len(feature_names) - 1)
            print(feature_names[random_id])

        first_topic = nmf.components_[0]
        top_topic_words = first_topic.argsort()[-10:]
        for i in top_topic_words:
            print(feature_names[i])

        for i, topic in enumerate(nmf.components_):
            print(f'Top 10 words for topic #{i}:')
            print([feature_names[i] for i in topic.argsort()[-10:]])
            print('\n')

        topic_values = nmf.transform(doc_term_matrix)
        self.reviews_datasets['Topic'] = topic_values.argmax(axis=1)

    def TopicModleingLDA(self, num):
        """Fit an LDA topic model with *num* topics and assign a 'Topic'
        column to the dataset."""
        count_vect = TfidfVectorizer(max_df=0.8, min_df=2, stop_words=self.stop_words)
        doc_term_matrix = count_vect.fit_transform(self.reviews_datasets[self.column].values.astype('U'))

        LDA = LatentDirichletAllocation(n_components=num, random_state=42)
        LDA.fit(doc_term_matrix)

        feature_names = count_vect.get_feature_names()

        # Print a few random vocabulary entries as a sanity check.
        for _ in range(15):
            # BUGFIX: inclusive upper bound (see TopicModleingNMF).
            random_id = random.randint(0, len(feature_names) - 1)
            print(feature_names[random_id])

        first_topic = LDA.components_[0]
        top_topic_words = first_topic.argsort()[-15:]
        for i in top_topic_words:
            print(feature_names[i])

        for i, topic in enumerate(LDA.components_):
            print(f'Top 10 words for topic #{i}:')
            print([feature_names[i] for i in topic.argsort()[-15:]])
            print('\n')

        topic_values = LDA.transform(doc_term_matrix)
        print('Topic Value shape: ', topic_values.shape)
        self.reviews_datasets['Topic'] = topic_values.argmax(axis=1)
        print('Dataset head: /n/n ', self.reviews_datasets.head())
# ARBOLpy
python implementation of ARBOL scRNAseq iterative tiered clustering
<img src="docs/ARBOLsmall.jpg?raw=true" align="right" width=500px>
Iteratively cluster single cell datasets using a scanpy anndata object as input. Identify and use optimum
cluster resolution parameters at each tier of clustering. Outputs QC and visualization plots for each clustering event.
## Install
By github:
```
pip install git+https://github.com/jo-m-lab/ARBOLpy.git
```
from PyPI
```
pip install arbolpy
```
or clone the repository and source the functions directly from the script
```
git clone https://github.com/jo-m-lab/ARBOLpy.git
import "path/to/cloned/git/repo/ARBOLpy/ARBOL"
```
there is a docker image available with ARBOL and dependencies preinstalled
https://hub.docker.com/r/kkimler/arbolpy
## Recommended Usage
ARBOL was developed and used in the paper, "A treatment-naïve cellular atlas of pediatric Crohn’s disease predicts disease severity and therapeutic response"
Currently, a tutorial is only available for the R version, where the FGID atlas figure is reproduced:
https://jo-m-lab.github.io/ARBOL/ARBOLtutorial.html
This package is meant as a starting point for the way that we approached clustering and is meant to be edited/customized through community feedback from users such as yourself!
We have dedicated effort to choosing reasonable defaults, but there is no certainty that they are
the best defaults for your data.
The main function of ARBOLpy is ARBOL() - here is an example call.
The helper function write_ARBOL_output writes the anytree object's endclusters to a csv file.
```
tree = ARBOL.ARBOL(adata)
ARBOL.write_ARBOL_output(tree)
```
**Note** This script can take a long time to run. Running on 20K cells could
take an hour. Running on 100k+ cells could take 5 hours. This timing varies
based on the heterogeneity of your data.
**Note** The script requires approximately 1.2 GB RAM per 1k cells, meaning on a local machine with 16GB RAM, one could reasonably run 12k cells. The current RAM/time bottleneck is the silhouette analysis, which runs many rounds of clustering at different resolutions.
## ARBOL() Parameters
* *adata* scanpy anndata object
* *normalization_method* normalization method, defaults to "Pearson", scanpy's experimental implementation of SCTransform
* *tier* starting tier, defaults to 0
* *clustN* starting cluster, defaults to 0
* *min_cluster_size* minimum number of cells to allow further clustering
* *tree* anytree object to attach arbol to. Shouldn't be changed unless building onto a pre-existing tree.
* *parent* parent node of current clustering event, defaults to None. As with tree, shouldn't be changed unless building onto a pre-existing anytree object
* *max_tiers* maximum number of tiers to allow further clustering
* *min_silhouette_res* lower bound of silhouette analysis leiden clustering resolution parameter scan
* *max_silhouette_res* upper bound
* *h5dir* where to save h5 objects for each tier and cluster, if None, does not save
* *figdir* where to save QC and viz figures for each tier and cluster, if None does not save
## Returns
* anytree object based on iterative tiered clustering
| ARBOLpy-kkimler | /ARBOLpy-kkimler-0.0.1.tar.gz/ARBOLpy-kkimler-0.0.1/README.md | README.md |
# ARBOLpy
python implementation of the R package ARBOL, scRNAseq iterative tiered clustering

Iteratively cluster single cell datasets using a scanpy anndata object as input. Identifies and uses optimum
clustering parameters at each tier of clustering. Current build includes SCtransform normalization.
Outputs QC and visualization plots for each clustering event.
## Install
By github:
```
pip install git+https://github.com/jo-m-lab/ARBOLpy.git
```
from PyPI
```
pip install arbolpy
import ARBOL
```
or clone the repository and source the functions directly from the script
```
git clone https://github.com/jo-m-lab/ARBOLpy.git
import "path/to/cloned/git/repo/ARBOLpy/ARBOL"
```
there is a docker image available with ARBOL and dependencies preinstalled
https://hub.docker.com/r/kkimler/arbolpy
## Recommended Usage
ARBOL was developed and used in the paper, "A treatment-naïve cellular atlas of pediatric Crohn’s disease predicts disease severity and therapeutic response"
Currently, a tutorial is only available for the R version, where the FGID atlas figure is reproduced:
https://jo-m-lab.github.io/ARBOL/ARBOLtutorial.html
ARBOLpy is a stripped down version of ARBOL meant to perform iterative clustering with little overhead.
Currently it does not include the two stop conditions that the R version uses to heuristically join similar clusters.
This results in the Python version overclustering data. Methods for merging the end clusters of the tree are available on the develop branch of the R version of ARBOL.
This package is meant as a starting point for the way that we approached clustering and is meant to be edited/customized through community feedback from users such as yourself!
The main function of ARBOLpy is ARBOL() - here is an example call.
```
import scanpy as sc
import ARBOLpy.ARBOL as arbol
adata = sc.datasets.pbmc3k()
tree = arbol.ARBOL(adata)
ARBOL.write_ARBOL_output(tree,output_csv='endclusts.csv')
```
The helper function write_ARBOL_output writes the anytree object's endclusters to a csv file.
**Note** This script can take a long time to run. Running on 20K cells could
take >30 minutes. Running on 100k+ cells could take 4 hours.
**Note** The script requires approximately 0.6 GB RAM per 1k cells, meaning on a local machine with 16GB RAM, one could reasonably run 24k cells. The current RAM/time bottleneck is the silhouette analysis, which runs 30 rounds of clustering at different resolutions.
## ARBOL() Parameters
* *adata* scanpy anndata object
* *normalize_method* normalization method, defaults to "Pearson", scanpy's experimental implementation of SCTransform. Also available: "TPM": as implemented in scanpy normalize_total()
* *tier* starting tier, defaults to 0
* *cluster* starting cluster, defaults to 0
* *min_cluster_size* minimum number of cells to allow further clustering
* *tree* anytree object to attach arbol to. Shouldn't be changed unless building onto a pre-existing tree.
* *parent* parent node of current clustering event, defaults to None. As with tree, shouldn't be changed unless building onto a pre-existing anytree object
* *max_tiers* maximum number of tiers to allow further clustering
* *min_silhouette_res* lower bound of silhouette analysis leiden clustering resolution parameter scan
* *max_silhouette_res* upper bound
* *silhouette_subsampling_n* number of cells to subsample anndata for silhouette analysis (cluster resolution choice)
* *h5dir* where to save h5 objects for each tier and cluster, if None, does not save
* *figdir* where to save QC and viz figures for each tier and cluster, if None does not save
## Returns
* anytree object based on iterative tiered clustering
| ARBOLpy | /ARBOLpy-0.0.7.tar.gz/ARBOLpy-0.0.7/README.md | README.md |
from __future__ import division, print_function, absolute_import
from arc._database import sqlite3, UsedModulesARC
import csv
import gzip
from math import exp, sqrt
from mpmath import angerj
# for web-server execution, uncomment the following two lines
# import matplotlib
# matplotlib.use("Agg")
import numpy as np
import re
import shutil
from numpy.linalg import eigh
from .wigner import Wigner6j, Wigner3j, CG, WignerDmatrix
from scipy.constants import physical_constants, pi, epsilon_0, hbar
from scipy.constants import k as C_k
from scipy.constants import c as C_c
from scipy.constants import h as C_h
from scipy.constants import e as C_e
from scipy.constants import m_e as C_m_e
# for matrices
from numpy import floor
import sys
import os
if sys.version_info > (2,):
xrange = range
import pickle
# Local folder where ARC keeps its atomic data files
# (populated/refreshed by setup_data_folder()).
DPATH = os.path.join(os.path.expanduser("~"), ".arc-data")

# Version of the bundled data files; bumping this forces a re-copy of the
# packaged data into DPATH on the next setup_data_folder() call.
__arc_data_version__ = 10

# Public API of this module.
__all__ = [
    "AlkaliAtom",
    "printState",
    "printStateString",
    "printStateStringLatex",
    "printStateLetter",
    "formatNumberSI",
]
def setup_data_folder():
    """Create ~/.arc-data and (re)copy the packaged data files.

    Files are copied only when the locally stored data version differs
    from :obj:`__arc_data_version__`; afterwards the version marker file
    is updated.
    """
    if not os.path.exists(DPATH):
        os.makedirs(DPATH)

    # Is the locally cached data already at the current version?
    versionFile = os.path.join(DPATH, "version.txt")
    upToDate = False
    if os.path.exists(versionFile):
        with open(versionFile, "r") as f:
            upToDate = int(f.readline()) == __arc_data_version__

    if upToDate:
        return

    # Copy the flat data files shipped with the package.
    dataFolder = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "data"
    )
    for entry in os.listdir(dataFolder):
        source = os.path.join(dataFolder, entry)
        if os.path.isfile(source):
            shutil.copy(source, DPATH)

    # Copy the refractive-index data subfolder.
    riSource = os.path.join(dataFolder, "refractive_index_data")
    riTarget = os.path.join(DPATH, "refractive_index_data")
    if not os.path.exists(riTarget):
        os.makedirs(riTarget)
    for entry in os.listdir(riSource):
        source = os.path.join(riSource, entry)
        if os.path.isfile(source):
            shutil.copy(source, riTarget)

    # Record the version we just installed.
    with open(versionFile, "w") as f:
        f.write("%d" % __arc_data_version__)
class AlkaliAtom(object):
    """
    Implements general calculations for alkali atoms.

    This abstract class implements general calculations methods.

    Args:
        preferQuantumDefects (bool):
            Use quantum defects for energy level calculations. If False,
            uses NIST ASD values where available. If True, uses quantum
            defects for energy calculations for principal quantum numbers
            equal or above :obj:`minQuantumDefectN` which is specified for
            each element separately. For principal quantum numbers below
            this value, NIST ASD values are used, since quantum defects
            don't reproduce well low-lying states. Default is True.
        cpp_numerov (bool):
            should the wavefunction be calculated with Numerov algorithm
            implemented in C++; if False, it uses pure Python
            implementation that is much slower. Default is True.
    """

    gS = 2.0023193043737  #: Electron Spin g-factor [Steck]
    gL = 1.0  #: Electron Orbital g-factor
    gI = 0.0  #: Nuclear g-factor

    # ALL PARAMETERS ARE IN ATOMIC UNITS (Hartree)
    alpha = physical_constants["fine-structure constant"][0]

    #: Model potential parameters fitted from experimental observations for
    #: different l (electron angular momentum); one entry per l, indexed by l.
    a1, a2, a3, a4, rc = [0], [0], [0], [0], [0]
    alphaC = 0.0  #: Core polarizability
    Z = 0.0  #: Atomic number
    I = 0.0  #: Nuclear spin

    #: state energies from NIST values
    #: sEnergy [n,l] = state energy for n, l, j = l-1/2
    #: sEnergy [l,n] = state energy for j = l+1/2
    sEnergy = 0
    #: highest principal quantum number for which NIST energies are tabulated
    NISTdataLevels = 0
    scaledRydbergConstant = 0  #: in eV

    #: Contains list of modified Rydberg-Ritz coefficients for calculating
    #: quantum defects for [[ :math:`S_{1/2},P_{1/2},D_{3/2},F_{5/2}`],
    #: [ :math:`S_{1/2},P_{3/2},D_{5/2},F_{7/2}`]].
    quantumDefect = [
        [
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
    ]

    #: location of stored NIST values of measured energy levels in eV
    levelDataFromNIST = ""
    #: location of hard-disk stored dipole matrix elements
    dipoleMatrixElementFile = ""
    #: location of hard-disk stored quadrupole matrix elements
    quadrupoleMatrixElementFile = ""

    dataFolder = DPATH

    # now additional literature sources of dipole matrix elements

    #: Filename of the additional literature source values of dipole matrix
    #: elements.
    #: These additional values should be saved as reduced dipole matrix
    #: elements in J basis.
    literatureDMEfilename = ""

    #: levels that are for smaller principal quantum number (n) than ground
    #: level, but are above in energy due to angular part
    extraLevels = []

    #: principal quantum number for the ground state
    groundStateN = 0

    #: switch - should the wavefunction be calculated with Numerov algorithm
    #: implemented in C++
    cpp_numerov = True

    mass = 0.0  #: atomic mass in kg
    abundance = 1.0  #: relative isotope abundance

    elementName = "elementName"  #: Human-readable element name
    meltingPoint = 0  #: melting point of the element at standard conditions

    preferQuantumDefects = False
    #: minimal quantum number for which quantum defects can be used;
    #: uses measured energy levels otherwise
    minQuantumDefectN = 0

    #: file containing data on hyperfine structure (magnetic dipole A and
    #: magnetic quadrupole B constants).
    hyperfineStructureData = ""
    def __init__(self, preferQuantumDefects=True, cpp_numerov=True):
        """Initialise caches and load stored atomic data.

        Opens the per-atom SQLite cache, optionally loads the compiled
        Numerov integrator, seeds the database with precalculated dipole
        and quadrupole matrix elements, and loads measured (NIST) energy
        levels, literature dipole matrix elements and hyperfine-structure
        data.

        Args:
            preferQuantumDefects (bool): use quantum defects instead of
                NIST ASD energies where possible (see class docstring).
            cpp_numerov (bool): use the C++ Numerov implementation.
        """
        # should the wavefunction be calculated with Numerov algorithm
        # implemented in C; if false, it uses Python implementation
        # that is much slower
        UsedModulesARC.alkali_atoms = True
        self.cpp_numerov = cpp_numerov
        self.preferQuantumDefects = preferQuantumDefects

        self._databaseInit()
        c = self.conn.cursor()

        if self.cpp_numerov:
            from .arc_c_extensions import NumerovWavefunction

            self.NumerovWavefunction = NumerovWavefunction

        # load dipole matrix elements previously calculated
        data = []
        if self.dipoleMatrixElementFile != "":
            # matrix elements computed from NIST energies are cached
            # in a separate ("NIST_"-prefixed) file
            if preferQuantumDefects is False:
                self.dipoleMatrixElementFile = (
                    "NIST_" + self.dipoleMatrixElementFile
                )

            try:
                data = np.load(
                    os.path.join(self.dataFolder, self.dipoleMatrixElementFile),
                    encoding="latin1",
                    allow_pickle=True,
                )
            except IOError as e:
                print(
                    "Error reading dipoleMatrixElement File "
                    + os.path.join(
                        self.dataFolder, self.dipoleMatrixElementFile
                    )
                )
                print(e)
        # save to SQLite database
        try:
            c.execute(
                """SELECT COUNT(*) FROM sqlite_master
                    WHERE type='table' AND name='dipoleME';"""
            )
            if c.fetchone()[0] == 0:
                # create table and seed it with the precalculated values
                c.execute(
                    """CREATE TABLE IF NOT EXISTS dipoleME
                 (n1 TINYINT UNSIGNED, l1 TINYINT UNSIGNED,
                 j1_x2 TINYINT UNSIGNED,
                 n2 TINYINT UNSIGNED, l2 TINYINT UNSIGNED,
                 j2_x2 TINYINT UNSIGNED,
                 dme DOUBLE,
                 PRIMARY KEY (n1,l1,j1_x2,n2,l2,j2_x2)
                ) """
                )
                if len(data) > 0:
                    c.executemany(
                        "INSERT INTO dipoleME VALUES (?,?,?,?,?,?,?)", data
                    )
                self.conn.commit()
        except sqlite3.Error as e:
            print("Error while loading precalculated values into the database")
            print(e)
            exit()

        # load quadrupole matrix elements previously calculated
        data = []
        if self.quadrupoleMatrixElementFile != "":
            if preferQuantumDefects is False:
                self.quadrupoleMatrixElementFile = (
                    "NIST_" + self.quadrupoleMatrixElementFile
                )
            try:
                data = np.load(
                    os.path.join(
                        self.dataFolder, self.quadrupoleMatrixElementFile
                    ),
                    encoding="latin1",
                    allow_pickle=True,
                )
            except IOError as e:
                print(
                    "Error reading quadrupoleMatrixElementFile File "
                    + os.path.join(
                        self.dataFolder, self.quadrupoleMatrixElementFile
                    )
                )
                print(e)
        # save to SQLite database
        try:
            c.execute(
                """SELECT COUNT(*) FROM sqlite_master
                    WHERE type='table' AND name='quadrupoleME';"""
            )
            if c.fetchone()[0] == 0:
                # create table and seed it with the precalculated values
                c.execute(
                    """CREATE TABLE IF NOT EXISTS quadrupoleME
                 (n1 TINYINT UNSIGNED, l1 TINYINT UNSIGNED,
                 j1_x2 TINYINT UNSIGNED,
                 n2 TINYINT UNSIGNED, l2 TINYINT UNSIGNED,
                 j2_x2 TINYINT UNSIGNED,
                 qme DOUBLE,
                 PRIMARY KEY (n1,l1,j1_x2,n2,l2,j2_x2)
                ) """
                )
                if len(data) > 0:
                    c.executemany(
                        "INSERT INTO quadrupoleME VALUES (?,?,?,?,?,?,?)", data
                    )
                self.conn.commit()
        except sqlite3.Error as e:
            print("Error while loading precalculated values into the database")
            print(e)
            exit()

        # sEnergy is a packed (n,l)/(l,n) matrix; see class docstring
        self.sEnergy = np.array(
            [[0.0] * (self.NISTdataLevels + 1)] * (self.NISTdataLevels + 1)
        )

        # Always load NIST data on measured energy levels;
        # Even when user wants to use quantum defects, quantum defects for
        # lowest lying state are not always so accurate, so below the
        # minQuantumDefectN cut-off (defined for each element separately)
        # getEnergy(...) will always return measured,
        # not calculated energy levels
        if self.levelDataFromNIST == "":
            print(
                "NIST level data file not specified."
                + "Only quantum defects will be used."
            )
        else:
            levels = self._parseLevelsFromNIST(
                os.path.join(self.dataFolder, self.levelDataFromNIST)
            )
            br = 0
            while br < len(levels):
                self._addEnergy(
                    levels[br][0], levels[br][1], levels[br][2], levels[br][3]
                )
                br = br + 1

        # read Literature values for dipole matrix elements
        self._readLiteratureValues()
        self._readHFSdata()

        return
def _databaseInit(self):
# SQL connection and cursor
self.conn = sqlite3.connect(
os.path.join(self.dataFolder, self.precalculatedDB)
)
def getPressure(self, temperature):
"""Vapour pressure (in Pa) at given temperature
Args:
temperature (float): temperature in K
Returns:
float: vapour pressure in Pa
"""
print(
"Error: getPressure to-be implement in child class (otherwise "
+ "this call is invalid for the specified atom"
)
exit()
def getNumberDensity(self, temperature):
"""Atom number density at given temperature
See `calculation of basic properties example snippet`_.
.. _`calculation of basic properties example snippet`:
./Rydberg_atoms_a_primer.html#General-atomic-properties
Args:
temperature (float): temperature in K
Returns:
float: atom concentration in :math:`1/m^3`
"""
return self.getPressure(temperature) / (C_k * temperature)
def getAverageInteratomicSpacing(self, temperature):
"""
Returns average interatomic spacing in atomic vapour
See `calculation of basic properties example snippet`_.
.. _`calculation of basic properties example snippet`:
./Rydberg_atoms_a_primer.html#General-atomic-properties
Args:
temperature (float): temperature (K) of the atomic vapour
Returns:
float: average interatomic spacing in m
"""
return (5.0 / 9.0) * self.getNumberDensity(temperature) ** (-1.0 / 3.0)
def corePotential(self, l, r):
"""core potential felt by valence electron
For more details about derivation of model potential see
Ref. [#marinescu]_.
Args:
l (int): orbital angular momentum
r (float): distance from the nucleus (in a.u.)
Returns:
float: core potential felt by valence electron (in a.u. ???)
References:
.. [#marinescu] M. Marinescu, H. R. Sadeghpour, and A. Dalgarno
PRA **49**, 982 (1994),
https://doi.org/10.1103/PhysRevA.49.982
"""
return -self.effectiveCharge(l, r) / r - self.alphaC / (2 * r**4) * (
1 - exp(-((r / self.rc[l]) ** 6))
)
def effectiveCharge(self, l, r):
"""effective charge of the core felt by valence electron
For more details about derivation of model potential see
Ref. [#marinescu]_.
Args:
l (int): orbital angular momentum
r (float): distance from the nucleus (in a.u.)
Returns:
float: effective charge (in a.u.)
"""
return (
1.0
+ (self.Z - 1) * exp(-self.a1[l] * r)
- r * (self.a3[l] + self.a4[l] * r) * exp(-self.a2[l] * r)
)
def potential(self, l, s, j, r):
"""returns total potential that electron feels
Total potential = core potential + Spin-Orbit interaction
Args:
l (int): orbital angular momentum
s (float): spin angular momentum
j (float): total angular momentum
r (float): distance from the nucleus (in a.u.)
Returns:
float: potential (in a.u.)
"""
if l < 4:
return (
self.corePotential(l, r)
+ self.alpha**2
/ (2.0 * r**3)
* (j * (j + 1.0) - l * (l + 1.0) - s * (s + 1))
/ 2.0
)
else:
# act as if it is a Hydrogen atom
return (
-1.0 / r
+ self.alpha**2
/ (2.0 * r**3)
* (j * (j + 1.0) - l * (l + 1.0) - s * (s + 1))
/ 2.0
)
    def radialWavefunction(
        self, l, s, j, stateEnergy, innerLimit, outerLimit, step
    ):
        """
        Radial part of electron wavefunction.

        Calculates radial function with Numerov (from outside towards the
        core). Note that wavefunction might not be calculated all the way to
        the requested `innerLimit` if the divergence occurs before. In that
        case third returned argument gives nonzero value, corresponding to the
        first index in the array for which wavefunction was calculated. For
        quick example see `Rydberg wavefunction calculation snippet`_.

        .. _`Rydberg wavefunction calculation snippet`:
            ./Rydberg_atoms_a_primer.html#Rydberg-atom-wavefunctions

        Args:
            l (int): orbital angular momentum
            s (float): spin angular momentum
            j (float): total angular momentum
            stateEnergy (float): state energy, relative to ionization
                threshold, should be given in atomic units (Hartree)
            innerLimit (float): inner limit at which wavefunction is requested
            outerLimit (float): outer limit at which wavefunction is requested
            step (float): radial step for integration mesh (a.u.)

        Returns:
            List[float], List[float], int:
                :math:`r`

                :math:`R(r)\\cdot r`

        .. note::
            Radial wavefunction is not scaled to unity! This normalization
            condition means that we are using spherical harmonics which are
            normalized such that
            :math:`\\int \\mathrm{d}\\theta~\\mathrm{d}\\psi~Y(l,m_l)^* \
            \\times Y(l',m_{l'}) = \\delta (l,l') ~\\delta (m_l, m_{l'})`.

        Note:
            Alternative calculation methods can be added here (potential
            package expansion).

        """
        innerLimit = max(
            4.0 * step, innerLimit
        )  # prevent divergence due to hitting 0

        if self.cpp_numerov:
            # efficient implementation in C
            if l < 4:
                # low l: full model potential (fitted parameters for this l)
                d = self.NumerovWavefunction(
                    innerLimit,
                    outerLimit,
                    step,
                    0.01,
                    0.01,
                    l,
                    s,
                    j,
                    stateEnergy,
                    self.alphaC,
                    self.alpha,
                    self.Z,
                    self.a1[l],
                    self.a2[l],
                    self.a3[l],
                    self.a4[l],
                    self.rc[l],
                    (self.mass - C_m_e) / self.mass,
                )
            else:
                # high l: hydrogenic core (all model-potential parameters 0)
                d = self.NumerovWavefunction(
                    innerLimit,
                    outerLimit,
                    step,
                    0.01,
                    0.01,
                    l,
                    s,
                    j,
                    stateEnergy,
                    self.alphaC,
                    self.alpha,
                    self.Z,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    (self.mass - C_m_e) / self.mass,
                )

            psi_r = d[0]
            r = d[1]
            # normalize numerically on the returned mesh
            suma = np.trapz(psi_r**2, x=r)
            psi_r = psi_r / (sqrt(suma))
        else:
            # full implementation in Python
            mu = (self.mass - C_m_e) / self.mass

            def potential(x):
                # Numerov integrand on the sqrt-scaled mesh (x = sqrt(r))
                r = x * x
                return -3.0 / (4.0 * r) + 4.0 * r * (
                    2.0 * mu * (stateEnergy - self.potential(l, s, j, r))
                    - l * (l + 1) / (r**2)
                )

            r, psi_r = NumerovBack(
                innerLimit, outerLimit, potential, step, 0.01, 0.01
            )

            suma = np.trapz(psi_r**2, x=r)
            psi_r = psi_r / (sqrt(suma))

        return r, psi_r
def _parseLevelsFromNIST(self, fileData):
"""
Parses the level energies from file listing the NIST ASD data
Args:
fileData (str): path to the file containing NIST ASD data for
the element
"""
f = open(fileData, "r")
l = 0
n = 0
levels = []
for line in f:
line = re.sub(r"[\[\]]", "", line)
pattern = r"\.\d*[spdfgh]"
pattern2 = r"\|\s+\d*/"
pattern3 = r"/\d* \|"
pattern4 = r"\| *\d*\.\d* *\|"
match = re.search(pattern, line)
if match is not None:
n = int(line[match.start() + 1 : match.end() - 1])
if match is not None:
ch = line[match.end() - 1 : match.end()]
if ch == "s":
l = 0
elif ch == "p":
l = 1
elif ch == "d":
l = 2
elif ch == "f":
l = 3
elif ch == "g":
l = 4
elif ch == "h":
l = 5
else:
print("Unidentified character in line:\n", line)
exit()
match = re.search(pattern2, line)
if match is not None:
br1 = float(line[match.start() + 2 : match.end() - 1])
match = re.search(pattern3, line)
br2 = float(line[match.start() + 1 : match.end() - 2])
match = re.search(pattern4, line)
energyValue = float(line[match.start() + 1 : match.end() - 1])
levels.append([n, l, br1 / br2, energyValue])
f.close()
return levels
def _addEnergy(self, n, l, j, energyNIST):
"""
Adding energy levels
Accepts energy level relative to **ground state**, and
saves energy levels, relative to the **ionization treshold**.
Args:
energyNIST (float): energy relative to
the nonexcited level (= 0 eV)
"""
#
if abs(j - (l - 0.5)) < 0.001:
# j =l-1/2
self.sEnergy[n, l] = energyNIST - self.ionisationEnergy
else:
# j = l+1/2
self.sEnergy[l, n] = energyNIST - self.ionisationEnergy
def getTransitionWavelength(self, n1, l1, j1, n2, l2, j2, s=0.5, s2=None):
"""
Calculated transition wavelength (in vacuum) in m.
Returned values is given relative to the centre of gravity of the
hyperfine-split states.
Args:
n1 (int): principal quantum number of the state
**from** which we are going
l1 (int): orbital angular momentum of the state
**from** which we are going
j1 (float): total angular momentum of the state
**from** which we are going
n2 (int): principal quantum number of the state
**to** which we are going
l2 (int): orbital angular momentum of the state
**to** which we are going
j2 (float): total angular momentum of the state
**to** which we are going
s (float): optional, spin of the intial state
(for Alkali this is fixed to 0.5)
s2 (float): optional, spin of the final state.
If not set, defaults to same value as :obj:`s`
Returns:
float:
transition wavelength (in m). If the returned value is
negative, level from which we are going is **above**
the level to which we are going.
"""
if s2 is None:
s2 = s
return (C_h * C_c) / (
(self.getEnergy(n2, l2, j2, s=s2) - self.getEnergy(n1, l1, j1, s=s))
* C_e
)
def getTransitionFrequency(self, n1, l1, j1, n2, l2, j2, s=0.5, s2=None):
"""
Calculated transition frequency in Hz
Returned values is given relative to the centre of gravity of the
hyperfine-split states.
Args:
n1 (int): principal quantum number of the state
**from** which we are going
l1 (int): orbital angular momentum of the state
**from** which we are going
j1 (float): total angular momentum of the state
**from** which we are going
n2 (int): principal quantum number of the state
**to** which we are going
l2 (int): orbital angular momentum of the state
**to** which we are going
j2 (float): total angular momentum of the state
**to** which we are going
s (float): optional, spin of the intial state
(for Alkali this is fixed to 0.5)
s2 (float): optional, spin of the final state
If not set, defaults to the same value as :obj:`s`
Returns:
float:
transition frequency (in Hz). If the returned value is
negative, level from which we are going is **above**
the level to which we are going.
"""
if s2 is None:
s2 = s
return (
(self.getEnergy(n2, l2, j2, s=s2) - self.getEnergy(n1, l1, j1, s=s))
* C_e
/ C_h
)
def getEnergy(self, n, l, j, s=0.5):
"""
Energy of the level relative to the ionisation level (in eV)
Returned energies are with respect to the center of gravity of the
hyperfine-split states.
If `preferQuantumDefects` =False (set during initialization)
program will try use NIST energy value, if such exists,
falling back to energy calculation with quantum defects if
the measured value doesn't exist. For `preferQuantumDefects` =True,
program will calculate energies from quantum defects
(useful for comparing quantum defect calculations with measured
energy level values) if the principal quantum number of the
requested state is larger than the minimal quantum principal quantum
number `self.minQuantumDefectN` which sets minimal quantum number
for which quantum defects still give good estimate of state energy
(below this value saved energies will be used if existing).
Args:
n (int): principal quantum number
l (int): orbital angular momentum
j (float): total angular momentum
s (float): optional, total spin angular momentum. Default value
of 0.5 is correct for Alkali atoms, and has to be specified
explicitly for divalent atoms.
Returns:
float: state energy (eV)
"""
if l >= n:
raise ValueError(
"Requested energy for state l=%d >= n=%d !" % (l, n)
)
# use NIST data ?
if (
(not self.preferQuantumDefects or n < self.minQuantumDefectN)
and (n <= self.NISTdataLevels)
and (abs(self._getSavedEnergy(n, l, j, s=s)) > 1e-8)
):
return self._getSavedEnergy(n, l, j, s=s)
# else, use quantum defects
defect = self.getQuantumDefect(n, l, j, s=s)
return -self.scaledRydbergConstant / ((n - defect) ** 2)
def _getSavedEnergy(self, n, l, j, s=0.5):
if abs(j - (l - 0.5)) < 0.001:
# j = l-1/2
return self.sEnergy[n, l]
elif abs(j - (l + 0.5)) < 0.001:
# j =l+1/2
return self.sEnergy[l, n]
else:
raise ValueError(
"j (=%.1f) is not equal to l+1/2 nor l-1/2 (l=%d)" % (j, l)
)
def getQuantumDefect(self, n, l, j, s=0.5):
"""
Quantum defect of the level.
For an example, see `Rydberg energy levels example snippet`_.
.. _`Rydberg energy levels example snippet`:
./Rydberg_atoms_a_primer.html#Rydberg-Atom-Energy-Levels
Args:
n (int): principal quantum number
l (int): orbital angular momentum
j (float): total angular momentum
s (float): (optional). Total spin angular momentum.
Default value of 0.5 correct for Alkali atoms. For divalent
atoms it has to be explicitly defined.
Returns:
float: quantum defect
"""
defect = 0.0
if l < 5:
# find correct part in table of quantum defects
modifiedRRcoef = self.quantumDefect[round(floor(s) + s + j - l)][l]
if l < 3 and abs(modifiedRRcoef[0]) < 1e-9 and self.Z != 1:
# it's not Hydrogen but for l in {s,p,d} quantum defect is 0
raise ValueError(
"Quantum defects for requested state "
+ ("(n = %d, l = %d, j = %.1f, s=%.1f) are" % (n, l, j, s))
+ " uknown. Aborting calculation."
)
defect = (
modifiedRRcoef[0]
+ modifiedRRcoef[1] / ((n - modifiedRRcoef[0]) ** 2)
+ modifiedRRcoef[2] / ((n - modifiedRRcoef[0]) ** 4)
+ modifiedRRcoef[3] / ((n - modifiedRRcoef[0]) ** 6)
+ modifiedRRcoef[4] / ((n - modifiedRRcoef[0]) ** 8)
+ modifiedRRcoef[5] / ((n - modifiedRRcoef[0]) ** 10)
)
else:
# use \delta_\ell = \delta_g * (4/\ell)**5
# from https://journals.aps.org/pra/abstract/10.1103/PhysRevA.74.062712
defect = self.quantumDefect[0][4][0] * (4 / l) ** 5
return defect
    def getRadialMatrixElement(
        self,
        n1: int,
        l1: int,
        j1: float,
        n2: int,
        l2: int,
        j2: float,
        s=0.5,
        useLiterature=True,
    ):
        """
        Radial part of the dipole matrix element.

        Calculates :math:`\\int \\mathbf{d}r~R_{n_1,l_1,j_1}(r)\\cdot \
            R_{n_2,l_2,j_2}(r) \\cdot r^3`.

        Args:
            n1 (int): principal quantum number of state 1
            l1 (int): orbital angular momentum of state 1
            j1 (float): total angular momentum of state 1
            n2 (int): principal quantum number of state 2
            l2 (int): orbital angular momentum of state 2
            j2 (float): total angular momentum of state 2
            s (float): optional, total spin angular momentum of state 1.
                By default 0.5 for Alkali atoms.
            useLiterature (bool): optional, should literature values for
                dipole matrix element be used if existing? If true,
                compiled values stored in `literatureDMEfilename` variable
                for a given atom (file is stored locally at ~/.arc-data/),
                will be checked, and if the value is found, selects the
                value with smallest error estimate (if there are multiple
                entries). If no value is found, it will default to numerical
                integration of wavefunctions. By default True.

        Returns:
            float: dipole matrix element (:math:`a_0 e`).
        """
        # electric-dipole selection rules: delta l = 1, delta j <= 1
        dl = abs(l1 - l2)
        dj = abs(j1 - j2)
        if not (dl == 1 and (dj < 1.1)):
            return 0

        # canonical ordering (state 1 = lower energy) so each pair is
        # cached in the database under a single key
        if self.getEnergy(n1, l1, j1, s=s) > self.getEnergy(n2, l2, j2, s=s):
            temp = n1
            n1 = n2
            n2 = temp
            temp = l1
            l1 = l2
            l2 = temp
            temp = j1
            j1 = j2
            j2 = temp

        # database keys are integers; j is stored doubled (j_x2 = 2 j)
        n1 = round(n1)
        n2 = round(n2)
        l1 = round(l1)
        l2 = round(l2)
        j1_x2 = round(2 * j1)
        j2_x2 = round(2 * j2)

        c = self.conn.cursor()

        if useLiterature:
            # is there literature value for this DME? If there is,
            # use the best one (smallest error)
            c.execute(
                """SELECT dme FROM literatureDME WHERE
                 n1= ? AND l1 = ? AND j1_x2 = ? AND
                 n2 = ? AND l2 = ? AND j2_x2 = ?
                 ORDER BY errorEstimate ASC""",
                (n1, l1, j1_x2, n2, l2, j2_x2),
            )
            answer = c.fetchone()
            if answer:
                # found a literature value
                return answer[0]

        # was this calculated before? If it was, retrieve from memory
        c.execute(
            """SELECT dme FROM dipoleME WHERE
            n1= ? AND l1 = ? AND j1_x2 = ? AND
            n2 = ? AND l2 = ? AND j2_x2 = ?""",
            (n1, l1, j1_x2, n2, l2, j2_x2),
        )
        dme = c.fetchone()
        if dme:
            return dme[0]

        # not cached: integrate the radial wavefunctions numerically
        step = 0.001
        r1, psi1_r1 = self.radialWavefunction(
            l1,
            0.5,
            j1,
            self.getEnergy(n1, l1, j1) / 27.211,
            self.alphaC ** (1 / 3.0),
            2.0 * n1 * (n1 + 15.0),
            step,
        )
        r2, psi2_r2 = self.radialWavefunction(
            l2,
            0.5,
            j2,
            self.getEnergy(n2, l2, j2) / 27.211,
            self.alphaC ** (1 / 3.0),
            2.0 * n2 * (n2 + 15.0),
            step,
        )

        upTo = min(len(r1), len(r2))

        # note that r1 and r2 change in same steps,
        # starting from the same value
        dipoleElement = np.trapz(
            np.multiply(
                np.multiply(psi1_r1[0:upTo], psi2_r2[0:upTo]), r1[0:upTo]
            ),
            x=r1[0:upTo],
        )

        # cache the result for future calls
        c.execute(
            """ INSERT INTO dipoleME VALUES (?,?,?, ?,?,?, ?)""",
            [n1, l1, j1_x2, n2, l2, j2_x2, dipoleElement],
        )
        self.conn.commit()

        return dipoleElement
def getQuadrupoleMatrixElement(
    self, n1: int, l1: int, j1: float, n2: int, l2: int, j2: float, s=0.5
):
    """
    Radial part of the quadrupole matrix element

    Calculates :math:`\\int \\mathbf{d}r~R_{n_1,l_1,j_1}(r)\\cdot \
    R_{n_2,l_2,j_2}(r) \\cdot r^4`.

    See `Quadrupole calculation example snippet`_ .

    .. _`Quadrupole calculation example snippet`:
        ./Rydberg_atoms_a_primer.html#Quadrupole-matrix-elements

    Args:
        n1 (int): principal quantum number of state 1
        l1 (int): orbital angular momentum of state 1
        j1 (float): total angular momentum of state 1
        n2 (int): principal quantum number of state 2
        l2 (int): orbital angular momentum of state 2
        j2 (float): total angular momentum of state 2
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: quadrupole matrix element (:math:`a_0^2 e`).
    """
    # quadrupole selection rules: |l1-l2| in {0,1,2} and |j1-j2| <= 2
    # (the 2.1 threshold tolerates float representation of j)
    dl = abs(l1 - l2)
    dj = abs(j1 - j2)
    if not ((dl == 0 or dl == 2 or dl == 1) and (dj < 2.1)):
        return 0
    # order the two states by energy so that the cached value is
    # independent of the argument order
    if self.getEnergy(n1, l1, j1, s=s) > self.getEnergy(n2, l2, j2, s=s):
        temp = n1
        n1 = n2
        n2 = temp
        temp = l1
        l1 = l2
        l2 = temp
        temp = j1
        j1 = j2
        j2 = temp
    # database keys are integers; j is stored doubled (j_x2 = 2*j)
    n1 = round(n1)
    n2 = round(n2)
    l1 = round(l1)
    l2 = round(l2)
    j1_x2 = round(2 * j1)
    j2_x2 = round(2 * j2)
    c = self.conn.cursor()
    # was this calculated before? If yes, retrieve from memory.
    c.execute(
        """SELECT qme FROM quadrupoleME WHERE
        n1= ? AND l1 = ? AND j1_x2 = ? AND
        n2 = ? AND l2 = ? AND j2_x2 = ?""",
        (n1, l1, j1_x2, n2, l2, j2_x2),
    )
    qme = c.fetchone()
    if qme:
        return qme[0]
    # if it wasn't, calculate now by numerically integrating the two
    # radial wavefunctions against r^2 (dipole-like integral with an
    # extra r^2 weight; the r^2 Jacobian is inside psi_r)
    step = 0.001
    # NOTE(review): energies below are requested without s=s (default
    # spin), unlike the ordering check above — confirm intended for
    # s != 0.5; energy is converted to atomic units (Hartree ~ 27.211 eV)
    r1, psi1_r1 = self.radialWavefunction(
        l1,
        0.5,
        j1,
        self.getEnergy(n1, l1, j1) / 27.211,
        self.alphaC ** (1 / 3.0),
        2.0 * n1 * (n1 + 15.0),
        step,
    )
    r2, psi2_r2 = self.radialWavefunction(
        l2,
        0.5,
        j2,
        self.getEnergy(n2, l2, j2) / 27.211,
        self.alphaC ** (1 / 3.0),
        2.0 * n2 * (n2 + 15.0),
        step,
    )
    upTo = min(len(r1), len(r2))
    # note that r1 and r2 change in same steps,
    # starting from the same value
    quadrupoleElement = np.trapz(
        np.multiply(
            np.multiply(psi1_r1[0:upTo], psi2_r2[0:upTo]),
            np.multiply(r1[0:upTo], r1[0:upTo]),
        ),
        x=r1[0:upTo],
    )
    # cache the freshly computed value for future calls
    c.execute(
        """ INSERT INTO quadrupoleME VALUES (?,?,?,?,?,?, ?)""",
        [n1, l1, j1_x2, n2, l2, j2_x2, quadrupoleElement],
    )
    self.conn.commit()
    return quadrupoleElement
def getReducedMatrixElementJ_asymmetric(
    self, n1, l1, j1, n2, l2, j2, s=0.5
):
    """
    Reduced matrix element in :math:`J` basis, defined in asymmetric
    notation.

    Note that the notation for symmetrically and asymmetrically
    defined reduced matrix elements is not consistent in the
    literature; e.g. Steck [1]_ uses precisely the opposite
    convention. The asymmetric element is not symmetric under state
    exchange, :math:`( j||e r||j' ) \\neq ( j'||e r ||j )`, and is
    related to the symmetric one by :math:`\\langle j||er||j'\
    \\rangle=\\sqrt{2j+1} ( j ||er ||j')`.

    This function always returns the value for the transition from
    the lower- to the higher-energy state, independent of the order
    in which the states are passed.

    Args:
        n1 (int): principal quantum number of state 1
        l1 (int): orbital angular momentum of state 1
        j1 (float): total angular momentum of state 1
        n2 (int): principal quantum number of state 2
        l2 (int): orbital angular momentum of state 2
        j2 (float): total angular momentum of state 2
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float:
            reduced dipole matrix element in Steck notation
            :math:`( j || er || j' )` (:math:`a_0 e`).

    .. [1] Daniel A. Steck, "Cesium D Line Data," (revision 2.0.1,
        2 May 2008). http://steck.us/alkalidata
    """
    # make state 1 the lower-energy state (negative transition
    # frequency means the states were passed in the opposite order)
    if self.getTransitionFrequency(n1, l1, j1, n2, l2, j2, s=s, s2=s) < 0:
        n1, n2 = n2, n1
        l1, l2 = l2, l1
        j1, j2 = j2, j1
    phase = (-1) ** (round((l2 + l1 + 3.0) / 2.0 + s + j2))
    angularFactor = (
        sqrt((2.0 * j2 + 1.0) * (2.0 * l1 + 1.0))
        * Wigner6j(l1, l2, 1, j2, j1, s)
        * sqrt(float(max(l1, l2)) / (2.0 * l1 + 1.0))
    )
    radialPart = self.getRadialMatrixElement(n1, l1, j1, n2, l2, j2, s=s)
    return phase * angularFactor * radialPart
def getReducedMatrixElementL(self, n1, l1, j1, n2, l2, j2, s=0.5):
    """
    Reduced matrix element in :math:`L` basis (symmetric notation)

    Args:
        n1 (int): principal quantum number of state 1
        l1 (int): orbital angular momentum of state 1
        j1 (float): total angular momentum of state 1
        n2 (int): principal quantum number of state 2
        l2 (int): orbital angular momentum of state 2
        j2 (float): total angular momentum of state 2
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float:
            reduced dipole matrix element in :math:`L` basis
            :math:`\\langle l || er || l' \\rangle` (:math:`a_0 e`).
    """
    # angular factor: phase * degeneracy weight * 3-j symbol
    angularPart = (
        (-1) ** l1
        * sqrt((2.0 * l1 + 1.0) * (2.0 * l2 + 1.0))
        * Wigner3j(l1, 1, l2, 0, 0, 0)
    )
    return angularPart * self.getRadialMatrixElement(
        n1, l1, j1, n2, l2, j2, s=s
    )
def getReducedMatrixElementJ(self, n1, l1, j1, n2, l2, j2, s=0.5):
    """
    Reduced matrix element in :math:`J` basis (symmetric notation)

    Args:
        n1 (int): principal quantum number of state 1
        l1 (int): orbital angular momentum of state 1
        j1 (float): total angular momentum of state 1
        n2 (int): principal quantum number of state 2
        l2 (int): orbital angular momentum of state 2
        j2 (float): total angular momentum of state 2
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float:
            reduced dipole matrix element in :math:`J` basis
            :math:`\\langle j || er || j' \\rangle` (:math:`a_0 e`).
    """
    # recouple the L-basis reduced element into the J basis via a
    # 6-j symbol (spin s is a spectator)
    phase = (-1) ** (round(l1 + s + j2 + 1.0))
    degeneracy = sqrt((2.0 * j1 + 1.0) * (2.0 * j2 + 1.0))
    recoupling = Wigner6j(j1, 1.0, j2, l2, s, l1)
    return (
        phase
        * degeneracy
        * recoupling
        * self.getReducedMatrixElementL(n1, l1, j1, n2, l2, j2, s=s)
    )
def getDipoleMatrixElement(
    self, n1, l1, j1, mj1, n2, l2, j2, mj2, q, s=0.5
):
    r"""
    Dipole matrix element
    :math:`\langle n_1 l_1 j_1 m_{j_1} |e\mathbf{r}|\
    n_2 l_2 j_2 m_{j_2}\rangle`
    in units of :math:`a_0 e`

    Args:
        n1. l1, j1, mj1: principal, orbital, total angular momentum,
            and projection of total angular momentum for state 1
        n2. l2, j2, mj2: principal, orbital, total angular momentum,
            and projection of total angular momentum for state 2
        q (int): specifies transition that the driving field couples to,
            +1, 0 or -1 corresponding to driving :math:`\sigma^+`,
            :math:`\pi` and :math:`\sigma^-` transitions respectively.
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: dipole matrix element( :math:`a_0 e`)

    Example:
        Calculation of the :math:`5 S_{1/2}m_j=-\frac{1}{2}\
        \rightarrow 5 P_{3/2}m_j=-\frac{3}{2}`
        transition dipole matrix element for a laser driving the
        :math:`\sigma^-` transition::

            from arc import *
            atom = Rubidium()
            # transition 5 S_{1/2} m_j=-0.5 -> 5 P_{3/2} m_j=-1.5
            # for laser driving sigma- transition
            print(atom.getDipoleMatrixElement(5,0,0.5,-0.5,5,1,1.5,-1.5,-1))
    """
    # only sigma+/pi/sigma- polarizations are physical
    if abs(q) > 1.1:
        return 0
    # Wigner-Eckart: spherical (angular) factor times the reduced
    # matrix element in the J basis
    angularFactor = self.getSphericalDipoleMatrixElement(
        j1, mj1, j2, mj2, q
    )
    reducedElement = self.getReducedMatrixElementJ(
        n1, l1, j1, n2, l2, j2, s=s
    )
    return angularFactor * reducedElement
def getDipoleMatrixElementHFS(
    self, n1, l1, j1, f1, mf1, n2, l2, j2, f2, mf2, q, s=0.5
):
    r"""
    Dipole matrix element for hyperfine structure resolved transitions
    :math:`\langle n_1 l_1 j_1 f_1 m_{f_1} |e\mathbf{r}|\
    n_2 l_2 j_2 f_2 m_{f_2}\rangle`
    in units of :math:`a_0 e`

    For hyperfine resolved transitions, the dipole matrix element is
    :math:`\langle n_1,\ell_1,j_1,f_1,m_{f1} | \
    \mathbf{\hat{r}}\cdot \mathbf{\varepsilon}_q \
    | n_2,\ell_2,j_2,f_2,m_{f2} \rangle = (-1)^{f_1-m_{f1}} \
    \left( \
    \begin{matrix} \
    f_1 & 1 & f_2 \\ \
    -m_{f1} & q & m_{f2} \
    \end{matrix}\right) \
    \langle n_1 \ell_1 j_1 f_1|| r || n_2 \ell_2 j_2 f_2 \rangle,` where
    :math:`\langle n_1 \ell_1 j_1 f_1 ||r|| n_2 \ell_2 j_2 f_2 \rangle \
    = (-1)^{j_1+I+F_2+1}\sqrt{(2f_1+1)(2f_2+1)} ~ \
    \left\{ \begin{matrix}\
    F_1 & 1 & F_2 \\ \
    j_2 & I & j_1 \
    \end{matrix}\right\}~ \
    \langle n_1 \ell_1 j_1||r || n_2 \ell_2 j_2 \rangle.`

    Args:
        n1. l1, j1, f1, mf1: principal, orbital, total orbital,
            fine basis (total atomic) angular momentum,
            and projection of total angular momentum for state 1
        n2. l2, j2, f2, mf2: principal, orbital, total orbital,
            fine basis (total atomic) angular momentum,
            and projection of total angular momentum for state 2
        q (int): specifies transition that the driving field couples to,
            +1, 0 or -1 corresponding to driving :math:`\sigma^+`,
            :math:`\pi` and :math:`\sigma^-` transitions respectively.
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: dipole matrix element( :math:`a_0 e`)
    """
    # factorize as: spherical (projection) part * F->J reduction
    # * reduced matrix element in the J basis
    return (
        self.getSphericalDipoleMatrixElement(f1, mf1, f2, mf2, q)
        * self._reducedMatrixElementFJ(j1, f1, j2, f2)
        * self.getReducedMatrixElementJ(n1, l1, j1, n2, l2, j2, s=s)
    )
def getRabiFrequency(
    self, n1, l1, j1, mj1, n2, l2, j2, q, laserPower, laserWaist, s=0.5
):
    """
    Returns a Rabi frequency for resonantly driven atom in a
    center of TEM00 mode of a driving field

    Args:
        n1,l1,j1,mj1 : state from which we are driving transition
        n2,l2,j2 : state to which we are driving transition
        q : laser polarization (-1,0,1 correspond to :math:`\\sigma^-`,
            :math:`\\pi` and :math:`\\sigma^+` respectively)
        laserPower : laser power in units of W
        laserWaist : laser :math:`1/e^2` waist (radius) in units of m
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float:
            Frequency in rad :math:`^{-1}`. If you want frequency
            in Hz, divide by returned value by :math:`2\\pi`
    """
    # peak intensity of a TEM00 Gaussian beam of given power and waist
    peakIntensity = 2 * laserPower / (pi * laserWaist**2)
    # corresponding electric-field amplitude
    fieldAmplitude = sqrt(2.0 * peakIntensity / (C_c * epsilon_0))
    return self.getRabiFrequency2(
        n1, l1, j1, mj1, n2, l2, j2, q, fieldAmplitude, s=s
    )
def getRabiFrequency2(
    self, n1, l1, j1, mj1, n2, l2, j2, q, electricFieldAmplitude, s=0.5
):
    """
    Returns a Rabi frequency for resonant excitation with a given
    electric field amplitude

    Args:
        n1,l1,j1,mj1 : state from which we are driving transition
        n2,l2,j2 : state to which we are driving transition
        q : laser polarization (-1,0,1 correspond to :math:`\\sigma^-`,
            :math:`\\pi` and :math:`\\sigma^+` respectively)
        electricFieldAmplitude : amplitude of electric field
            driving (V/m)
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float:
            Frequency in rad :math:`^{-1}`. If you want frequency
            in Hz, divide by returned value by :math:`2\\pi`
    """
    # the field polarization fixes the final-state projection
    mj2 = mj1 + q
    if abs(mj2) - 0.1 > j2:
        # projection outside the allowed range |mj2| <= j2
        return 0
    # dipole moment in SI units (C m)
    dipoleMoment = (
        self.getDipoleMatrixElement(
            n1, l1, j1, mj1, n2, l2, j2, mj2, q, s=s
        )
        * C_e
        * physical_constants["Bohr radius"][0]
    )
    # Rabi frequency Omega = E * |d| / hbar
    return electricFieldAmplitude * abs(dipoleMoment) / hbar
def getC6term(self, n, l, j, n1, l1, j1, n2, l2, j2, s=0.5):
    """
    C6 interaction term for the given two pair-states

    Calculates :math:`C_6` interaction term for :math:`|n,l,j,n,l,j\
    \\rangle \\leftrightarrow |n_1,l_1,j_1,n_2,l_2,j_2\\rangle`.
    For details of calculation see Ref. [#c6r1]_.

    Args:
        n (int): principal quantum number
        l (int): orbital angular momentum
        j (float): total angular momentum
        n1 (int): principal quantum number
        l1 (int): orbital angular momentum
        j1 (float): total angular momentum
        n2 (int): principal quantum number
        l2 (int): orbital angular momentum
        j2 (float): total angular momentum
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: :math:`C_6 = \\frac{1}{4\\pi\\varepsilon_0} \
            \\frac{|\\langle n,l,j |er|n_1,l_1,j_1\\rangle|^2|\
            \\langle n,l,j |er|n_2,l_2,j_2\\rangle|^2}\
            {E(n_1,l_1,j_2,n_2,j_2,j_2)-E(n,l,j,n,l,j)}`
        (:math:`h` Hz m :math:`{}^6`).

    Example:
        Values from Ref. [#c6r1]_ for C3 coupling to particular
        channels (Eq. (50a-c)) can be reproduced with::

            from arc import *
            channels = [[70,0,0.5, 70, 1,1.5, 69,1, 1.5],\\
                        [70,0,0.5, 70, 1,1.5, 69,1, 0.5],\\
                        [70,0,0.5, 69, 1,1.5, 70,1, 0.5],\\
                        [70,0,0.5, 70, 1,0.5, 69,1, 0.5]]
            print(" = = = Caesium = = = ")
            atom = Caesium()
            for channel in channels:
                print("%.0f GHz (mu m)^6" % ( atom.getC6term(*channel)
                    / C_h * 1.e27 ))

        giving 722, 316, 383 and 228 GHz (mu m)^6 for Caesium (and
        799, 543, 589, 437 for Rubidium), in good agreement with the
        values cited in Ref. [#c6r1]_; small discrepancies for Caesium
        originate from slightly different quantum defects used in
        calculations.

    References:
        .. [#c6r1] T. G. Walker, M. Saffman, PRA **77**, 032723 (2008)
            https://doi.org/10.1103/PhysRevA.77.032723
    """
    # radial dipole couplings of the initial state to each of the two
    # intermediate states (atomic units)
    d1 = self.getRadialMatrixElement(n, l, j, n1, l1, j1, s=s)
    d2 = self.getRadialMatrixElement(n, l, j, n2, l2, j2, s=s)
    # product of the two dipole couplings converted to SI units
    d1d2 = (
        1
        / (4.0 * pi * epsilon_0)
        * d1
        * d2
        * C_e**2
        * (physical_constants["Bohr radius"][0]) ** 2
    )
    # pair-state energy defect (J)
    energyDefect = C_e * (
        self.getEnergy(n1, l1, j1, s=s)
        + self.getEnergy(n2, l2, j2, s=s)
        - 2 * self.getEnergy(n, l, j, s=s)
    )
    return -(d1d2**2) / energyDefect
def getC3term(self, n, l, j, n1, l1, j1, n2, l2, j2, s=0.5):
    """
    C3 interaction term for the given two pair-states

    Calculates :math:`C_3` interaction term for
    :math:`|n,l,j,n,l,j\\rangle \
    \\leftrightarrow |n_1,l_1,j_1,n_2,l_2,j_2\\rangle`

    Args:
        n (int): principal quantum number
        l (int): orbital angular momentum
        j (float): total angular momentum
        n1 (int): principal quantum number
        l1 (int): orbital angular momentum
        j1 (float): total angular momentum
        n2 (int): principal quantum number
        l2 (int): orbital angular momentum
        j2 (float): total angular momentum
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: :math:`C_3 = \\frac{\\langle n,l,j |er\
            |n_1,l_1,j_1\\rangle \
            \\langle n,l,j |er|n_2,l_2,j_2\\rangle}{4\\pi\\varepsilon_0}`
        (:math:`h` Hz m :math:`{}^3`).
    """
    # radial dipole couplings (atomic units), then convert the
    # product to SI
    d1 = self.getRadialMatrixElement(n, l, j, n1, l1, j1, s=s)
    d2 = self.getRadialMatrixElement(n, l, j, n2, l2, j2, s=s)
    return (
        1
        / (4.0 * pi * epsilon_0)
        * d1
        * d2
        * C_e**2
        * (physical_constants["Bohr radius"][0]) ** 2
    )
def getEnergyDefect(self, n, l, j, n1, l1, j1, n2, l2, j2, s=0.5):
    """
    Energy defect for the given two pair-states (one of the state has
    two atoms in the same state)

    Energy difference between the states
    :math:`E(n_1,l_1,j_1,n_2,l_2,j_2) - E(n,l,j,n,l,j)`

    Args:
        n (int): principal quantum number
        l (int): orbital angular momentum
        j (float): total angular momentum
        n1 (int): principal quantum number
        l1 (int): orbital angular momentum
        j1 (float): total angular momentum
        n2 (int): principal quantum number
        l2 (int): orbital angular momentum
        j2 (float): total angular momentum
        s (float): optional. Spin angular momentum
            (default 0.5 for Alkali)

    Returns:
        float: energy defect (SI units: J)
    """
    # sum of single-atom energies of the coupled pair-state, minus
    # twice the energy of the (doubly occupied) initial state;
    # C_e converts eV -> J
    coupledPairEnergy = self.getEnergy(n1, l1, j1, s=s) + self.getEnergy(
        n2, l2, j2, s=s
    )
    return C_e * (coupledPairEnergy - 2 * self.getEnergy(n, l, j, s=s))
def getEnergyDefect2(
    self, n, l, j, nn, ll, jj, n1, l1, j1, n2, l2, j2, s=0.5
):
    """
    Energy defect for the given two pair-states

    Energy difference between the states
    :math:`E(n_1,l_1,j_1,n_2,l_2,j_2) - E(n,l,j,nn,ll,jj)`

    See `pair-state energy defects example snippet`_.

    .. _`pair-state energy defects example snippet`:
        ./Rydberg_atoms_a_primer.html#Rydberg-atom-interactions

    Args:
        n (int): principal quantum number
        l (int): orbital angular momentum
        j (float): total angular momentum
        nn (int): principal quantum number
        ll (int): orbital angular momentum
        jj (float): total angular momentum
        n1 (int): principal quantum number
        l1 (int): orbital angular momentum
        j1 (float): total angular momentum
        n2 (int): principal quantum number
        l2 (int): orbital angular momentum
        j2 (float): total angular momentum
        s (float): optional. Spin angular momentum
            (default 0.5 for Alkali)

    Returns:
        float: energy defect (SI units: J)
    """
    # coupled pair-state energy minus initial pair-state energy,
    # converted from eV to J via the elementary charge
    finalPairEnergy = self.getEnergy(n1, l1, j1, s=s) + self.getEnergy(
        n2, l2, j2, s=s
    )
    return C_e * (
        finalPairEnergy
        - self.getEnergy(n, l, j, s=s)
        - self.getEnergy(nn, ll, jj, s=s)
    )
def updateDipoleMatrixElementsFile(self):
    """
    Updates the file with pre-calculated dipole matrix elements.

    Dumps every dipole and quadrupole matrix element cached in the
    SQL database during previous runs into the corresponding ``.npy``
    files, allowing quick access to them in future calculations.
    """
    cursor = self.conn.cursor()
    # dump both caching tables from the database
    cursor.execute("""SELECT * FROM dipoleME """)
    dipoleMatrixElement = list(cursor.fetchall())
    cursor.execute("""SELECT * FROM quadrupoleME """)
    quadrupoleMatrixElement = list(cursor.fetchall())
    # save dipole elements
    try:
        np.save(
            os.path.join(self.dataFolder, self.dipoleMatrixElementFile),
            dipoleMatrixElement,
        )
    except IOError as e:
        print(
            "Error while updating dipoleMatrixElements File "
            + self.dipoleMatrixElementFile
        )
        print(e)
    # save quadrupole elements
    try:
        np.save(
            os.path.join(self.dataFolder, self.quadrupoleMatrixElementFile),
            quadrupoleMatrixElement,
        )
    except IOError as e:
        print(
            "Error while updating quadrupoleMatrixElements File "
            + self.quadrupoleMatrixElementFile
        )
        print(e)
def getTransitionRate(self, n1, l1, j1, n2, l2, j2, temperature=0.0, s=0.5):
    """
    Transition rate due to coupling to vacuum modes
    (black body included)

    Calculates transition rate from the first given state to the second
    given state :math:`|n_1,l_1,j_1\\rangle \\rightarrow \
    |n_2,j_2,j_2\\rangle` at given temperature due to interaction with
    the vacuum field. For zero temperature this returns Einstein A
    coefficient. For details of calculation see Ref. [#lf1]_ and
    Ref. [#lf2]_.
    See `Black-body induced population transfer example snippet`_.

    .. _`Black-body induced population transfer example snippet`:
        ./Rydberg_atoms_a_primer.html#Rydberg-Atom-Lifetimes

    Args:
        n1 (int): principal quantum number
        l1 (int): orbital angular momentum
        j1 (float): total angular momentum
        n2 (int): principal quantum number
        l2 (int): orbital angular momentum
        j2 (float): total angular momentum
        [temperature] (float): temperature in K
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: transition rate in s :math:`{}^{-1}` (SI)

    References:
        .. [#lf1] C. E. Theodosiou, PRA **30**, 2881 (1984)
            https://doi.org/10.1103/PhysRevA.30.2881
        .. [#lf2] I. I. Beterov, I. I. Ryabtsev, D. B. Tretyakov,\
            and V. M. Entin, PRA **79**, 052504 (2009)
            https://doi.org/10.1103/PhysRevA.79.052504
    """
    degeneracyTerm = 1.0
    # find dipoleRadialPart: the asymmetric reduced matrix element is
    # always evaluated from the lower- to the higher-energy state, so
    # branch on the sign of the transition frequency to pass the states
    # in a consistent order
    if self.getTransitionFrequency(n1, l1, j1, n2, l2, j2, s=s, s2=s) > 0:
        dipoleRadialPart = (
            self.getReducedMatrixElementJ_asymmetric(
                n1, l1, j1, n2, l2, j2, s=s
            )
            * C_e
            * (physical_constants["Bohr radius"][0])
        )
    else:
        dipoleRadialPart = (
            self.getReducedMatrixElementJ_asymmetric(
                n2, l2, j2, n1, l1, j1, s=s
            )
            * C_e
            * (physical_constants["Bohr radius"][0])
        )
        # rescale by the degeneracy ratio since the matrix element was
        # evaluated with the states in swapped order
        degeneracyTerm = (2.0 * j2 + 1.0) / (2.0 * j1 + 1.0)
    # transition angular frequency (rad/s)
    omega = abs(
        2.0
        * pi
        * self.getTransitionFrequency(n1, l1, j1, n2, l2, j2, s=s, s2=s)
    )
    modeOccupationTerm = 0.0
    if self.getTransitionFrequency(n1, l1, j1, n2, l2, j2, s=s, s2=s) < 0:
        # downward transition: spontaneous emission contributes a
        # temperature-independent vacuum term of 1
        modeOccupationTerm = 1.0
    # only possible by absorbing thermal photons ?
    # add the black-body (Bose-Einstein) photon occupation; the first
    # condition skips the term when the exponent would exceed ~100
    # (negligible occupation / exp overflow, and always at T=0), the
    # second skips (near-)degenerate transitions (omega <= 1e2 rad/s)
    if (hbar * omega < 100 * C_k * temperature) and (omega > 1e2):
        modeOccupationTerm += 1.0 / (
            exp(hbar * omega / (C_k * temperature)) - 1.0
        )
    # Einstein-A-like rate weighted by degeneracy and photon occupation
    return (
        omega**3
        * dipoleRadialPart**2
        / (3 * pi * epsilon_0 * hbar * C_c**3)
        * degeneracyTerm
        * modeOccupationTerm
    )
def getStateLifetime(
    self, n, l, j, temperature=0, includeLevelsUpTo=0, s=0.5
):
    """
    Returns the lifetime of the state (in s)

    For non-zero temperatures, user must specify up to which principal
    quantum number levels, that is **above** the initial state, should
    be included in order to account for black-body induced transitions
    to higher lying states. See `Rydberg lifetimes example snippet`_.

    .. _`Rydberg lifetimes example snippet`:
        ./Rydberg_atoms_a_primer.html#Rydberg-Atom-Lifetimes

    Args:
        n, l, j (int,int,float): specifies state whose lifetime we are
            calculating
        temperature : optional. Temperature at which the atom
            environment is, measured in K. If this parameter
            is non-zero, user has to specify transitions up to
            which state (due to black-body decay) should be included
            in calculation.
        includeLevelsUpTo (int): optional and not needed for atom
            lifetimes calculated at zero temperature. At non zero
            temperatures, this specify maximum principal quantum number
            of the state to which black-body induced transitions will
            be included. Minimal value of the parameter in that case is
            :math:`n+1`
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float:
            State lifetime in units of s (seconds)

    See also:
        :obj:`getTransitionRate` for calculating rates of individual
        transition rates between the two states
    """
    if temperature > 0.1 and includeLevelsUpTo <= n:
        raise ValueError(
            "For non-zero temperatures, user has to specify "
            + "principal quantum number of the maximum state *above* the "
            + "state for which we are calculating the lifetime. This is "
            + "in order to include black-body induced transitions to "
            + " higher lying up in energy levels."
        )
    elif temperature < 0.1:
        # effectively zero temperature: only spontaneous decay matters,
        # so no states above n need to be included
        includeLevelsUpTo = max(n, self.groundStateN)
    # total decay rate, summed over all dipole-allowed channels
    # (xrange: Python-2-style range, presumably provided by the
    # module's compatibility imports)
    transitionRate = 0.0
    for nto in xrange(max(self.groundStateN, l), includeLevelsUpTo + 1):
        # sum over all l-1
        if l > 0:
            lto = l - 1
            # channel j' = j (allowed only if j >= l' + s)
            if lto > j - s - 0.1:
                jto = j
                transitionRate += self.getTransitionRate(
                    n, l, j, nto, lto, jto, temperature, s=s
                )
            # channel j' = j - 1
            jto = j - 1.0
            if jto > 0:
                transitionRate += self.getTransitionRate(
                    n, l, j, nto, lto, jto, temperature, s=s
                )
    for nto in xrange(max(self.groundStateN, l + 2), includeLevelsUpTo + 1):
        # sum over all l+1
        lto = l + 1
        # channel j' = j (allowed only if l' - s < j)
        if lto - s - 0.1 < j:
            jto = j
            transitionRate += self.getTransitionRate(
                n, l, j, nto, lto, jto, temperature, s=s
            )
        # channel j' = j + 1 (always allowed for l' = l + 1)
        jto = j + 1
        transitionRate += self.getTransitionRate(
            n, l, j, nto, lto, jto, temperature, s=s
        )
    # sum over additional states (dipole-allowed extra levels with
    # |dj| <= 1 and |dl| = 1)
    for state in self.extraLevels:
        if (
            (abs(j - state[2]) < 1.1)
            and (abs(state[1] - l) < 1.1)
            and (abs(state[1] - l) > 0.9)
        ):
            transitionRate += self.getTransitionRate(
                n, l, j, state[0], state[1], state[2], temperature, s=s
            )
    # add something small decay (1e-50) rate to prevent division by zero
    return 1.0 / (transitionRate + 1e-50)
def getRadialCoupling(self, n, l, j, n1, l1, j1, s=0.5):
    """
    Returns radial part of the coupling between two states (dipole and
    quadrupole interactions only)

    Args:
        n (int): principal quantum number of the first state
        l (int): orbital angular momentum of the first state
        j (float): total angular momentum of the first state
        n1 (int): principal quantum number of the second state
        l1 (int): orbital angular momentum of the second state
        j1 (float): total angular momentum of the second state
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: radial coupling strength (in a.u.), or zero for
        forbidden transitions in dipole and quadrupole approximation.
    """
    dl = abs(l - l1)
    # dipole coupling: |dl| = 1 and |dj| <= 1
    if dl == 1 and abs(j - j1) < 1.1:
        return self.getRadialMatrixElement(n, l, j, n1, l1, j1, s=s)
    elif (dl == 0 or dl == 1 or dl == 2) and (abs(j - j1) < 2.1):
        # quadrupole coupling
        return self.getQuadrupoleMatrixElement(n, l, j, n1, l1, j1, s=s)
    else:
        # neglect octopole coupling and higher
        return 0
def getAverageSpeed(self, temperature):
    """
    Average (mean) speed at a given temperature

    Args:
        temperature (float): temperature (K)

    Returns:
        float: mean speed (m/s)
    """
    # Maxwell-Boltzmann mean speed: sqrt(8 k_B T / (pi m))
    meanSquare = 8.0 * C_k * temperature / (pi * self.mass)
    return sqrt(meanSquare)
def _readHFSdata(self):
    """
    Load literature hyperfine-structure constants into the database.

    Rebuilds the ``hfsDataAB`` table and, if
    ``self.hyperfineStructureData`` names a CSV file in
    ``self.dataFolder``, parses it and stores the hyperfine A and B
    coefficients together with their error estimates and source
    metadata for later lookup.
    """
    c = self.conn.cursor()
    # rebuild from scratch: the literature file may have changed
    c.execute("""DROP TABLE IF EXISTS hfsDataAB""")
    c.execute(
        """SELECT COUNT(*) FROM sqlite_master
                WHERE type='table' AND name='hfsDataAB';"""
    )
    # NOTE(review): the table was dropped just above, so this check
    # always finds 0 and the table is always recreated
    if c.fetchone()[0] == 0:
        # create table
        c.execute(
            """CREATE TABLE IF NOT EXISTS hfsDataAB
             (n TINYINT UNSIGNED, l TINYINT UNSIGNED, j_x2 TINYINT UNSIGNED,
             hfsA DOUBLE, hfsB DOUBLE,
             errorA DOUBLE, errorB DOUBLE,
             typeOfSource TINYINT,
             comment TINYTEXT,
             ref TINYTEXT,
             refdoi TINYTEXT
            );"""
        )
        c.execute(
            """CREATE INDEX compositeIndexHFS
            ON hfsDataAB (n,l,j_x2);"""
        )
    self.conn.commit()
    if self.hyperfineStructureData == "":
        return 0  # no file specified for literature values
    try:
        fn = open(
            os.path.join(self.dataFolder, self.hyperfineStructureData), "r"
        )
        # auto-detect the delimiter used in the CSV file
        dialect = csv.Sniffer().sniff(fn.read(2024), delimiters=";,\t")
        fn.seek(0)
        data = csv.reader(fn, dialect, quotechar='"')
        literatureHFS = []
        count = 0
        for row in data:
            if count != 0:
                # if not header
                n = int(row[0])
                l = int(row[1])
                j = float(row[2])
                A = float(row[3])  # hyperfine A coefficient
                B = float(row[4])  # hyperfine B coefficient
                errorA = float(row[5])
                errorB = float(row[6])
                typeOfSource = row[7]
                comment = row[8]
                ref = row[9]
                refdoi = row[10]
                literatureHFS.append(
                    [
                        n,
                        l,
                        j * 2,  # j is stored doubled (j_x2 column)
                        A,
                        B,
                        errorA,
                        errorB,
                        typeOfSource,
                        comment,
                        ref,
                        refdoi,
                    ]
                )
            count += 1
        fn.close()
        try:
            # count > 1 means at least one data row beyond the header
            if count > 1:
                c.executemany(
                    """INSERT INTO hfsDataAB
                        VALUES (?,?,?,?,?,
                                ?, ?,
                                ?,?,?,?)""",
                    literatureHFS,
                )
                self.conn.commit()
        except sqlite3.Error as e:
            if count > 0:
                print(
                    "Error while loading precalculated values "
                    "into the database"
                )
                print(e)
        return
    except IOError as e:
        print(
            "Error reading literature values File "
            + self.hyperfineStructureData
        )
        print(e)
def _readLiteratureValues(self):
    """
    Load literature dipole-matrix-element values into the database.

    Clears and rebuilds the ``literatureDME`` table; if
    ``self.literatureDMEfilename`` names a CSV file in
    ``self.dataFolder``, parses it, converts each reduced DME
    (symmetric J basis) to the radial matrix element convention used
    internally, and stores it together with its error estimate and
    source metadata.
    """
    # clear previously saved results, since literature file
    # might have been updated in the meantime
    c = self.conn.cursor()
    c.execute("""DROP TABLE IF EXISTS literatureDME""")
    c.execute(
        """SELECT COUNT(*) FROM sqlite_master
                WHERE type='table' AND name='literatureDME';"""
    )
    # NOTE(review): the table was dropped just above, so this check
    # always finds 0 and the table is always recreated
    if c.fetchone()[0] == 0:
        # create table
        c.execute(
            """CREATE TABLE IF NOT EXISTS literatureDME
             (n1 TINYINT UNSIGNED, l1 TINYINT UNSIGNED, j1_x2 TINYINT UNSIGNED,
             n2 TINYINT UNSIGNED, l2 TINYINT UNSIGNED, j2_x2 TINYINT UNSIGNED,
             dme DOUBLE,
             typeOfSource TINYINT,
             errorEstimate DOUBLE,
             comment TINYTEXT,
             ref TINYTEXT,
             refdoi TINYTEXT
            );"""
        )
        c.execute(
            """CREATE INDEX compositeIndex
            ON literatureDME (n1,l1,j1_x2,n2,l2,j2_x2); """
        )
    self.conn.commit()
    if self.literatureDMEfilename == "":
        return 0  # no file specified for literature values
    try:
        fn = open(
            os.path.join(self.dataFolder, self.literatureDMEfilename), "r"
        )
        # auto-detect the delimiter used in the CSV file
        dialect = csv.Sniffer().sniff(fn.read(2024), delimiters=";,\t")
        fn.seek(0)
        data = csv.reader(fn, dialect, quotechar='"')
        literatureDME = []
        # i=0 is header
        i = 0
        for row in data:
            if i != 0:
                n1 = int(row[0])
                l1 = int(row[1])
                j1 = float(row[2])
                n2 = int(row[3])
                l2 = int(row[4])
                j2 = float(row[5])
                # store the state pair ordered by energy (lower state
                # first) so lookups are independent of file ordering;
                # NOTE(review): ordering uses default-spin energies
                # (no s=s) — confirm intended
                if self.getEnergy(n1, l1, j1) > self.getEnergy(n2, l2, j2):
                    temp = n1
                    n1 = n2
                    n2 = temp
                    temp = l1
                    l1 = l2
                    l2 = temp
                    temp = j1
                    j1 = j2
                    j2 = temp
                # converted from reduced DME in J basis (symmetric
                # notation) to radial part of dme as it is saved for
                # calculated values (divide out the angular factors)
                dme = float(row[6]) / (
                    (-1) ** (round(l1 + 0.5 + j2 + 1.0))
                    * sqrt((2.0 * j1 + 1.0) * (2.0 * j2 + 1.0))
                    * Wigner6j(j1, 1.0, j2, l2, 0.5, l1)
                    * (-1) ** l1
                    * sqrt((2.0 * l1 + 1.0) * (2.0 * l2 + 1.0))
                    * Wigner3j(l1, 1, l2, 0, 0, 0)
                )
                comment = row[7]
                typeOfSource = int(row[8])  # 0 = experiment; 1 = theory
                errorEstimate = float(row[9])
                ref = row[10]
                refdoi = row[11]
                literatureDME.append(
                    [
                        n1,
                        l1,
                        j1 * 2,
                        n2,
                        l2,
                        j2 * 2,
                        dme,
                        typeOfSource,
                        errorEstimate,
                        comment,
                        ref,
                        refdoi,
                    ]
                )
            i += 1
        fn.close()
        try:
            # i > 1 means at least one data row beyond the header
            if i > 1:
                c.executemany(
                    """INSERT INTO literatureDME
                        VALUES (?,?,?,?,?,?,?,
                                ?,?,?,?,?)""",
                    literatureDME,
                )
                self.conn.commit()
        except sqlite3.Error as e:
            if i > 0:
                print(
                    "Error while loading precalculated values "
                    "into the database"
                )
                print(e)
                exit()
    except IOError as e:
        print(
            "Error reading literature values File "
            + self.literatureDMEfilename
        )
        print(e)
def getLiteratureDME(self, n1, l1, j1, n2, l2, j2, s=0.5):
    """
    Returns literature information on requested transition.

    Args:
        n1,l1,j1: one of the states we are coupling
        n2,l2,j2: the other state to which we are coupling

    Returns:
        bool, float, [int,float,string,string,string]:
            hasLiteratureValue?, dme, referenceInformation

            **If Boolean value is True**, a literature value for
            dipole matrix element was found and reduced DME in J basis
            is returned as the number. The third returned argument
            (array) contains additional information about the
            literature value in the following order [ typeOfSource,
            errorEstimate , comment , reference, reference DOI]
            upon success to find a literature value for dipole matrix
            element:

            * typeOfSource=1 if the value is theoretical \
                calculation; otherwise, if it is experimentally \
                obtained value typeOfSource=0
            * comment details where within the publication the \
                value can be found
            * errorEstimate is absolute error estimate
            * reference is human-readable formatted reference
            * reference DOI provides link to the publication.

            **Boolean value is False**, followed by zero and an empty
            array if no literature value for dipole matrix element is
            found.

    Note:
        The literature values are stored in /data folder in
        <element name>_literature_dme.csv files as a ; separated
        values. Each row in the file consists of one literature entry,
        that has information in the following order:

        * n1
        * l1
        * j1
        * n2
        * l2
        * j2
        * dipole matrix element reduced l basis (a.u.)
        * comment (e.g. where in the paper value appears?)
        * value origin: 1 for theoretical; 0 for experimental values
        * accuracy
        * source (human readable formatted citation)
        * doi number (e.g. 10.1103/RevModPhys.82.2313 )

        If there are several values for a given transition, program
        outputs the value that has smallest error (under column
        accuracy). The list of values can be expanded - every time
        program runs this file is read and the list is parsed again
        for use in calculations.
    """
    # order the two states by energy, matching the convention used
    # when the literatureDME table was populated;
    # NOTE(review): ordering uses default-spin energies (no s=s)
    if self.getEnergy(n1, l1, j1) > self.getEnergy(n2, l2, j2):
        temp = n1
        n1 = n2
        n2 = temp
        temp = l1
        l1 = l2
        l2 = temp
        temp = j1
        j1 = j2
        j2 = temp
    # is there literature value for this DME? If there is,
    # use the best one (with the smallest error)
    j1_x2 = 2 * j1
    j2_x2 = 2 * j2
    c = self.conn.cursor()
    c.execute(
        """SELECT dme, typeOfSource,
                errorEstimate ,
                comment ,
                ref,
                refdoi FROM literatureDME WHERE
                n1= ? AND l1 = ? AND j1_x2 = ? AND
                n2 = ? AND l2 = ? AND j2_x2 = ?
                ORDER BY errorEstimate ASC""",
        (n1, l1, j1_x2, n2, l2, j2_x2),
    )
    answer = c.fetchone()
    if answer:
        # we did find a literature value
        return (
            True,
            answer[0],
            [answer[1], answer[2], answer[3], answer[4], answer[5]],
        )
    # if we are here, we were unsuccessful in the literature search
    # for this value
    return False, 0, []
def getZeemanEnergyShift(self, l, j, mj, magneticFieldBz, s=0.5):
    r"""
    Returns the linear (paramagnetic) Zeeman shift.

    :math:`\mathcal{H}_P=\frac{\mu_B B_z}{\hbar}(\hat{L}_{\rm z}+\
    g_{\rm S}S_{\rm z})`

    Args:
        l (int): orbital angular momentum
        j (float): total angular momentum
        mj (float): projection of total angular momentum along z-axis
        magneticFieldBz (float): applied magnetic field (along z-axis
            only) in units of T (Tesla)
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: energy offset of the state (in J)
    """
    muBB = physical_constants["Bohr magneton"][0] * magneticFieldBz
    gs = -physical_constants["electron g factor"][0]
    # expectation value of (Lz + gs*Sz) in |l s j mj>, expanded over
    # the uncoupled |ml ms> basis via Clebsch-Gordan coefficients
    expectation = 0
    for ml in np.linspace(mj - s, mj + s, round(2 * s + 1)):
        if abs(ml) > l + 0.1:
            # unphysical orbital projection for this l
            continue
        ms = mj - ml
        expectation += (ml + gs * ms) * abs(CG(l, ml, s, ms, j, mj)) ** 2
    return muBB * expectation
def _getRadialDipoleSemiClassical(self, n1, l1, j1, n2, l2, j2, s=0.5):
    """
    Radial dipole matrix element in the semiclassical approximation.

    Instead of numerically integrating radial wavefunctions, this
    evaluates a quasiclassical closed-form expression in terms of
    Anger functions of the effective principal quantum numbers.

    Returns:
        float: radial dipole matrix element (atomic units).
    """
    # get the effective principal number of both states,
    # nu = sqrt(-Ry* / E) with E the (negative) binding energy
    nu = np.sqrt(
        -self.scaledRydbergConstant / self.getEnergy(n1, l1, j1, s=s)
    )
    nu1 = np.sqrt(
        -self.scaledRydbergConstant / self.getEnergy(n2, l2, j2, s=s)
    )
    # get the parameters required to calculate the sum
    l_c = (l1 + l2 + 1.0) / 2.0  # mean orbital momentum of the pair
    nu_c = sqrt(nu * nu1)  # geometric mean of effective n's
    delta_nu = nu - nu1
    delta_l = l2 - l1
    # NOTE(review): original author's note — "I am not sure if this
    # correct"; the expression for gamma should be verified
    gamma = (delta_l * l_c) / nu_c
    if delta_nu == 0:
        # degenerate limit: only the leading g0 term survives
        g0 = 1
        g1 = 0
        g2 = 0
        g3 = 0
    else:
        g0 = (1.0 / (3.0 * delta_nu)) * (
            angerj(delta_nu - 1.0, -delta_nu)
            - angerj(delta_nu + 1, -delta_nu)
        )
        g1 = -(1.0 / (3.0 * delta_nu)) * (
            angerj(delta_nu - 1.0, -delta_nu)
            + angerj(delta_nu + 1, -delta_nu)
        )
        g2 = g0 - np.sin(np.pi * delta_nu) / (np.pi * delta_nu)
        g3 = (delta_nu / 2.0) * g0 + g1
    # series in gamma up to third order
    radial_ME = (
        (3 / 2)
        * nu_c**2
        * (1 - (l_c / nu_c) ** (2)) ** 0.5
        * (g0 + gamma * g1 + gamma**2 * g2 + gamma**3 * g3)
    )
    return float(radial_ME)
def _getRadialQuadrupoleSemiClassical(self, n1, l1, j1, n2, l2, j2, s=0.5):
    """
    Semiclassical approximation of the radial quadrupole matrix element
    between states (n1, l1, j1) and (n2, l2, j2), in units of a_0^2.

    Only delta_l = 0 and delta_l = 2 couple via the quadrupole operator;
    all other cases return 0.
    """
    dl = abs(l2 - l1)

    # effective principal quantum numbers from the quantum defects
    nu = n1 - self.getQuantumDefect(n1, l1, j1, s=s)
    nu1 = n2 - self.getQuantumDefect(n2, l2, j2, s=s)

    # get the parameters required to calculate the sum
    l_c = (l1 + l2 + 1.0) / 2.0  # mean orbital angular momentum
    nu_c = np.sqrt(nu * nu1)  # geometric-mean effective quantum number
    delta_nu = nu - nu1
    delta_l = l2 - l1
    gamma = (delta_l * l_c) / nu_c

    if delta_nu == 0:
        # degenerate limit of the q-coefficients
        q = np.array([1, 0, 0, 0])
    else:
        # Anger-function combinations entering the expansion coefficients
        g0 = (1.0 / (3.0 * delta_nu)) * (
            angerj(delta_nu - 1.0, -delta_nu)
            - angerj(delta_nu + 1, -delta_nu)
        )
        g1 = -(1.0 / (3.0 * delta_nu)) * (
            angerj(delta_nu - 1.0, -delta_nu)
            + angerj(delta_nu + 1, -delta_nu)
        )
        q = np.zeros((4,))
        q[0] = -(6.0 / (5.0 * delta_nu)) * g1
        q[1] = -(6.0 / (5.0 * delta_nu)) * g0 + (6.0 / 5.0) * np.sin(
            np.pi * delta_nu
        ) / (np.pi * delta_nu**2)
        q[2] = -(3.0 / 4.0) * (6.0 / (5.0 * delta_nu) * g1 + g0)
        q[3] = 0.5 * (delta_nu * 0.5 * q[0] + q[1])

    sm = 0
    if dl == 0:
        quadrupoleElement = (
            (5.0 / 2.0)
            * nu_c**4
            * (1.0 - (3.0 * l_c**2) / (5 * nu_c**2))
        )
        # even powers of gamma only: q[0] and q[2]
        for p in range(0, 2, 1):
            sm += gamma ** (2 * p) * q[2 * p]
        return quadrupoleElement * sm
    elif dl == 2:
        quadrupoleElement = (
            (5.0 / 2.0)
            * nu_c**4
            * (1 - (l_c + 1) ** 2 / (nu_c**2)) ** 0.5
            * (1 - (l_c + 2) ** 2 / (nu_c**2)) ** 0.5
        )
        # all powers of gamma up to third order
        for p in range(0, 4):
            sm += gamma ** (p) * q[p]
        return quadrupoleElement * sm
    else:
        # quadrupole selection rule not satisfied
        return 0
# Additional AMO Functions
def getHFSCoefficients(self, n, l, j, s=None):
    """
    Returns hyperfine splitting coefficients for state :math:`n`,
    :math:`l`, :math:`j`.

    Args:
        n (int): principal quantum number
        l (int): orbital angular momentum
        j (float): total angular momentum
        s (float): (optional) total spin momentum

    Returns:
        float: A,B hyperfine splitting constants (in Hz)

    Raises:
        ValueError: if no literature values are stored for the
            requested state
    """
    UsedModulesARC.hyperfine = True
    c = self.conn.cursor()
    # j is stored doubled (j_x2) so half-integer values index exactly
    c.execute(
        """SELECT hfsA, hfsB FROM hfsDataAB WHERE
        n= ? AND l = ? AND j_x2 = ?""",
        (n, l, j * 2),
    )
    answer = c.fetchone()
    if answer:
        # literature values found (A and B respectively)
        return answer[0], answer[1]
    else:
        raise ValueError(
            "There is no data available on HFS structure"
            " of %s state" % printStateString(n, l, j, s=s)
        )
def _reducedMatrixElementFJ(self, j1, f1, j2, f2):
    """
    Reduced matrix element <f||er||f'>, expressed in units of the
    fine-structure reduced matrix element <j||er||j'>.

    Returns 0.0 when the dipole selection rules |Δf| <= 1 and
    |Δj| <= 1 are not satisfied.
    """
    # selection rules: anything outside |Δf| <= 1, |Δj| <= 1 vanishes
    if abs(f2 - f1) >= 2 or round(abs(j2 - j1)) >= 2:
        return 0.0
    phase = (-1.0) ** (j1 + self.I + f2 + 1.0)
    degeneracy = ((2.0 * f1 + 1) * (2 * f2 + 1)) ** 0.5
    return phase * degeneracy * Wigner6j(f1, 1, f2, j2, self.I, j1)
def getSphericalDipoleMatrixElement(self, j1, mj1, j2, mj2, q):
    """
    Spherical component of the angular dipole matrix element, in units
    of the reduced matrix element <j||er||j'>.
    """
    phase = (-1) ** (j1 - mj1)
    return phase * Wigner3j(j1, 1, j2, -mj1, -q, mj2)
def getSphericalMatrixElementHFStoFS(self, j1, f1, mf1, j2, mj2, q):
    r"""
    Spherical matrix element for transition from hyperfine resolved state
    to unresolved fine-structure state
    :math:`\langle f,m_f \vert\mu_q\vert j',m_j'\rangle`
    in units of :math:`\langle j\vert\vert\mu\vert\vert j'\rangle`

    Args:
        j1, f1, mf1: total orbital,
            fine basis (total atomic) angular momentum,
            and projection of total angular momentum for state 1
        j2, mj2: total orbital,
            fine basis (total atomic) angular momentum,
            and projection of total orbital angular momentum for state 2
        q (int): specifies transition that the driving field couples to,
            +1, 0 or -1 corresponding to driving :math:`\sigma^+`,
            :math:`\pi` and :math:`\sigma^-` transitions respectively.

    Returns:
        float: spherical dipole matrix element
        ( :math:`\langle j\vert\vert\mu\vert\vert j'\rangle`)
    """
    UsedModulesARC.hyperfine = True
    # target projection fixed by the polarisation of the driving field
    mf2 = mf1 + q
    # nuclear projection implied by decomposing |j2 mj2> with |I mI>
    mI = mf2 - mj2
    sph = 0.0
    if abs(mI) <= self.I:
        # sum over the f2 manifolds compatible with both the dipole
        # selection rule (|f2 - f1| <= 1) and the coupling of j2 with I
        for f2 in np.arange(
            max(self.I - j2, abs(mf2), f1 - 1), 1 + min(self.I + j2, f1 + 1)
        ):
            # Enforce Triangle Rule
            if abs(j2 - self.I) <= f2:
                # CG multiplied by <j1 f1 mf1|er_q|j2 f2 mf2> in units of <j1 || er || j2 >
                sph += (
                    CG(j2, mj2, self.I, mI, f2, mf2)
                    * self.getSphericalDipoleMatrixElement(
                        f1, mf1, f2, mf2, q
                    )
                    * self._reducedMatrixElementFJ(j1, f1, j2, f2)
                )
    return sph
def getDipoleMatrixElementHFStoFS(
    self, n1, l1, j1, f1, mf1, n2, l2, j2, mj2, q, s=0.5
):
    r"""
    Dipole matrix element for transition from hyperfine resolved state
    to unresolved fine-structure state
    :math:`\langle n_1 l_1 j_1 f_1 m_{f_1} |e\mathbf{r}|\
    n_2 l_2 j_2 m_{j_2}\rangle`
    in units of :math:`a_0 e`

    For hyperfine resolved transitions, the dipole matrix element is
    :math:`\langle n_1,\ell_1,j_1,f_1,m_{f1} | \
    \mathbf{\hat{r}}\cdot \mathbf{\varepsilon}_q \
    | n_2,\ell_2,j_2,f_2,m_{f2} \rangle = (-1)^{f_1-m_{f1}} \
    \left( \
    \begin{matrix} \
    f_1 & 1 & f_2 \\ \
    -m_{f1} & q & m_{f2} \
    \end{matrix}\right) \
    \langle n_1 \ell_1 j_1 f_1|| r || n_2 \ell_2 j_2 f_2 \rangle,` where
    :math:`\langle n_1 \ell_1 j_1 f_1 ||r|| n_2 \ell_2 j_2 f_2 \rangle \
    = (-1)^{j_1+I+F_2+1}\sqrt{(2f_1+1)(2f_2+1)} ~ \
    \left\{ \begin{matrix}\
    F_1 & 1 & F_2 \\ \
    j_2 & I & j_1 \
    \end{matrix}\right\}~ \
    \langle n_1 \ell_1 j_1||r || n_2 \ell_2 j_2 \rangle.`

    Args:
        n1, l1, j1, f1, mf1: principal, orbital, total orbital,
            fine basis (total atomic) angular momentum,
            and projection of total angular momentum for state 1
        n2, l2, j2, mj2: principal, orbital, total orbital,
            fine basis (total atomic) angular momentum,
            and projection of total orbital angular momentum for state 2
        q (int): specifies transition that the driving field couples to,
            +1, 0 or -1 corresponding to driving :math:`\sigma^+`,
            :math:`\pi` and :math:`\sigma^-` transitions respectively.
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: dipole matrix element( :math:`a_0 e`)
    """
    # angular (spherical) factor times the radial reduced matrix element
    return self.getSphericalMatrixElementHFStoFS(
        j1, f1, mf1, j2, mj2, q
    ) * self.getReducedMatrixElementJ(n1, l1, j1, n2, l2, j2, s=s)
def getMagneticDipoleMatrixElementHFS(
    self, l, j, f1, mf1, f2, mf2, q, s=0.5
):
    r"""
    Magnetic dipole matrix element :math:`\langle f_1,m_{f_1} \vert \mu_q \vert f_2,m_{f_2}\rangle` for transitions from :math:`\vert f_1,m_{f_1}\rangle\rightarrow\vert f_2,m_{f_2}\rangle` within the same :math:`n,\ell,j` state in units of :math:`\mu_B B_q`.

    The magnetic dipole matrix element is given by
    :math:`\langle f_1,m_{f_1}\vert \mu_q \vert f_2,m_{f_2}\rangle = g_J \mu_B B_q (-1)^{f_2+j+I+1+f_1-m_{f_1}} \sqrt{(2f_1+1)(2f_2+1)j(j+1)(2j+1)} \begin{pmatrix}f_1&1&f_2\\-m_{f_1} & -q & m_{f_2}\end{pmatrix} \begin{Bmatrix}f_1&1&f_2\\j & I & j\end{Bmatrix}`

    Args:
        l, j: orbital and total orbital angular momentum of the
            :math:`n,\ell,j` state (shared by both hyperfine levels)
        f1, mf1: fine basis (total atomic) angular momentum and
            projection of total angular momentum for state 1
        f2, mf2: fine basis (total atomic) angular momentum and
            projection of total angular momentum for state 2
        q (int): specifies transition that the driving field couples to,
            +1, 0 or -1 corresponding to driving :math:`\sigma^+`,
            :math:`\pi` and :math:`\sigma^-` transitions respectively.
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: magnetic dipole matrix element (in units of :math:`\mu_BB_q`)
    """
    # g_J * phase * degeneracy factor * angular factor * 6j recoupling
    return (
        self.getLandegj(l, j, s)
        * (-1) ** (f2 + j + self.I + 1)
        * np.sqrt((2 * f1 + 1) * (2 * f2 + 1) * j * (j + 1) * (2 * j + 1))
        * self.getSphericalDipoleMatrixElement(f1, mf1, f2, mf2, q)
        * Wigner6j(f1, 1, f2, j, self.I, j)
    )
def getLandegj(self, l, j, s=0.5):
    r"""
    Approximate Lande g-factor
    :math:`g_J\simeq 1+\frac{j(j+1)+s(s+1)-l(l+1)}{2j(j+1)}`
    (obtained by setting :math:`g_L=1`, :math:`g_S=2`).

    Args:
        l (float): orbital angular momentum
        j (float): total orbital angular momentum
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: Lande g-factor ( :math:`g_J`)
    """
    UsedModulesARC.hyperfine = True
    # named Casimir-style invariants keep the formula readable
    jj = j * (j + 1.0)
    ss = s * (s + 1.0)
    ll = l * (l + 1.0)
    return 1.0 + (jj + ss - ll) / (2.0 * jj)
def getLandegjExact(self, l, j, s=0.5):
    r"""
    Exact Lande g-factor
    :math:`g_J=g_L\frac{j(j+1)-s(s+1)+l(l+1)}{2j(j+1)}+g_S\frac{j(j+1)+s(s+1)-l(l+1)}{2j(j+1)}`
    using the atom's tabulated :math:`g_L` and :math:`g_S` values.

    Args:
        l (float): orbital angular momentum
        j (float): total orbital angular momentum
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: Lande g-factor ( :math:`g_J`)
    """
    UsedModulesARC.hyperfine = True
    jj = j * (j + 1.0)
    ss = s * (s + 1.0)
    ll = l * (l + 1.0)
    # orbital and spin contributions, each weighted by its own g-factor
    termL = self.gL * (jj - ss + ll) / (2.0 * jj)
    termS = self.gS * (jj + ss - ll) / (2.0 * jj)
    return termL + termS
def getLandegf(self, l, j, f, s=0.5):
    r"""
    Approximate hyperfine Lande g-factor
    :math:`g_F\simeq g_J\frac{f(f+1)-I(I+1)+j(j+1)}{2f(f+1)}`
    (neglects the nuclear g-factor contribution).

    Args:
        l (float): orbital angular momentum
        j (float): total orbital angular momentum
        f (float): total atomic angular momentum
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: Lande g-factor ( :math:`g_F`)
    """
    UsedModulesARC.hyperfine = True
    # projection of g_J onto the hyperfine basis
    numerator = f * (f + 1.0) - self.I * (self.I + 1.0) + j * (j + 1.0)
    return self.getLandegj(l, j, s) * numerator / (2.0 * f * (f + 1.0))
def getLandegfExact(self, l, j, f, s=0.5):
    r"""
    Exact hyperfine Lande g-factor :math:`g_F`
    :math:`g_F=g_J\frac{f(f+1)-I(I+1)+j(j+1)}{2f(f+1)}+g_I\frac{f(f+1)+I(I+1)-j(j+1)}{2f(f+1)}`
    including the nuclear g-factor :math:`g_I` contribution.

    Args:
        l (float): orbital angular momentum
        j (float): total orbital angular momentum
        f (float): total atomic angular momentum
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: Lande g-factor ( :math:`g_F`)
    """
    UsedModulesARC.hyperfine = True
    denom = 2.0 * f * (f + 1.0)
    # electronic contribution, weighted by the exact g_J
    termJ = (
        self.getLandegjExact(l, j, s)
        * (f * (f + 1.0) - self.I * (self.I + 1.0) + j * (j + 1.0))
        / denom
    )
    # nuclear contribution, weighted by g_I
    termI = (
        self.gI
        * (f * (f + 1.0) + self.I * (self.I + 1.0) - j * (j + 1.0))
        / denom
    )
    return termJ + termI
def getHFSEnergyShift(self, j, f, A, B=0, s=0.5):
    r"""
    Energy shift of HFS from centre of mass :math:`\Delta E_\mathrm{hfs}`

    :math:`\Delta E_\mathrm{hfs} = \frac{A}{2}K+B\frac{\frac{3}{2}K(K+1)-2I(I+1)J(J+1)}{2I(2I-1)2J(2J-1)}`

    where :math:`K=F(F+1)-I(I+1)-J(J+1)`

    Args:
        j (float): total orbital angular momentum
        f (float): total atomic angular momentum
        A (float): HFS magnetic dipole constant
        B (float): HFS magnetic quadrupole constant
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: Energy shift ( :math:`\Delta E_\mathrm{hfs}`)
    """
    UsedModulesARC.hyperfine = True
    K = f * (f + 1.0) - self.I * (self.I + 1.0) - j * (j + 1.0)
    # magnetic-dipole part
    shift = A / 2.0 * K
    # electric-quadrupole part only when B is nonzero (it is undefined
    # for j = 1/2 or I = 1/2, where the denominator would vanish)
    if abs(B) > 0:
        numerator = 3.0 / 2.0 * K * (K + 1) - 2.0 * self.I * (
            self.I + 1.0
        ) * j * (j + 1.0)
        denominator = (
            2.0 * self.I * (2.0 * self.I - 1.0) * 2.0 * j * (2.0 * j - 1)
        )
        shift += B * numerator / denominator
    return shift
def getBranchingRatio(self, jg, fg, mfg, je, fe, mfe, s=0.5):
    r"""
    Branching ratio for decay from :math:`\vert j_e,f_e,m_{f_e} \rangle \rightarrow \vert j_g,f_g,m_{f_g}\rangle`

    :math:`b = \displaystyle\sum_q (2j_e+1)\left(\begin{matrix}f_1 & 1 & f_2 \\-m_{f1} & q & m_{f2}\end{matrix}\right)^2\vert \langle j_e,f_e\vert \vert er \vert\vert j_g,f_g\rangle\vert^2/|\langle j_e || er || j_g \rangle |^2`

    Args:
        jg, fg, mfg: total orbital, fine basis (total atomic) angular momentum,
            and projection of total angular momentum for ground state
        je, fe, mfe: total orbital, fine basis (total atomic) angular momentum,
            and projection of total angular momentum for excited state
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: branching ratio
    """
    UsedModulesARC.hyperfine = True
    # reduced element does not depend on q -> evaluate once
    reduced = self._reducedMatrixElementFJ(jg, fg, je, fe)
    total = 0.0
    for q in (-1, 0, 1):
        angular = self.getSphericalDipoleMatrixElement(fg, mfg, fe, mfe, q)
        total += angular**2 * reduced**2
    # Rescale by excited-state degeneracy
    return total * (2.0 * je + 1.0)
def getSaturationIntensity(
    self, ng, lg, jg, fg, mfg, ne, le, je, fe, mfe, s=0.5
):
    r"""
    Saturation Intensity :math:`I_\mathrm{sat}` for transition :math:`\vert j_g,f_g,m_{f_g}\rangle\rightarrow\vert j_e,f_e,m_{f_e}\rangle` in units of :math:`\mathrm{W}/\mathrm{m}^2`.

    :math:`I_\mathrm{sat} = \frac{c\epsilon_0\Gamma^2\hbar^2}{4\vert \epsilon_q\cdot\mathrm{d}\vert^2}`

    Args:
        ng, lg, jg, fg, mfg: principal, orbital, total orbital,
            fine basis (total atomic) angular momentum,
            and projection of total angular momentum for ground state
        ne, le, je, fe, mfe: principal, orbital, total orbital,
            fine basis (total atomic) angular momentum,
            and projection of total angular momentum for excited state
        s (float): optional, total spin angular momentum of state.
            By default 0.5 for Alkali atoms.

    Returns:
        float: Saturation Intensity in units of :math:`\mathrm{W}/\mathrm{m}^2`

    Raises:
        ValueError: if the two states are not dipole-coupled
            (i.e. :math:`|m_{f_e}-m_{f_g}| > 1`)
    """
    UsedModulesARC.hyperfine = True
    # polarisation component dictated by the projection change
    q = mfe - mfg
    if abs(q) <= 1:
        # dipole matrix element rescaled from a_0 e to SI units (C m)
        # NOTE(review): parameter s is accepted but not forwarded to
        # getDipoleMatrixElementHFS — confirm whether that is intended
        d = (
            self.getDipoleMatrixElementHFS(
                ng, lg, jg, fg, mfg, ne, le, je, fe, mfe, q
            )
            * C_e
            * physical_constants["Bohr radius"][0]
        )
        # natural linewidth (rad s-1) from the excited-state lifetime
        Gamma = 1.0 / self.getStateLifetime(ne, le, je)
        Is = C_c * epsilon_0 * Gamma**2 * hbar**2 / (4.0 * d**2)
    else:
        raise ValueError("States not coupled")
    return Is
def getSaturationIntensityIsotropic(self, ng, lg, jg, fg, ne, le, je, fe):
    r"""
    Isotropic Saturation Intensity :math:`I_\mathrm{sat}` for transition :math:`f_g\rightarrow f_e` averaged over all polarisations in units of :math:`\mathrm{W}/\mathrm{m}^2`.

    :math:`I_\mathrm{sat} = \frac{c\epsilon_0\Gamma^2\hbar^2}{4\vert \epsilon_q\cdot\mathrm{d}\vert^2}`

    Args:
        ng, lg, jg, fg: principal, orbital, total orbital,
            and fine basis (total atomic) angular momentum
            for ground state
        ne, le, je, fe: principal, orbital, total orbital,
            and fine basis (total atomic) angular momentum
            for excited state

    Returns:
        float: Saturation Intensity in units of :math:`\mathrm{W}/\mathrm{m}^2`
    """
    UsedModulesARC.hyperfine = True
    # sum |d|^2 over all polarisations q and all ground sublevels mfg
    # (assumes fg is an integer so range() enumerates the sublevels —
    # holds for alkali atoms with half-integer I and j)
    d_iso_sq = 0.0
    for q in range(-1, 2):
        for mfg in range(-fg, fg + 1):
            d_iso_sq += (
                self.getDipoleMatrixElementHFS(
                    ng, lg, jg, fg, mfg, ne, le, je, fe, mfg + q, q
                )
                ** 2
            )
    # Average over (2fg+1) levels and 3 polarisations, and rescale
    # from a_0 e to SI units (C m)
    d_iso_sq = (
        d_iso_sq
        / 3.0
        / (2 * fg + 1)
        * (C_e * physical_constants["Bohr radius"][0]) ** 2
    )
    # natural linewidth (rad s-1) from the excited-state lifetime
    Gamma = 1.0 / self.getStateLifetime(ne, le, je)
    Is = C_c * epsilon_0 * Gamma**2 * hbar**2 / (4.0 * d_iso_sq)
    return Is
def groundStateRamanTransition(
    self, Pa, wa, qa, Pb, wb, qb, Delta, f0, mf0, f1, mf1, ne, le, je
):
    r"""
    Returns two-photon Rabi frequency :math:`\Omega_R`, differential AC Stark shift :math:`\Delta_\mathrm{AC}` and probability to scatter a photon during a :math:`\pi`-pulse :math:`P_\mathrm{sc}` for two-photon ground-state Raman transitions from :math:`\vert f_g,m_{f_g}\rangle\rightarrow\vert nL_{j_r} j_r,m_{j_r}\rangle` via an intermediate excited state :math:`n_e,\ell_e,j_e`.

    :math:`\Omega_R=\displaystyle\sum_{f_e,m_{f_e}}\frac{\Omega^a_{0\rightarrow f_e}\Omega^b_{1\rightarrow f_e}}{2(\Delta-\Delta_{f_e})},`

    :math:`\Delta_{\mathrm{AC}} = \displaystyle\sum_{f_e,m_{f_e}}\left[\frac{\vert\Omega^a_{0\rightarrow f_e}\vert^2-\vert\Omega^b_{1\rightarrow f_e}\vert^2}{4(\Delta-\Delta_{f_e})}+\frac{\vert\Omega^a_{1\rightarrow f_e}\vert^2}{4(\Delta+\omega_{01}-\Delta_{f_e})}-\frac{\vert\Omega^b_{0\rightarrow f_e}\vert^2}{4(\Delta-\omega_{01}-\Delta_{f_e})}\right],`

    :math:`P_\mathrm{sc} =\frac{\Gamma_e t_\pi}{2}\displaystyle\sum_{f_e,m_{f_e}}\left[\frac{\vert\Omega^a_{0\rightarrow f_e}\vert^2}{2(\Delta-\Delta_{f_e})^2}+\frac{\vert\Omega^b_{1\rightarrow f_e}\vert^2}{2(\Delta-\Delta_{f_e})^2}+\frac{\vert\Omega^a_{1\rightarrow f_e}\vert^2}{4(\Delta+\omega_{01}-\Delta_{f_e})^2}+\frac{\vert\Omega^b_{0\rightarrow f_e}\vert^2}{4(\Delta-\omega_{01}-\Delta_{f_e})^2}\right]`

    where :math:`\tau_\pi=\pi/\Omega_R`.

    .. figure:: ./GroundStateRaman.png
        :width: 250 px
        :alt: Schema of |0>-> -> |e> -> |1> transition
        :align: right

    Args:
        Pa:
            power (W), of laser a :math:`\vert 0 \rangle\rightarrow\vert e\rangle`
        wa:
            beam waist (m) of laser a :math:`\vert 0 \rangle\rightarrow\vert e\rangle`
        qa:
            polarisation (+1, 0 or -1 corresponding to driving :math:`\sigma^+`, :math:`\pi` and :math:`\sigma^-`)
            of laser a :math:`\vert 0 \rangle\rightarrow\vert e\rangle`
        Pb: power (W) of laser b :math:`\vert 1 \rangle\rightarrow\vert e\rangle`
        wb: beam waist (m) of laser b :math:`\vert 1 \rangle\rightarrow\vert e\rangle`
        qb: polarisation (+1, 0 or -1 corresponding to driving :math:`\sigma^+`, :math:`\pi` and :math:`\sigma^-`) of laser b :math:`\vert 1 \rangle\rightarrow\vert e\rangle`
        Delta : Detuning from excited state centre of mass (rad :math:`\mathrm{s}^{-1}`); may be a scalar or a numpy array
        f0,mf0: Lower hyperfine level
        f1,mf1: Upper hyperfine level
        ne, le, je: principal, orbital, total orbital quantum numbers of excited state

    Returns:
        float: Two-Photon Rabi frequency :math:`\Omega_R` (units :math:`\mathrm{rads}^{-1}`), differential AC Stark shift :math:`\Delta_\mathrm{AC}` (units :math:`\mathrm{rads}^{-1}`) and probability to scatter a photon during a :math:`\pi`-pulse :math:`P_\mathrm{sc}`
    """
    UsedModulesARC.hyperfine = True
    # Intensity/beam (W/m^2): peak intensity of a Gaussian beam
    Ia = 2.0 * Pa / (pi * wa**2)
    Ib = 2.0 * Pb / (pi * wb**2)
    # Electric field (V/m): E = sqrt(2 I / (eps0 c))
    Ea = np.sqrt(2.0 * Ia / (epsilon_0 * C_c))
    Eb = np.sqrt(2.0 * Ib / (epsilon_0 * C_c))
    # Reduced Matrix Element (au); the alkali ground state is n S_{1/2}
    ng = self.groundStateN
    lg = 0
    jg = 0.5
    rme_j = self.getReducedMatrixElementJ(ng, lg, jg, ne, le, je)
    # Rescale to (Cm)
    rme_j *= C_e * physical_constants["Bohr radius"][0]
    # Qubit level energy separation (rad s-1)
    # (ground-state hyperfine splitting, A(I + 1/2) converted from Hz)
    [A, B] = self.getHFSCoefficients(ng, lg, jg)
    omega01 = (jg + self.I) * A * 2.0 * pi
    # Excited State Properties
    # Hyperfine Coefficients (Hz)
    [A, B] = self.getHFSCoefficients(ne, le, je)
    # Linewidth (rad s-1)
    Gamma = 1.0 / self.getStateLifetime(ne, le, je)
    # Initialise Output Variables (same shape as Delta)
    OmegaR = np.zeros(np.shape(Delta))
    AC1 = np.zeros(np.shape(Delta))
    AC0 = np.zeros(np.shape(Delta))
    Pe = np.zeros(np.shape(Delta))
    # Loop over excited state energylevels f_e = |j_e - I| ... j_e + I
    for fe in range(round(abs(je - self.I)), round(1.0 + (je + self.I))):
        # Hyperfine energy shift (rad s-1)
        Ehfs = 2.0 * np.pi * self.getHFSEnergyShift(je, fe, A, B)
        # only the m_fe sublevels reachable from mf0/mf1 by one photon
        for mfe in range(
            max(-fe, min(mf1, mf0) - 1), 1 + min(fe, max(mf1, mf0) + 1)
        ):
            # Rabi frequency of each laser from each transition (rad s-1)
            Omaf0 = (
                Ea
                * rme_j
                / hbar
                * self.getSphericalDipoleMatrixElement(f0, mf0, fe, mfe, qa)
                * self._reducedMatrixElementFJ(jg, f0, je, fe)
            )
            Omaf1 = (
                Ea
                * rme_j
                / hbar
                * self.getSphericalDipoleMatrixElement(f1, mf1, fe, mfe, qa)
                * self._reducedMatrixElementFJ(jg, f1, je, fe)
            )
            Ombf0 = (
                Eb
                * rme_j
                / hbar
                * self.getSphericalDipoleMatrixElement(f0, mf0, fe, mfe, qb)
                * self._reducedMatrixElementFJ(jg, f0, je, fe)
            )
            Ombf1 = (
                Eb
                * rme_j
                / hbar
                * self.getSphericalDipoleMatrixElement(f1, mf1, fe, mfe, qb)
                * self._reducedMatrixElementFJ(jg, f1, je, fe)
            )
            # AC Stark shift on qubit states (both lasers contribute,
            # with the off-resonant laser shifted by +-omega01)
            AC1 += Ombf1**2 / (4 * (Delta - Ehfs)) + Omaf1**2 / (
                4 * (Delta + omega01 - Ehfs)
            )
            AC0 += Omaf0**2 / (4 * (Delta - Ehfs)) + Ombf0**2 / (
                4 * (Delta - omega01 - Ehfs)
            )
            # Two-Photon Rabi Frequency
            OmegaR += Omaf0 * Ombf1 / (2 * (Delta - Ehfs))
            # Excited state population Pe
            Pe += (
                0.5 * Omaf0**2 / (2 * (Delta - Ehfs) ** 2)
                + 0.5 * Ombf1**2 / (2 * (Delta - Ehfs) ** 2)
                + 0.5 * Omaf1**2 / (2 * (Delta + omega01 - Ehfs) ** 2)
                + 0.5 * Ombf0**2 / (2 * (Delta - omega01 - Ehfs) ** 2)
            )
    # Total Differential Shift
    AC = AC0 - AC1
    # Pi-rotation time (s)
    tau_pi = pi / abs(OmegaR)
    # Spontaneous Emission Probability
    Psc = Gamma * tau_pi * Pe
    return OmegaR, AC, Psc
def twoPhotonRydbergExcitation(
    self,
    Pp,
    wp,
    qp,
    Pc,
    wc,
    qc,
    Delta,
    fg,
    mfg,
    ne,
    le,
    je,
    nr,
    lr,
    jr,
    mjr,
):
    r"""
    Returns two-photon Rabi frequency :math:`\Omega_R`, ground AC Stark shift :math:`\Delta_{\mathrm{AC}_g}`, Rydberg state AC Stark shift :math:`\Delta_{\mathrm{AC}_r}` and probability to scatter a photon during a :math:`\pi`-pulse :math:`P_\mathrm{sc}` for two-photon excitation from :math:`\vert f_g,m_{f_g}\rangle\rightarrow \vert j_r,m_{j_r}\rangle` via intermediate excited state

    :math:`\Omega_R=\displaystyle\sum_{f_e,m_{f_e}}\frac{\Omega_p^{g\rightarrow f_e}\Omega_c^{f_e\rightarrow r}}{2(\Delta-\Delta_{f_e})}`

    :math:`\Delta_{\mathrm{AC}_g} = \displaystyle\sum_{f_e,m_{f_e}}\frac{\vert\Omega_p^{g\rightarrow f_e}\vert^2}{4(\Delta-\Delta_{f_e})}`

    :math:`\Delta_{\mathrm{AC}_r} = \displaystyle\sum_{f_e,m_{f_e}}\frac{\vert\Omega_c^{f_e\rightarrow r}\vert^2}{4(\Delta-\Delta_{f_e})}`

    :math:`P_\mathrm{sc} = \frac{\Gamma_et_\pi}{2}\displaystyle\sum_{f_e,m_{f_e}}\left[\frac{\vert\Omega_p^{g\rightarrow f_e}\vert^2}{2(\Delta-\Delta_{f_e})^2}+\frac{\vert\Omega_c^{f_e\rightarrow r}\vert^2}{2(\Delta-\Delta_{f_e})^2}\right]`

    where :math:`\tau_\pi=\pi/\Omega_R`.

    .. figure:: ./twophotonexcitation.png
        :width: 150 px
        :alt: Schema of |g-> -> |e> -> |r> transition
        :align: right

    Args:
        Pp: power (W) of probe laser :math:`\vert g \rangle\rightarrow\vert e\rangle`
        wp: beam waist (m) of probe laser :math:`\vert g \rangle\rightarrow\vert e\rangle`
        qp: polarisation (+1, 0 or -1 corresponding to driving :math:`\sigma^+`,:math:`\pi` and :math:`\sigma^-`) of probe laser :math:`\vert g \rangle\rightarrow\vert e\rangle`
        Pc: power (W) of coupling laser :math:`\vert e\rangle\rightarrow\vert r\rangle`
        wc: beam waist (m) of coupling laser :math:`\vert e\rangle\rightarrow\vert r\rangle`
        qc: polarisation (+1, 0 or -1 corresponding to driving :math:`\sigma^+`,:math:`\pi` and :math:`\sigma^-`) of coupling laser :math:`\vert e\rangle\rightarrow\vert r\rangle`
        Delta : Detuning from excited state centre of mass (rad s:math:`^{-1}`); may be a scalar or a numpy array
        fg: ground state hyperfine state
        mfg: projection of ground state hyperfine state
        ne: principal quantum numbers of excited state
        le: orbital angular momentum of excited state
        je: total angular momentum of excited state
        nr: principal quantum number of target Rydberg state
        lr: orbital angular momentum of target Rydberg state
        jr: total angular momentum of target Rydberg state
        mjr: projection of total angular momenutm of target Rydberg state

    Returns:
        float: Two-Photon Rabi frequency :math:`\Omega_R` (units :math:`\mathrm{rads}^{-1}`),
        ground-state AC Stark shift :math:`\Delta_{\mathrm{AC}_g}` (units :math:`\mathrm{rads}^{-1}`) Rydberg-state AC Stark shift :math:`\Delta_{\mathrm{AC}_r}` (units :math:`\mathrm{rads}^{-1}`) and probability to scatter a photon during a :math:`\pi`-pulse :math:`P_\mathrm{sc}`
    """
    UsedModulesARC.hyperfine = True
    # Intensity/beam (W/m^2): peak intensity of a Gaussian beam
    Ip = 2.0 * Pp / (pi * wp**2)
    Ic = 2.0 * Pc / (pi * wc**2)
    # Electric field (V/m): E = sqrt(2 I / (eps0 c))
    Ep = np.sqrt(2.0 * Ip / (epsilon_0 * C_c))
    Ec = np.sqrt(2.0 * Ic / (epsilon_0 * C_c))
    # Excited State Properties
    # Reduced Matrix Element (au); the alkali ground state is n S_{1/2}
    ng = self.groundStateN
    lg = 0
    jg = 0.5
    rme_j = self.getReducedMatrixElementJ(ng, lg, jg, ne, le, je)
    # Rescale to (Cm)
    rme_j *= C_e * physical_constants["Bohr radius"][0]
    # Hyperfine Coefficients (Hz)
    [A, B] = self.getHFSCoefficients(ne, le, je)
    # Linewidth (rad s-1)
    Gamma = 1.0 / self.getStateLifetime(ne, le, je)
    # Rydberg State Reduced Matrix Element (au)
    rme_jRyd = self.getReducedMatrixElementJ(ne, le, je, nr, lr, jr)
    # Rescale to (Cm)
    rme_jRyd *= C_e * physical_constants["Bohr radius"][0]
    # Initialise Output Variables (same shape as Delta)
    OmegaR = np.zeros(np.shape(Delta))
    ACg = np.zeros(np.shape(Delta))
    ACr = np.zeros(np.shape(Delta))
    Pe = np.zeros(np.shape(Delta))
    # Loop over excited state energylevels f_e = |j_e - I| ... j_e + I
    for fe in range(round(abs(je - self.I)), 1 + round(je + self.I)):
        # Hyperfine energy shift (rad s-1)
        Ehfs = 2.0 * np.pi * self.getHFSEnergyShift(je, fe, A, B)
        # all m_fe sublevels are summed over here; a restricted range
        # (as in groundStateRamanTransition) is kept for reference:
        # range(max(-fe,min(mf1,mf0)-1),1+min(fe,max(mf1,mf0)+1)):
        for mfe in range(-fe, fe + 1):
            # Probe Rabi Frequency (rad s-1)
            OmP = (
                Ep
                * rme_j
                / hbar
                * self.getSphericalDipoleMatrixElement(fg, mfg, fe, mfe, qp)
                * self._reducedMatrixElementFJ(jg, fg, je, fe)
            )
            # Coupling Rabi Frequency (rad s-1); upper state is treated
            # as an unresolved fine-structure (Rydberg) state
            OmC = (
                Ec
                * rme_jRyd
                / hbar
                * self.getSphericalMatrixElementHFStoFS(
                    je, fe, mfe, jr, mjr, qc
                )
            )
            # AC Stark shift on ground state (rad s-1)
            ACg += (OmP**2) / (4 * (Delta - Ehfs))
            # AC Stark shift on Rydberg state (rad s-1)
            ACr += (OmC**2) / (4 * (Delta - Ehfs))
            # Two-Photon Rabi Frequency (rad s-1)
            OmegaR += OmP * OmC / (2 * (Delta - Ehfs))
            # Excited state population Pe
            Pe += 0.5 * (OmP**2 + OmC**2) / (2 * (Delta - Ehfs) ** 2)
    # Pi-rotation time (s)
    tau_pi = pi / abs(OmegaR)
    # Spontaneous Emission Probability
    Psc = Gamma * tau_pi * Pe
    return OmegaR, ACg, ACr, Psc
def _spinMatrices(self, j):
    # SPINMATRICES Generates spin-matrices for spin S
    # [Sx,Sy,Sz]=SPINMATRICES(S) returns the Sx,Sy,Sz spin
    # matrices calculated using raising and lowering operators.
    # Returned objects are np.matrix, so '*' performs MATRIX
    # multiplication — breitRabi relies on this semantics.
    mj = -np.arange(-j + 1, j + 1)  # projection quantum numbers
    # matrix elements of the ladder operators: sqrt(j(j+1) - m(m+1))
    jm = np.sqrt(j * (j + 1) - mj * (mj + 1))
    Jplus = np.matrix(np.diag(jm, 1))  # Raising Operator
    Jminus = np.matrix(np.diag(jm, -1))  # Lowering Operator
    # Cartesian components from the ladder operators:
    # Jx = (J+ + J-)/2, Jy = (J- - J+) i/2, Jz = [J+, J-]/2
    Jx = (Jplus + Jminus) / 2.0
    Jy = (-Jplus + Jminus) * 1j / 2.0
    Jz = (Jplus * Jminus - Jminus * Jplus) / 2.0
    # J2=Jx**2+Jy**2+Jz**2
    return Jx, Jy, Jz
def breitRabi(self, n, l, j, B):
    r"""
    Returns exact Zeeman energies :math:`E_z` for states :math:`\vert F,m_f\rangle` in the :math:`\ell,j` manifold via exact diagonalisation of the Zeeman interaction :math:`\mathcal{H}_z` and the hyperfine interaction :math:`\mathcal{H}_\mathrm{hfs}` given by equations

    :math:`\mathcal{H}_Z=\frac{\mu_B}{\hbar}(g_J J_z+g_I I_z)B_z`

    and

    :math:`\mathcal{H}_\mathrm{hfs}=A_\mathrm{hfs}I\cdot J + B_\mathrm{hfs}\frac{3(I\cdot J)^2+3/2 I\cdot J -I^2J^2}{2I(2I-1)2J(2J-1)}`.

    Args:
        n,l,j: principal,orbital, total orbital quantum numbers
        B: Magnetic Field (units T); must be a numpy array
            (iterated over and queried for ``.size``)

    Returns:
        float: State energy :math:`E_z` in SI units (Hz), state f, state mf
    """
    UsedModulesARC.hyperfine = True
    Ahfs, Bhfs = self.getHFSCoefficients(n, l, j)

    # Bohr Magneton (in Hz/T, so all energies below are in Hz)
    uB = physical_constants["Bohr magneton in Hz/T"][0]

    # Define Spin Matrices; total basis size is (2j+1)(2I+1)
    N = round((2 * j + 1) * (2 * self.I + 1))
    [jx, jy, jz] = self._spinMatrices(j)
    ji = np.eye(round(2.0 * j + 1.0))
    [ix, iy, iz] = self._spinMatrices(self.I)
    ii = np.eye(round(2.0 * self.I + 1.0))

    # Calculate Tensor Products acting on the combined |j mj>|I mI> space
    # (operands are np.matrix, so '*' below is matrix multiplication)
    Jx = np.kron(jx, ii)
    Jy = np.kron(jy, ii)
    Jz = np.kron(jz, ii)
    Ix = np.kron(ji, ix)
    Iy = np.kron(ji, iy)
    Iz = np.kron(ji, iz)
    J2 = Jx**2 + Jy**2 + Jz**2
    I2 = Ix**2 + Iy**2 + Iz**2
    IJ = Ix * Jx + Iy * Jy + Iz * Jz

    # F Basis (total atomic angular momentum F = J + I)
    Fx = Jx + Ix
    Fy = Jy + Iy
    Fz = Jz + Iz
    F2 = Fx**2 + Fy**2 + Fz**2

    # Hyperfine Interaction (quadrupole term only when Bhfs is defined)
    Hhfs = Ahfs * IJ
    if Bhfs != 0:
        Hhfs += (
            Bhfs
            * (3 * IJ * IJ + 3 / 2 * IJ - I2 * J2)
            / (2 * self.I * (2 * self.I - 1) * 2 * j * (2 * j - 1))
        )

    # Zeeman Interaction
    Hz = uB * (self.getLandegjExact(l, j) * Jz + self.gI * Iz)

    # Diagonalise at every requested field value
    en = np.zeros([B.size, N])
    ctr = -1
    for b in B:
        ctr = ctr + 1
        eVal, eVec = eigh(Hhfs + b * Hz)
        en[ctr, :] = eVal

    # Determine States: label eigenvectors with (f, mf) using the
    # eigenstates at a small field (1e-4 T) where F, mF are still good
    # quantum numbers
    eVal, eVec = eigh(Hhfs + 1e-4 * Hz)
    eVec = np.matrix(eVec)
    f = np.zeros(N)
    mf = np.zeros(N)
    for ctr in range(N):
        # expectation value of F^2 gives f via f(f+1) = <F^2>
        f2 = eVec[:, ctr].conj().T * F2 * eVec[:, ctr]
        f[ctr] = np.round(1 / 2 * (-1 + np.sqrt(1 + 4 * np.real(f2[0, 0]))))
        # expectation value of Fz gives mf directly
        m = eVec[:, ctr].conj().T * Fz * eVec[:, ctr]
        mf[ctr] = np.round(np.real(m[0, 0]))
    return en, f, mf
def NumerovBack(innerLimit, outerLimit, kfun, step, init1, init2):
    """
    Pure-Python Numerov integration, propagating from outside inwards.

    Calculates the solution :math:`rad(r)` of

    :math:`\\frac{\\mathrm{d}^2 rad(r)}{\\mathrm{d} r^2} = \
    kfun(r)\\cdot rad(r)`

    on a uniform grid (spacing `step`) in the :math:`\\sqrt{r}`
    coordinate, starting at `outerLimit` and stepping down towards
    `innerLimit`. If the solution starts to diverge before reaching the
    inner limit, the integration is cut off and the remaining points are
    zeroed.

    Args:
        innerLimit (float): inner limit of integration
        outerLimit (flaot): outer limit of integration
        kfun (function(double)): pointer to function used in equation (see
            longer explanation above)
        step: descrete step size for integration
        init1 (float): initial value, `rad`(`outerLimit`+`step`)
        init2 (float): initial value,
            `rad`(`outerLimit`+:math:`2\\cdot` `step`)

    Returns:
        numpy array of float , numpy array of float, int : :math:`r` (a.u),
        :math:`rad(r)`;

    Note:
        Returned function is not normalized!

    Note:
        If :obj:`AlkaliAtom.cpp_numerov` swich is set to True (default),
        much faster C implementation of the algorithm will be used instead.
        That is recommended option. See documentation installation
        instructions for more details.
    """
    totalPoints = round((sqrt(outerLimit) - sqrt(innerLimit)) / step)
    # integrated wavefunction R(r)*r^{3/4}
    sol = np.zeros(totalPoints, dtype=np.dtype("d"))
    # radial coordinate for integration \sqrt(r)
    rad = np.zeros(totalPoints, dtype=np.dtype("d"))

    def numerovStep(xc, fPrev, fPrev2):
        # one inward Numerov update at coordinate xc, given the two
        # previously computed samples (at xc+step and xc+2*step)
        return (
            2.0 * (1.0 - 5.0 / 12.0 * step**2 * kfun(xc)) * fPrev
            - (1.0 + 1.0 / 12.0 * step**2 * kfun(xc + step)) * fPrev2
        ) / (1.0 + 1.0 / 12.0 * step**2 * kfun(xc - step))

    # seed the recurrence from the two supplied outer boundary values
    idx = totalPoints - 1
    x = sqrt(innerLimit) + step * (idx - 1)
    sol[idx] = numerovStep(x, init1, init2)
    rad[idx] = x

    x = x - step
    idx = idx - 1
    sol[idx] = numerovStep(x, sol[idx + 1], init1)
    rad[idx] = x

    # check if the function starts diverging before the innerLimit
    # -> in that case break integration earlier
    peak = 0.0  # largest |sol * sqrt(x)| seen so far
    checkIdx = 0  # index below which we watch for divergence
    sincePeak = 0  # consecutive steps without a new maximum
    while idx > checkIdx:
        idx = idx - 1
        x = x - step
        sol[idx] = numerovStep(x, sol[idx + 1], sol[idx + 2])
        rad[idx] = x
        amplitude = abs(sol[idx] * sqrt(x))
        if amplitude > peak:
            peak = amplitude
        else:
            sincePeak += 1
            if sincePeak > 50:
                checkIdx = idx

    # now proceed with caution - checking if the divergence starts
    # - if it does, cut earlier
    divergenceIdx = 0
    while (idx > 0) and (divergenceIdx == 0):
        idx = idx - 1
        x = x - step
        sol[idx] = numerovStep(x, sol[idx + 1], sol[idx + 2])
        rad[idx] = x
        if (divergenceIdx == 0) and (abs(sol[idx] * sqrt(x)) > peak):
            divergenceIdx = idx
            # back off to where the solution was still decreasing
            while (
                abs(sol[divergenceIdx]) > abs(sol[divergenceIdx + 1])
            ) and (divergenceIdx < checkIdx):
                divergenceIdx += 1
            if divergenceIdx > checkIdx:
                print("Numerov error")
                exit()

    # zero out everything inside the divergence cut-off
    idx = divergenceIdx
    while idx > 0:
        rad[idx] = rad[idx + 1] - step
        sol[idx] = 0
        idx -= 1

    # convert R(r)*r^{3/4} to R(r)*r
    sol = np.multiply(sol, np.sqrt(rad))
    # convert \sqrt(r) to r
    rad = np.multiply(rad, rad)
    return rad, sol
def _atomLightAtomCoupling(
    n,
    l,
    j,
    nn,
    ll,
    jj,
    n1,
    l1,
    j1,
    n2,
    l2,
    j2,
    atom1,
    atom2=None,
    s=0.5,
    s2=None,
):
    """
    Calculates radial part of atom-light coupling

    This function might seem redundant, since similar function exist for
    each of the atoms. Function that is not connected to specific
    atomic species is provided in order to provides route to implement
    inter-species coupling.

    Returns False when either transition is neither dipole- nor
    quadrupole-allowed.
    """
    if atom2 is None:
        # if not explicitly inter-species, assume it's the same species
        atom2 = atom1
    if s2 is None:
        s2 = s

    # determine coupling order (1 = dipole, 2 = quadrupole) for atom 1;
    # the 0.1 margins guard half-integer comparisons against float error
    dl = abs(l - l1)
    dj = abs(j - j1)
    c1 = 0
    if dl == 1 and (dj < 1.1):
        c1 = 1  # dipole coupling
    elif (dl == 0 or dl == 2 or dl == 1) and (dj < 2.1):
        c1 = 2  # quadrupole coupling
    else:
        return False

    # same classification for atom 2
    dl = abs(ll - l2)
    dj = abs(jj - j2)
    c2 = 0
    if dl == 1 and (dj < 1.1):
        c2 = 1  # dipole coupling
    elif (dl == 0 or dl == 2 or dl == 1) and (dj < 2.1):
        c2 = 2  # quadrupole coupling
    else:
        return False

    radial1 = atom1.getRadialCoupling(n, l, j, n1, l1, j1, s=s)
    radial2 = atom2.getRadialCoupling(nn, ll, jj, n2, l2, j2, s=s2)

    # TO-DO: check exponent of the Bohr radius (from where it comes?!)
    coupling = (
        C_e**2
        / (4.0 * pi * epsilon_0)
        * radial1
        * radial2
        * (physical_constants["Bohr radius"][0]) ** (c1 + c2)
    )
    return coupling
# ================== Saving and loading calculations (START) ==================
def saveCalculation(calculation, fileName: str):
    """
    Saves calculation for future use.

    Saves :obj:`calculations_atom_pairstate.PairStateInteractions` and
    :obj:`calculations_atom_single.StarkMap`
    calculations in compact binary format in file named `filename`. It uses
    cPickle serialization library in Python, and also zips the final file.

    Calculation can be retrieved and used with :obj:`loadSavedCalculation`

    Args:
        calculation: class instance of calculations (instance of
            :obj:`calculations_atom_pairstate.PairStateInteractions`
            or :obj:`calculations_atom_single.StarkMap`)
            to be saved.
        fileName: name of the file where calculation will be saved

    Returns:
        int: 0 on success, 1 on failure. On failure the calculation
        object is left in its original, usable state (plot handles and
        database connections restored).

    Example:
        Let's suppose that we did the part of the
        :obj:`calculation_atom_pairstate.PairStateInteractions`
        calculation that involves generation of the interaction
        matrix. After that we can save the full calculation in a single file::

            calc = PairStateInteractions(Rubidium(),
                    60,0,0.5,
                    60,0,0.5,
                    0.5,0.5)
            calc.defineBasis(0,0, 5,5, 25.e9)
            calc.diagonalise(np.linspace(0.5,10.0,200),150)
            saveCalculation(calc, "mySavedCalculation.pkl")

        Then, at a later time, and even on the another machine, we can load
        that file and continue with calculation. We can for example explore
        the calculated level diagram::

            calc = loadSavedCalculation("mySavedCalculation.pkl")
            calc.plotLevelDiagram()
            calc.showPlot()
            rvdw = calc.getVdwFromLevelDiagram(0.5,14,
                minStateContribution=0.5,
                showPlot = True)

        Or, we can do additional matrix diagonalization, in some new range,
        then and find C6 by fitting the obtained level diagram::

            calc = loadSavedCalculation("mySavedCalculation.pkl")
            calc.diagonalise(np.linspace(3,6.0,200),20)
            calc.getC6fromLevelDiagram(3,6.0,showPlot=True)

        Note that for all loading of saved calculations we've been using
        function :obj:`loadSavedCalculation` .

    Note:
        This doesn't save results of :obj:`plotLevelDiagram` for the
        corresponding calculations. Call the plot function before calling
        :obj:`showPlot` function for the corresponding calculation.
    """
    # remember every attribute we temporarily overwrite, so that we can
    # restore the calculation object even when saving fails partway
    saved = {}
    try:
        try:
            # matplotlib figure/axes handles are not picklable -> blank
            # them out for the duration of the dump
            saved["ax"] = calculation.ax
            saved["fig"] = calculation.fig
            calculation.ax = 0
            calculation.fig = 0
            # close database connections (sqlite connections can't be
            # pickled either)
            if hasattr(calculation, "atom"):
                saved["atom"] = calculation.atom.conn
                calculation.atom.conn = False
            elif hasattr(calculation, "atom1"):
                saved["atom1"] = calculation.atom1.conn
                calculation.atom1.conn = False
                saved["atom2"] = calculation.atom2.conn
                calculation.atom2.conn = False
            # context manager guarantees the file is closed even when
            # pickling raises partway through
            with gzip.GzipFile(fileName, "wb") as output:
                pickle.dump(calculation, output, pickle.HIGHEST_PROTOCOL)
        finally:
            # always restore what we stashed away — a failed save must
            # not leave the calculation object corrupted
            if "ax" in saved:
                calculation.ax = saved["ax"]
            if "fig" in saved:
                calculation.fig = saved["fig"]
            if "atom" in saved:
                calculation.atom.conn = saved["atom"]
            if "atom1" in saved:
                calculation.atom1.conn = saved["atom1"]
            if "atom2" in saved:
                calculation.atom2.conn = saved["atom2"]
    except Exception as ex:
        print(ex)
        print("ERROR: saving of the calculation failed.")
        print(sys.exc_info())
        return 1
    return 0
def loadSavedCalculation(fileName: str):
    """
    Loads previously saved calculation.

    Loads :obj:`calculations_atom_pairstate.PairStateInteractions` and
    :obj:`calculations_atom_single.StarkMap`
    calculation instance from file named `filename` where it was previously
    saved with :obj:`saveCalculation` .

    Example:
        See example for :obj:`saveCalculation`.

    Args:
        fileName: name of the file where calculation was saved

    Returns:
        saved calculation instance, or ``False`` if loading failed
    """
    calculation = False
    try:
        # context manager guarantees the file handle is closed even if
        # unpickling raises
        with gzip.GzipFile(fileName, "rb") as calcInput:
            calculation = pickle.load(calcInput)
    except Exception as ex:
        print(ex)
        print("ERROR: loading of the calculation from '%s' failed" % fileName)
        print(sys.exc_info())
        return False
    print(
        "Loading of "
        + calculation.__class__.__name__
        + " from '"
        + fileName
        + "' successful."
    )

    # re-establish connection to the database
    # (connections are closed by saveCalculation before pickling)
    if hasattr(calculation, "atom"):
        # single-atom calculation
        calculation.atom._databaseInit()
    elif hasattr(calculation, "atom1"):
        # pair-state calculation: both atoms need their own connection
        calculation.atom1._databaseInit()
        calculation.atom2._databaseInit()
    return calculation
# =================== Saving and loading calculations (END) ===================
# =================== State generation and printing (START) ===================
def singleAtomState(j, m):
    """Column vector representing :math:`|j, m\\rangle` in the basis
    :math:`\\{|j,-j\\rangle, \\ldots, |j,+j\\rangle\\}`."""
    dimension = round(2.0 * j + 1.0)
    state = np.zeros((dimension, 1), dtype=np.complex128)
    state[round(j + m)] = 1
    return state
def compositeState(s1, s2):
    """Tensor (Kronecker) product of two single-atom states, returned
    as a column vector."""
    product = np.kron(s1, s2)
    return product.reshape((s1.shape[0] * s2.shape[0], 1))
def printState(n: int, l: int, j: float, s=None):
    """
    Prints state spectroscopic label for numeric :math:`n`,
    :math:`l`, :math:`s` label of the state

    Args:
        n (int): principal quantum number
        l (int): orbital angular momentum
        j (float): total angular momentum
        s (float): (optional) total spin momentum
    """
    label = printStateString(n, l, j, s=s)
    print(label)
def printStateString(n: int, l: int, j: float, s=None):
    """
    Returns state spectroscopic label for numeric :math:`n`,
    :math:`l`, :math:`j` label of the state.

    Optionally users can define :math:`s`, prompting printing :math:`2S+1`
    index too (commonly used for Alkaline Earth atoms, while it is usually
    omitted for Alkali atoms)

    Args:
        n (int): principal quantum number
        l (int): orbital angular momentum
        j (float): total angular momentum
        s (float): (optional) total spin momentum

    Returns:
        string: label for the state in standard spectroscopic notation
    """
    letter = printStateLetter(l)
    if s is None:
        # alkali-style label: j always printed as a fraction over 2
        return "%s %s %.0d/2" % (n, letter, j * 2)
    # integer j prints as-is; half-integer j as a fraction over 2
    if abs(floor(j) - j) < 0.1:
        jLabel = " %.0d" % (j)
    else:
        jLabel = " %.0d/2" % (j * 2)
    multiplicity = " %d" % (round(2 * s + 1))
    return str(n) + multiplicity + letter + jLabel
def printStateStringLatex(n: int, l: int, j: float, s=None):
    """
    Returns latex code for spectroscopic label for numeric :math:`n`,
    :math:`l`, :math:`j` label of the state.

    Args:
        n (int): principal quantum number
        l (int): orbital angular momentum
        j (float): total angular momentum
        s (float): (optional) total spin momentum

    Returns:
        string: label for the state in standard spectroscopic notation
    """
    letter = printStateLetter(l)
    if s is None:
        return "%s%s_{%.0d/2}" % (n, letter, j * 2)
    # integer vs half-integer j determines the subscript form
    if abs(floor(j) - j) < 0.1:
        jSubscript = "_{%.0d}" % (j)
    else:
        jSubscript = "_{%.0d/2}" % (j * 2)
    return str(n) + (" ^{%d}" % (round(2 * s + 1))) + letter + jSubscript
def printStateLetter(l: int):
    """Spectroscopic letter (S, P, D, ...) for orbital angular momentum `l`;
    falls back to ``" l=<value>"`` beyond l = 10."""
    letters = "SPDFGHIKLMN"
    if 0 <= l < len(letters):
        return letters[l]
    return " l=%d" % l
def formatNumberSI(datum, precision=4):
    """Format `datum` to `precision` significant digits, scaled to the
    nearest SI prefix (e.g. 1234 -> ``"1.234 k"``)."""
    siPrefixes = (
        "y",
        "z",
        "a",
        "f",
        "p",
        "n",
        r"\mu",
        "m",
        "",
        "k",
        "M",
        "G",
        "T",
        "P",
        "E",
        "Z",
        "Y",
    )
    exponent = np.floor(np.log10(np.abs(datum)))
    prefixIndex = np.floor(exponent / 3).astype("int")
    shift = (prefixIndex * 3).astype("double")
    intDigits = exponent - shift + 1
    fracDigits = np.max((precision - intDigits, 0))
    scaled = datum * 10 ** (-shift)

    if np.abs(shift) <= 24:
        # within the range covered by the SI prefixes ("" at index 8)
        suffix = " " + siPrefixes[prefixIndex + 8]
    else:
        suffix = " x 10^{%d}" % shift

    if intDigits == precision:
        # last significant figure sits in the ones place: mark it with
        # a trailing decimal point
        suffix = "." + suffix

    # Formally, if intDigits > precision, scaled should be rounded off
    # to requested precision, but since we are showing no more than 3 digits
    # left of the decimal, it's probably better not to round off
    fmt = "%%%d.%df%s" % (intDigits, fracDigits, suffix)
    return fmt % (scaled)
# =================== State generation and printing (END) ===================
# =================== E FIELD Coupling (START) ===================
class _EFieldCoupling:
    """
    Helper for Stark calculations: angular parts of electric-field
    couplings between fine-structure states, evaluated in a basis rotated
    by `theta` (around y) and `phi` (around z).

    Results are memoized in an SQLite database
    (``precalculated_stark.db`` inside :obj:`dataFolder`): the
    rotation-independent angular sums persist between runs, while the
    rotation-dependent couplings are rebuilt per instance.

    Args:
        theta (float): rotation of the basis around the y-axis
        phi (float): rotation of the basis around the z-axis
    """

    dataFolder = DPATH

    def __init__(self, theta=0.0, phi=0.0):
        self.theta = theta
        self.phi = phi
        # STARK memoization
        self.conn = sqlite3.connect(
            os.path.join(self.dataFolder, "precalculated_stark.db")
        )

        # ANGULAR PARTS (rotation independent - table is kept between runs)
        c = self.conn.cursor()
        c.execute(
            """SELECT COUNT(*) FROM sqlite_master
                        WHERE type='table'
                        AND name='eFieldCoupling_angular';"""
        )
        if c.fetchone()[0] == 0:
            # create table
            c.execute(
                """CREATE TABLE IF NOT EXISTS eFieldCoupling_angular
             (l1 TINYINT UNSIGNED, j1_x2 TINYINT UNSIGNED,
              j1_mj1 TINYINT UNSIGNED,
              l2 TINYINT UNSIGNED, j2_x2 TINYINT UNSIGNED,
              j2_mj2 TINYINT UNSIGNED, s_x2 TINYINT UNSIGNED,
              sumPart DOUBLE,
              PRIMARY KEY (l1,j1_x2,j1_mj1,l2,j2_x2,j2_mj2, s_x2)
            ) """
            )
            self.conn.commit()

        # COUPLINGS IN ROTATED BASIS (depend on theta, phi) - the table is
        # dropped and rebuilt for each new rotation
        self.wgd = WignerDmatrix(self.theta, self.phi)

        c.execute("""DROP TABLE IF EXISTS eFieldCoupling""")
        c.execute(
            """SELECT COUNT(*) FROM sqlite_master
                        WHERE type='table' AND name='eFieldCoupling';"""
        )
        if c.fetchone()[0] == 0:
            # create table
            # NOTE(review): "TINYINT_UNSIGNED" (underscore) below is likely a
            # typo for "TINYINT UNSIGNED"; harmless in SQLite, which does not
            # enforce declared column types.
            c.execute(
                """CREATE TABLE IF NOT EXISTS eFieldCoupling
             (l1 TINYINT UNSIGNED, j1_x2 TINYINT UNSIGNED,
              j1_mj1 TINYINT UNSIGNED,
              l2 TINYINT UNSIGNED, j2_x2 TINYINT UNSIGNED,
              j2_mj2 TINYINT UNSIGNED, s_x2 TINYINT_UNSIGNED,
              coupling DOUBLE,
              PRIMARY KEY (l1,j1_x2,j1_mj1,l2,j2_x2,j2_mj2, s_x2)
            ) """
            )
            self.conn.commit()

    def getAngular(self, l1, j1, mj1, l2, j2, mj2, s=0.5):
        """
        Rotation-independent angular sum coupling the two fine-structure
        states (spin `s`); memoized in the ``eFieldCoupling_angular`` table.
        """
        c = self.conn.cursor()
        c.execute(
            """SELECT sumPart FROM eFieldCoupling_angular WHERE
            l1= ? AND j1_x2 = ? AND j1_mj1 = ? AND
            l2 = ? AND j2_x2 = ? AND j2_mj2 = ? AND s_x2 = ?
            """,
            (l1, 2 * j1, j1 + mj1, l2, j2 * 2, j2 + mj2, s * 2),
        )
        answer = c.fetchone()
        if answer:
            # found memoized value
            return answer[0]

        # calculates sum (See PRA 20:2251 (1979), eq.(10))
        sumPart = 0.0

        for ml in np.linspace(mj1 - s, mj1 + s, round(2 * s + 1)):
            if (abs(ml) - 0.1 < l1) and (abs(ml) - 0.1 < l2):
                angularPart = 0.0
                # non-zero only when l changes by exactly 1
                if abs(l1 - l2 - 1) < 0.1:
                    angularPart = (
                        (l1**2 - ml**2)
                        / ((2.0 * l1 + 1.0) * (2.0 * l1 - 1.0))
                    ) ** 0.5
                elif abs(l1 - l2 + 1) < 0.1:
                    angularPart = (
                        (l2**2 - ml**2)
                        / ((2.0 * l2 + 1.0) * (2.0 * l2 - 1.0))
                    ) ** 0.5

                sumPart += (
                    CG(l1, ml, s, mj1 - ml, j1, mj1)
                    * CG(l2, ml, s, mj1 - ml, j2, mj2)
                    * angularPart
                )

        # memoize the freshly computed value
        c.execute(
            """ INSERT INTO eFieldCoupling_angular
                            VALUES (?,?,?, ?,?,?, ?, ?)""",
            [l1, 2 * j1, j1 + mj1, l2, j2 * 2, j2 + mj2, s * 2, sumPart],
        )
        self.conn.commit()

        return sumPart

    def getCouplingDivEDivDME(self, l1, j1, mj1, l2, j2, mj2, s=0.5):
        """
        Coupling of the two states in the rotated basis, divided by the
        electric field amplitude and the radial dipole matrix element;
        memoized in the ``eFieldCoupling`` table.
        """
        # returns angular coupling without radial part and electric field

        # if calculated before, retrieve from memory
        c = self.conn.cursor()
        c.execute(
            """SELECT coupling FROM eFieldCoupling WHERE
            l1= ? AND j1_x2 = ? AND j1_mj1 = ? AND
            l2 = ? AND j2_x2 = ? AND j2_mj2 = ? AND s_x2 = ?
            """,
            (l1, 2 * j1, j1 + mj1, l2, j2 * 2, j2 + mj2, s * 2),
        )
        answer = c.fetchone()
        if answer:
            return answer[0]

        # if it is not calculated before, calculate now
        coupling = 0.0

        # rotate individual states into the (theta, phi) basis
        statePart1 = singleAtomState(j1, mj1)
        dMatrix = self.wgd.get(j1)
        statePart1 = np.conj(dMatrix.dot(statePart1))

        statePart2 = singleAtomState(j2, mj2)
        dMatrix = self.wgd.get(j2)
        statePart2 = dMatrix.dot(statePart2)

        # find first common index and start summation
        start = min(j1, j2)

        for mj in np.linspace(-start, start, floor(2 * start + 1)):
            coupling += (
                self.getAngular(l1, j1, mj, l2, j2, mj)
                * (statePart1[j1 + mj] * statePart2[j2 + mj])[0].real
            )

        # save in memory for later use
        c.execute(
            """ INSERT INTO eFieldCoupling
                            VALUES (?,?,?, ?,?,?, ?, ?)""",
            [l1, 2 * j1, j1 + mj1, l2, j2 * 2, j2 + mj2, s * 2, coupling],
        )
        self.conn.commit()

        # return result
        return coupling

    def _closeDatabase(self):
        # flush pending INSERTs and release the connection; conn is set to
        # False so a closed instance is recognisable
        self.conn.commit()
        self.conn.close()
        self.conn = False
# =================== E FIELD Coupling (END) ===================
# Copy the data files to the user's home directory at first run, so that
# later writes (e.g. memoization databases) do not need permissions on the
# package installation directory.
setup_data_folder()
import numpy as np
import os
from .alkali_atom_functions import DPATH
class OpticalMaterial(object):
    """
    Abstract class implementing calculation of basic properties for optical
    materials.

    Subclasses define :obj:`sources` (and optionally override the
    ``get*`` methods); tabulated data is loaded when the material is
    instantiated.
    """

    #: Human-friendly name of material
    name = ""
    #: List of .csv files listing refractive index measurements
    #: first column in these files is wavelength (in mu m), the second
    #: refractive index
    sources = []
    # Loaded per-instance in __init__ based on the `sources` list
    sourcesN = []
    #: Any notes about measured values
    sourcesComment = []
    #: Array of max and minimal wavelength pairs [lambdaMin, lambdaMax]
    #: for each of the sources. Automatically loaded from sources list
    sourcesRange = []

    def __init__(self):
        # Use fresh instance-level lists: appending to the class-level
        # defaults would share (and duplicate) loaded data across every
        # instance and subclass each time a material is instantiated.
        self.sourcesN = []
        self.sourcesRange = []
        for s in self.sources:
            self.sourcesN.append(
                np.loadtxt(
                    os.path.join(DPATH, "refractive_index_data", s),
                    skiprows=1,
                    delimiter=",",
                    unpack=True,
                )
            )
            self.sourcesRange.append(
                [self.sourcesN[-1][0].min(), self.sourcesN[-1][0].max()]
            )

    def getN(self, *args, **kwargs):
        """
        Refractive index of material
        """
        return "To-do: refractive index"

    def getRho(self):
        """Density (placeholder, to be implemented by subclasses)."""
        return "To-do: density"

    def getElectricConductance(self):
        """Electric conductance (placeholder, to be implemented by subclasses)."""
        return "To-do: electric conductance"

    def getThermalConductance(self):
        """Thermal conductance (placeholder, to be implemented by subclasses)."""
        return "To-do: thermal conductance"
class Air(OpticalMaterial):
    """
    Air as an optical material at normal conditions
    """

    name = "Air (dry, normal conditions)"
    sources = [
        "Mathar-1.3.csv",
        "Mathar-2.8.csv",
        "Mathar-4.35.csv",
        "Mathar-7.5.csv",
    ]
    sourcesComment = ["vacuum", "vacuum", "vacuum", "vacuum"]

    def getN(self, vacuumWavelength=None, *args, **kwargs):
        """
        Assumes temperature: 15 °C, pressure: 101325 Pa
        """
        if vacuumWavelength is None:
            raise ValueError("wavelength not specified for refractive index")
        x = vacuumWavelength
        if 0.23 < x < 1.690:
            # two-term dispersion formula, valid in the visible/NIR range
            return (
                1
                + 0.05792105 / (238.0185 - x ** (-2))
                + 0.00167917 / (57.362 - x ** (-2))
            )
        # outside the analytic range: interpolate tabulated measurements
        for i, rangeN in enumerate(self.sourcesRange):
            if rangeN[0] < x < rangeN[1]:
                return np.interp(x, self.sourcesN[i][0], self.sourcesN[i][1])
        raise ValueError(
            "No refrative index data available for requested"
            " wavelength %.3f mum" % x
        )
class Sapphire(OpticalMaterial):
    """
    Sapphire as optical material.
    """

    name = "Sapphire"
    # data from: https://refractiveindex.info
    sources = ["Querry-o.csv", "Querry-e.csv"]
    sourcesN = []
    sourcesComment = ["o", "e"]

    def getN(
        self,
        vacuumWavelength=None,
        airWavelength=None,
        axis="ordinary",
        *args,
        **kwargs
    ):
        """
        Refractive index of sapphire at the given wavelength (in mu m).

        Args:
            vacuumWavelength: wavelength in vacuum; internally converted to
                the corresponding air wavelength
            airWavelength: wavelength in air, used directly
            axis: ``"ordinary"``/``"o"`` or ``"extraordinary"``/``"e"``
                crystal axis

        Raises:
            ValueError: if no wavelength is given, the axis is unknown, or
                no data covers the requested wavelength.
        """
        if vacuumWavelength is not None:
            air = Air()
            x = vacuumWavelength / air.getN(vacuumWavelength=vacuumWavelength)
        elif airWavelength is not None:
            x = airWavelength
        else:
            raise ValueError("wavelength not specified for refractive index")

        if (axis == "ordinary") or (axis == "o"):
            # electric field polarisation perpendicular to crystal axis
            if (x > 0.2) and (x < 5.0):
                # Sellmeier-type dispersion relation
                return (
                    1
                    + 1.4313493 / (1 - (0.0726631 / x) ** 2)
                    + 0.65054713 / (1 - (0.1193242 / x) ** 2)
                    + 5.3414021 / (1 - (18.028251 / x) ** 2)
                ) ** 0.5
            else:
                for i, rangeN in enumerate(self.sourcesRange):
                    if (
                        (x > rangeN[0])
                        and (x < rangeN[1])
                        and (self.sourcesComment[i] == "o")
                    ):
                        return np.interp(
                            x, self.sourcesN[i][0], self.sourcesN[i][1]
                        )
                raise ValueError(
                    "No refrative index data available for "
                    "requested wavelength %.3f mum" % x
                )
        elif (axis == "extraordinary") or (axis == "e"):
            # electric field polarisation along crystal axis
            # BUG FIX: this range check previously used `or`, which is
            # always true and made the tabulated-data fallback below
            # unreachable; use `and`, mirroring the ordinary-axis branch.
            if (x > 0.2) and (x < 5.0):
                return (
                    1
                    + 1.5039759 / (1 - (0.0740288 / x) ** 2)
                    + 0.55069141 / (1 - (0.1216529 / x) ** 2)
                    + 6.5927379 / (1 - (20.072248 / x) ** 2)
                ) ** 0.5
            else:
                for i, rangeN in enumerate(self.sourcesRange):
                    if (
                        (x > rangeN[0])
                        and (x < rangeN[1])
                        and (self.sourcesComment[i] == "e")
                    ):
                        return np.interp(
                            x, self.sourcesN[i][0], self.sourcesN[i][1]
                        )
                raise ValueError(
                    "No refrative index data available for "
                    "requested wavelength %.3f mum" % x
                )
        else:
            raise ValueError("Uknown axis")
from __future__ import division, print_function, absolute_import
from scipy.special import jv, legendre, sph_harm, jacobi
from math import pi
from numpy import conj as conjugate
from numpy import floor, sqrt, sin, cos, exp, power
from scipy.special import comb
from scipy.special import factorial
from sympy.physics.wigner import wigner_3j as Wigner3j_sympy
from sympy.physics.wigner import wigner_6j as Wigner6j_sympy
from sympy import N as sympyEvaluate
import numpy as np
import os
from scipy.sparse import csr_matrix
from scipy.sparse import eye as sparse_eye
import sys
if sys.version_info > (2,):
    # Python 3: provide the Python 2 names used throughout this module.
    # roundPy2 reproduces Python 2 rounding for the half-integer index
    # bookkeeping below: the tiny offset pushes exact .5 ties upwards,
    # instead of Python 3's round-half-to-even behaviour.
    xrange = range

    def roundPy2(x):
        return round(x + 1.0e-15)

else:
    roundPy2 = round
__all__ = ["Wigner3j", "Wigner6j", "TriaCoeff", "CG", "WignerDmatrix"]

wignerPrecal = (
    True  # use precalculated values - tested only for the main algorithm calls
)
# largest j1 (and J3) covered by the precalculated lookup tables below
wignerPrecalJmax = 23
# Precalculated 3-j symbols; see Wigner3j for the index layout
# (2*j1, 2*(Jmax + m1), 2*j2, j2 + m2, 2 - j3 + j1).
wignerPrecal3j = np.load(
    os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "data",
        "precalculated3j.npy",
    ),
    encoding="latin1",
    allow_pickle=True,
)
# Precalculated 6-j symbols; see Wigner6j for the index layout.
wignerPrecal6j = np.load(
    os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "data",
        "precalculated6j.npy",
    ),
    encoding="latin1",
    allow_pickle=True,
)
def Wigner3j(j1, j2, j3, m1, m2, m3):
    r"""
    Evaluates Wigner 3-j symbol

    Args:
        j1,j2,j3,m1,m2,m3 (float): parameters of
            :math:`\begin{pmatrix}j_1 & j_2 & j_3 \\ m_1 & m_2 & m_3\end{pmatrix}`

    Returns:
        float: value of the 3-j symbol; 0 when the projection selection
        rule :math:`m_1+m_2+m_3=0` is not satisfied.

    Raises:
        ValueError: if arguments are not (half-)integers, violate parity,
            or are out of their allowed ranges.
    """
    # use precalculated values when the arguments fall inside the table
    if wignerPrecal and (
        (j2 < 2.1) and abs(m2) < 2.1 and (j1 < wignerPrecalJmax)
    ):
        # we should have a precalculated value
        if (
            (abs(j1 - j2) - 0.1 < j3)
            and (j3 < j1 + j2 + 0.1)
            and abs(m1 + m2 + m3) < 0.1
        ):
            # return precalculated value
            return wignerPrecal3j[
                round(roundPy2(2 * j1)),
                round(roundPy2(2 * (wignerPrecalJmax + m1))),
                round(roundPy2(2.0 * j2)),
                round(roundPy2(m2 + j2)),
                round(roundPy2(2 - j3 + j1)),
            ]
        else:
            # that value is 0
            return 0

    if j1 > 40 or j2 > 40 or j3 > 40 or m1 > 40 or m2 > 40 or m3 > 40:
        # usual implementation of coefficient calculation that uses factorials
        # would fail (overflow). Use the slower Sympy version instead.
        return float(
            sympyEvaluate(Wigner3j_sympy(j1, j2, j3, m1, m2, m3).doit())
        )

    # print "unknown %.1f %.1f %.1f %.1f %.1f %.1f " % (j1,j2,j3,m1,m2,m3)
    # ======================================================================
    # Wigner3j.m by David Terr, Raytheon, 6-17-04
    #
    # Compute the Wigner 3j symbol using the Racah formula [1].
    #
    # Usage:
    # from wigner import Wigner3j
    # wigner = Wigner3j(j1,j2,j3,m1,m2,m3)
    #
    # / j1 j2 j3 \
    # | |
    # \ m1 m2 m3 /
    #
    # Reference: Wigner 3j-Symbol entry of Eric Weinstein's Mathworld:
    # http://mathworld.wolfram.com/Wigner3j-Symbol.html
    # ======================================================================

    # Error checking
    if (
        (2 * j1 != floor(2 * j1))
        | (2 * j2 != floor(2 * j2))
        | (2 * j3 != floor(2 * j3))
        | (2 * m1 != floor(2 * m1))
        | (2 * m2 != floor(2 * m2))
        | (2 * m3 != floor(2 * m3))
    ):
        raise ValueError("All arguments must be integers or half-integers.")

    # Additional check if the sum of the second row equals zero
    if m1 + m2 + m3 != 0:
        # print('3j-Symbol unphysical')
        return 0

    if j1 - m1 != floor(j1 - m1):
        raise ValueError("2*j1 and 2*m1 must have the same parity")

    if j2 - m2 != floor(j2 - m2):
        raise ValueError("2*j2 and 2*m2 must have the same parity")

    if j3 - m3 != floor(j3 - m3):
        raise ValueError("2*j3 and 2*m3 must have the same parity")

    if (j3 > j1 + j2) | (j3 < abs(j1 - j2)):
        raise ValueError("j3 is out of bounds.")

    if abs(m1) > j1:
        raise ValueError("m1 is out of bounds.")

    if abs(m2) > j2:
        raise ValueError("m2 is out of bounds.")

    if abs(m3) > j3:
        raise ValueError("m3 is out of bounds.")

    # summation limits for the Racah formula
    t1 = j2 - m1 - j3
    t2 = j1 + m2 - j3
    t3 = j1 + j2 - j3
    t4 = j1 - m1
    t5 = j2 + m2

    tmin = max(0, max(t1, t2))
    tmax = min(t3, min(t4, t5))
    tvec = np.arange(tmin, tmax + 1, 1)

    wigner = 0

    for t in tvec:
        wigner += (-1) ** t / (
            factorial(t)
            * factorial(t - t1)
            * factorial(t - t2)
            * factorial(t3 - t)
            * factorial(t4 - t)
            * factorial(t5 - t)
        )

    return (
        wigner
        * (-1) ** (j1 - j2 - m3)
        * sqrt(
            factorial(j1 + j2 - j3)
            * factorial(j1 - j2 + j3)
            * factorial(-j1 + j2 + j3)
            / factorial(j1 + j2 + j3 + 1)
            * factorial(j1 + m1)
            * factorial(j1 - m1)
            * factorial(j2 + m2)
            * factorial(j2 - m2)
            * factorial(j3 + m3)
            * factorial(j3 - m3)
        )
    )
def Wigner6j(j1, j2, j3, J1, J2, J3):
    r"""
    Evaluates Wigner 6-j symbol

    Args:
        j1,j2,j3,J1,J2,J3 (float): parameters of
            :math:`\left\{ \begin{matrix}j_1 & j_2 & j_3 \\ J_1 & J_2 & J_3\end{matrix}\right\}`

    Returns:
        float: value of the 6-j symbol.

    Raises:
        ValueError: if arguments are not (half-)integers or do not satisfy
            the triangular conditions.
    """
    # ======================================================================
    # Calculating the Wigner6j-Symbols using the Racah-Formula
    # Author: Ulrich Krohn
    # Date: 13th November 2009
    #
    # Based upon Wigner3j.m from David Terr, Raytheon
    # Reference: http://mathworld.wolfram.com/Wigner6j-Symbol.html
    #
    # Usage:
    # from wigner import Wigner6j
    # WignerReturn = Wigner6j(j1,j2,j3,J1,J2,J3)
    #
    # / j1 j2 j3 \
    # < >
    # \ J1 J2 J3 /
    #
    # ======================================================================
    # Check that the js and Js are only integer or half integer
    if (
        (2 * j1 != roundPy2(2 * j1))
        | (2 * j2 != roundPy2(2 * j2))
        | (2 * j3 != roundPy2(2 * j3))
        | (2 * J1 != roundPy2(2 * J1))
        | (2 * J2 != roundPy2(2 * J2))
        | (2 * J3 != roundPy2(2 * J3))
    ):
        raise ValueError("All arguments must be integers or half-integers.")

    # Check if the 4 triads ( (j1 j2 j3), (j1 J2 J3), (J1 j2 J3), (J1 J2 j3) )
    # satisfy the triangular inequalities
    if (
        (abs(j1 - j2) > j3)
        | (j1 + j2 < j3)
        | (abs(j1 - J2) > J3)
        | (j1 + J2 < J3)
        | (abs(J1 - j2) > J3)
        | (J1 + j2 < J3)
        | (abs(J1 - J2) > j3)
        | (J1 + J2 < j3)
    ):
        raise ValueError("6j-Symbol is not triangular!")

    # Check if the sum of the elements of each triad is an integer
    if (
        (2 * (j1 + j2 + j3) != roundPy2(2 * (j1 + j2 + j3)))
        | (2 * (j1 + J2 + J3) != roundPy2(2 * (j1 + J2 + J3)))
        | (2 * (J1 + j2 + J3) != roundPy2(2 * (J1 + j2 + J3)))
        | (2 * (J1 + J2 + j3) != roundPy2(2 * (J1 + J2 + j3)))
    ):
        raise ValueError("6j-Symbol is not triangular!")

    # if possible, use precalculated values
    global wignerPrecal
    if wignerPrecal and (
        (roundPy2(2 * j2) >= -0.1)
        and (roundPy2(2 * j2) <= 2.1)
        and (J2 == 1 or J2 == 2)
        and (j1 <= wignerPrecalJmax)
        and (J3 <= wignerPrecalJmax)
        and (abs(roundPy2(j1) - j1) < 0.1)
        and (abs(roundPy2(J3) - J3) < 0.1)
        and abs(j1 - J3) < 2.1
    ):
        # we have precalculated value (j1 and J3 are integers here, so they
        # can be used directly as table indices)
        return wignerPrecal6j[
            j1,
            2 + j1 - J3,
            round(roundPy2(2 + 2 * (j3 - j1))),
            round(roundPy2(2 + 2 * (J1 - J3))),
            J2 - 1,
            round(roundPy2(2 * j2)),
        ]

    # print("not in database %1.f %1.f %1.f %1.f %1.f %1.f" % (j1,j2,j3,J1,J2,J3))
    if j1 > 50 or j2 > 50 or j3 > 50 or J1 > 50 or J2 > 50 or J3 > 50:
        # usual implementation of coefficient calculation that uses factorials
        # would fail (overflow). Use the slower Sympy version instead.
        return float(
            sympyEvaluate(Wigner6j_sympy(j1, j2, j3, J1, J2, J3).doit())
        )

    # Arguments for the factorials
    t1 = j1 + j2 + j3
    t2 = j1 + J2 + J3
    t3 = J1 + j2 + J3
    t4 = J1 + J2 + j3
    t5 = j1 + j2 + J1 + J2
    t6 = j2 + j3 + J2 + J3
    t7 = j1 + j3 + J1 + J3

    # Finding summation borders
    tmin = max(0, max(t1, max(t2, max(t3, t4))))
    tmax = min(t5, min(t6, t7))
    tvec = np.arange(tmin, tmax + 1, 1)

    # Calculation the sum part of the 6j-Symbol
    WignerReturn = 0
    for t in tvec:
        WignerReturn += (
            (-1) ** t
            * factorial(t + 1)
            / (
                factorial(t - t1)
                * factorial(t - t2)
                * factorial(t - t3)
                * factorial(t - t4)
                * factorial(t5 - t)
                * factorial(t6 - t)
                * factorial(t7 - t)
            )
        )

    # Calculation of the 6j-Symbol
    return WignerReturn * sqrt(
        TriaCoeff(j1, j2, j3)
        * TriaCoeff(j1, J2, J3)
        * TriaCoeff(J1, j2, J3)
        * TriaCoeff(J1, J2, j3)
    )
def TriaCoeff(a, b, c):
# Calculating the triangle coefficient
return (
factorial(a + b - c)
* factorial(a - b + c)
* factorial(-a + b + c)
/ (factorial(a + b + c + 1))
)
# copied from https://sites.google.com/site/theodoregoetz/notes/wignerdfunction
# Jojann Goetz
def _wignerd(j, m, n=0, approx_lim=10):
"""
Wigner "small d" matrix. (Euler z-y-z convention)
example::
j = 2
m = 1
n = 0
beta = linspace(0,pi,100)
wd210 = _wignerd(j,m,n)(beta)
some conditions have to be met::
j >= 0
-j <= m <= j
-j <= n <= j
The approx_lim determines at what point
bessel functions are used. Default is when::
j > m+10
# and
j > n+10
for integer l and n=0, we can use the spherical harmonics. If in
addition m=0, we can use the ordinary legendre polynomials.
"""
if (j < 0) or (abs(m) > j) or (abs(n) > j):
raise ValueError(
"_wignerd(j = {0}, m = {1}, n = {2}) value error.".format(j, m, n)
+ " Valid range for parameters: j>=0, -j<=m,n<=j."
)
if (j > (m + approx_lim)) and (j > (n + approx_lim)):
# print('bessel (approximation)')
return lambda beta: jv(m - n, j * beta)
if (floor(j) == j) and (n == 0):
if m == 0:
# print('legendre (exact)')
return lambda beta: legendre(j)(cos(beta))
elif False:
# print('spherical harmonics (exact)')
a = sqrt(4.0 * pi / (2.0 * j + 1.0))
return lambda beta: a * conjugate(sph_harm(m, j, beta, 0.0))
jmn_terms = {
j + n: (m - n, m - n),
j - n: (n - m, 0.0),
j + m: (n - m, 0.0),
j - m: (m - n, m - n),
}
k = min(jmn_terms)
a, lmb = jmn_terms[k]
b = 2.0 * j - 2.0 * k - a
if (a < 0) or (b < 0):
raise ValueError(
"_wignerd(j = {0}, m = {1}, n = {2}) value error.".format(j, m, n)
+ " Encountered negative values in (a,b) = ({0},{1})".format(a, b)
)
coeff = (
power(-1.0, lmb)
* sqrt(comb(2.0 * j - k, k + a))
* (1.0 / sqrt(comb(k + b, b)))
)
# print('jacobi (exact)')
return (
lambda beta: coeff
* power(sin(0.5 * beta), a)
* power(cos(0.5 * beta), b)
* jacobi(k, a, b)(cos(beta))
)
def _wignerD(j, m, n=0, approx_lim=10):
    """
    Wigner D-function. (Euler z-y-z convention)

    This returns a function of 2 to 3 Euler angles:
        (alpha, beta, gamma)
    gamma defaults to zero and does not need to be
    specified.

    The approx_lim determines at what point
    bessel functions are used. Default is when:
        j > m+10
          and
        j > n+10

    usage::
        from numpy import linspace, meshgrid
        a = linspace(0, 2*pi, 100)
        b = linspace(0, pi, 100)
        aa,bb = meshgrid(a,b)
        j,m,n = 1,1,1
        zz = _wignerD(j,m,n)(aa,bb)
    """

    def dFunction(alpha, beta, gamma=0):
        # phase factors from the two z-rotations times the small-d element
        return (
            exp(-1j * m * alpha)
            * _wignerd(j, m, n, approx_lim)(beta)
            * exp(-1j * n * gamma)
        )

    return dFunction
def CG(j1, m1, j2, m2, j3, m3):
    r"""
    Clebsch–Gordan (CG) coefficients

    Args:
        j1,m1,j2,m2,j3,m3: parameters of
            :math:`\langle j_1, m_1, j_2, m_2 | j_1, j_2, j_3, m_3 \rangle`
    """
    # standard conversion between the 3-j symbol and the CG coefficient
    threeJ = Wigner3j(j1, j2, j3, m1, m2, -m3)
    return threeJ * sqrt(2 * j3 + 1) * (-1) ** (j1 - j2 + m3)
class WignerDmatrix:
    """
    WignerD matrices for different `j` states in a specified rotated basis.

    This matrix converts components of angular momentum `j` given in one
    basis into components of angular momentum calculated in the basis
    which is rotated by `theta` around y-axis, and then by `phi` around
    z-axis. Use::

        wgd = WignerDmatrix(theta,phi)
        # let's rotate state with angular momentum 1
        dMatrix = wgd.get(j)
        stateNewBasis = dMatrix.dot(stateOldBasis)

    Args:
        theta (float): rotation around y-axis
        phi (float): rotation around z-axis
        gamma (float): optional, first rotation around z-axis (rotations are
            in order z-y-z, by gamma, theta and phi respectively)
            By default 0.
    """

    def __init__(self, theta, phi, gamma=0.0):
        # cache of computed matrices, one entry per distinct 2*j
        self.matSaved = []
        # matLoc[round(2*j)] is a 1-based index into matSaved; 0 = not cached
        self.matLoc = np.zeros(100, dtype=np.int8)

        self.theta = theta
        self.phi = phi
        self.gamma = gamma
        # zero rotation: matrix is the identity, flag so get() can
        # short-circuit
        if (
            abs(self.theta) < 1e-5
            and abs(self.phi) < 1e-5
            and abs(self.gamma) < 1e-5
        ):
            self.trivial = True
        else:
            self.trivial = False

    def get(self, j):
        """
        WignerD matrix for specified basis for states with angular
        momenutum `j`.

        Args:
            j (float): angular momentum of states.

        Returns:
            matrix of dimensions (2*j+1,2*j+1).
            `state in new basis = wignerDmatrix * state in original basis`
        """
        if self.trivial:
            # no rotation: sparse identity of the right dimension
            return sparse_eye(
                round(roundPy2(2.0 * j + 1.0)),
                round(roundPy2(2.0 * j + 1.0)),
                dtype=np.complex128,
            )

        savedIndex = self.matLoc[round(roundPy2(2 * j))]
        if savedIndex != 0:
            return self.matSaved[savedIndex - 1]
        # because 0 marks "no entry" while matrix numbering starts from
        # zero, the savedIndex array is offset by 1

        # else: build the (2j+1) x (2j+1) matrix element by element
        mat = np.zeros(
            (round(roundPy2(2.0 * j + 1.0)), round(roundPy2(2.0 * j + 1.0))),
            dtype=np.complex128,
        )
        jrange = np.linspace(-j, j, round(2 * j) + 1)
        maxIndex = round(2 * j) + 1

        for index1 in xrange(maxIndex):
            for index2 in xrange(maxIndex):
                mat[index1, index2] = _wignerD(
                    j, jrange[index1], jrange[index2]
                )(self.phi, self.theta, self.gamma)

        # store sparse and memoize for subsequent calls
        mat = csr_matrix(mat)
        self.matSaved.append(mat)
        self.matLoc[round(roundPy2(2 * j))] = len(self.matSaved)

        return mat
from __future__ import division, print_function, absolute_import
from .alkali_atom_functions import AlkaliAtom
from scipy.constants import physical_constants
from scipy.constants import Rydberg as C_Rydberg
from scipy.constants import m_e as C_m_e
from scipy.constants import c as C_c
from math import log
__all__ = [
"Hydrogen",
"Caesium",
"Cesium",
"Rubidium85",
"Rubidium",
"Rubidium87",
"Lithium6",
"Lithium7",
"Sodium",
"Potassium",
"Potassium39",
"Potassium40",
"Potassium41",
]
class Hydrogen(AlkaliAtom):
    """
    Properties of hydrogen atoms
    """

    ionisationEnergy = 13.598433  #: (eV), Ref. [#c8]_.
    Z = 1

    scaledRydbergConstant = (
        109677.5834
        * 1.0e2
        * physical_constants["inverse meter-electron volt relationship"][0]
    )

    # NOTE: below data is from NIST Atomic Spectra Database (ASD, ver. 5.5.6)
    # Even more precise Hydrogen level data is available if needed on
    # U.D. Jentschura, S. Kotochigova, E.O. LeBigot, P.J. Mohr, and B.N. Taylor (2005),
    # The Energy Levels of Hydrogen and Deuterium (version 2.1). [Online]
    # Available: http://physics.nist.gov/HDEL [2018, May 3].
    # National Institute of Standards and Technology, Gaithersburg, MD.
    levelDataFromNIST = "h_NIST_level_data.ascii"
    NISTdataLevels = 12

    precalculatedDB = "h_precalculated.db"
    dipoleMatrixElementFile = "h_dipole_matrix_elements.npy"
    quadrupoleMatrixElementFile = "h_quadrupole_matrix_elements.npy"

    groundStateN = 1
    minQuantumDefectN = 8

    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    mass = 1.00782503223 * physical_constants["atomic mass constant"][0]

    # All model-potential coefficients are zero: the hydrogen core is a
    # bare proton, so the core potential in `potential` is purely Coulombic.
    a1 = [0.0, 0.0, 0.0, 0.0]
    a2 = [0.0, 0.0, 0.0, 0.0]
    a3 = [0.0, 0.0, 0.0, 0.0]
    a4 = [0.0, 0.0, 0.0, 0.0]
    rc = [0.0, 0.0, 0.0, 0.0]

    def potential(self, l, s, j, r):
        """
        Total potential felt by the electron (atomic units): Coulomb
        core potential plus the spin-orbit interaction term.
        """
        # Returns total potential that electron feels = core potential + Spin-Orbit interaction
        return (
            -self.Z / r
            + self.alpha**2
            / (2.0 * r**3)
            * (j * (j + 1.0) - l * (l + 1.0) - s * (s + 1))
            / 2.0
        )

    def stateQuantumDefect(self, n, l, j):
        # hydrogen states have no quantum defect
        defect = 0.0
        return defect
class Caesium(AlkaliAtom):
    """
    Properties of caesium atoms
    """

    # ALL PARAMETERES ARE IN ATOMIC UNITS (HATREE)
    alphaC = 15.6440
    """
        model potential parameters from [#c1]_

    """
    #
    a1 = [3.49546309, 4.69366096, 4.32466196, 3.01048361]
    """
        model potential parameters from [#c1]_

    """
    a2 = [1.47533800, 1.71398344, 1.61365288, 1.40000001]
    """
        model potential parameters from [#c1]_

    """
    a3 = [-9.72143084, -24.65624280, -6.70128850, -3.20036138]
    """
        model potential parameters from [#c1]_

    """
    a4 = [0.02629242, -0.09543125, -0.74095193, 0.00034538]
    """
        model potential parameters from [#c1]_

    """
    rc = [1.92046930, 2.13383095, 0.93007296, 1.99969677]
    """
        model potential parameters from [#c1]_

    """
    Z = 55
    I = 3.5  # 7/2

    #: (eV), Ref. [#jd2016]_.
    ionisationEnergy = (
        31406.4677325
        * 1.0e2
        * physical_constants["inverse meter-electron volt relationship"][0]
    )

    NISTdataLevels = 25

    # quantum defect table layout:
    # first index [0]: j-1/2 [1]: j+1/2
    # second index [0..4] : s,p,d,f,g
    # third index [delta0,delta2...]
    quantumDefect = [
        [
            [4.04935665, 0.2377037, 0.255401, 0.00378, 0.25486, 0.0],
            [3.59158950, 0.360926, 0.41905, 0.64388, 1.45035, 0.0],
            [2.4754562, 0.009320, -0.43498, -0.76358, -18.0061, 0.0],
            [0.03341424, -0.198674, 0.28953, -0.2601, 0.0, 0.0],
            [0.00703865, -0.049252, 0.01291, 0.0, 0.0, 0.0],
        ],
        [
            [4.04935665, 0.2377037, 0.255401, 0.00378, 0.25486, 0.0],
            [3.5589599, 0.392469, -0.67431, 22.3531, -92.289, 0.0],
            [2.46631524, 0.013577, -0.37457, -2.1867, -1.5532, -56.6739],
            [0.03341424, -0.198674, 0.28953, -0.2601, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
    ]
    """
        quantum defects for :math:`S_{1/2}`, :math:`nP_{1/2}`, :math:`D_{5/2}`,
        :math:`F_{5/2}` and :math:`G_{7/2}` are from [#Weber1987]_, while
        quantum defects for :math:`nP_{3/2}`,:math:`D_{3/2}` are from [#Lorenzen1984]_,

        Note:
            f_7/2 quantum defects are PUT TO BE EXACTLY the same as f_5/2 (~10MHz difference?!)

    """

    minQuantumDefectN = 9

    levelDataFromNIST = "cs_NIST_level_data.ascii"

    precalculatedDB = "cs_precalculated.db"
    dipoleMatrixElementFile = "cs_dipole_matrix_elements.npy"
    quadrupoleMatrixElementFile = "cs_quadrupole_matrix_elements.npy"

    literatureDMEfilename = "caesium_literature_dme.csv"

    #: levels that are for smaller n than ground level, but are above in energy due to angular part
    extraLevels = [
        [5, 2, 2 + 0.5],
        [5, 2, 2 - 0.5],
        [5, 3, 3 + 0.5],
        [5, 3, 3 - 0.5],
        [5, 4, 4 + 0.5],
        [5, 4, 4 - 0.5],
        [4, 3, 3 + 0.5],
        [4, 3, 3 - 0.5],
    ]

    groundStateN = 6

    mass = 132.9054519610 * physical_constants["atomic mass constant"][0]
    abundance = 1.000

    #: in eV
    scaledRydbergConstant = (
        (mass - C_m_e)
        / (mass)
        * C_Rydberg
        * physical_constants["inverse meter-electron volt relationship"][0]
    )

    elementName = "Cs133"
    meltingPoint = 28.44 + 273.15  #: in K

    #: source of HFS magnetic dipole and quadrupole constants
    hyperfineStructureData = "cs_hfs_data.csv"

    gL = 0.99999587  #: Electron orbital g-factor [#SteckCs]_
    gI = -0.00039885395  #: Nuclear g-factor [#SteckCs]_

    def getPressure(self, temperature):
        """
        Pressure of atomic vapour at given temperature.

        Uses equation and values from [#c3]_. Values from table 2.
        (accuracy +- 5%) are used for Cs in solid phase. Values from table 3.
        (accuracy +-1 %) are used for Cs in liquid phase.

        Args:
            temperature (float): temperature in K

        Returns:
            float: pressure in Pa (0 if outside the interpolation range)
        """
        # returns pressure in Pa for temperature in K

        if temperature < self.meltingPoint:
            # Cs is in solid phase (from table 2. for recommended equations / +-5%)
            return 10.0 ** (2.881 + 4.711 - 3999.0 / temperature) * 133.322368

        elif temperature < 550.0 + 273.15:
            # Cs is in liquid phase (from table 3. of the cited reference "precisely fitted equations / +- 1%)
            return (
                10.0
                ** (
                    2.881
                    + 8.232
                    - 4062.0 / temperature
                    - 1.3359 * log(temperature) / log(10.0)
                )
                * 133.322368
            )
        else:
            print(
                "ERROR: Cs vapour pressure above 550 C is unknown \
                    (limits of experimental interpolation)"
            )
            return 0

    def getPressureOld(self, temperature):
        """
        Deprecated vapour-pressure formula, kept only for reference;
        use :obj:`getPressure` instead.
        """
        # returns pressure in Pa for temperature in K

        # from A.N.Nesmeyanov, Vapor Pressure of the Chemical Elements (Elsevier, Amsterdam, 1963). English edition
        # edited by Robert Gary
        # as was found in Steck Alkali metal data, revision 1.6, 14 October 2003

        print(
            "WARNING: getPressureOld is provided just for reference for \
                the old versions of the programme"
        )
        print("New programmes should use getPressure function instead !")

        if temperature < 28.44 + 273.15:
            # Cs is in solid phase
            return (
                10.0
                ** (
                    -219.482
                    + 1088.676 / temperature
                    - 0.08336185 * temperature
                    + 94.88752 * log(temperature) / log(10.0)
                )
                * 133.322368
            )

        elif temperature < 671 + 273.15:
            # Cs is in liquid phase
            return (
                10.0
                ** (
                    8.22127
                    - 4006.048 / temperature
                    - 0.00060194 * temperature
                    - 0.19623 * log(temperature) / log(10.0)
                )
                * 133.322368
            )
        else:
            print(
                "ERROR: Cs vapour pressure above 671 C is unknown \
                    (limits of experimental interpolation)"
            )
            return 0
class Cesium(Caesium):
    """
    Support for the American English spelling of caesium.

    Identical to :obj:`Caesium` in every respect.
    """
class Rubidium85(AlkaliAtom):
    """
    Properties of rubidium 85 atoms
    """
    # ALL PARAMETERS ARE IN ATOMIC UNITS (HARTREE)
    alphaC = 9.0760
    """
    model potential parameters from [#c1]_
    """
    a1 = [3.69628474, 4.44088978, 3.78717363, 2.39848933]
    """
    model potential parameters from [#c1]_
    """
    a2 = [1.64915255, 1.92828831, 1.57027864, 1.76810544]
    """
    model potential parameters from [#c1]_
    """
    a3 = [-9.86069196, -16.79597770, -11.65588970, -12.07106780]
    """
    model potential parameters from [#c1]_
    """
    a4 = [0.19579987, -0.8163314, 0.52942835, 0.77256589]
    """
    model potential parameters from [#c1]_
    """
    rc = [1.66242117, 1.50195124, 4.86851938, 4.79831327]
    """
    model potential parameters from [#c1]_
    """
    Z = 37
    I = 2.5  # nuclear spin 5/2
    NISTdataLevels = 77
    #: (eV) Ref. [#Sanguinetti2009]_
    ionisationEnergy = (
        (1010.024700e12)
        / C_c
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    quantumDefect = [
        [
            [3.1311804, 0.1784, 0.0, 0.0, 0.0, 0.0],
            [2.6548849, 0.2900, 0.0, 0.0, 0.0, 0.0],
            [1.34809171, -0.60286, 0.0, 0.0, 0.0, 0.0],
            [0.0165192, -0.085, 0.0, 0.0, 0.0, 0.0],
            [0.0039990, -0.0202, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [3.1311804, 0.1784, 0.0, 0.0, 0.0, 0.0],
            [2.6416737, 0.2950, 0.0, 0.0, 0.0, 0.0],
            [1.34646572, -0.59600, 0.0, 0.0, 0.0, 0.0],
            [0.0165437, -0.086, 0.0, 0.0, 0.0, 0.0],
            [0.0039990, -0.0202, 0.0, 0.0, 0.0, 0.0],
        ],
    ]
    """
    quantum defects for :math:`nF` states are
    from [#c5]_. Quantum defects for :math:`nG` states are
    from [#Raithel2020]_. All other quantum defects are from [#c4]_
    """
    levelDataFromNIST = "rb_NIST_level_data.ascii"
    dipoleMatrixElementFile = "rb_dipole_matrix_elements.npy"
    quadrupoleMatrixElementFile = "rb_quadrupole_matrix_elements.npy"
    minQuantumDefectN = 8
    precalculatedDB = "rb85_precalculated.db"
    literatureDMEfilename = "rubidium_literature_dme.csv"
    #: levels that are for smaller n than ground level, but are above in energy due to angular part
    extraLevels = [
        [4, 2, 2 + 0.5],
        [4, 2, 2 - 0.5],
        [4, 3, 3 + 0.5],
        [4, 3, 3 - 0.5],
    ]
    groundStateN = 5
    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    mass = 84.9117897379 * physical_constants["atomic mass constant"][0]
    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    abundance = 0.7217
    #: in eV; Rydberg constant corrected for the reduced electron mass
    scaledRydbergConstant = (
        (mass - C_m_e)
        / (mass)
        * C_Rydberg
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    elementName = "Rb85"
    meltingPoint = 39.31 + 273.15  #: in K
    #: source of HFS magnetic dipole and quadrupole constants
    hyperfineStructureData = "rb85_hfs_data.csv"
    gL = 0.99999354  #: Electron orbital g-factor [#Steck85Rb]_
    gI = -0.00029364000  #: Nuclear g-factor [#Steck85Rb]_
    def getPressure(self, temperature):
        """
        Pressure of atomic vapour at given temperature.

        Returns pressure in Pa for temperature given in K.
        Uses equation and values from [#c3]_. Values from table 2.
        (accuracy +- 5%) are used for Rb in solid phase. Values from table 3.
        (accuracy +-1 %) are used for Rb in liquid phase.
        """
        if temperature < self.meltingPoint:
            # Rb is in solid phase (from table 2. for recommended equations / +-5%)
            return 10.0 ** (2.881 + 4.857 - 4215.0 / temperature) * 133.322368
        elif temperature < 550.0 + 273.15:
            # Rb is in liquid phase (from table 3. of the cited reference "precisely fitted equations / +- 1%)
            return (
                10.0
                ** (
                    2.881
                    + 8.316
                    - 4275.0 / temperature
                    - 1.3102 * log(temperature) / log(10.0)
                )
                * 133.322368
            )
        else:
            print(
                "ERROR: Rb vapour pressure above 550 C is unknown \
    (limits of experimental interpolation)"
            )
            return 0
class Rubidium(Rubidium85):
    """
    Backward-compatibility alias.

    Historically only one Rubidium class existed, and it corresponded to
    rubidium 85; new code should use :obj:`Rubidium85` directly.
    """
class Rubidium87(AlkaliAtom):
    """
    Properties of rubidium 87 atoms
    """
    # ALL PARAMETERS ARE IN ATOMIC UNITS (HARTREE)
    alphaC = 9.0760
    """
    model potential parameters from [#c1]_
    """
    a1 = [3.69628474, 4.44088978, 3.78717363, 2.39848933]
    """
    model potential parameters from [#c1]_
    """
    a2 = [1.64915255, 1.92828831, 1.57027864, 1.76810544]
    """
    model potential parameters from [#c1]_
    """
    a3 = [-9.86069196, -16.79597770, -11.65588970, -12.07106780]
    """
    model potential parameters from [#c1]_
    """
    a4 = [0.19579987, -0.8163314, 0.52942835, 0.77256589]
    """
    model potential parameters from [#c1]_
    """
    rc = [1.66242117, 1.50195124, 4.86851938, 4.79831327]
    """
    model potential parameters from [#c1]_
    """
    Z = 37
    I = 1.5  # nuclear spin 3/2
    NISTdataLevels = 77
    #: (eV) Ref. [#Mack2011]_
    ionisationEnergy = (
        (1010.0291646e12 - 4.271676631815181e9)
        / C_c
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    quantumDefect = [
        [
            [3.1311804, 0.1784, 0.0, 0.0, 0.0, 0.0],
            [2.6548849, 0.2900, 0.0, 0.0, 0.0, 0.0],
            [1.34809171, -0.60286, 0.0, 0.0, 0.0, 0.0],
            [0.0165192, -0.085, 0.0, 0.0, 0.0, 0.0],
            [0.00405, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [3.1311804, 0.1784, 0.0, 0.0, 0.0, 0.0],
            [2.6416737, 0.2950, 0.0, 0.0, 0.0, 0.0],
            [1.34646572, -0.59600, 0.0, 0.0, 0.0, 0.0],
            [0.0165437, -0.086, 0.0, 0.0, 0.0, 0.0],
            [0.00405, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
    ]
    """
    quantum defects for :math:`nF` states are
    from [#c5]_. Quantum defects for :math:`nG` states are
    from [#Afrousheh2006a]_. All other quantum defects are from [#c4]_
    """
    levelDataFromNIST = "rb_NIST_level_data.ascii"
    dipoleMatrixElementFile = "rb_dipole_matrix_elements.npy"
    quadrupoleMatrixElementFile = "rb_quadrupole_matrix_elements.npy"
    minQuantumDefectN = 8
    precalculatedDB = "rb87_precalculated.db"
    literatureDMEfilename = "rubidium_literature_dme.csv"
    #: levels that are for smaller n than ground level, but are above in energy due to angular part
    extraLevels = [
        [4, 2, 2 + 0.5],
        [4, 2, 2 - 0.5],
        [4, 3, 3 + 0.5],
        [4, 3, 3 - 0.5],
    ]
    groundStateN = 5
    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    mass = 86.9091805310 * physical_constants["atomic mass constant"][0]
    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    abundance = 0.2783
    #: in eV (M_ion core = m_atomic - m_electron)
    scaledRydbergConstant = (
        (mass - C_m_e)
        / (mass)
        * C_Rydberg
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    # previously used value:
    # 109736.605*1.e2 \
    # *physical_constants["inverse meter-electron volt relationship"][0]
    elementName = "Rb87"
    meltingPoint = 39.31 + 273.15  #: in K
    #: source of HFS magnetic dipole and quadrupole constants
    hyperfineStructureData = "rb87_hfs_data.csv"
    gL = 0.99999369  #: Electron orbital g-factor [#Steck87Rb]_
    gI = -0.0009951414  #: Nuclear g-factor [#Steck87Rb]_
    def getPressure(self, temperature):
        """
        Pressure of atomic vapour at given temperature.

        Returns pressure in Pa for temperature given in K.
        Uses equation and values from [#c3]_. Values from table 2.
        (accuracy +- 5%) are used for Rb in solid phase. Values from table 3.
        (accuracy +-1 %) are used for Rb in liquid phase.
        """
        if temperature < self.meltingPoint:
            # Rb is in solid phase (from table 2. for recommended equations / +-5%)
            return 10.0 ** (2.881 + 4.857 - 4215.0 / temperature) * 133.322368
        elif temperature < 550.0 + 273.15:
            # Rb is in liquid phase (from table 3. of the cited reference "precisely fitted equations / +- 1%)
            return (
                10.0
                ** (
                    2.881
                    + 8.316
                    - 4275.0 / temperature
                    - 1.3102 * log(temperature) / log(10.0)
                )
                * 133.322368
            )
        else:
            print(
                "ERROR: Rb vapour pressure above 550 C is unknown \
    (limits of experimental interpolation)"
            )
            return 0
class Lithium6(AlkaliAtom):  # Li
    """
    Properties of lithium 6 atoms
    """
    # ALL PARAMETERS ARE IN ATOMIC UNITS (HARTREE)
    alphaC = 0.1923
    """
    model potential parameters from [#c1]_
    """
    # model potential parameters from Marinescu et.al, PRA 49:982 (1994)
    a1 = [2.47718079, 3.45414648, 2.51909839, 2.51909839]
    """
    model potential parameters from [#c1]_
    """
    a2 = [1.84150932, 2.55151080, 2.43712450, 2.43712450]
    """
    model potential parameters from [#c1]_
    """
    a3 = [-0.02169712, -0.21646561, 0.32505524, 0.32505524]
    """
    model potential parameters from [#c1]_
    """
    a4 = [-0.11988362, -0.06990078, 0.10602430, 0.10602430]
    """
    model potential parameters from [#c1]_
    """
    rc = [0.61340824, 0.61566441, 2.34126273, 2.34126273]
    """
    model potential parameters from [#c1]_
    """
    Z = 3
    I = 1.0
    NISTdataLevels = 42
    # (eV) from Ref. [#c7]_
    ionisationEnergy = (
        43487.15
        * 1.0e2
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    # PRA 34, 2889 (1986); and (for D_J and F_J) from Physica Scripta 27:300-305 (1983)
    quantumDefect = [
        [
            [0.3995101, 0.0290, 0.0, 0.0, 0.0, 0.0],
            [0.0471835, -0.024, 0.0, 0.0, 0.0, 0.0],
            [0.002129, -0.01491, 0.1759, -0.8507, 0.0, 0.0],
            [-0.000077, 0.021856, -0.4211, 2.3891, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [0.3995101, 0.0290, 0.0, 0.0, 0.0, 0.0],
            [0.0471720, -0.024, 0.0, 0.0, 0.0, 0.0],
            [0.002129, -0.01491, 0.1759, -0.8507, 0.0, 0.0],
            [-0.000077, 0.021856, -0.4211, 2.3891, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
    ]
    """
    quantum defects for :math:`nS` and :math:`nP` are from Ref. [#c6]_ .
    Quantum defects for :math:`D_j` and :math:`F_j` are from Ref. [#c7]_
    (note that these defects in Ref. [#c7]_ are for Li7; differences
    are expected not to be too big).
    """
    levelDataFromNIST = "li_NIST_level_data.ascii"
    dipoleMatrixElementFile = "li6_dipole_matrix_elements.npy"
    quadrupoleMatrixElementFile = "li6_quadrupole_matrix_elements.npy"
    minQuantumDefectN = 4
    precalculatedDB = "li6_precalculated.db"
    # levels that are for smaller n than ground level, but are above in energy
    # due to angular part
    extraLevels = []
    groundStateN = 2
    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    mass = 6.0151228874 * physical_constants["atomic mass constant"][0]
    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    abundance = 0.0759
    gL = 1 - physical_constants["electron mass"][0] / mass
    #: in eV; Rydberg constant corrected for the reduced electron mass
    scaledRydbergConstant = (
        (mass - C_m_e)
        / (mass)
        * C_Rydberg
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    elementName = "Li6"
    meltingPoint = 180.54 + 273.15  #: in K
    #: source of HFS magnetic dipole and quadrupole constants
    hyperfineStructureData = "li6_hfs_data.csv"
    def getPressure(self, temperature):
        """
        Pressure of atomic vapour at given temperature.

        Returns pressure in Pa for temperature given in K.
        Uses equation and values from [#c3]_. Values from table 3.
        (accuracy +-1 %) are used both for liquid and solid phase of Li.
        """
        if temperature < self.meltingPoint:
            # Li is in solid phase (from table 3. of the cited reference
            # "precisely fitted equations" / +- 1%)
            return (
                10.0
                ** (
                    2.881
                    + 7.790
                    - 8423.0 / temperature
                    - 0.7074 * log(temperature) / log(10.0)
                )
                * 133.322368
            )
        elif temperature < 1000.0 + 273.15:
            # Li is in liquid phase (from table 3. of the cited reference
            # "precisely fitted equations" / +- 1%)
            return (
                10.0
                ** (
                    2.881
                    + 8.409
                    - 8320.0 / temperature
                    - 1.0255 * log(temperature) / log(10.0)
                )
                * 133.322368
            )
        else:
            print(
                "ERROR: Li vapour pressure above 1000 C is unknown \
    (limits of experimental interpolation)"
            )
            return 0
class Lithium7(AlkaliAtom):  # Li
    """
    Properties of lithium 7 atoms
    """
    # ALL PARAMETERS ARE IN ATOMIC UNITS (HARTREE)
    # model potential parameters from Marinescu et.al, PRA 49:982 (1994)
    alphaC = 0.1923
    """
    model potential parameters from [#c1]_
    """
    a1 = [2.47718079, 3.45414648, 2.51909839, 2.51909839]
    """
    model potential parameters from [#c1]_
    """
    a2 = [1.84150932, 2.55151080, 2.43712450, 2.43712450]
    """
    model potential parameters from [#c1]_
    """
    a3 = [-0.02169712, -0.21646561, 0.32505524, 0.32505524]
    """
    model potential parameters from [#c1]_
    """
    a4 = [-0.11988362, -0.06990078, 0.10602430, 0.10602430]
    """
    model potential parameters from [#c1]_
    """
    rc = [0.61340824, 0.61566441, 2.34126273, 2.34126273]
    """
    model potential parameters from [#c1]_
    """
    Z = 3
    I = 1.5  # nuclear spin 3/2
    NISTdataLevels = 42
    ionisationEnergy = 5.391719  #: (eV) NIST Ref. [#c11]_.
    quantumDefect = [
        [
            [0.3995101, 0.0290, 0.0, 0.0, 0.0, 0.0],
            [0.0471780, -0.024, 0.0, 0.0, 0.0, 0.0],
            [0.002129, -0.01491, 0.1759, -0.8507, 0.0, 0.0],
            [-0.000077, 0.021856, -0.4211, 2.3891, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [0.3995101, 0.0290, 0.0, 0.0, 0.0, 0.0],
            [0.0471665, -0.024, 0.0, 0.0, 0.0, 0.0],
            [0.002129, -0.01491, 0.1759, -0.8507, 0.0, 0.0],
            [-0.000077, 0.021856, -0.4211, 2.3891, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
    ]
    """
    quantum defects for :math:`nS` and :math:`nP` states are
    from Ref. [#c6]_. Quantum defects for :math:`D_j` and :math:`F_j`
    states are from [#c7]_.
    """
    levelDataFromNIST = "li_NIST_level_data.ascii"
    dipoleMatrixElementFile = "li7_dipole_matrix_elements.npy"
    quadrupoleMatrixElementFile = "li7_quadrupole_matrix_elements.npy"
    minQuantumDefectN = 4
    precalculatedDB = "li7_precalculated.db"
    # levels that are for smaller n than ground level,
    # but are above in energy due to angular part
    extraLevels = []
    groundStateN = 2
    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    mass = 7.0160034366 * physical_constants["atomic mass constant"][0]
    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    abundance = 0.9241
    gL = 1 - physical_constants["electron mass"][0] / mass
    #: in eV; Rydberg constant corrected for the reduced electron mass
    scaledRydbergConstant = (
        (mass - C_m_e)
        / (mass)
        * C_Rydberg
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    elementName = "Li7"
    meltingPoint = 180.54 + 273.15  #: in K
    #: source of HFS magnetic dipole and quadrupole constants
    hyperfineStructureData = "li7_hfs_data.csv"
    def getPressure(self, temperature):
        """
        Pressure of atomic vapour at given temperature (in K).

        Returns pressure in Pa.
        Uses equation and values from [#c3]_. Values from table 3.
        (accuracy +-1 %) are used for both liquid and solid phase of Li.
        """
        if temperature < self.meltingPoint:
            # Li is in solid phase (from table 3. of the cited reference
            # "precisely fitted equations" / +- 1%)
            return (
                10.0
                ** (
                    2.881
                    + 7.790
                    - 8423.0 / temperature
                    - 0.7074 * log(temperature) / log(10.0)
                )
                * 133.322368
            )
        elif temperature < 1000.0 + 273.15:
            # Li is in liquid phase (from table 3. of the cited reference
            # "precisely fitted equations" / +- 1%)
            return (
                10.0
                ** (
                    2.881
                    + 8.409
                    - 8320.0 / temperature
                    - 1.0255 * log(temperature) / log(10.0)
                )
                * 133.322368
            )
        else:
            print(
                "ERROR: Li vapour pressure above 1000 C is unknown \
    (limits of experimental interpolation)"
            )
            return 0
class Sodium(AlkaliAtom):  # Na23
    """
    Properties of sodium 23 atoms
    """
    #: ALL PARAMETERS ARE IN ATOMIC UNITS (HARTREE)
    alphaC = 0.9448
    """
    model potential parameters from [#c1]_
    """
    a1 = [4.82223117, 5.08382502, 3.53324124, 1.11056646]
    """
    model potential parameters from [#c1]_
    """
    a2 = [2.45449865, 2.18226881, 2.48697936, 1.05458759]
    """
    model potential parameters from [#c1]_
    """
    a3 = [-1.12255048, -1.19534623, -0.75688448, 1.73203428]
    """
    model potential parameters from [#c1]_
    """
    a4 = [-1.42631393, -1.03142861, -1.27852357, -0.09265696]
    """
    model potential parameters from [#c1]_
    """
    rc = [0.45489422, 0.45798739, 0.71875312, 28.6735059]
    """
    model potential parameters from [#c1]_
    """
    Z = 11
    I = 1.5  # nuclear spin 3/2
    NISTdataLevels = 20
    #: (eV) from Ref. [#c7]_
    ionisationEnergy = (
        41449.44
        * 1.0e2
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    quantumDefect = [
        [
            [1.347964, 0.060673, 0.0233, -0.0085, 0.0, 0.0],
            [0.855380, 0.11363, 0.0384, 0.1412, 0.0, 0.0],
            [0.015543, -0.08535, 0.7958, -4.0513, 0.0, 0.0],
            [0.001453, 0.017312, -0.7809, 7.021, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [1.347964, 0.060673, 0.0233, -0.0085, 0.0, 0.0],
            [0.854565, 0.114195, 0.0352, 0.1533, 0.0, 0.0],
            [0.015543, -0.08535, 0.7958, -4.0513, 0.0, 0.0],
            [0.001453, 0.017312, -0.7809, 7.021, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
    ]
    """
    Quantum defects are from Ref. [#c7]_. Note that we are using modified
    Rydberg-Ritz formula. In literature both modified and non-modified
    coefficients appear. For more details about the two equations see
    page 301. of Ref. [#c7]_.
    """
    levelDataFromNIST = "na_NIST_level_data.ascii"
    dipoleMatrixElementFile = "na23_dipole_matrix_elements.npy"
    quadrupoleMatrixElementFile = "na23_quadrupole_matrix_elements.npy"
    precalculatedDB = "na23_precalculated.db"
    literatureDMEfilename = "sodium_literature_dme.csv"
    # levels that are for smaller n than ground level, but are above in
    # energy due to angular part
    extraLevels = []
    groundStateN = 3
    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    mass = 22.9897692820 * physical_constants["atomic mass constant"][0]
    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    abundance = 1.00
    gL = 1 - physical_constants["electron mass"][0] / mass
    #: (eV)
    scaledRydbergConstant = (
        (mass - C_m_e)
        / (mass)
        * C_Rydberg
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    elementName = "Na23"
    meltingPoint = 97.794 + 273.15  #: in K
    #: source of HFS magnetic dipole and quadrupole constants
    hyperfineStructureData = "na23_hfs_data.csv"
    def getPressure(self, temperature):
        """
        Pressure of atomic vapour at given temperature.

        Returns pressure in Pa for temperature given in K.
        Uses equation and values from [#c3]_. Values from table 2.
        (accuracy +- 5%) are used for Na in solid phase. Values from table 3.
        (accuracy +-1 %) are used for Na in liquid phase.
        """
        if temperature < self.meltingPoint:
            # Na is in solid phase (from table 2. of the cited reference / +- 5%)
            return 10.0 ** (2.881 + 5.298 - 5603.0 / temperature) * 133.322368
        elif temperature < 700.0 + 273.15:
            # Na is in liquid phase (from table 3. of the cited reference
            # "precisely fitted equations" / +- 1%)
            return (
                10.0
                ** (
                    2.881
                    + 8.400
                    - 5634.0 / temperature
                    - 1.1748 * log(temperature) / log(10.0)
                )
                * 133.322368
            )
        else:
            print(
                "ERROR: Na vapour pressure above 700 C is unknown \
    (limits of experimental interpolation)"
            )
            return 0
class Potassium39(AlkaliAtom):
    """
    Properties of potassium 39 atoms
    """
    # ALL PARAMETERS ARE IN ATOMIC UNITS (HARTREE)
    alphaC = 5.3310
    """
    model potential parameters from [#c1]_
    """
    a1 = [3.56079437, 3.65670429, 4.12713694, 1.42310446]
    """
    model potential parameters from [#c1]_
    """
    a2 = [1.83909642, 1.67520788, 1.79837462, 1.27861156]
    """
    model potential parameters from [#c1]_
    """
    a3 = [-1.74701102, -2.07416615, -1.69935174, 4.77441476]
    """
    model potential parameters from [#c1]_
    """
    a4 = [-1.03237313, -0.89030421, -0.98913582, -0.94829262]
    """
    model potential parameters from [#c1]_
    """
    rc = [0.83167545, 0.85235381, 0.83216907, 6.50294371]
    """
    model potential parameters from [#c1]_
    """
    Z = 19
    I = 1.5  # nuclear spin 3/2
    NISTdataLevels = 46
    #: (eV), weighted average of values in Ref. [#c7]_.
    ionisationEnergy = (
        35009.8139375
        * 1.0e2
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    # quantum defects from Physica Scripta 27:300 (1983)
    quantumDefect = [
        [
            [2.1801985, 0.13558, 0.0759, 0.117, -0.206, 0.0],
            [1.713892, 0.233294, 0.16137, 0.5345, -0.234, 0.0],
            [0.27697, -1.024911, -0.709174, 11.839, -26.689, 0.0],
            [0.010098, -0.100224, 1.56334, -12.6851, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [2.1801985, 0.13558, 0.0759, 0.117, -0.206, 0.0],
            [1.710848, 0.235437, 0.11551, 1.1015, -2.0356, 0.0],
            [0.2771580, -1.025635, -0.59201, 10.0053, -19.0244, 0.0],
            [0.010098, -0.100224, 1.56334, -12.6851, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
    ]
    """
    quantum defects from Ref. [#c7]_.
    """
    levelDataFromNIST = "k_NIST_level_data.ascii"
    dipoleMatrixElementFile = "k_dipole_matrix_elements.npy"
    quadrupoleMatrixElementFile = "k_quadrupole_matrix_elements.npy"
    precalculatedDB = "k39_precalculated.db"
    literatureDMEfilename = "potassium_literature_dme.csv"
    #: levels that are for smaller n than ground level, but are above in energy due to angular part
    extraLevels = [[3, 2, 2 + 0.5], [3, 2, 2 - 0.5]]
    groundStateN = 4
    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    mass = 38.9637064864 * physical_constants["atomic mass constant"][0]
    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    abundance = 0.932581
    gL = 1 - physical_constants["electron mass"][0] / mass
    #: in eV
    scaledRydbergConstant = (
        (mass - C_m_e)
        / (mass)
        * C_Rydberg
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    elementName = "K39"
    meltingPoint = 63.5 + 273.15  #: in K
    #: source of HFS magnetic dipole and quadrupole constants
    hyperfineStructureData = "k39_hfs_data.csv"
    def getPressure(self, temperature):
        """
        Pressure of atomic vapour at given temperature.

        Returns pressure in Pa for temperature given in K.
        Uses equation and values from [#c3]_. Values from table 2.
        (accuracy +- 5%) are used for K in solid phase. Values from table 3.
        (accuracy +-1 %) are used for K in liquid phase.
        """
        if temperature < self.meltingPoint:
            # K is in solid phase (from table 2. of the cited reference / +- 5%)
            return 10.0 ** (2.881 + 4.961 - 4646.0 / temperature) * 133.322368
        elif temperature < 600.0 + 273.15:
            # K is in liquid phase (from table 3. of the cited reference
            # "precisely fitted equations" / +- 1%)
            return (
                10.0
                ** (
                    2.881
                    + 8.233
                    - 4693.0 / temperature
                    - 1.2403 * log(temperature) / log(10.0)
                )
                * 133.322368
            )
        else:
            print(
                "ERROR: K vapour pressure above 600 C is unknown \
    (limits of experimental interpolation)"
            )
            return 0
class Potassium(Potassium39):
    """
    Backward-compatibility alias.

    Historically only one Potassium class existed, and it corresponded to
    potassium 39; new code should use :obj:`Potassium39` directly.
    """
class Potassium40(AlkaliAtom):
    """
    Properties of potassium 40 atoms
    """
    # ALL PARAMETERS ARE IN ATOMIC UNITS (HARTREE)
    alphaC = 5.3310
    """
    model potential parameters from [#c1]_
    """
    a1 = [3.56079437, 3.65670429, 4.12713694, 1.42310446]
    """
    model potential parameters from [#c1]_
    """
    a2 = [1.83909642, 1.67520788, 1.79837462, 1.27861156]
    """
    model potential parameters from [#c1]_
    """
    a3 = [-1.74701102, -2.07416615, -1.69935174, 4.77441476]
    """
    model potential parameters from [#c1]_
    """
    a4 = [-1.03237313, -0.89030421, -0.98913582, -0.94829262]
    """
    model potential parameters from [#c1]_
    """
    rc = [0.83167545, 0.85235381, 0.83216907, 6.50294371]
    """
    model potential parameters from [#c1]_
    """
    Z = 19
    I = 4  # nuclear spin
    NISTdataLevels = 46
    #: (eV), weighted average of values in Ref. [#c7]_.
    ionisationEnergy = (
        35009.8139375
        * 1.0e2
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    # quantum defects from Physica Scripta 27:300 (1983)
    quantumDefect = [
        [
            [2.1801985, 0.13558, 0.0759, 0.117, -0.206, 0.0],
            [1.713892, 0.233294, 0.16137, 0.5345, -0.234, 0.0],
            [0.27697, -1.024911, -0.709174, 11.839, -26.689, 0.0],
            [0.010098, -0.100224, 1.56334, -12.6851, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [2.1801985, 0.13558, 0.0759, 0.117, -0.206, 0.0],
            [1.710848, 0.235437, 0.11551, 1.1015, -2.0356, 0.0],
            [0.2771580, -1.025635, -0.59201, 10.0053, -19.0244, 0.0],
            [0.010098, -0.100224, 1.56334, -12.6851, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
    ]
    """
    quantum defects from Ref. [#c7]_.
    """
    levelDataFromNIST = "k_NIST_level_data.ascii"
    dipoleMatrixElementFile = "k_dipole_matrix_elements.npy"
    quadrupoleMatrixElementFile = "k_quadrupole_matrix_elements.npy"
    precalculatedDB = "k40_precalculated.db"
    literatureDMEfilename = "potassium_literature_dme.csv"
    #: levels that are for smaller n than ground level, but are above in energy due to angular part
    extraLevels = [[3, 2, 2 + 0.5], [3, 2, 2 - 0.5]]
    groundStateN = 4
    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    mass = 39.963998166 * physical_constants["atomic mass constant"][0]
    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    abundance = 0.000117
    gL = 1 - physical_constants["electron mass"][0] / mass
    #: in eV
    scaledRydbergConstant = (
        (mass - C_m_e)
        / (mass)
        * C_Rydberg
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    elementName = "K40"
    meltingPoint = 63.5 + 273.15  #: in K
    #: source of HFS magnetic dipole and quadrupole constants
    hyperfineStructureData = "k40_hfs_data.csv"
    def getPressure(self, temperature):
        """
        Pressure of atomic vapour at given temperature.

        Returns pressure in Pa for temperature given in K.
        Uses equation and values from [#c3]_. Values from table 2.
        (accuracy +- 5%) are used for K in solid phase. Values from table 3.
        (accuracy +-1 %) are used for K in liquid phase.
        """
        if temperature < self.meltingPoint:
            # K is in solid phase (from table 2. of the cited reference / +- 5%)
            return 10.0 ** (2.881 + 4.961 - 4646.0 / temperature) * 133.322368
        elif temperature < 600.0 + 273.15:
            # K is in liquid phase (from table 3. of the cited reference
            # "precisely fitted equations" / +- 1%)
            return (
                10.0
                ** (
                    2.881
                    + 8.233
                    - 4693.0 / temperature
                    - 1.2403 * log(temperature) / log(10.0)
                )
                * 133.322368
            )
        else:
            print(
                "ERROR: K vapour pressure above 600 C is unknown \
    (limits of experimental interpolation)"
            )
            return 0
class Potassium41(AlkaliAtom):
    """
    Properties of potassium 41 atoms
    """
    # ALL PARAMETERS ARE IN ATOMIC UNITS (HARTREE)
    alphaC = 5.3310
    """
    model potential parameters from [#c1]_
    """
    a1 = [3.56079437, 3.65670429, 4.12713694, 1.42310446]
    """
    model potential parameters from [#c1]_
    """
    a2 = [1.83909642, 1.67520788, 1.79837462, 1.27861156]
    """
    model potential parameters from [#c1]_
    """
    a3 = [-1.74701102, -2.07416615, -1.69935174, 4.77441476]
    """
    model potential parameters from [#c1]_
    """
    a4 = [-1.03237313, -0.89030421, -0.98913582, -0.94829262]
    """
    model potential parameters from [#c1]_
    """
    rc = [0.83167545, 0.85235381, 0.83216907, 6.50294371]
    """
    model potential parameters from [#c1]_
    """
    Z = 19
    I = 1.5  # nuclear spin 3/2
    NISTdataLevels = 46
    #: (eV), weighted average of values in Ref. [#c7]_.
    ionisationEnergy = (
        35009.8139375
        * 1.0e2
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    # quantum defects from Physica Scripta 27:300 (1983)
    quantumDefect = [
        [
            [2.1801985, 0.13558, 0.0759, 0.117, -0.206, 0.0],
            [1.713892, 0.233294, 0.16137, 0.5345, -0.234, 0.0],
            [0.27697, -1.024911, -0.709174, 11.839, -26.689, 0.0],
            [0.010098, -0.100224, 1.56334, -12.6851, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [2.1801985, 0.13558, 0.0759, 0.117, -0.206, 0.0],
            [1.710848, 0.235437, 0.11551, 1.1015, -2.0356, 0.0],
            [0.2771580, -1.025635, -0.59201, 10.0053, -19.0244, 0.0],
            [0.010098, -0.100224, 1.56334, -12.6851, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
    ]
    """
    quantum defects from Ref. [#c7]_.
    """
    levelDataFromNIST = "k_NIST_level_data.ascii"
    dipoleMatrixElementFile = "k_dipole_matrix_elements.npy"
    quadrupoleMatrixElementFile = "k_quadrupole_matrix_elements.npy"
    precalculatedDB = "k41_precalculated.db"
    literatureDMEfilename = "potassium_literature_dme.csv"
    #: levels that are for smaller n than ground level, but are above in energy due to angular part
    extraLevels = [[3, 2, 2 + 0.5], [3, 2, 2 - 0.5]]
    groundStateN = 4
    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    mass = 40.9618252579 * physical_constants["atomic mass constant"][0]
    #: source NIST, Atomic Weights and Isotopic Compositions [#c14]_
    abundance = 0.067302
    gL = 1 - physical_constants["electron mass"][0] / mass
    #: in eV
    scaledRydbergConstant = (
        (mass - C_m_e)
        / (mass)
        * C_Rydberg
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    elementName = "K41"
    meltingPoint = 63.5 + 273.15  #: in K
    #: source of HFS magnetic dipole and quadrupole constants
    hyperfineStructureData = "k41_hfs_data.csv"
    def getPressure(self, temperature):
        """
        Pressure of atomic vapour at given temperature.

        Returns pressure in Pa for temperature given in K.
        Uses equation and values from [#c3]_. Values from table 2.
        (accuracy +- 5%) are used for K in solid phase. Values from table 3.
        (accuracy +-1 %) are used for K in liquid phase.
        """
        if temperature < self.meltingPoint:
            # K is in solid phase (from table 2. of the cited reference / +- 5%)
            return 10.0 ** (2.881 + 4.961 - 4646.0 / temperature) * 133.322368
        elif temperature < 600.0 + 273.15:
            # K is in liquid phase (from table 3. of the cited reference
            # "precisely fitted equations" / +- 1%)
            return (
                10.0
                ** (
                    2.881
                    + 8.233
                    - 4693.0 / temperature
                    - 1.2403 * log(temperature) / log(10.0)
                )
                * 133.322368
            )
        else:
            print(
                "ERROR: K vapour pressure above 600 C is unknown \
    (limits of experimental interpolation)"
            )
            return 0
from math import log
from scipy.constants import physical_constants
from scipy.constants import h as C_h
from scipy.constants import e as C_e
from arc.divalent_atom_functions import DivalentAtom
__all_ = ["Strontium88", "Calcium40", "Ytterbium174"]
class Strontium88(DivalentAtom):
    """
    Properties of Strontium 88 atoms
    """
    alphaC = 15
    ionisationEnergy = 1377012721e6 * C_h / C_e  #: (eV) Ref. [#c10]_
    Z = 38
    I = 0.0
    #: Ref. [#c10]_
    scaledRydbergConstant = (
        109736.631
        * 1.0e2
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    quantumDefect = [
        [
            [3.269123, -0.177769, 3.4619, 0.0, 0.0, 0.0],
            [2.72415, -3.390, -220.0, 0.0, 0.0, 0.0],
            [2.384667, -42.03053, -619.0, 0.0, 0.0, 0.0],
            [0.090886, -2.4425, 61.896, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [3.3707725, 0.41979, -0.421377, 0.0, 0.0, 0.0],
            [2.88673, 0.433745, -1.800, 0.0, 0.0, 0.0],
            [2.675236, -13.23217, -4418.0, 0.0, 0.0, 0.0],
            [0.120588, -2.1847, 102.98, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [3.3707725, 0.41979, -0.421377, 0.0, 0.0, 0.0],
            [2.88265, 0.39398, -1.1199, 0.0, 0.0, 0.0],
            [2.661488, -16.8524, -6629.26, 0.0, 0.0, 0.0],
            [0.11899, -2.0446, 103.26, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [3.3707725, 0.41979, -0.421377, 0.0, 0.0, 0.0],
            [2.88163, -2.462, 145.18, 0.0, 0.0, 0.0],
            [2.655, -65.317, -13576.7, 0.0, 0.0, 0.0],
            [0.12000, -2.37716, 118.97, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
    ]
    """ Contains list of modified Rydberg-Ritz coefficients for calculating
    quantum defects for
    [[ :math:`^1S_{0},^1P_{1},^1D_{2},^1F_{3}`],
    [ :math:`^3S_{1},^3P_{0},^3D_{1},^3F_{2}`],
    [ :math:`^3S_{1},^3P_{1},^3D_{2},^3F_{3}`],
    [ :math:`^3S_{1},^3P_{2},^3D_{3},^3F_{4}`]]."""
    groundStateN = 5
    # levels that are for smaller n than ground level, but are above in energy
    # due to angular part
    extraLevels = [
        # entries are [n, l, j, s] (total spin s distinguishes singlet/triplet)
        [4, 2, 3, 1],
        [4, 2, 1, 1],
        [4, 3, 3, 0],
        [4, 3, 4, 1],
        [4, 3, 3, 1],
        [4, 3, 2, 1],
        [4, 2, 2, 0],
    ]
    #: Sources Refs. [#c1]_, [#c2]_, [#c3]_, [#c4]_, [#c5]_, [#c6]_, [#c7]_,
    #: [#c8]_ , [#c10]_
    levelDataFromNIST = "sr_level_data.csv"
    precalculatedDB = "sr88_precalculated.db"
    dipoleMatrixElementFile = "sr_dipole_matrix_elements.npy"
    quadrupoleMatrixElementFile = "sr_quadrupole_matrix_elements.npy"
    literatureDMEfilename = "strontium_literature_dme.csv"
    elementName = "Sr88"
    meltingPoint = 777 + 273.15  #: in K
    #: Ref. [#nist]_
    mass = 87.905619 * physical_constants["atomic mass constant"][0]
    #: Quantum defect principal quantum number fitting ranges for different
    #: series
    defectFittingRange = {
        "1S0": [14, 34],
        "3S1": [15, 50],
        "1P1": [10, 29],
        "3P2": [19, 41],
        "3P1": [8, 21],
        "3P0": [8, 15],
        "1D2": [20, 50],
        "3D3": [20, 37],
        "3D2": [28, 50],
        "3D1": [28, 50],
        "1F3": [10, 28],
        "3F4": [10, 28],
        "3F3": [10, 24],
        "3F2": [10, 24],
    }
    def getPressure(self, temperature):
        """
        Pressure of atomic vapour at given temperature.

        Returns pressure in Pa for temperature given in K, calculated
        from the fit of Ref. [#pr]_ (accuracy +- 5%). Returns 0 below
        298 K, where the fit is not available; raises :obj:`ValueError`
        above the melting point, where the fit is no longer valid.
        """
        if temperature < 298:
            print("WARNING: Sr vapour pressure below 298 K is unknown (small)")
            return 0
        if temperature < self.meltingPoint:
            return 10 ** (
                5.006
                + 9.226
                - 8572 / temperature
                - 1.1926 * log(temperature) / log(10.0)
            )
        else:
            raise ValueError(
                "ERROR: Sr vapour pressure above %.0f C is unknown"
                % self.meltingPoint
            )
class Calcium40(DivalentAtom):
    """
    Properties of Calcium 40 atoms
    """

    #: eV Ref. [#ca4]_
    ionisationEnergy = (
        49305.91966
        * 1e2
        * physical_constants["inverse meter-electron volt relationship"][0]
    )

    Z = 20
    I = 0

    #: eV Ref. [#ca2]_
    scaledRydbergConstant = (
        109735.81037
        * 1e2
        * physical_constants["inverse meter-electron volt relationship"][0]
    )

    quantumDefect = [
        [
            [2.33793, -0.1142, 0.0, 0.0, 0.0, 0.0],
            [1.885584, -0.3240, -23.8, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            # NOTE(review): this row has 7 coefficients while every other
            # row has 6; the trailing 0.0 looks accidental -- confirm
            # against the quantum-defect evaluation code before removing it
            [0.09864, -1.29, 36, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [2.440956, 0.35, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.8833, -0.02, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [2.440956, 0.35, 0.0, 0.0, 0.0, 0.0],
            [1.964709, 0.228, 0.0, 0.0, 0.0, 0.0],
            [0.8859, 0.13, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [2.440956, 0.35, 0.0, 0.0, 0.0, 0.0],
            [1.9549, 2.5, -1.60e2, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
    ]
    """ Contains list of modified Rydberg-Ritz coefficients for calculating
    quantum defects for
    [[ :math:`^1S_{0},^1P_{1},^1D_{2},^1F_{3}`],
    [ :math:`^3S_{1},^3P_{0},^3D_{1},^3F_{2}`],
    [ :math:`^3S_{1},^3P_{1},^3D_{2},^3F_{3}`],
    [ :math:`^3S_{1},^3P_{2},^3D_{3},^3F_{4}`]]."""

    #: principal quantum number of the ground state
    groundStateN = 4
    extraLevels = []  #: TODO unknown if such exist at time of writing

    #: Sources Refs. [#c1]_, [#c5]_, [#c9]_, [#ca1]_, [#ca5]_
    levelDataFromNIST = "ca_level_data.csv"

    # file names of cached / precalculated data used by the calculation core
    precalculatedDB = "ca40_precalculated.db"
    dipoleMatrixElementFile = "ca_dipole_matrix_elements.npy"
    quadrupoleMatrixElementFile = "ca_quadrupole_matrix_elements.npy"
    literatureDMEfilename = "calcium_literature_dme.csv"

    elementName = "Ca40"
    meltingPoint = 842 + 273.15  #: in K

    #: Ref. [#nist]_
    mass = 39.962591 * physical_constants["atomic mass constant"][0]

    #: Quantum defect principal quantum number fitting ranges for different
    #: series
    defectFittingRange = {
        "1S0": [22, 55],
        "3S1": [22, 55],
        "1P1": [22, 55],
        "3P2": [8, 18],
        "3P1": [22, 55],
        "3D2": [22, 55],
        "3D1": [22, 55],
        "1F3": [20, 150],
    }

    def getPressure(self, temperature):
        """
        Pressure of atomic vapour at given temperature.

        Calculates pressure based on Ref. [#pr]_ (accuracy +- 5%).

        Args:
            temperature (float): temperature in K.

        Returns:
            float: vapour pressure (0 below 298 K, where the pressure is
            unknown but small).

        Raises:
            ValueError: if ``temperature`` is above the melting point,
                where the vapour-pressure fit is not valid.
        """
        if temperature < 298:
            print("WARNING: Ca vapour pressure below 298 K is unknown (small)")
            return 0
        if temperature < self.meltingPoint:
            # solid-phase fit from Ref. [#pr]_; the 5.006 offset converts
            # the atm-referenced fit to Pa (log10(101325) ~ 5.006)
            return 10 ** (
                5.006
                + 10.127
                - 9517 / temperature
                - 1.4030 * log(temperature) / log(10.0)
            )
        else:
            # bug fix: meltingPoint is stored in kelvin; the message used
            # to (incorrectly) label the printed value as degrees Celsius
            raise ValueError(
                "ERROR: Ca vapour pressure above %.0f K is unknown"
                % self.meltingPoint
            )
class Ytterbium174(DivalentAtom):
    """
    Properties of Ytterbium 174 atoms
    """

    #: eV Ref. [#yb3]_
    ionisationEnergy = (
        50443.07041
        * 1e2
        * physical_constants["inverse meter-electron volt relationship"][0]
    )
    Z = 70
    I = 0

    #: eV Ref. [#yb3]_
    scaledRydbergConstant = (
        109736.96959
        * 1e2
        * physical_constants["inverse meter-electron volt relationship"][0]
    )

    quantumDefect = [
        [
            [4.278367, -5.60943, -258.5, 0.0, 0.0, 0.0],
            [3.953434, -10.58286, 728.100, 0.0, 0.0, 0.0],
            [2.7130117, -0.929878, -636.4, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [2.7485996, 0.0137, -106.55, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
    ]
    """ Contains list of modified Rydberg-Ritz coefficients for calculating
    quantum defects for
    [[ :math:`^1S_{0},^1P_{1},^1D_{2},^1F_{3}`],
    [ :math:`^3S_{1},^3P_{0},^3D_{1},^3F_{2}`],
    [ :math:`^3S_{1},^3P_{1},^3D_{2},^3F_{3}`],
    [ :math:`^3S_{1},^3P_{2},^3D_{3},^3F_{4}`]]."""

    #: principal quantum number of the ground state
    groundStateN = 6
    extraLevels = []  #: TODO unknown if such exist at time of writing

    #: Sources Refs. [#yb1]_, [#yb2]_, [#yb3]_, [#MT78]_
    levelDataFromNIST = "yb_level_data.csv"

    # file names of cached / precalculated data used by the calculation core
    precalculatedDB = "yb174_precalculated.db"
    dipoleMatrixElementFile = "yb_dipole_matrix_elements.npy"
    quadrupoleMatrixElementFile = "yb_quadrupole_matrix_elements.npy"
    literatureDMEfilename = "ytterbium_literature_dme.csv"

    elementName = "Yb174"
    meltingPoint = 819 + 273.15  #: in K

    #: Ref. [#nist]_
    mass = 173.9388664 * physical_constants["atomic mass constant"][0]

    #: Quantum defect principal quantum number fitting ranges for different
    #: series
    defectFittingRange = {
        "1S0": [34, 80],
        "1P1": [35, 54],
        "1D2": [40, 80],
        "3D2": [35, 80],
    }

    def getPressure(self, temperature):
        """
        Pressure of atomic vapour at given temperature.

        Calculates pressure based on Ref. [#pr]_ (accuracy +- 5%).

        Args:
            temperature (float): temperature in K.

        Returns:
            float: vapour pressure (0 below 298 K, where the pressure is
            unknown but small).

        Raises:
            ValueError: if ``temperature`` is above 900 K, where the
                vapour-pressure fit is not valid.
        """
        if temperature < 298:
            print("WARNING: Yb vapour pressure below 298 K is unknown (small)")
            return 0
        # 900 K upper limit is presumably the validity range of the fit in
        # Ref. [#pr]_ (it is below meltingPoint = 1092 K) -- TODO confirm
        if temperature < 900:
            return 10 ** (
                5.006
                + 9.111
                - 8111 / temperature
                - 1.0849 * log(temperature) / log(10.0)
            )
        else:
            raise ValueError("ERROR: Yb vapour pressure above 900 K is unknown")
from __future__ import print_function
from .alkali_atom_functions import (
printStateString,
_EFieldCoupling,
printStateLetter,
printStateStringLatex,
formatNumberSI,
)
import datetime
import matplotlib
from matplotlib.colors import LinearSegmentedColormap
from math import sqrt
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import numpy as np
import warnings
from scipy.constants import physical_constants, pi, epsilon_0, hbar
from scipy.constants import c as C_c
from scipy.constants import h as C_h
from scipy.constants import e as C_e
from scipy.constants import m_e as C_m_e
from scipy.optimize import curve_fit
from scipy import interpolate
# for matrices
from numpy.linalg import eigh
import scipy.sparse as sp
from scipy.sparse import csr_matrix
from scipy.special import sph_harm
import sys
from arc._database import UsedModulesARC
from arc.divalent_atom_functions import DivalentAtom
from arc.wigner import Wigner6j, CG
# Python 2/3 compatibility: Python 3 has no ``xrange``, so alias it to
# ``range`` (the calculation code below uses ``xrange`` throughout).
if sys.version_info > (2,):
    xrange = range
def Ylm(l, m, theta, phi):
    r"""
    Spherical harmonic :math:`Y_{l}^{m}(\theta, \phi)`.

    Thin wrapper around :obj:`scipy.special.sph_harm` that converts from
    the physics convention used here (``theta`` = polar angle, ``phi`` =
    azimuthal angle) to SciPy's argument order (order ``m`` first, then
    degree ``l``, azimuth before polar angle).
    """
    # SciPy signature is sph_harm(m, n, theta_azimuthal, phi_polar)
    harmonicValue = sph_harm(m, l, phi, theta)
    return harmonicValue
# Public names exported via ``import *`` from this module.
__all__ = [
    "Ylm",
    "Wavefunction",
    "StarkMap",
    "LevelPlot",
    "AtomSurfaceVdW",
    "OpticalLattice1D",
    "DynamicPolarizability",
    "StarkBasisGenerator",
    "ShirleyMethod",
    "RWAStarkShift",
]
class Wavefunction:
    r"""
    Calculates and plots electron wavefunctions.

    For an example see `wavefunction plotting example snippet`_.

    .. _`wavefunction plotting example snippet`:
        ./ARC_3_0_introduction.html#Wavefunction-calculations-for-Alkali-atom-Rydberg-states

    Args:
        atom: atom type considered (for example :obj:`Rubidium87()`)
        basisStates (array): array of states in fine basis that contribute\
            to the state whose wavefunction is requested.
            :math:`[[n_1, \ell_1, j_1, m_{j1}], ...]` For efficient
            calculation **do not** pass all the possible basis states, but
            just the ones that have significant contribution to the
            requested state.
        coefficients (array): array `[c1, ...]` of complex coefficients
            :math:`c_i = \langle \psi_i |\psi\rangle` corresponding to
            decomposition of required state :math:`|\psi\rangle` on basis
            states :math:`|\psi_i \rangle` .
    """

    def __init__(self, atom, basisStates, coefficients):
        # basis states are specified as [n, l, j, mj]
        UsedModulesARC.arc3_0_methods = True
        self.atom = atom
        if (
            len(basisStates) == 0
            or len(basisStates[0]) != 4
            or len(basisStates) != len(coefficients)
        ):
            raise ValueError(
                "basisStates should be defined as array of"
                "states in fine basis [[n1, l1, j1, mj1], ... ]"
                "contributing to the required states "
                "(do not use unecessarily whole basis) "
                "while coefficients corresponding to decomposition "
                "of requested state on these basis state "
                "should be given as"
                "separete array [c1, ...]"
            )
        self.basisStates = basisStates
        self.coef = coefficients

        # radial wavefunctions for each basis state, stored as interpolating
        # functions of r (returning 0 outside the calculated range)
        self.basisWavefunctions = []

        for state in self.basisStates:
            n = state[0]
            l = state[1]
            j = state[2]

            # calculate radial wavefunction
            step = 0.001
            r, rWavefunc = atom.radialWavefunction(
                l,
                0.5,
                j,
                self.atom.getEnergy(n, l, j) / 27.211,
                self.atom.alphaC ** (1 / 3.0),
                2.0 * n * (n + 15.0),
                step,
            )
            # normalise the radial wavefunction
            # NOTE: np.trapz was removed in NumPy 2.0 (replacement is
            # np.trapezoid) -- keep in mind when upgrading NumPy
            suma = np.trapz(rWavefunc**2, x=r)
            rWavefunc = rWavefunc / (sqrt(suma))

            self.basisWavefunctions.append(
                interpolate.interp1d(
                    r, rWavefunc, bounds_error=False, fill_value=(0, 0)
                )
            )

    def getRtimesPsiSpherical(self, theta, phi, r):
        r"""
        Calculates list of :math:`r \cdot \psi_{m_s} (\theta, \phi, r)`

        At point defined by spherical coordinates, returns list of
        :math:`r \cdot \psi_{m_s} (\theta, \phi, r)`
        wavefunction values for different electron spin projection
        values :math:`m_s`.

        Coordinates are defined relative to atomic core.

        Args:
            theta (float): polar angle (angle between :math:`z` axis and
                vector pointing towards selected point)
                (in units of radians).
            phi (float): azimuthal angle (angle between :math:`x` axis and
                projection at :math:`x-y` plane of vector pointing towards
                selected point) (in units of radians).
            r (float): distance between coordinate origin and selected
                point. (in atomic units of Bohr radius :math:`a_0`)

        Returns:
            list of complex values corresponding to
            :math:`r \cdot \psi_{m_s} (\theta, \phi, r)` for different
            spin states :math:`m_s` contributing to the state in
            **decreasing** order of :math:`m_s`. For example, for
            :obj:`arc.AlkaliAtom` returns
            :math:`r \cdot \psi_{m_s=+1/2} (\theta, \phi, r)` and
            :math:`r \cdot \psi_{m_s=-1/2} (\theta, \phi, r)`.
        """

        wfElectronP = 0 + 0j  # electron spin +1/2
        wfElectronM = 0 + 0j  # electron spin -1/2

        for i, state in enumerate(self.basisStates):
            l = state[1]
            j = state[2]
            mj = state[3]
            # decompose fine-structure state |l, j, mj> into |ml, ms>
            # components via Clebsch-Gordan coefficients; each spin
            # component contributes only when |ml| <= l
            if abs(mj - 0.5) - 0.1 < l:
                wfElectronP += (
                    CG(l, mj - 0.5, 0.5, +0.5, j, mj)
                    * Ylm(l, mj - 0.5, theta, phi)
                    * self.basisWavefunctions[i](r)
                    * self.coef[i]
                )
            if abs(mj + 0.5) - 0.1 < l:
                wfElectronM += (
                    CG(l, mj + 0.5, 0.5, -0.5, j, mj)
                    * Ylm(l, mj + 0.5, theta, phi)
                    * self.basisWavefunctions[i](r)
                    * self.coef[i]
                )
        return wfElectronP, wfElectronM

    def getRtimesPsi(self, x, y, z):
        r"""
        Calculates list of :math:`r \cdot \psi_{m_s} (x, y, z)`

        At a point defined by Cartesian coordinates returns list of
        :math:`r \cdot \psi_{m_s} (x, y, z)`
        wavefunction values for different
        electron spin projection values :math:`m_s`.

        Args:
            x (float): Cartesian coordinates of selected point,
                relative to the atom core.
                (in atomic units of Bohr radius :math:`a_0`)
            y (float): Cartesian coordinates of selected point,
                relative to the atom core.
                (in atomic units of Bohr radius :math:`a_0`)
            z (float): Cartesian coordinates of selected point,
                relative to the atom core.
                (in atomic units of Bohr radius :math:`a_0`)

        Returns:
            list of complex values corresponding to
            :math:`r \cdot \psi_{m_s} (\theta, \phi, r)` for different
            spin states :math:`m_s` contributing to the state in
            **decreasing** order of :math:`m_s`.
            For example, for :obj:`arc.AlkaliAtom`
            returns :math:`r \cdot \psi_{m_s=+1/2} (\theta, \phi, r)` and
            :math:`r \cdot \psi_{m_s=-1/2} (\theta, \phi, r)`,
            where :math:`r=\sqrt{x^2+y^2+z^2}`.
        """
        # convert Cartesian -> spherical coordinates
        theta = np.arctan2((x**2 + y**2) ** 0.5, z)
        phi = np.arctan2(y, x)
        r = np.sqrt(x**2 + y**2 + z**2)
        return self.getRtimesPsiSpherical(theta, phi, r)

    def getPsi(self, x, y, z):
        r"""
        Calculates list of :math:`\psi_{m_s} (x,y,z)`

        At point defined by Cartesian coordinates returns list of
        :math:`\psi_{m_s} (x,y,z)` wavefunction values corresponding
        to different electron spin projection values :math:`m_s`.

        Args:
            x (float): Cartesian coordinates of selected point,
                relative to the atom core.
                (in atomic units of Bohr radius :math:`a_0`)
            y (float): Cartesian coordinates of selected point,
                relative to the atom core.
                (in atomic units of Bohr radius :math:`a_0`)
            z (float): Cartesian coordinates of selected point,
                relative to the atom core.
                (in atomic units of Bohr radius :math:`a_0`)

        Returns:
            list of complex values corresponding to
            :math:`\psi_{m_s} (\theta, \phi, r)` for different
            spin states :math:`m_s` contributing to the state in
            **decreasing** order of :math:`m_s`.
            For example, for :obj:`arc.AlkaliAtom`
            returns :math:`\psi_{m_s=+1/2} (\theta, \phi, r)` and
            :math:`\psi_{m_s=-1/2} (\theta, \phi, r)` .
        """
        # NOTE: divides by r, so the value diverges at the origin (r = 0)
        r = np.sqrt(x * x + y * y + z * z)
        return self.getRtimesPsi(x, y, z) / r

    def getRtimesPsiSquaredInPlane(
        self, plane="x-z", pointsPerAxis=150, axisLength=None, units="atomic"
    ):
        r"""
        Calculates :math:`|r \cdot \psi|^2` on a mesh in a given plane.

        Args:
            plane (str): optional, sets calculation plane to `'x-y'` or
                `'x-z'`. Default value `'x-z'`
            pointsPerAxis (int): optional, a number of mesh points per
                Cartesian axis. Default value of 150, gives a mesh with total
                size of :math:`150 \times 150 = 22500` points.
            axisLength (float): optional, length of the square in the selected
                plane on which wavefunction will be calculated. By default it
                is large enough to fit the whole wavefunction
                (in atomic units of Bohr radius :math:`a_0`).
            units (str): optional, units of length in which calculated mesh
                will be **returned** (note that `axisLength` is on the other
                hand always in atomic units.). Supported values are
                `'atomic'` or `'nm'`. Default value `'atomic'` .

        Returns:
            meshCoordinate1, meshCoordinate2 and
            :math:`|r \cdot \psi|^2 = \sum_{m_s} |r \cdot \psi_{m_s}|^2`,
            where sum is over possible electron spin projection values
            :math:`m_s`.
        """
        if axisLength is None:
            # default window scales with the classical outer turning point
            # of the highest-n basis state
            nMax = 1
            for state in self.basisStates:
                nMax = max(nMax, state[0])
            axisLength = 2.0 * 2.0 * nMax * (nMax + 15.0)

        coord1 = np.linspace(-axisLength / 2.0, axisLength / 2.0, pointsPerAxis)
        coord2 = np.linspace(-axisLength / 2.0, axisLength / 2.0, pointsPerAxis)
        meshCoord1, meshCoord2 = np.meshgrid(coord1, coord2)

        coord = []
        if plane == "x-z":
            coord = [meshCoord1, 0, meshCoord2]
        elif plane == "x-y":
            coord = [meshCoord1, meshCoord2, 0]
        else:
            raise ValueError("Only 'x-y' and 'x-z' planes are supported.")

        wfP, wfM = self.getRtimesPsi(*coord)

        # change units
        if units == "nm":
            scale = physical_constants["Bohr radius"][0] * 1e9
            meshCoord1 *= scale
            meshCoord2 *= scale
            # wavefunction normalisation follows the length unit
            wfP /= scale
            wfM /= scale
        elif units == "atomic":
            pass
        else:
            raise ValueError(
                "Only 'atomic' (a_0) and 'nm' are recognised"
                "as possible units. Received: %s" % units
            )

        # total probability density: incoherent sum over spin projections
        f = np.power(np.abs(wfP), 2) + np.power(np.abs(wfM), 2)

        return meshCoord1, meshCoord2, f

    def plot2D(
        self,
        plane="x-z",
        pointsPerAxis=150,
        axisLength=None,
        units="atomic",
        colorbar=True,
        labels=True,
    ):
        r"""
        2D colour plot of :math:`|r \cdot \psi|^2` wavefunction in a
        requested plane.

        Args:
            plane (str): optional, sets calculation plane to `'x-y'` or
                `'x-z'`. Default value `'x-z'`
            pointsPerAxis (int): optional, a number of mesh points per
                Cartesian axis. Default value of 150, gives a mesh with
                total size of :math:`150 \times 150 = 22500` points.
            axisLength (float): optional, length of the square in the selected
                plane on which wavefunction will be calculated. By default it
                is large enough to fit the whole wavefunction
                (in atomic units of Bohr radius :math:`a_0`).
            units (str): optional, units of length in which calculated mesh
                will be **returned** (note that `axisLength` is on the other
                hand always in atomic units.). Supported values are
                `'atomic'` or `'nm'`. Default value `'atomic'` .
            colorbar (bool): optional, determines if the colour bar scale
                should be shown. Default value is `True`.
            labels (bool): optional, determines if the labels on the axis
                of the plot should be shown. Default value is `True`.

        Returns:
            :obj:`matplotlib.pyplot.figure` object with a requested plot. Use `show()`
            method to see figure.
        """
        x, y, f = self.getRtimesPsiSquaredInPlane(
            plane=plane,
            pointsPerAxis=pointsPerAxis,
            axisLength=axisLength,
            units=units,
        )
        fig = plt.figure(figsize=(6, 4))
        ax = fig.add_subplot(1, 1, 1)
        cp = ax.pcolor(x, y, f, vmin=0, vmax=f.max(), cmap="viridis")
        if labels:
            if units == "atomic":
                unitLabel = r"$a_0$"
            else:
                unitLabel = "nm"
            if plane == "x-y":
                plt.xlabel(r"$x$ (%s)" % unitLabel)
                plt.ylabel(r"$y$ (%s)" % unitLabel)
            elif plane == "x-z":
                plt.xlabel(r"$x$ (%s)" % unitLabel)
                plt.ylabel(r"$z$ (%s)" % unitLabel)
            else:
                # bug fix: this branch validates *plane*; the original
                # raised a copy-pasted error message about units
                raise ValueError("Only 'x-y' and 'x-z' planes are supported.")
        ax.set_aspect("equal", "box")
        if colorbar:
            cb = fig.colorbar(cp)
            cb.set_label(
                r"$|r\cdot\psi(x,y,z)|^2$"
            )  # NOTE: change label if plotting Imaginart part!
        return fig
        # return figure

    def plot3D(
        self,
        plane="x-z",
        pointsPerAxis=150,
        axisLength=None,
        units="atomic",
        labels=True,
    ):
        r"""
        3D colour surface plot of :math:`|r \cdot \psi|^2` wavefunction in a
        requested plane.

        Args:
            plane (str): optional, sets calculation plane to `'x-y'` or
                `'x-z'`. Default value `'x-z'`
            pointsPerAxis (int): optional, a number of mesh points per
                Cartesian axis. Default value of 150, gives a mesh with
                total size of :math:`150 \times 150 = 22500` points.
            axisLength (float): optional, length of the square in the selected
                plane on which wavefunction will be calculated. By default it
                is large enough to fit the whole wavefunction
                (in atomic units of Bohr radius :math:`a_0`).
            units (str): optional, units of length in which calculated mesh
                will be **returned** (note that `axisLength` is on the other
                hand always in atomic units.). Supported values are
                `'atomic'` or `'nm'`. Default value `'atomic'` .
            labels (bool): optional, determines if the labels on the axis
                of the plot should be shown. Default value is `True`.

        Returns:
            :obj:`matplotlib.pyplot.figure` object with a requested plot. Use `show()`
            method to see figure.
        """
        x, y, f = self.getRtimesPsiSquaredInPlane(
            plane=plane,
            pointsPerAxis=pointsPerAxis,
            axisLength=axisLength,
            units=units,
        )
        fig = plt.figure(figsize=(6, 4))
        # bug fix: Figure.gca(projection="3d") was deprecated in
        # Matplotlib 3.4 and removed in 3.6; add_subplot is the supported
        # way to create 3D axes
        ax = fig.add_subplot(projection="3d")
        ax.view_init(40, -35)

        # Plot the surface.
        ax.plot_surface(
            x,
            y,
            f,
            cmap="Reds",
            vmin=0,
            vmax=f.max(),
            linewidth=0,
            antialiased=False,
            rstride=1,
            cstride=1,
        )
        ax.plot_wireframe(
            x, y, f, rstride=10, cstride=10, alpha=0.05, color="k"
        )

        if labels:
            if units == "atomic":
                unitLabel = r"$a_0$"
            else:
                unitLabel = "nm"
            if plane == "x-y":
                plt.xlabel(r"$x$ (%s)" % unitLabel)
                plt.ylabel(r"$y$ (%s)" % unitLabel)
            elif plane == "x-z":
                plt.xlabel(r"$x$ (%s)" % unitLabel)
                plt.ylabel(r"$z$ (%s)" % unitLabel)
            else:
                # bug fix: this branch validates *plane*; the original
                # raised a copy-pasted error message about units
                raise ValueError("Only 'x-y' and 'x-z' planes are supported.")

        plt.xlim(x.min(), x.max())
        plt.ylim(y.min(), y.max())
        return fig
class StarkMap:
"""
Calculates Stark maps for single atom in a field
This initializes calculation for the atom of a given type. For details
of calculation see Zimmerman [1]_. For a quick working example
see `Stark map example snippet`_.
Args:
atom (:obj:`arc.alkali_atom_functions.AlkaliAtom` or :obj:`arc.divalent_atom_functions.DivalentAtom`): ={
:obj:`arc.alkali_atom_data.Lithium6`,
:obj:`arc.alkali_atom_data.Lithium7`,
:obj:`arc.alkali_atom_data.Sodium`,
:obj:`arc.alkali_atom_data.Potassium39`,
:obj:`arc.alkali_atom_data.Potassium40`,
:obj:`arc.alkali_atom_data.Potassium41`,
:obj:`arc.alkali_atom_data.Rubidium85`,
:obj:`arc.alkali_atom_data.Rubidium87`,
:obj:`arc.alkali_atom_data.Caesium`,
:obj:`arc.divalent_atom_data.Strontium88`,
:obj:`arc.divalent_atom_data.Calcium40`
:obj:`arc.divalent_atom_data.Ytterbium174` }
Select the alkali metal for energy level
diagram calculation
Examples:
State :math:`28~S_{1/2}~|m_j|=0.5` polarizability calculation
>>> from arc import *
>>> calc = StarkMap(Caesium())
>>> calc.defineBasis(28, 0, 0.5, 0.5, 23, 32, 20)
>>> calc.diagonalise(np.linspace(00.,6000,600))
>>> print("%.5f MHz cm^2 / V^2 " % calc.getPolarizability())
0.76705 MHz cm^2 / V^2
Stark map calculation
>>> from arc import *
>>> calc = StarkMap(Caesium())
>>> calc.defineBasis(28, 0, 0.5, 0.5, 23, 32, 20)
>>> calc.diagonalise(np.linspace(00.,60000,600))
>>> calc.plotLevelDiagram()
>>> calc.showPlot()
<< matplotlib plot will open containing a Stark map >>
Examples:
**Advanced interfacing of Stark map calculations (StarkMap class)**
Here we show one easy way to obtain the Stark matrix (from diagonal
:obj:`mat1` and off-diagonal part :obj:`mat2` ) and basis states
(stored in :obj:`basisStates` ), if this middle-product of the
calculation is needed for some code build on top of the existing
ARC package.
>>> from arc import *
>>> calc = StarkMap(Caesium())
>>> calc.defineBasis(28, 0, 0.5, 0.5, 23, 32, 20)
>>> # Now we have matrix and basis states, that we can used in our own code
>>> # Let's say we want Stark map at electric field of 0.2 V/m
>>> eField = 0.2 # V/m
>>> # We can easily extract Stark matrix
>>> # as diagonal matrix (state detunings)
>>> # + off-diagonal matrix (propotional to electric field)
>>> matrix = calc.mat1+calc.mat2*eField
>>> # and the basis states as array [ [n,l,j,mj] , ...]
>>> basisStates = calc.basisStates
>>> # you can do your own calculation now...
References:
.. [1] M. L. Zimmerman et.al, PRA **20**:2251 (1979)
https://doi.org/10.1103/PhysRevA.20.2251
.. _`Stark map example snippet`:
./Rydberg_atoms_a_primer_notebook.html#Rydberg-Atom-Stark-Shifts
"""
    def __init__(self, atom):
        """
        Args:
            atom: instance of an ARC atom class (e.g. :obj:`Rubidium87`)
                for which the Stark map will be calculated.
        """
        self.atom = atom

        self.basisStates = []
        """
        List of basis states for calculation in the form [ [n,l,j,mj], ...].
        Calculated by :obj:`defineBasis` .
        """
        self.mat1 = []
        """
        diagonal elements of Stark-matrix (detuning of states) calculated by
        :obj:`defineBasis` in the basis :obj:`basisStates`.
        """
        self.mat2 = []
        """
        off-diagonal elements of Stark-matrix divided by electric
        field value. To get off diagonal elemements multiply this matrix
        with electric field value. Full Stark matrix is obtained as
        `fullStarkMatrix` = :obj:`mat1` + :obj:`mat2` *`eField`. Calculated by
        :obj:`defineBasis` in the basis :obj:`basisStates`.
        """
        self.indexOfCoupledState = []
        """
        Index of coupled state (initial state passed to :obj:`defineBasis`)
        in :obj:`basisStates` list of basis states
        """

        # finding energy levels
        self.eFieldList = []
        """
        Saves electric field (in units of V/m) for which energy levels are calculated

        See also:
            :obj:`y`, :obj:`highlight`, :obj:`diagonalise`
        """
        self.y = []  # eigenValues
        """
        `y[i]` is an array of eigenValues corresponding to the energies of the
        atom states at the electric field `eFieldList[i]`. For example `y[i][j]` is
        energy of the `j` eigenvalue (energy of the state) measured in
        cm :math:`{}^{-1}` relative to the ionization threshold.

        See also:
            :obj:`eFieldList`, :obj:`highlight`, :obj:`diagonalise`
        """
        self.highlight = (
            []
        )  # contribution of initial state there (overlap |<original state | given state>|^2)
        """
        `highlight[i]` is an array of values measuring highlighted feature in the
        eigenstates at electric field intensity `eFieldList[i]`. E.g. `highlight[i][j]`
        measures highlighted feature of the state with energy `y[i][j]` at electric
        field `eFieldList[i]`. What will be highlighted feature is defined in the
        call of :obj:`diagonalise` (see that part of documentation for details).

        See also:
            :obj:`eFieldList`, :obj:`y`, :obj:`diagonalise`
        """

        #: pointer towards matplotlib figure after :obj:`plotLevelDiagram`
        #: is called to create figure
        self.fig = 0
        #: pointer towards matplotlib figure axis after :obj:`plotLevelDiagram`
        #: is called to create figure
        self.ax = 0

        # values used for fitting polarizability, and fit
        self.fitX = []
        self.fitY = []
        self.fittedCurveY = []

        # driving state [n, l, j, mj, q]; [0, 0, 0, 0, 0] means "no driving"
        self.drivingFromState = [0, 0, 0, 0, 0]
        self.maxCoupling = 0.0

        # STARK memoization
        # (holds an _EFieldCoupling instance only while defineBasis runs)
        self.eFieldCouplingSaved = False

        #: spin manifold in which we are working
        #: default value of 0.5 is correct for Alkaline Atoms. Otherwise it has
        #: to be specified when calling `defineBasis` as `s=0` or `s=1` for
        #: singlet and triplet states respectively
        self.s = 0.5
def _eFieldCouplingDivE(self, n1, l1, j1, mj1, n2, l2, j2, mj2, s=0.5):
# eFied coupling devided with E (witout actuall multiplication to getE)
# delta(mj1,mj2') delta(l1,l2+-1)
if (abs(mj1 - mj2) > 0.1) or (abs(l1 - l2) != 1):
return 0
# matrix element
result = (
self.atom.getRadialMatrixElement(n1, l1, j1, n2, l2, j2, s=s)
* physical_constants["Bohr radius"][0]
* C_e
)
sumPart = self.eFieldCouplingSaved.getAngular(
l1, j1, mj1, l2, j2, mj2, s=s
)
return result * sumPart
def _eFieldCoupling(self, n1, l1, j1, mj1, n2, l2, j2, mj2, eField, s=0.5):
return (
self._eFieldCouplingDivE(n1, l1, j1, mj1, n2, l2, j2, mj2, s=s)
* eField
)
    def defineBasis(
        self,
        n,
        l,
        j,
        mj,
        nMin,
        nMax,
        maxL,
        Bz=0,
        progressOutput=False,
        debugOutput=False,
        s=0.5,
    ):
        """
        Initializes basis of states around state of interest

        Defines basis of states for further calculation. :math:`n,l,j,m_j`
        specify state whose neighbourhood and polarizability we want
        to explore. Other parameters specify basis of calculations.
        This method stores basis in :obj:`basisStates`, while corresponding
        interaction matrix is stored in two parts. First part is diagonal
        electric-field independent part stored in :obj:`mat1`, while the
        second part :obj:`mat2` corresponds to off-diagonal elements that are
        proportional to electric field. Overall interaction matrix for
        electric field `eField` can be then obtained as
        `fullStarkMatrix` = :obj:`mat1` + :obj:`mat2` *`eField`

        Args:
            n (int): principal quantum number of the state
            l (int): angular orbital momentum of the state
            j (float): total angular momentum of the state
            mj (float): projection of total angular momentum of the state
            nMin (int): *minimal* principal quantum number of the states to
                be included in the basis for calculation
            nMax (int): *maximal* principal quantum number of the states to
                be included in the basis for calculation
            maxL (int): *maximal* value of orbital angular momentum for the
                states to be included in the basis for calculation
            Bz (float): optional, magnetic field directed along z-axis in
                units of Tesla. Calculation will be correct only for weak
                magnetic fields, where paramagnetic term is much stronger
                then diamagnetic term. Diamagnetic term is neglected.
            progressOutput (:obj:`bool`, optional): if True prints the
                progress of calculation; Set to false by default.
            debugOutput (:obj:`bool`, optional): if True prints additional
                information useful for debugging. Set to false by default.
            s (float): optional. Total spin angular momentum for the state.
                Default value of 0.5 is correct for Alkaline Atoms, but
                value **has to** be specified explicitly for divalent atoms
                (e.g. `s=0` or `s=1` for singlet and triplet states,
                that have total spin angular momentum equal to 0 or 1
                respectively).
        """
        global wignerPrecal
        wignerPrecal = True
        # open (memoized) angular-factor database for the duration of the
        # matrix construction
        self.eFieldCouplingSaved = _EFieldCoupling()

        states = []

        # save calculation details START
        self.n = n
        self.l = l
        self.j = j
        self.mj = mj
        self.nMin = nMin
        self.nMax = nMax
        self.maxL = maxL
        self.Bz = Bz
        self.s = s
        # save calculation details END

        # enumerate all basis states [n, l, j, mj] compatible with the
        # requested mj and existing for this atom
        # NOTE(review): xrange(nMin, nMax) excludes nMax itself, so states
        # with n == nMax are *not* included despite the docstring wording --
        # confirm whether this is intended
        for tn in xrange(nMin, nMax):
            for tl in xrange(min(maxL + 1, tn)):
                for tj in np.linspace(tl - s, tl + s, round(2 * s + 1)):
                    if (abs(mj) - 0.1 <= tj) and (
                        tn >= self.atom.groundStateN
                        or [tn, tl, tj] in self.atom.extraLevels
                    ):
                        states.append([tn, tl, tj, mj])

        dimension = len(states)
        if progressOutput:
            print("Found ", dimension, " states.")
        if debugOutput:
            print(states)

        # locate the target state within the generated basis
        indexOfCoupledState = 0
        index = 0
        for st in states:
            if (
                (st[0] == n)
                and (abs(st[1] - l) < 0.1)
                and (abs(st[2] - j) < 0.1)
                and (abs(st[3] - mj) < 0.1)
            ):
                indexOfCoupledState = index
            index += 1
        if debugOutput:
            print("Index of initial state")
            print(indexOfCoupledState)
            print("Initial state = ")
            print(states[indexOfCoupledState])

        self.mat1 = np.zeros((dimension, dimension), dtype=np.double)
        self.mat2 = np.zeros((dimension, dimension), dtype=np.double)

        self.basisStates = states
        self.indexOfCoupledState = indexOfCoupledState

        if progressOutput:
            print("Generating matrix...")
        progress = 0.0

        for ii in xrange(dimension):
            if progressOutput:
                progress += (dimension - ii) * 2 - 1
                sys.stdout.write(
                    "\r%d%%" % (float(progress) / float(dimension**2) * 100)
                )
                sys.stdout.flush()

            # add diagonal element
            # (state energy + Zeeman shift, both converted to GHz)
            self.mat1[ii][ii] = (
                self.atom.getEnergy(
                    states[ii][0], states[ii][1], states[ii][2], s=self.s
                )
                * C_e
                / C_h
                * 1e-9
                + self.atom.getZeemanEnergyShift(
                    states[ii][1],
                    states[ii][2],
                    states[ii][3],
                    self.Bz,
                    s=self.s,
                )
                / C_h
                * 1.0e-9
            )
            # add off-diagonal element
            # (coupling divided by field, in GHz per (V/m); matrix is
            # symmetric so both [ii][jj] and [jj][ii] are filled)
            for jj in xrange(ii + 1, dimension):
                coupling = (
                    self._eFieldCouplingDivE(
                        states[ii][0],
                        states[ii][1],
                        states[ii][2],
                        mj,
                        states[jj][0],
                        states[jj][1],
                        states[jj][2],
                        mj,
                        s=self.s,
                    )
                    * 1.0e-9
                    / C_h
                )
                self.mat2[jj][ii] = coupling
                self.mat2[ii][jj] = coupling

        if progressOutput:
            print("\n")
        if debugOutput:
            print(self.mat1 + self.mat2)
            print(self.mat2[0])

        self.atom.updateDipoleMatrixElementsFile()
        # close the angular-factor database again
        self.eFieldCouplingSaved._closeDatabase()
        self.eFieldCouplingSaved = False
        return 0
    def diagonalise(
        self,
        eFieldList,
        drivingFromState=[0, 0, 0, 0, 0],
        progressOutput=False,
        debugOutput=False,
        upTo=4,
        totalContributionMax=0.95,
    ):
        """
        Finds atom eigenstates in a given electric field

        Eigenstates are calculated for a list of given electric fields. To
        extract polarizability of the originally stated state see
        :obj:`getPolarizability` method. Results are saved in
        :obj:`eFieldList`, :obj:`y` and :obj:`highlight`.

        Args:
            eFieldList (array): array of electric field strength (in V/m)
                for which we want to know energy eigenstates
            drivingFromState ([int,int,float,float,int]): optional,
                state ``[n, l, j, mj, q]`` from which the Stark manifold is
                driven with polarization ``q``. If specified (first element
                non-zero), :obj:`highlight` measures relative driving
                strength into each eigenstate instead of overlap with the
                original state. Default ``[0, 0, 0, 0, 0]`` (no driving).
            progressOutput (:obj:`bool`, optional): if True prints the
                progress of calculation; Set to false by default.
            debugOutput (:obj:`bool`, optional): if True prints additional
                information useful for debugging. Set to false by default.
            upTo ('int', optional): Number of top contributing bases states
                to be saved into composition attribute; Set to 4 by default.
                To keep all contributing states, set upTo = -1.
            totalContributionMax ('float', optional): Ceiling for
                contribution to the wavefunction from basis states included
                in composition attribute. Composition will contain a list
                of [coefficient, state index] pairs for top contributing
                unperturbed basis states until the number of states reaches
                upTo or their total contribution reaches totalContributionMax,
                whichever comes first. totalContributionMax is ignored if
                upTo = -1.
        """

        # if we are driving from some state
        # ========= FIND LASER COUPLINGS (START) =======

        # coupling[i] = |<basisStates[i]|d_q|drivingFromState>|^2
        coupling = []
        dimension = len(self.basisStates)
        self.maxCoupling = 0.0
        self.drivingFromState = drivingFromState
        if self.drivingFromState[0] != 0:
            if progressOutput:
                print("Finding driving field coupling...")
            # get first what was the state we are calculating coupling with
            state1 = drivingFromState
            n1 = round(state1[0])
            l1 = round(state1[1])
            j1 = state1[2]
            m1 = state1[3]
            q = state1[4]

            for i in xrange(dimension):
                thisCoupling = 0.0
                if progressOutput:
                    sys.stdout.write(
                        "\r%d%%" % (i / float(dimension - 1) * 100.0)
                    )
                    sys.stdout.flush()
                # dipole selection rules for driving with polarization q
                if (
                    (round(abs(self.basisStates[i][1] - l1)) == 1)
                    and (round(abs(self.basisStates[i][2] - j1)) <= 1)
                    and (round(abs(self.basisStates[i][3] - m1 - q)) == 0)
                ):
                    state2 = self.basisStates[i]
                    n2 = round(state2[0])
                    l2 = round(state2[1])
                    j2 = state2[2]
                    m2 = state2[3]
                    if debugOutput:
                        print(
                            n1,
                            " ",
                            l1,
                            " ",
                            j1,
                            " ",
                            m1,
                            " < - ",
                            q,
                            " - >",
                            n2,
                            " ",
                            l2,
                            " ",
                            j2,
                            " ",
                            m2,
                            "\n",
                        )
                    dme = self.atom.getDipoleMatrixElement(
                        n1, l1, j1, m1, n2, l2, j2, m2, q, s=self.s
                    )
                    thisCoupling += dme

                thisCoupling = abs(thisCoupling) ** 2
                if thisCoupling > self.maxCoupling:
                    self.maxCoupling = thisCoupling
                if (thisCoupling > 0.00000001) and debugOutput:
                    print("coupling = ", thisCoupling)
                coupling.append(thisCoupling)

            if progressOutput:
                print("\n")

            if self.maxCoupling < 0.00000001:
                raise Exception(
                    "State that you specified in drivingFromState, for a "
                    + "given laser polarization, is uncoupled from the specified Stark "
                    + "manifold. If you just want to see the specified Stark manifold "
                    + "remove driveFromState optional argument from call of function "
                    + "diagonalise. Or specify state and driving that is coupled "
                    + "to a given manifold to see coupling strengths."
                )

        # ========= FIND LASER COUPLINGS (END) =======

        indexOfCoupledState = self.indexOfCoupledState
        self.eFieldList = eFieldList

        self.y = []
        self.highlight = []
        self.composition = []

        if progressOutput:
            print("Finding eigenvectors...")
        progress = 0.0
        for eField in eFieldList:
            if progressOutput:
                progress += 1.0
                sys.stdout.write(
                    "\r%d%%" % (float(progress) / float(len(eFieldList)) * 100)
                )
                sys.stdout.flush()

            # full Stark Hamiltonian at this field value
            m = self.mat1 + self.mat2 * eField

            ev, egvector = eigh(m)

            self.y.append(ev)
            if drivingFromState[0] < 0.1:
                # highlight = overlap with the originally specified state
                sh = []
                comp = []
                for i in xrange(len(ev)):
                    sh.append(abs(egvector[indexOfCoupledState, i]) ** 2)
                    comp.append(
                        self._stateComposition2(
                            egvector[:, i],
                            upTo=upTo,
                            totalContributionMax=totalContributionMax,
                        )
                    )
                self.highlight.append(sh)
                self.composition.append(comp)
            else:
                # highlight = relative driving strength from drivingFromState
                sh = []
                comp = []
                for i in xrange(len(ev)):
                    sumCoupledStates = 0.0
                    for j in xrange(dimension):
                        sumCoupledStates += abs(
                            coupling[j] / self.maxCoupling
                        ) * abs(egvector[j, i] ** 2)
                    comp.append(
                        self._stateComposition2(
                            egvector[:, i],
                            upTo=upTo,
                            totalContributionMax=totalContributionMax,
                        )
                    )
                    sh.append(sumCoupledStates)
                self.highlight.append(sh)
                self.composition.append(comp)

        if progressOutput:
            print("\n")
        return
def exportData(self, fileBase, exportFormat="csv"):
"""
Exports StarkMap calculation data.
Only supported format (selected by default) is .csv in a
human-readable form with a header that saves details of calculation.
Function saves three files: 1) `filebase` _eField.csv;
2) `filebase` _energyLevels
3) `filebase` _highlight
For more details on the format, see header of the saved files.
Args:
filebase (string): filebase for the names of the saved files
without format extension. Add as a prefix a directory path
if necessary (e.g. saving outside the current working directory)
exportFormat (string): optional. Format of the exported file. Currently
only .csv is supported but this can be extended in the future.
"""
fmt = "on %Y-%m-%d @ %H:%M:%S"
ts = datetime.datetime.now().strftime(fmt)
commonHeader = "Export from Alkali Rydberg Calculator (ARC) %s.\n" % ts
commonHeader += "\n *** Stark Map for %s %s m_j = %d/2. ***\n\n" % (
self.atom.elementName,
printStateString(self.n, self.l, self.j),
round(2.0 * self.mj),
)
commonHeader += (
" - Included states - principal quantum number (n) range [%d-%d].\n"
% (self.nMin, self.nMax)
)
commonHeader += (
" - Included states with orbital momentum (l) in range [%d,%d] (i.e. %s-%s).\n"
% (0, self.maxL, printStateLetter(0), printStateLetter(self.maxL))
)
commonHeader += (
" - Calculated in manifold where total spin angular momentum is s = %.1d\n"
% (self.s)
)
if self.drivingFromState[0] < 0.1:
commonHeader += (
" - State highlighting based on the relative contribution \n"
+ " of the original state in the eigenstates obtained by diagonalization."
)
else:
commonHeader += (
" - State highlighting based on the relative driving strength \n"
+ " to a given energy eigenstate (energy level) from state\n"
+ " %s m_j =%d/2 with polarization q=%d.\n"
% (
printStateString(*self.drivingFromState[0:3]),
round(2.0 * self.drivingFromState[3]),
self.drivingFromState[4],
)
)
if exportFormat == "csv":
print("Exporting StarkMap calculation results as .csv ...")
commonHeader += " - Export consists of three (3) files:\n"
commonHeader += " 1) %s,\n" % (
fileBase + "_eField." + exportFormat
)
commonHeader += " 2) %s,\n" % (
fileBase + "_energyLevels." + exportFormat
)
commonHeader += " 3) %s.\n\n" % (
fileBase + "_highlight." + exportFormat
)
filename = fileBase + "_eField." + exportFormat
np.savetxt(
filename,
self.eFieldList,
fmt="%.18e",
delimiter=", ",
newline="\n",
header=(commonHeader + " - - - eField (V/m) - - -"),
comments="# ",
)
print(" Electric field values (V/m) saved in %s" % filename)
filename = fileBase + "_energyLevels." + exportFormat
headerDetails = " NOTE : Each row corresponds to eigenstates for a single specified electric field"
np.savetxt(
filename,
self.y,
fmt="%.18e",
delimiter=", ",
newline="\n",
header=(
commonHeader + " - - - Energy (GHz) - - -\n" + headerDetails
),
comments="# ",
)
print(
" Lists of energies (in GHz relative to ionisation) saved in %s"
% filename
)
filename = fileBase + "_highlight." + exportFormat
np.savetxt(
filename,
self.highlight,
fmt="%.18e",
delimiter=", ",
newline="\n",
header=(
commonHeader
+ " - - - Highlight value (rel.units) - - -\n"
+ headerDetails
),
comments="# ",
)
print(" Highlight values saved in %s" % filename)
print("... data export finished!")
else:
raise ValueError("Unsupported export format (.%s)." % format)
    def plotLevelDiagram(
        self,
        units="cm",
        highlightState=True,
        progressOutput=False,
        debugOutput=False,
        highlightColour="red",
        addToExistingPlot=False,
    ):
        r"""
        Makes a plot of a stark map of energy levels

        To save this plot, see :obj:`savePlot`. To print this plot see
        :obj:`showPlot`. Pointers (handles) towards matplotlib figure
        and axis used are saved in :obj:`fig` and :obj:`ax` variables
        respectively.

        Args:
            units (:obj:`char`,optional): possible values {'*cm*','GHz','eV'};
                [case insensitive] if the string contains 'cm' (default) Stark
                diagram will be plotted in energy units cm :math:`{}^{-1}`; if
                value is 'GHz', Stark diagram will be plotted as energy
                :math:`/h` in units of GHz; if the value is 'eV', Stark diagram
                will be plotted as energy in units eV.
            highlightState (:obj:`bool`, optional): False by default. If
                True, scatter plot colour map will map in red amount of
                original state for the given eigenState
            progressOutput (:obj:`bool`, optional): if True prints the
                progress of calculation; Set to False by default.
            debugOutput (:obj:`bool`, optional): if True prints additional
                information useful for debugging. Set to False by default.
            highlightColour (:obj:`str`, optional): colour used for the
                strongly-highlighted end of the colour map. "red" by default.
            addToExistingPlot (:obj:`bool`, optional): if True adds points to
                existing old plot. Note that then interactive plotting
                doesn't work. False by default.
        """
        # Colour map from light grey (no character of the tracked state)
        # through highlightColour to black.
        rvb = LinearSegmentedColormap.from_list(
            "mymap", ["0.9", highlightColour, "black"]
        )
        # for back-compatibility with versions <= 3.0.11
        # where units were chosen as integer 1 or 2
        if not isinstance(units, str):
            units = ["ev", "ghz", "cm"][units - 1]
        # Select energy scale; eigenenergies self.y are stored in GHz,
        # so scaleFactor converts GHz -> chosen display units.
        if units.lower() == "ev":
            self.units = "eV"
            self.scaleFactor = 1e9 * C_h / C_e
            Elabel = ""
        elif units.lower() == "ghz":
            self.units = "GHz"
            self.scaleFactor = 1
            Elabel = "/h"
        elif "cm" in units.lower():
            self.units = "cm$^{-1}$"
            self.scaleFactor = 1e9 / (C_c * 100)
            Elabel = "/(h c)"
        # NOTE(review): an unrecognised `units` string leaves Elabel and
        # self.scaleFactor unset, failing later with NameError — confirm
        # whether an explicit ValueError would be preferable here.
        self.addToExistingPlot = addToExistingPlot
        if progressOutput:
            print("plotting...")
        originalState = self.basisStates[self.indexOfCoupledState]
        n = originalState[0]
        l = originalState[1]
        j = originalState[2]
        # Reuse the existing figure only when explicitly requested.
        existingPlot = False
        if self.fig == 0 or not addToExistingPlot:
            if self.fig != 0:
                plt.close()
            self.fig, self.ax = plt.subplots(1, 1, figsize=(11.0, 5))
        else:
            existingPlot = True
        # Flatten (field, energy, highlight) triples for scatter plotting.
        eFieldList = []
        y = []
        yState = []
        for br in xrange(len(self.y)):
            for i in xrange(len(self.y[br])):
                eFieldList.append(self.eFieldList[br])
                y.append(self.y[br][i])
                yState.append(self.highlight[br][i])
        yState = np.array(yState)
        # Plot weakly-highlighted points first so strong ones end up on top.
        sortOrder = yState.argsort(kind="heapsort")
        eFieldList = np.array(eFieldList)
        y = np.array(y)
        eFieldList = eFieldList[sortOrder]
        y = y[sortOrder]
        yState = yState[sortOrder]
        if not highlightState:
            self.ax.scatter(
                eFieldList / 100.0,
                y * self.scaleFactor,
                s=1,
                color="k",
                picker=5,
            )
        else:
            cm = rvb
            cNorm = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)
            self.ax.scatter(
                eFieldList / 100,
                y * self.scaleFactor,
                c=yState,
                s=5,
                norm=cNorm,
                cmap=cm,
                lw=0,
                picker=5,
            )
            if not existingPlot:
                cax = self.fig.add_axes([0.91, 0.1, 0.02, 0.8])
                cb = matplotlib.colorbar.ColorbarBase(cax, cmap=cm, norm=cNorm)
                if self.drivingFromState[0] < 0.1:
                    # colourbar shows overlap with the original state
                    cb.set_label(
                        r"$|\langle %s | \mu \rangle |^2$"
                        % printStateStringLatex(n, l, j, s=self.s)
                    )
                else:
                    # colourbar shows relative driving strength
                    cb.set_label(r"$( \Omega_\mu | \Omega )^2$")
        self.ax.set_xlabel("Electric field (V/cm)")
        # Centre y-range on the zero-field energy of the tracked state.
        eV2GHz = C_e / C_h * 1e-9
        halfY = 300
        # GHz, half Y range
        upperY = (
            self.atom.getEnergy(n, l, j, s=self.s) * eV2GHz + halfY
        ) * self.scaleFactor
        lowerY = (
            self.atom.getEnergy(n, l, j, s=self.s) * eV2GHz - halfY
        ) * self.scaleFactor
        self.ax.set_ylabel(r"State energy, $E%s$ (%s)" % (Elabel, self.units))
        self.ax.set_ylim(lowerY, upperY)
        ##
        self.ax.set_xlim(min(eFieldList) / 100.0, max(eFieldList) / 100.0)
        return 0
def savePlot(self, filename="StarkMap.pdf"):
"""
Saves plot made by :obj:`plotLevelDiagram`
Args:
filename (:obj:`str`, optional): file location where the plot
should be saved
"""
if self.fig != 0:
self.fig.savefig(filename, bbox_inches="tight")
else:
print("Error while saving a plot: nothing is plotted yet")
return 0
def showPlot(self, interactive=True):
"""
Shows plot made by :obj:`plotLevelDiagram`
"""
if self.fig != 0:
if interactive:
if self.addToExistingPlot:
print(
"NOTE: Interactive plotting doesn't work with"
" addToExistingPlot option set to True"
"\nPlease turn off this option in plotLevelDiagram.\n"
)
else:
self.ax.set_title("Click on state to see state composition")
self.clickedPoint = 0
self.fig.canvas.draw()
self.fig.canvas.mpl_connect("pick_event", self._onPick)
plt.show()
else:
print("Error while showing a plot: nothing is plotted yet")
return 0
    def _onPick(self, event):
        # Matplotlib pick handler: finds the (field, energy) point closest
        # to the click — preferring the most highlighted state nearby —
        # marks it, and shows its state composition in the axes title.
        if isinstance(event.artist, matplotlib.collections.PathCollection):
            scaleFactor = self.scaleFactor

            # convert clicked coordinates back to V/m and GHz
            x = event.mouseevent.xdata * 100.0
            y = event.mouseevent.ydata / scaleFactor

            # nearest electric-field column (eFieldList is sorted)
            i = np.searchsorted(self.eFieldList, x)
            if i == len(self.eFieldList):
                i -= 1
            if (i > 0) and (
                abs(self.eFieldList[i - 1] - x) < abs(self.eFieldList[i] - x)
            ):
                i -= 1

            # nearest energy level within that column
            j = 0
            for jj in xrange(len(self.y[i])):
                if abs(self.y[i][jj] - y) < abs(self.y[i][j] - y):
                    j = jj

            # now choose the most highlighted state in this area
            distance = abs(self.y[i][j] - y) * 1.5
            for jj in xrange(len(self.y[i])):
                if abs(self.y[i][jj] - y) < distance and (
                    abs(self.highlight[i][jj]) > abs(self.highlight[i][j])
                ):
                    j = jj

            # move the blue square marker to the selected state
            if self.clickedPoint != 0:
                self.clickedPoint.remove()
            (self.clickedPoint,) = self.ax.plot(
                [self.eFieldList[i] / 100.0],
                [self.y[i][j] * scaleFactor],
                "bs",
                linewidth=0,
                zorder=3,
            )
            # show composition of the selected eigenstate in the title
            self.ax.set_title(
                ("[%s] = " % self.atom.elementName)
                + self._stateComposition(self.composition[i][j])
                + (" Colourbar value = %.2f" % self.highlight[i][j]),
                fontsize=11,
            )
            event.canvas.draw()
def _stateComposition(self, stateVector):
i = 0
totalContribution = 0
value = "$"
while (i < len(stateVector)) and (totalContribution < 0.95):
if i != 0 and stateVector[i][0] > 0:
value += "+"
value = (
value
+ ("%.2f" % stateVector[i][0])
+ self._addState(*self.basisStates[stateVector[i][1]])
)
totalContribution += abs(stateVector[i][0]) ** 2
i += 1
if totalContribution < 0.999:
value += "+\\ldots"
return value + "$"
def _stateComposition2(
self, stateVector, upTo=300, totalContributionMax=0.999
):
contribution = np.absolute(stateVector)
order = np.argsort(contribution, kind="heapsort")
index = -1
totalContribution = 0
mainStates = [] # [state Value, state index]
if upTo == -1:
for index in range(len(order)):
i = order[-index - 1]
mainStates.append([stateVector[i], i])
else:
while (index > -upTo) and (
totalContribution < totalContributionMax
):
i = order[index]
mainStates.append([stateVector[i], i])
totalContribution += contribution[i] ** 2
index -= 1
return mainStates
def _addState(self, n1, l1, j1, mj1):
if abs(self.s - 0.5) < 0.1:
# we have Alkali Atoms
return "|%s m_j=%d/2\\rangle" % (
printStateStringLatex(n1, l1, j1),
round(2 * mj1),
)
else:
# we have singlets or triplets states of divalent atoms
return "|%s m_j=%d\\rangle" % (
printStateStringLatex(n1, l1, j1, s=self.s),
round(mj1),
)
    def getPolarizability(
        self,
        maxField=1.0e10,
        showPlot=False,
        debugOutput=False,
        minStateContribution=0.0,
    ):
        r"""
        Returns the polarizability of the state (set during the
        initialization process).

        Fits offset of the energy level of the state to
        :math:`\frac{1}{2} \alpha_0 E^2`, where
        :math:`E` is the applied static electric field,
        and returns fitted value :math:`\alpha_0`

        Parameters:
            maxField (:obj:`float`, optional):
                maximum field (in V/m) to be
                used for fitting the polarizability. By default, max field
                is very large, so it will use eigenvalues calculated in the
                whole range.
            showPlot (:obj:`bool`, optional):
                shows plot of calculated
                eigenValues of the given state (dots), and the fit (solid
                line) for extracting polarizability
            debugOutput (:obj:`bool`, optional):
                if True prints additional
                information useful for debugging. Set to false by default.
            minStateContribution (:obj:`float`, optional):
                minimum highlight value an eigenstate must have for it to
                be included in the fit. Default 0.0 (include all points).

        Returns:
            float: scalar polarizability in units of MHz cm :math:`^2` / V \
            :math:`^2`

        Raises:
            Exception: if diagonalisation was performed with
                `drivingFromState`, since then the highlight no longer
                tracks the original state.
        """
        if self.drivingFromState[0] != 0:
            raise Exception(
                "Program can only find Polarizability of the original "
                + "state if you highlight original state. You can do so by NOT "
                + "specifying drivingFromState in diagonalise function."
            )
        eFieldList = self.eFieldList
        yState = self.highlight
        y = self.y
        # zero-field energy of the tracked state, converted from eV to GHz
        originalState = self.basisStates[self.indexOfCoupledState]
        n = originalState[0]
        l = originalState[1]
        j = originalState[2]
        energyOfOriginalState = (
            self.atom.getEnergy(n, l, j, s=self.s) * C_e / C_h * 1e-9
        )  # in GHz
        if debugOutput:
            print("finding original state for each electric field value")
        # restrict the fit to electric fields below maxField
        stopFitIndex = 0
        while (
            stopFitIndex < len(eFieldList) - 1
            and eFieldList[stopFitIndex] < maxField
        ):
            stopFitIndex += 1
        # for each field value, pick the eigenstate with the largest
        # contribution (highlight) of the original state
        xOriginalState = []
        yOriginalState = []
        for ii in xrange(stopFitIndex):
            maxPortion = 0.0
            yval = 0.0
            jj = 0
            for jj in xrange(len(y[ii])):
                if yState[ii][jj] > maxPortion:
                    maxPortion = yState[ii][jj]
                    yval = y[ii][jj]
            # measure state energy relative to the original state
            if minStateContribution < maxPortion:
                xOriginalState.append(eFieldList[ii])
                yOriginalState.append(yval - energyOfOriginalState)
        xOriginalState = np.array(xOriginalState) / 100.0  # converts to V/cm
        yOriginalState = np.array(yOriginalState)  # in GHz
        # in GHz
        uppery = 5.0
        lowery = -5.0
        if debugOutput:
            print("found ", len(xOriginalState))
        if showPlot:
            self.fig, self.ax = plt.subplots(1, 1, figsize=(6.5, 3))
            self.ax.scatter(xOriginalState, yOriginalState, s=2, color="k")
            self.ax.set_xlabel("E field (V/cm)")
            self.ax.set_ylim(lowery, uppery)
            self.ax.set_ylabel(r"Energy/$h$ (GHz)")
            self.ax.set_xlim(xOriginalState[0], xOriginalState[-1])

        def polarizabilityFit(eField, offset, alpha):
            # quadratic Stark shift model: E(F) = offset - (1/2) alpha F^2
            return offset - 0.5 * alpha * eField**2

        try:
            popt, pcov = curve_fit(
                polarizabilityFit, xOriginalState, yOriginalState, [0, 0]
            )
        except Exception as ex:
            print(ex)
            print(
                "\nERROR: fitting energy levels for extracting polarizability\
                of the state failed. Please check the range of electric \
                fields where you are trying to fit polarizability and ensure\
                that there is only one state with continuous energy change\
                that has dominant contribution of the initial state.\n\n"
            )
            return 0
        if debugOutput:
            print(
                "Scalar polarizability = ", popt[1] * 1.0e3, " MHz cm^2 / V^2 "
            )
        # evaluate fitted curve (for the plot and for later inspection)
        y_fit = []
        for val in xOriginalState:
            y_fit.append(polarizabilityFit(val, popt[0], popt[1]))
        y_fit = np.array(y_fit)
        if showPlot:
            self.ax.plot(xOriginalState, y_fit, "r--")
            self.ax.legend(
                ("fitted model function", "calculated energy level"),
                loc=1,
                fontsize=10,
            )
            self.ax.set_ylim(min(yOriginalState), max(yOriginalState))
            plt.show()
        # keep fit inputs and result for later access
        self.fitX = xOriginalState
        self.fitY = yOriginalState
        self.fittedCurveY = y_fit
        return popt[1] * 1.0e3  # returned value is in MHz cm^2 / V^2
    def getState(
        self,
        state,
        electricField,
        minN,
        maxN,
        maxL,
        accountForAmplitude=0.95,
        debugOutput=False,
    ):
        r"""
        Returns basis states and coefficients that make up for a given electric
        field the eigenstate with largest contribution of the original state.

        Args:
            state (array): target basis state in format :math:`[n,\ell,j,m_j]`
                corresponding to the state whose composition we want to track
                as we apply the electric field
            electricField (float): applied DC electric field in units of V/m.
            minN (int): minimal principal quantum number to be taken for
                calculation of the Stark mixing
            maxN (int): maximal principal quantum number to be taken for
                calculation of the Stark mixing
            maxL (int): maximal orbital angular momentum of states that should
                be taken in calculation of the Stark mixing
            accountForAmplitude (float): optional, relative amplitude of state
                that should be reached with the subset of the eigen states
                returned. The returned eigen states will be sorted in the
                declining relative contribution to the final eigen state, and
                once total accounted amplitude of the state reaches 0.95,
                further output of additional small contribution of the other
                basis states to the final states will be suppressed. Default
                value of 0.95 will force output until basis state accounts
                for 95\% of the state amplitude.
            debugOutput (bool): optional, prints additional debug information
                if True. Default False.

        Returns:
            **array of states** in format [[n1, l1, j1, mj1], ...] and
            **array of complex coefficients** in format [c1, c2, ...]
            corresponding to the projections of the eigenstate (that has
            largest contribution of the original state in the given electric
            field) on the basis states,
            and **energy** of the found state in (eV)
        """
        # build the Stark-matrix basis around the requested state
        self.defineBasis(
            state[0], state[1], state[2], state[3], minN, maxN, maxL
        )

        # diagonalise H = H0 + H_Stark * E at the requested field
        m = self.mat1 + self.mat2 * electricField
        ev, egvector = eigh(m)

        # find which state in the electric field has strongest contribution
        # of the requested state?
        maxOverlap = 0
        eigenvectorIndex = 0
        for i in range(len(ev)):
            if abs(egvector[self.indexOfCoupledState, i]) ** 2 > maxOverlap:
                maxOverlap = abs(egvector[self.indexOfCoupledState, i]) ** 2
                eigenvectorIndex = i

        # eigenenergy converted from GHz to eV
        energy = ev[eigenvectorIndex] * 1e9 * C_h / C_e
        if debugOutput:
            print("Max overlap = %.3f" % maxOverlap)
            print(
                "Eigen energy (state index %d) = %.2f eV"
                % (eigenvectorIndex, energy)
            )
        # components of the selected eigenvector, ascending by magnitude
        contributions = egvector[:, eigenvectorIndex]
        sortedContributions = np.argsort(abs(contributions))

        if debugOutput:
            print("Maximum contributions to this state")
            for i in range(4):
                index = sortedContributions[-i - 1]
                print(contributions[index])
                print(self.basisStates[index])
            print("===========\n")

        # collect largest contributions until the requested total
        # amplitude (accountForAmplitude) has been covered
        i = 0
        coef = []
        contributingStates = []
        while accountForAmplitude > 0 and i < len(self.basisStates):
            index = sortedContributions[-i - 1]
            coef.append(contributions[index])
            accountForAmplitude -= abs(coef[-1]) ** 2
            contributingStates.append(self.basisStates[index])
            i += 1

        return contributingStates, coef, energy
# ================= Level plots, decays, cascades etc =======================
class LevelPlot:
    """
    Single atom level plots and decays (a Grotrian diagram, or term diagram)

    For an example see `Rydberg energy levels example snippet`_.

    .. _`Rydberg energy levels example snippet`:
        ./Rydberg_atoms_a_primer_notebook.html#Rydberg-Atom-Energy-Levels

    Args:
        atom (:obj:`arc.alkali_atom_functions.AlkaliAtom` or :obj:`arc.divalent_atom_functions.DivalentAtom`): ={
            :obj:`arc.alkali_atom_data.Lithium6`,
            :obj:`arc.alkali_atom_data.Lithium7`,
            :obj:`arc.alkali_atom_data.Sodium`,
            :obj:`arc.alkali_atom_data.Potassium39`,
            :obj:`arc.alkali_atom_data.Potassium40`,
            :obj:`arc.alkali_atom_data.Potassium41`,
            :obj:`arc.alkali_atom_data.Rubidium85`,
            :obj:`arc.alkali_atom_data.Rubidium87`,
            :obj:`arc.alkali_atom_data.Caesium`,
            :obj:`arc.divalent_atom_data.Strontium88`,
            :obj:`arc.divalent_atom_data.Calcium40`
            :obj:`arc.divalent_atom_data.Ytterbium174` }
            Alkali atom type whose levels we
            want to examine
    """

    def __init__(self, atomType):
        self.atom = atomType
        # space restriction for the diagram (set by makeLevels)
        self.nFrom = 0
        self.nTo = 0
        self.lFrom = 0
        self.lTo = 0
        self.sList = []
        # per-level data: x position, energy and [n, l, j, s] labels
        self.listX = []
        self.listY = []  # list of energies
        self.levelLabel = []
        # matplotlib handles; 0 until a plot is created
        self.fig = 0
        self.ax = 0
        # half-width of a plotted level bar (in x-axis units)
        self.width = 0.2
        # states picked interactively (see onpick2)
        self.state1 = [0, 0, 0]
        self.state2 = [0, -1, 0]
        self.transitionMatrix = []
        self.populations = []
        self.transitionMatrixWavelength3 = []
        # characterization of the graph
        self.spectraX = []
        self.spectraY = []
        self.spectraLine = []

    def makeLevels(self, nFrom, nTo, lFrom, lTo, sList=[0.5]):
        """
        Constructs energy level diagram in a given range

        Args:
            nFrom (int): minimal principal quantum number of the
                states we are interested in
            nTo (int): maximal principal quantum number of the
                states we are interested in
            lFrom (int): minimal orbital angular momentum
                of the states we are interested in
            lTo (int): maximal orbital angular momentum
                of the states we are interested in
            sList (float): optional, spin angular momentum. Default value
                of [0.5] corresponds to Alkali atoms. For Alkaline Earth it
                has to be specified. For divalent atoms one can plot either
                one spin state by setting for example `sList=[0]``,
                or both spin states `sList=[0,1]``
        """
        # divalent atoms must state the spin manifold(s) explicitly
        if (
            issubclass(type(self.atom), DivalentAtom)
            and abs(sList[0] - 0.5) < 0.1
        ):
            raise ValueError(
                "For divalent atoms requested spin state(s) have "
                "to be explicitly specified e.g. sList=[0] or "
                "sList=[0,1]"
            )
        # save local copy of the space restrictions
        self.nFrom = nFrom
        self.nTo = nTo
        self.lFrom = lFrom
        self.lTo = lTo
        self.sList = sList
        # find all the levels within this space restrictions
        xPositionOffset = 0
        for s in sList:
            n = max(self.nFrom, self.atom.groundStateN)
            while n <= nTo:
                l = lFrom
                if l == 0 and s == 1 and n == self.atom.groundStateN:
                    # the ground S state exists only as a singlet,
                    # so skip it in the triplet (s == 1) manifold
                    l += 1
                while l <= min(lTo, n - 1):
                    for j in np.linspace(l - s, l + s, round(2 * s + 1)):
                        if j > -0.1:
                            self.listX.append(l - lFrom + xPositionOffset)
                            self.listY.append(self.atom.getEnergy(n, l, j, s=s))
                            self.levelLabel.append([n, l, j, s])
                    l = l + 1
                n += 1
            # if user requested principal quantum number below the
            # ground state principal quantum number
            # add those L states that are higher in energy then the ground state
            for state in self.atom.extraLevels:
                if (
                    state[1] <= lTo
                    and state[0] >= self.nFrom
                    and (len(state) == 3 or state[3] == s)
                ):
                    # last line means: either is Alkali, when we don't need to
                    # check the spin, or it's divalent, when we do need to check
                    # the spin
                    self.listX.append(state[1] - lFrom + xPositionOffset)
                    self.listY.append(
                        self.atom.getEnergy(state[0], state[1], state[2], s=s)
                    )
                    self.levelLabel.append([state[0], state[1], state[2], s])
            # place the next spin manifold to the right of the current one
            xPositionOffset += lTo + 1 - lFrom

    def makeTransitionMatrix(
        self, environmentTemperature=0.0, printDecays=True
    ):
        """
        Builds the matrix of transition rates between all plotted levels.

        Args:
            environmentTemperature (float): optional, temperature (K)
                passed to getTransitionRate. 0.0 by default.
            printDecays (bool): optional, if True prints decay times of
                the states. True by default.
        """
        self.transitionMatrix = []
        for i in xrange(len(self.levelLabel)):
            state1 = self.levelLabel[i]
            transitionVector = []
            # decay of the state
            decay = 0.0
            for state2 in self.levelLabel:
                # dipole selection rules: delta l = +-1 and delta j <= 1
                dipoleAllowed = (abs(state1[1] - state2[1]) == 1) and (
                    abs(state1[2] - state2[2]) <= 1.01
                )
                if dipoleAllowed:
                    # decay to this state
                    rate = self.atom.getTransitionRate(
                        state2[0],
                        state2[1],
                        state2[2],
                        state1[0],
                        state1[1],
                        state1[2],
                        temperature=environmentTemperature,
                    )
                    transitionVector.append(rate)
                    # decay from this state
                    rate = self.atom.getTransitionRate(
                        state1[0],
                        state1[1],
                        state1[2],
                        state2[0],
                        state2[1],
                        state2[2],
                        temperature=environmentTemperature,
                    )
                    decay = decay - rate
                else:
                    transitionVector.append(0.0)
            # diagonal element: total decay rate out of state i (negative)
            transitionVector[i] = decay
            if printDecays:
                print("Decay time of ")
                printStateString(state1[0], state1[1], state1[2])
                if decay < -1e-20:
                    print("\t is\t", -1.0e9 / decay, " ns")
            self.transitionMatrix.append(transitionVector)
        # NOTE(review): the return value of np.array(...) below is discarded;
        # np.transpose accepts the nested list directly — confirm intent.
        np.array(self.transitionMatrix)
        self.transitionMatrix = np.transpose(self.transitionMatrix)

    def drawSpectra(self):
        """
        Computes transition wavelengths and rates between all pairs of
        plotted levels and stores them in spectraX/spectraY/spectraLine.
        """
        self.fig, self.ax = plt.subplots(1, 1, figsize=(16, 5))
        lineWavelength = []
        lineStrength = []
        lineName = []
        i = 0
        while i < len(self.levelLabel):
            j = 0
            while j < len(self.levelLabel):
                if i != j:
                    wavelength = self.atom.getTransitionWavelength(
                        self.levelLabel[i][0],
                        self.levelLabel[i][1],
                        self.levelLabel[i][2],
                        self.levelLabel[j][0],
                        self.levelLabel[j][1],
                        self.levelLabel[j][2],
                    )
                    intensity = self.atom.getTransitionRate(
                        self.levelLabel[i][0],
                        self.levelLabel[i][1],
                        self.levelLabel[i][2],
                        self.levelLabel[j][0],
                        self.levelLabel[j][1],
                        self.levelLabel[j][2],
                    )
                    # wavelength stored in nm
                    lineWavelength.append(abs(wavelength) * 1.0e9)
                    lineStrength.append(abs(intensity))
                    lineName.append(
                        printStateString(
                            self.levelLabel[i][0],
                            self.levelLabel[i][1],
                            self.levelLabel[i][2],
                        )
                        + " -> "
                        + printStateString(
                            self.levelLabel[j][0],
                            self.levelLabel[j][1],
                            self.levelLabel[j][2],
                        )
                    )
                j = j + 1
            i = i + 1
        self.spectraX = np.copy(lineWavelength)
        self.spectraY = np.copy(lineStrength)
        self.spectraLine = np.copy(lineName)

    def drawSpectraConvoluted(
        self, lowerWavelength, higherWavelength, points, gamma
    ):
        """
        Plots the spectrum convoluted with a Lorentzian profile.

        Args:
            lowerWavelength (float): lower wavelength limit (nm)
            higherWavelength (float): upper wavelength limit (nm)
            points (int): number of points in the plotted curve
            gamma (float): Lorentzian width parameter (nm)
        """
        wavelengths = np.linspace(lowerWavelength, higherWavelength, points)
        spectra = np.zeros(points)
        i = 0
        while i < len(wavelengths):
            value = 0
            j = 0
            while j < len(self.spectraX):
                # Lorentzian contribution of line j at wavelength i
                value = value + self.spectraY[j] * gamma / (
                    (self.spectraX[j] - wavelengths[i]) ** 2 + gamma**2
                )
                j = j + 1
            spectra[i] = value
            i = i + 1
        self.ax.plot(wavelengths, spectra, "g-")

    def showSpectra(self, saveInFile="", showTransitionPoints=True):
        """
        Shows the spectra plot prepared by drawSpectra /
        drawSpectraConvoluted; optionally saves it to a file.

        Args:
            saveInFile (str): optional, filename to save the figure to
                ("" by default, i.e. no file is saved)
            showTransitionPoints (bool): optional, if True individual
                transitions are marked as clickable red points (see
                onpick3). True by default.
        """
        if showTransitionPoints:
            self.ax.plot(self.spectraX, self.spectraY, "ro", picker=5)
        self.ax.set_xlabel("Wavelength (nm)")
        self.ax.set_ylabel("Intensity (arb.un)")
        self.fig.subplots_adjust(right=0.95, left=0.1)
        # self.ax.set_xlim(300,600)
        self.fig.canvas.mpl_connect("pick_event", self.onpick3)
        if saveInFile != "":
            self.fig.savefig(saveInFile)
        plt.show()

    def drawLevels(self, units="eV"):
        r"""
        Draws a level diagram plot

        Arg:
            units (:obj:`char`,optional): possible values {'eV','*cm*','GHz'};
                [case insensitive] if the value is 'eV' (default), Stark
                diagram will be plotted as energy in units eV; if the string
                contains 'cm' Stark diagram will be plotted in energy units cm
                :math:`{}^{-1}`; if value is 'GHz', Stark diagram will be
                plotted as energy :math:`/h` in units of GHz;
        """
        self.fig, self.ax = plt.subplots(1, 1, figsize=(9.0, 11.5))
        # energies are stored in eV; scaleFactor converts to chosen units
        if units.lower() == "ev":
            self.scaleFactor = 1
            self.units = "eV"
        elif units.lower() == "ghz":
            self.scaleFactor = C_e / C_h * 1e-9
            self.units = "GHz"
        elif "cm" in units.lower():
            self.scaleFactor = C_e / (C_h * C_c * 100)
            self.units = "cm$^{-1}$"
        i = 0
        while i < len(self.listX):
            # horizontal bar for each level
            self.ax.plot(
                [self.listX[i] - self.width, self.listX[i] + self.width],
                [
                    self.listY[i] * self.scaleFactor,
                    self.listY[i] * self.scaleFactor,
                ],
                "b-",
                picker=True,
            )
            # mark populated levels, if populations were provided
            if i < len(self.populations) and (self.populations[i] > 1e-3):
                self.ax.plot(
                    [self.listX[i]],
                    [self.listY[i] * self.scaleFactor],
                    "ro",
                    alpha=self.populations[i],
                )
            i = i + 1
        # Y AXIS
        self.listX = np.array(self.listX)
        self.ax.set_ylabel("Energy (%s)" % self.units)
        self.ax.set_xlim(-0.5 + np.min(self.listX), np.max(self.listX) + 0.5)
        # X AXIS
        majorLocator = MultipleLocator(1)
        self.ax.xaxis.set_major_locator(majorLocator)
        tickNames = []
        for s in self.sList:
            sNumber = round(2 * s + 1)
            for l in xrange(self.lFrom, self.lTo + 1):
                # spectroscopic term labels, e.g. $^2 S$
                tickNames.append("$^%d %s$" % (sNumber, printStateLetter(l)))
        tickNum = len(tickNames)
        self.fig.canvas.draw()
        self.ax.set_xticks(np.arange(tickNum))
        self.ax.set_xticklabels(tickNames)
        self.ax.set_xlim(-0.5 + np.min(self.listX), np.max(self.listX) + 0.5)
        # TITLE
        self.ax.set_title(
            "%s: $n \\in [%d,%d]$"
            % (self.atom.elementName, self.nFrom, self.nTo)
        )

    def showPlot(self):
        """
        Shows a level diagram plot
        """
        # connect interactive two-click transition inspection (onpick2)
        self.fig.canvas.mpl_connect("pick_event", self.onpick2)
        self.state1[0] = -1  # initialise for picking
        plt.show()

    def findState(self, x, y):
        """
        Returns the [n, l, j, s] label of the plotted level closest to
        the point (x, y) given in plot coordinates.
        """
        # convert plotted energy back to eV before distance comparison
        y /= self.scaleFactor
        distance = 100000000.0
        state = [0, 0, 0]
        i = 0
        while i < len(self.listX):
            dx = self.listX[i] - x
            dy = self.listY[i] - y
            dist = sqrt(dx * dx + dy * dy)
            if dist < distance:
                distance = dist
                state = self.levelLabel[i]
            i = i + 1
        return state

    def findStateNo(self, state):
        """
        Returns index of the given [n, l, j] state in the level basis,
        or -1 (with an error printout) if it cannot be found.
        """
        # returns no of the given state in the basis
        i = 0
        while i < len(self.levelLabel):
            if (
                (self.levelLabel[i][0] == state[0])
                and (self.levelLabel[i][1] == state[1])
                and (abs(self.levelLabel[i][2] - state[2]) < 0.01)
            ):
                return i
            i = i + 1
        print("Error: requested state ")
        print(state)
        print("could not be found!")
        return -1

    def findLine(self, x, y):
        """
        Returns the label of the spectral line closest to (x, y).
        """
        distance = 1.0e40
        line = ""
        i = 0
        while i < len(self.spectraLine):
            dx = self.spectraX[i] - x
            dy = self.spectraY[i] - y
            dist = sqrt(dx * dx + dy * dy)
            if dist < distance:
                distance = dist
                line = self.spectraLine[i]
            i = i + 1
        return line

    def onpick2(self, event):
        # Pick handler for the level diagram: the first click selects the
        # starting state, the second shows transition wavelength and energy.
        if isinstance(event.artist, matplotlib.lines.Line2D):
            thisline = event.artist
            xdata = thisline.get_xdata()
            ydata = thisline.get_ydata()
            # NOTE(review): (xdata[0] + xdata[0]) / 2.0 equals xdata[0];
            # possibly (xdata[0] + xdata[1]) / 2.0 was intended — confirm.
            state = self.findState((xdata[0] + xdata[0]) / 2.0, ydata[0])
            if self.state1[0] == -1:
                if state[1] != self.state2[1] or state[0] != self.state2[0]:
                    # first click: remember the starting state
                    self.state1 = state
                    self.ax.set_title(
                        r"$%s \rightarrow$ "
                        % (
                            printStateStringLatex(
                                state[0], state[1], state[2], s=state[3]
                            )
                        )
                    )
                    self.state2 = [-1, -1, -1]
            else:
                title = ""
                if (state[0] != self.state1[0]) or (state[1] != self.state1[1]):
                    # second click on a different state: format transition
                    title = r"$ %s \rightarrow %s $ " % (
                        printStateStringLatex(
                            self.state1[0],
                            self.state1[1],
                            self.state1[2],
                            s=self.state1[3],
                        ),
                        printStateStringLatex(
                            state[0], state[1], state[2], s=state[3]
                        ),
                    )
                    transitionEnergy = (
                        self.atom.getTransitionFrequency(
                            self.state1[0],
                            self.state1[1],
                            self.state1[2],
                            state[0],
                            state[1],
                            state[2],
                            s=self.state1[3],
                            s2=state[3],
                        )
                        * C_h
                        / C_e
                    )  # in eV
                    title = title + (
                        " %sm (%s%s)"
                        % (
                            formatNumberSI(
                                self.atom.getTransitionWavelength(
                                    self.state1[0],
                                    self.state1[1],
                                    self.state1[2],
                                    state[0],
                                    state[1],
                                    state[2],
                                    s=self.state1[3],
                                    s2=state[3],
                                )
                            ),
                            formatNumberSI(transitionEnergy * self.scaleFactor),
                            self.units,
                        )
                    )
                self.ax.set_title(title)
                # reset so the next click starts a new pair selection
                self.state1 = [-1, 0, 0]
                self.state2 = state
            event.canvas.draw()

    def onpick3(self, event):
        # Pick handler for the spectra plot: shows the clicked line's label.
        if isinstance(event.artist, matplotlib.lines.Line2D):
            thisline = event.artist
            xdata = thisline.get_xdata()
            ydata = thisline.get_ydata()
            ind = event.ind
            print(ind[0])
            line = self.findLine(xdata[ind][0], ydata[ind][0])
            self.ax.set_title(line)
        event.canvas.draw()
class AtomSurfaceVdW:
r"""
Calculates atom-surface Van der Waals interaction.
Energy of atom state :math:`|i\rangle` at distance :math:`z`
    from the surface of material is offset in energy by
:math:`V_{\rm VdW}` at small distances
:math:`z\ll\rm{min}(\lambda_{i,j})` ,
where :math:`\lambda_{i,j}` are the wavelengths from atom state
:math:`|i \rangle` to all strongly-coupled states :math:`j` ,
due to (unretarded) atom-surface interaction, also called
Van der Waals interaction.
The interaction potential can be expressed as
:math:`V_{\rm VdW} = - \frac{C_3}{z^3}`
This class calculates :math:`C_3` for individual states
:math:`|i\rangle`.
See example `atom-surface calculation snippet`_.
.. _`atom-surface calculation snippet`:
./ARC_3_0_introduction.html#Atom-surface-van-der-Waals-interactions-(C3-calculation)
Args:
atom (:obj:`AlkaliAtom` or :obj:`DivalentAtom`): specified
Alkali or Alkaline Earth atom whose interaction with surface
we want to explore
material (from :obj:`arc.materials`): specified surface material
Note:
        To find the frequency shift of a transition
:math:`|\rm a \rangle\rightarrow |\rm b \rangle`,
one needs to calculate difference in
:math:`C_3` coefficients obtained for the two states
:math:`|\rm a\rangle` and :math:`|\rm b\rangle` respectively.
See example TODO (TO-DO)
"""
def __init__(self, atom, surfaceMaterial=None):
UsedModulesARC.arc3_0_methods = True
self.atom = atom
if surfaceMaterial is None:
print(
"NOTE: No surface material specified. "
"Assuming perfect mirror."
)
self.surfaceMaterial = surfaceMaterial
def getC3contribution(self, n1, l1, j1, n2, l2, j2, s=0.5):
r"""
Contribution to :math:`C_3` of :math:`|n_1, \ell_1, j_1\rangle` state
due to dipole coupling to :math:`|n_2, \ell_2, j_2\rangle` state.
Calculates
:math:`\frac{1}{4\pi\varepsilon_0}\
\frac{ n(\omega_{\rm ab})^2 - 1}{ n(\omega_{\rm ab})^2 + 1}\
\frac{
\left| \langle a| D_x | b \rangle \right|^2 \
+ \left| \langle a | D_y | b \rangle \right|^2 + \
2 \cdot \left|\langle a |D_z| b \rangle \right|^2}{16}`
where :math:`|{\rm a}\rangle \equiv |n_1, \ell_1, j_1\rangle` ,
:math:`|{\rm b}\rangle \equiv |n_2, \ell_2, j_2\rangle`,
:math:`\mathbf{D} \equiv e \cdot \mathbf{r} \
\equiv \hat{x} D_x + \hat{y} D_y\
+ \hat{z} D_z` is atomic dipole operator and :math:`n(\omega_{\rm ab})`
is refractive index of the considered surface at transition frequency
:math:`\omega_{\rm ab}` .
Args:
n1 (int): principal quantum number of state 1
l1 (int): orbital angular momentum of state 1
j1 (float): total angular momentum of state 1
n2 (int): principal quantum number od state 2
l2 (int): orbital angular momentum of state 2
j2 (float): total angular momentum of state 2
s (float): optional, spin angular momentum of states. Default value
of 0.5 is correct for AlkaliAtoms. For DivalentAtom it
has to be explicitly stated
Returns:
float, float, float:
contribution to VdW coefficient :math:`C_3` ,\
estimated error :math:`\delta C_3` \
(in units of :math:`{\rm J}\cdot{\rm m}^3`), and refractive \
index :math:`n` of the surface material for the given \
transition.
Warning:
This is just contribution of one transition to the level shift
of a particular state. To calculate total level shift, check
:obj:`AtomSurfaceVdW.getStateC3`
"""
result = 0.0
error = 0.0
hasLiteratureValue, dme, info = self.atom.getLiteratureDME(
n1, l1, j1, n2, l2, j2, s=0.5
)
if hasLiteratureValue:
dme_reduced_J = self.atom.getReducedMatrixElementJ(
n1, l1, j1, n2, l2, j2, s=0.5
)
relativeError = abs(info[1] / dme_reduced_J)
else:
relativeError = (
0.05 # 5 percent for calculated values (note: estimate only!)
)
# sum over mj1
for mj1 in np.linspace(-j1, j1, round(2 * j1 + 1)):
# calculate sum_mj2 |<j1,mj1|Dx|j2,mj2>|^2 + |<j1,mj1|Dy|j2,mj2>|^2 + 2* |<j1,mj1|Dz|j2,mj2>|^2
# which is equal to (check!) |<j1,mj1|D+|j2,mj2>|^2 + |<j1,mj1|D-|j2,mj2>|^2 + 2* |<j1,mj1|Dz|j2,mj2>|^2
for mj2 in np.linspace(-j2, j2, round(2 * j2 + 1)):
for q in [-1, +1]:
result += (
abs(
self.atom.getDipoleMatrixElement(
n1, l1, j1, mj1, n2, l2, j2, mj2, q, s=s
)
* C_e
* physical_constants["Bohr radius"][0]
)
** 2
)
error += (
2
* abs(
self.atom.getDipoleMatrixElement(
n1, l1, j1, mj1, n2, l2, j2, mj2, q, s=s
)
* C_e
* physical_constants["Bohr radius"][0]
)
** 2
* relativeError
)
# for q = 0
q = 0
result += (
2
* abs(
self.atom.getDipoleMatrixElement(
n1, l1, j1, mj1, n2, l2, j2, mj2, q
)
* C_e
* physical_constants["Bohr radius"][0]
)
** 2
)
error += (
2
* abs(
self.atom.getDipoleMatrixElement(
n1, l1, j1, mj1, n2, l2, j2, mj2, q
)
* C_e
* physical_constants["Bohr radius"][0]
)
** 2
* relativeError
)
materialFactor = 1.0
n = 10000
# effectively infinite refractive index would correspond to perfect
# reflector (perfect mirror)
if self.surfaceMaterial is not None:
wavelength = (
np.abs(
self.atom.getTransitionWavelength(
n1, l1, j1, n2, l2, j2, s=s, s2=s
)
)
* 1e6
) # in mum
n = self.surfaceMaterial.getN(vacuumWavelength=wavelength)
materialFactor = (n**2 - 1.0) / (n**2 + 1.0)
# include factor of 16
result = result / (2 * j1 + 1) / 16
error = error / (2 * j1 + 1) / 16
C3 = materialFactor * 1 / (4.0 * pi * epsilon_0) * result
error = materialFactor * 1 / (4.0 * pi * epsilon_0) * error
return C3, error, n # C3 and error in units of J m^3
def getStateC3(self, n, l, j, coupledStatesList, s=0.5, debugOutput=False):
r"""
Van der Waals atom-surface interaction coefficient for
a given state (:math:`C_3` in units of
:math:`\mathrm{J}\cdot\mathrm{m}^3` )
Args:
n (int): principal quantum number of the state
l (int): orbital angular momentum of the state
j (int): total angular momentum of state
coupledStatesList (array): array of states that are strongly
dipole-coupled to the initial state, whose contribution
to :math:`C_3` will be take into account. Format
`[[n1,l1,j1],...]`
s (float, optional): total spin angular momentum for the considered
state. Default value of 0.5 is correct for `AlkaliAtoms`, but
it has to be explicitly specifiied for `DivalentAtom`.
debugOutput (bool, optional): prints additional output information,
False by default.
Returns:
float, float:
:math:`C_3` (in units of :math:`{\rm J}\cdot {\rm m}^3` ),
estimated error :math:`\delta C_3`
"""
if debugOutput:
print(
"%s ->\tC3 contr. (kHz mum^3) \tlambda (mum)\tn"
% (printStateString(n, l, j, s=s))
)
totalShift = 0
sumSqError = 0
for state in coupledStatesList:
c3, err, refIndex = self.getC3contribution(
n, l, j, state[0], state[1], state[2], s=s
)
if debugOutput:
print(
"-> %s\t%.3f +- %.3f \t%.3f\t\t%.3f\n"
% (
printStateString(state[0], state[1], state[2], s=s),
c3 / C_h * (1e6) ** 3 * 1e-3,
err / C_h * (1e6) ** 3 * 1e-3,
self.atom.getTransitionWavelength(
n, l, j, state[0], state[1], state[2], s=s, s2=s
)
* 1e6,
refIndex,
)
)
totalShift += c3
sumSqError += err**2
error = np.sqrt(sumSqError)
if debugOutput:
print(
"= = = = = = \tTotal shift of %s\t= %.3f+-%.4f kHz mum^3\n"
% (
printStateString(n, l, j, s=s),
totalShift / C_h * (1e6) ** 3 * 1e-3,
error / C_h * (1e6) ** 3 * 1e-3,
)
)
return totalShift, error # in J m^3
class OpticalLattice1D:
    r"""
    Atom properties in optical lattices in 1D.

    See example `optical lattice calculations snippet`_.

    .. _`optical lattice calculations snippet`:
        ./ARC_3_0_introduction.html#Optical-lattice-calculations-(Bloch-bands,-Wannier-states...)

    Args:
        atom: one of AlkaliAtom or DivalentAtom
        trapWavenegth (float): wavelength of trapping laser light
            (in units of m)
    """

    energy = []
    """
    energy of states obtained by
    :obj:`OpticalLattice1D.diagonalise` method
    in format `[[energies for quasimomentum1 ], [energies for quasimomentum2 ], ...]`
    """

    quasimomentum = []
    """
    list of quasimomenta for which the energies of states were calculated
    by :obj:`OpticalLattice1D.diagonalise` method
    in format `[quasimomentum1, quasimomentum2, ...]`
    """

    savedBlochBand = []
    """
    list of saved eigen energy state compositions for each of the calculated
    quasimomenta for the selected index of the Bloch band
    in :obj:`OpticalLattice1D.diagonalise` method
    in format `[[eigen state decomposition for quasimomentum 1],
    [eigen state decomposition for quasimomentum 2], ...]`
    """

    trapPotentialDepth = 0
    """
    saves lattice trap potential depth for which calculation
    :obj:`OpticalLattice1D.diagonalise` was done
    """

    def __init__(self, atom, trapWavenegth):
        UsedModulesARC.arc3_0_methods = True
        self.atom = atom
        self.trapWavenegth = trapWavenegth
        # shadow the mutable class-level defaults with per-instance
        # containers, so that two lattice instances never share results
        self.energy = []
        self.quasimomentum = []
        self.savedBlochBand = []
        self.trapPotentialDepth = 0

    def getRecoilEnergy(self):
        """
        Recoil energy for atoms in given optical lattice

        Returns:
            float: recoil energy in units of J
        """
        # lattice period is half of the trapping-laser wavelength
        latticeConstant = self.trapWavenegth / 2
        Er = C_h**2 / (8 * self.atom.mass * latticeConstant**2)
        return Er

    def getTrappingFrequency(self, trapPotentialDepth):
        """
        Atom's trapping frequency for given trap depth

        Args:
            trapPotentialDepth (float): lattice depth (in units of J)

        Returns:
            float: trapping frequency (in Hz)
        """
        Er = self.getRecoilEnergy()
        # harmonic approximation around the bottom of a lattice site
        return 2.0 * Er / hbar * np.sqrt(trapPotentialDepth / Er)

    def _BlochFunction(self, x, stateVector, q, k=1.0):
        r"""
        Bloch wavefunctions

        Args:
            x (x): position (in units \2 pi/k, for default value of laser
                wavevector unit k=1, one full wavelength is 2\pi)
            stateVector: eigen vector obtained by diagonalisation of
                interaction Hamiltonian in a subspace given by the selected
                quasimomentum
            q (float): quasimomentum (in units of driving laser k)
            k (float): driving laser wavevector, defines units for momentum and
                distance;
                if k==1 (default value), reciprocal lattice momentum is 2,
                and the full range of quasimomentum is from -1 to +1;
                one full wavelength is then 2\pi.

        Returns:
            float:
        """
        index = len(stateVector) // 2 + 2  # align Bloch functions in phase
        angle = np.angle(stateVector[index])
        sign = np.exp(-1j * angle)
        temp = 0 + 0j
        # plane-wave expansion; basis state exp(i 2 l k x) has
        # index l + lLimit in stateVector
        for l in np.arange(-self.lLimit, self.lLimit + 1, 1):
            temp += (
                sign
                * stateVector[l + self.lLimit]
                * np.exp(1.0j * (2.0 * k * l + q) * x)
            )
        return temp

    def BlochWavefunction(
        self, trapPotentialDepth, quasimomentum, blochBandIndex
    ):
        r"""
        Bloch wavefunction as a **function** of 1D coordinate.

        Parameters:
            trapPotentialDepth (float):
                (in units of recoil energy
                :obj:`OpticalLattice1D.getRecoilEnergy`)
            quasimomentum (float):
                (in units of 2 \pi /
                :obj:`OpticalLattice1D.trapWavenegth`; note that
                reciprocal lattice momentum in this units is 2, and that
                full range of quasimomentum is from -1 to +1)
            blochBandIndex (int): index of the Bloch band
                (0 is lowest in energy)

        Returns:
            Bloch wavefunction as a **function** of coordinate (see call
            example below)

        Example:
            Returns Bloch wavefunction. Use as following::

                trapPotentialDepth = 40  # units of recoil energy
                quasimomentum = 0
                blochBandIndex = 0  # Bloch band lowest in energy is 0
                wf = lattice.BlochWavefunction(trapPotentialDepth,
                                               quasimomentum,
                                               blochBandIndex)
                wf(x)  # returns complex number corresponding to value of Bloch
                       # wavefunction at point x (coordinate given in units of
                       # 1/k where k = 2 \pi / trapWavenegth )
                       # by default k=1, so one full wavelength is 2\pi
        """
        # diagonalise for the requested quasimomentum, keeping (and then
        # restoring) any previously calculated results
        temp1 = self.energy
        temp2 = self.quasimomentum
        temp3 = self.savedBlochBand
        self.diagonalise(
            trapPotentialDepth, [quasimomentum], saveBandIndex=blochBandIndex
        )
        state = np.copy(self.savedBlochBand[0])

        self.energy = temp1
        # NOTE: fixed typo (was `self.quasimomenutm`, which left
        # self.quasimomentum clobbered by the temporary diagonalisation)
        self.quasimomentum = temp2
        self.savedBlochBand = temp3
        return lambda x: self._BlochFunction(x, state, quasimomentum)

    def defineBasis(self, lLimit=35):
        """
        Define basis for Bloch band calculations

        Bloch states are calculated summing up all relevant states
        with momenta in range
        `[-lLimit * 4 * pi /trapWavenegth, +lLimit * 4 * pi /trapWavenegth]`
        Note that factor of 4 occurs since potential lattice period is
        twice the `trapWavelength` for standing wave.

        Args:
            lLimit (integer): Optional, defines maximal momentum to be taken
                for calculation of Bloch States
                as `lLimit * 4 * pi / trapWavenegth` . By default set to 35.
        """
        self.lLimit = lLimit

    def _getLatticeHamiltonian(self, q, Vlat):
        """
        Lattice Hamiltonian for a given quasimomentum.

        Args:
            q (float): quasimomentum (in units of trapping laser k)
            Vlat (float): lattice depth (in units of recoil energy)

        Returns:
            scipy.sparse.csr_matrix: Hamiltonian in the plane-wave basis
            of dimension 2 * lLimit + 1
        """
        # assemble Hamiltonian as [[values],[columnIndex],[rowIndex]]
        hConstructor = [[], [], []]
        for l in np.arange(-self.lLimit, self.lLimit + 1, 1):
            # basis index: exp(2*l*k*x) state has index lLimit+l
            column = self.lLimit + l
            # standing-wave potential couples neighbouring plane waves
            if l - 1 >= -self.lLimit:
                hConstructor[0].append(-Vlat / 4.0)
                hConstructor[1].append(column)
                hConstructor[2].append(column - 1)
            if l + 1 <= self.lLimit:
                hConstructor[0].append(-Vlat / 4.0)
                hConstructor[1].append(column)
                hConstructor[2].append(column + 1)

            # diagonal term: kinetic energy,
            # with global energy offset (- Vlat / 2.) factored out
            hConstructor[0].append((2.0 * l + q) ** 2 + Vlat / 2.0)
            hConstructor[1].append(column)
            hConstructor[2].append(column)

        dimension = 2 * self.lLimit + 1
        hamiltonianQ = csr_matrix(
            (hConstructor[0], (hConstructor[1], hConstructor[2])),
            shape=(dimension, dimension),
        )
        return hamiltonianQ

    def diagonalise(
        self, trapPotentialDepth, quasimomentumList, saveBandIndex=None
    ):
        r"""
        Calculates energy levels (Bloch bands) for given `quasimomentumList`

        Energy levels and their quasimomentum are saved in internal variables
        `energy` and `quasimomentum`. Energies are saved in units of
        recoil energy, and quasimomentum in units of
        The optional parameter `saveBandIndex` specifies index of the Bloch
        band for which eigenvectors should be saved. If provided,
        eigenvectors for each value `quasimomentumList[i]` are saved in
        `savedBlochBand[i]`.

        Args:
            latticePotential (float): lattice depth formed
                by the standing wave of laser, with wavelength specified
                during initialisation of the lattice
                (in units of recoil energy).
            quasimomentumList (array): array of quasimomentum values for
                which energy levels will be calculated (in units of
                :math:`\hbar \cdot k`,
                where :math:`k` is trapping laser wavevector;
                since reciprocal lattice has twice the trapping laser
                wavevector due to standing wave, full range of
                quasimomentum is from -1 to +1)
            saveBandIndex (int): optional, default None. If provided,
                specifies for which Bloch band should the eigenvectors be
                also saved. `saveBlochBand=0` corresponds to lowest energy
                band.
        """
        self.energy = []
        self.quasimomentum = quasimomentumList
        self.savedBlochBand = []
        self.trapPotentialDepth = trapPotentialDepth
        for q in quasimomentumList:
            hamiltonianQ = self._getLatticeHamiltonian(q, trapPotentialDepth)
            ev, egvector = np.linalg.eig(hamiltonianQ.todense())
            egvector = np.transpose(np.array(egvector))
            # sort eigenstates in ascending energy order
            orderInEnergy = np.argsort(ev)
            ev = ev[orderInEnergy]
            egvector = egvector[orderInEnergy]
            self.energy.append(ev)
            if saveBandIndex is not None:
                self.savedBlochBand.append(egvector[saveBandIndex])

    def plotLevelDiagram(self):
        """
        Plots energy level diagram (Bloch bands).

        Based on diagonalisation of the lattice potential, plots discrete
        eigen energy spectra obtained for each value of the quasimomentum
        used in :obj:`OpticalLattice1D.diagonalise` method.

        Returns:
            matplotlib figure with Bloch bands
        """
        f = plt.figure(figsize=(6, 10))
        ax = f.add_subplot(1, 1, 1)
        for i, energyLevels in enumerate(self.energy):
            ax.plot(
                [self.quasimomentum[i]] * len(energyLevels),
                energyLevels,
                ".",
                color="0.8",
            )
        ax.set_xlabel(r"Quasimomentum, $q$ $(\hbar k)$")
        ax.set_ylabel(r"State energy, E ($E_{\rm r}$)")
        ax.set_ylim(-0.2, 50)
        ax.set_xlim(-1, 1)
        return f

    def getWannierFunction(self, x, latticeIndex=0, k=1):
        r"""
        Gives value at coordinate x of a Wannier function localized
        at given lattice index.

        Args:
            x (float): spatial coordinate (in units of :math:`2\pi/k` ; for
                default value of laser driving wavevector :math:`k=1` , one
                trapping wavelength is :math:`2\pi` ). Coordinate origin is
                at `latticeIndex=0` .
            latticeIndex (int): optional, lattice index at which the
                Wannier function is localised. By default 0.
            k (float): optional; laser driving wavevector, defines unit
                of length. Default value is 1, making one trapping laser
                wavelength equal to :math:`2\pi`
        """
        value = 0
        localizedAt = 2.0 * pi / k * latticeIndex / 2.0
        # last division by 2 is because lattice period is
        # 2 x smaller than wavelength of the driving laser
        for i in range(len(self.quasimomentum)):
            q = self.quasimomentum[i]
            value += np.exp(-1j * q * localizedAt) * self._BlochFunction(
                x, self.savedBlochBand[i], q, k=k
            )
        return value
class DynamicPolarizability:
    """
    Calculations of magic wavelengths and dynamic polarizability
    (scalar and tensor).

    Args:
        atom: alkali or alkaline element of choice
        n (int): principal quantum number of the selected state
        l (int): orbital angular momentum of the selected state
        j (float): total angular momentum of selected state
        s (float): optional, spin state of the atom. Default value of
            0.5 is correct for Alkali atoms, but it has to be explicitly
            specified for DivalentAtom.
    """

    def __init__(self, atom, n, l, j, s=0.5):
        UsedModulesARC.arc3_0_methods = True
        self.atom = atom
        self.n = n
        self.l = l
        self.j = j
        self.s = s

    def defineBasis(self, nMin, nMax):
        """
        Defines basis for calculation of dynamic polarizability

        Args:
            nMin (int): minimal principal quantum number of states to be
                taken into account for calculation
            nMax (int): maximal principal quantum number of states to be
                taken into account for calculation
        """
        self.nMin = nMin
        self.nMax = nMax
        self.basis = []
        self.lifetimes = []

        for n1 in np.arange(
            max(self.nMin, self.atom.groundStateN), self.nMax + 1
        ):
            # dipole coupling requires l' = l +- 1; for l == 0 the only
            # possibility is l' = l + 1
            lmin = self.l - 1
            if lmin < -0.1:
                lmin = self.l + 1
            for l1 in range(lmin, min(self.l + 2, n1)):
                # start from the smallest physically allowed j for this l'
                j1 = l1 - self.s
                if j1 < 0.1:
                    j1 += 1
                while j1 <= l1 + self.s + 0.1:
                    if self.__isDipoleCoupled(
                        self.n, self.l, self.j, n1, l1, j1
                    ):
                        # print([n1, l1, j1, self.s])
                        self.basis.append([n1, l1, j1, self.s])
                    j1 += 1

        # also include any dipole-coupled levels listed explicitly
        # by the atom (matching spin state, if one is specified)
        for state in self.atom.extraLevels:
            if (
                len(state) == 3 or abs(state[3] - self.s) < 0.1
            ) and self.__isDipoleCoupled(
                self.n, self.l, self.j, state[0], state[1], state[2]
            ):
                self.basis.append(state)

    def __isDipoleCoupled(self, n1, l1, j1, n2, l2, j2, s=0.5):
        # True for electric-dipole allowed transitions: delta l = 1 and
        # delta j <= 1, excluding the specifically forbidden j combinations
        # marked below
        if (
            not (
                abs(l1 - l2) != 1
                and (
                    (
                        abs(j1 - 0.5) < 0.1 and abs(j2 - 0.5) < 0.1
                    )  # j = 1/2 and j'=1/2 forbidden
                    or (
                        abs(j1) < 0.1 and abs(j2 - 1) < 0.1
                    )  # j = 0 and j'=1 forbidden
                    or (
                        abs(j1 - 1) < 0.1 and abs(j2) < 0.1
                    )  # j = 1 and j'=0 forbidden
                )
            )
            and not (abs(j1) < 0.1 and abs(j2) < 0.1)  # j = 0 and j'=0 forbidden
            and not (
                abs(l1) < 0.1 and abs(l2) < 0.1
            )  # l = 0 and l' = 0 is forbidden
        ):
            dl = abs(l1 - l2)
            dj = abs(j1 - j2)
            if dl == 1 and (dj < 1.1):
                return True
            else:
                return False
        return False

    def getPolarizability(
        self,
        driveWavelength,
        units="SI",
        accountForStateLifetime=False,
        mj=None,
    ):
        r"""
        Calculates of scalar, vector, tensor, core and pondermotive
        polarizability, and returns state corresponding to the closest
        transition resonance.

        Note that pondermotive polarisability is calculated as
        :math:`\alpha_P = e^2 / (2 m_e \omega^2)`, i.e. assumes that the
        definition of the energy shift in field :math:`E` is
        :math:`\frac{1}{2}\alpha_P E^2`. For more details check the
        preprint `arXiv:2007.12016`_ that introduced the update.

        .. _`arXiv:2007.12016`:
            https://arxiv.org/abs/2007.12016

        Args:
            driveWavelength (float): wavelength of driving field
                (in units of m)
            units (string): optional, 'SI' or 'a.u.' (equivalently 'au'),
                switches between SI units for returned result
                (:math:`Hz V^{-2} m^2` )
                and atomic units (":math:`a_0^3` "). Default 'SI'
            accountForStateLifetime (bool): optional, should we account
                for finite transition linewidths caused by finite state
                lifetimes. By default False.
            mj (float, optional): projection of total angular momentum;
                if given, only intermediate states with :math:`j_1 \geq |m_j|`
                are included in the sum.

        Returns:
            scalar, vector, and tensor, polarizabilities of the state
            specified, as well as the core, and ponderomotive
            polarizabilities of the atom, followed by the atomic state
            whose resonance is closest in energy. Returned units depend
            on `units` parameter (default SI).
        """
        # lifetimes are calculated lazily, only once, and cached
        if accountForStateLifetime and len(self.lifetimes) == 0:
            for state in self.basis:
                self.lifetimes.append(
                    self.atom.getStateLifetime(
                        state[0], state[1], state[2], s=self.s
                    )
                )

        driveEnergy = C_c / driveWavelength * C_h
        initialLevelEnergy = (
            self.atom.getEnergy(self.n, self.l, self.j, s=self.s) * C_e
        )

        # prefactor for vector polarisability
        prefactor1 = 1.0 / ((self.j + 1) * (2 * self.j + 1))
        # prefactor for tensor polarisability
        prefactor2 = (
            6
            * self.j
            * (2 * self.j - 1)
            / (6 * (self.j + 1) * (2 * self.j + 1) * (2 * self.j + 3))
        ) ** 0.5

        alpha0 = 0.0
        alpha1 = 0.0
        alpha2 = 0.0
        closestState = []
        closestEnergy = -1

        targetStateLifetime = self.atom.getStateLifetime(
            self.n, self.l, self.j, s=self.s
        )

        # sum over all dipole-coupled intermediate states in the basis
        for i, state in enumerate(self.basis):
            n1 = state[0]
            l1 = state[1]
            j1 = state[2]

            if (mj is None) or (abs(mj) < j1 + 0.1):
                if abs(j1 - self.j) < 1.1 and (
                    abs(l1 - self.l) > 0.5 and abs(l1 - self.l) < 1.1
                ):
                    coupledLevelEnergy = (
                        self.atom.getEnergy(n1, l1, j1, s=self.s) * C_e
                    )

                    # track the transition closest to the drive frequency
                    diffEnergy = abs(
                        (coupledLevelEnergy - initialLevelEnergy) ** 2
                        - driveEnergy**2
                    )
                    if (diffEnergy < closestEnergy) or (closestEnergy < 0):
                        closestEnergy = diffEnergy
                        closestState = state

                    if diffEnergy < 1e-65:
                        # print("For given frequency we are in exact resonance with state %s" % printStateString(n1,l1,j1,s=s))
                        return None, None, None, None, None, state

                    # common factors
                    if accountForStateLifetime:
                        # combined linewidth of the transition (in J)
                        transitionLinewidth = (
                            1 / self.lifetimes[i] + 1 / targetStateLifetime
                        ) * C_h
                    else:
                        transitionLinewidth = 0.0

                    # transitionEnergy
                    transitionEnergy = coupledLevelEnergy - initialLevelEnergy

                    # scalar contribution: Lorentzian-weighted dispersive term
                    d = (
                        self.atom.getReducedMatrixElementJ(
                            self.n, self.l, self.j, n1, l1, j1, s=self.s
                        )
                        ** 2
                        * (C_e * physical_constants["Bohr radius"][0]) ** 2
                        * transitionEnergy
                        * (
                            transitionEnergy**2
                            - driveEnergy**2
                            + transitionLinewidth**2 / 4
                        )
                        / (
                            (
                                transitionEnergy**2
                                - driveEnergy**2
                                + transitionLinewidth**2 / 4
                            )
                            ** 2
                            + transitionLinewidth**2 * driveEnergy**2
                        )
                    )

                    alpha0 += d

                    # vector polarizability
                    alpha1 += (
                        (-1)
                        * (self.j * (self.j + 1) + 2 - j1 * (j1 + 1))
                        * self.atom.getReducedMatrixElementJ(
                            self.n, self.l, self.j, n1, l1, j1, s=self.s
                        )
                        ** 2
                        * (C_e * physical_constants["Bohr radius"][0]) ** 2
                        * driveEnergy
                        * (
                            transitionEnergy**2
                            - driveEnergy**2
                            - transitionLinewidth**2 / 4
                        )
                        / (
                            (
                                transitionEnergy**2
                                - driveEnergy**2
                                + transitionLinewidth**2 / 4
                            )
                            ** 2
                            + transitionLinewidth**2 * driveEnergy**2
                        )
                    )

                    # tensor polarizability vanishes for j=1/2 and j=0 states
                    # because Wigner6j is then zero
                    if self.j > 0.6:
                        alpha2 += (
                            (-1) ** (self.j + j1 + 1)
                            * self.atom.getReducedMatrixElementJ(
                                self.n, self.l, self.j, n1, l1, j1, s=self.s
                            )
                            ** 2
                            * (C_e * physical_constants["Bohr radius"][0]) ** 2
                            * Wigner6j(self.j, 1, j1, 1, self.j, 2)
                            * (coupledLevelEnergy - initialLevelEnergy)
                            / (
                                (coupledLevelEnergy - initialLevelEnergy) ** 2
                                - driveEnergy**2
                            )
                        )

        alpha0 = 2.0 * alpha0 / (3.0 * (2.0 * self.j + 1.0))
        alpha0 = alpha0 / C_h  # Hz m^2 / V^2

        alpha1 = prefactor1 * alpha1 / C_h
        alpha2 = -4 * prefactor2 * alpha2 / C_h

        # core polarizability -> assumes static polarisability
        alphaC = self.atom.alphaC * 2.48832e-8  # convert to Hz m^2 / V^2

        # ponderomotive shift
        driveOmega = 2 * np.pi / driveWavelength * C_c
        alphaP = C_e**2 / (2 * C_m_e * driveOmega**2 * C_h)

        if units == "SI":
            return (
                alpha0,
                alpha1,
                alpha2,
                alphaC,
                alphaP,
                closestState,
            )  # in Hz m^2 / V^2
        elif units == "a.u." or units == "au":
            return (
                alpha0 / 2.48832e-8,
                alpha1 / 2.48832e-8,
                alpha2 / 2.48832e-8,
                alphaC / 2.48832e-8,
                alphaP / 2.48832e-8,
                closestState,
            )
        else:
            raise ValueError(
                "Only 'SI' and 'a.u' (atomic units) are recognised"
                " as 'units' parameter. Entered value '%s' is"
                " not recognised." % units
            )

    def plotPolarizability(
        self,
        wavelengthList,
        mj=None,
        addToPlotAxis=None,
        line="b-",
        units="SI",
        addCorePolarisability=True,
        addPondermotivePolarisability=False,
        accountForStateLifetime=False,
        debugOutput=False,
    ):
        r"""
        Plots of polarisability for a range of wavelengths.

        Can be combined for different states to allow finding magic wavelengths
        for pairs of states. Currently supports only driving with
        linearly polarised light. See example
        `magic wavelength snippet`_.

        .. _`magic wavelength snippet`:
            ../ARC_3_0_introduction.html#Calculations-of-dynamic-polarisability-and-magic-wavelengths-for-optical-traps

        Parameters:
            wavelengthList (array): wavelengths for which we want to calculate
                polarisability (in units of m).
            mj (float): optional, `mj` projection of the total angular
                momentum for the states for which we are calculating
                polarisability. By default it's `+j`.
            line (string): optional, line style short definition to be passed
                to matplotlib when plotting calculated polarisabilities
            units (string): optional, 'SI' or 'a.u.' (equivalently 'au'),
                switches between SI units for returned result
                (:math:`Hz V^-2 m^2` )
                and atomic units (":math:`a_0^3` "). Default 'SI'.
            addCorePolarisability (bool): optional, should ionic core
                polarisability be taken into account. By default True.
            addPondermotivePolarisability (bool): optional, should pondermotive
                polarisability (also called free-electron polarisability)
                be added to the total polarisability. Default is
                False. It assumes that there is no significant variation of
                trapping field intensity over the range of the electric cloud.
                If this condition is not satisfied, one has to calculate
                total shift as average over the electron wavefunction.
            accountForStateLifetime (bool): optional, should we account
                for finite transition linewidths caused by finite state
                lifetimes. By default False.
            debugOutput (bool): optional. Print additional output on resonances
                Default value False.
        """
        pFinal = []
        wFinal = []
        p = []
        w = []
        resonances = []

        if mj is None:
            mj = self.j

        # prefactor multiplying the tensor part for linear polarisation
        if self.j > 0.5 + 0.1:
            tensorPrefactor = (3 * mj**2 - self.j * (self.j + 1)) / (
                self.j * (2 * self.j - 1)
            )
        else:
            tensorPrefactor = 0

        for wavelength in wavelengthList:
            (
                scalarP,
                vectorP,
                tensorP,
                coreP,
                pondermotiveP,
                state,
            ) = self.getPolarizability(
                wavelength,
                accountForStateLifetime=accountForStateLifetime,
                units=units,
                mj=mj,
            )
            if scalarP is not None:
                # we are not hitting directly the resonance
                totalP = scalarP + tensorPrefactor * tensorP
                if addCorePolarisability:
                    totalP += coreP
                if addPondermotivePolarisability:
                    # Subtract pondermotive contribution since the sign convention
                    # is opposite to that of the dynamical polarizability.
                    totalP -= pondermotiveP
                # split the curve at sign changes across a resonance, so
                # matplotlib does not connect the two divergent branches
                if (
                    (len(p) > 0)
                    and p[-1] * totalP < 0
                    and (len(p) > 2 and (p[-2] - p[-1]) * totalP > 0)
                ):
                    pFinal.append(p)
                    wFinal.append(w)
                    p = []
                    w = []
                    resonances.append(wavelength)
                    if debugOutput:
                        print(
                            r"Resonance: %.2f nm %s"
                            % (
                                wavelength * 1e9,
                                printStateString(
                                    state[0], state[1], state[2], s=self.s
                                ),
                            )
                        )
                p.append(totalP)
                w.append(wavelength)

        pFinal.append(p)
        wFinal.append(w)

        if addToPlotAxis is None:
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
        else:
            ax = addToPlotAxis
        for i in range(len(wFinal)):
            ax.plot(np.array(wFinal[i]) * 1e9, pFinal[i], line, zorder=1)
            ax.set_xlabel(r"Driving field wavelength (nm)")
            if units == "SI":
                ax.set_ylabel(r"Polarizability (Hz/V$^2$ m$^2$)")
            else:
                ax.set_ylabel(r"Polarizability (a.u.)")
            for resonance in resonances:
                ax.axvline(
                    x=resonance * 1e9, linestyle=":", color="0.5", zorder=0
                )
        return ax
class StarkBasisGenerator:
"""
Base class for determining the basis of the Rydberg manifold and
associated properties.
Defines logic for determining the basis of states to include
in a calculation and obtains the energy levels and dipole moments
to build the Hamiltonian from the provided ARC atom.
This class should be inherited from to create a specific calculation.
Args:
atom (:obj:`arc.alkali_atom_functions.AlkaliAtom` or :obj:`arc.divalent_atom_functions.DivalentAtom`): ={
:obj:`arc.alkali_atom_data.Lithium6`,
:obj:`arc.alkali_atom_data.Lithium7`,
:obj:`arc.alkali_atom_data.Sodium`,
:obj:`arc.alkali_atom_data.Potassium39`,
:obj:`arc.alkali_atom_data.Potassium40`,
:obj:`arc.alkali_atom_data.Potassium41`,
:obj:`arc.alkali_atom_data.Rubidium85`,
:obj:`arc.alkali_atom_data.Rubidium87`,
:obj:`arc.alkali_atom_data.Caesium`,
:obj:`arc.divalent_atom_data.Strontium88`,
:obj:`arc.divalent_atom_data.Calcium40`
:obj:`arc.divalent_atom_data.Ytterbium174` }
Select the alkali metal for energy level
diagram calculation
"""
def __init__(self, atom):
UsedModulesARC.ac_stark = True
self.atom = atom
"""
Instance of an ARC atom to perform calculations of the energy levels and coupling strengths.
"""
# basis definitions
self.basisStates = []
"""
List of basis states for calculation in the form [ [n,l,j,mj], ...].
Calculated by :obj:`defineBasis` .
"""
self.indexOfCoupledState = None
"""
Index of coupled state (initial state passed to :obj:`defineBasis`)
in :obj:`basisStates` list of basis states
"""
self.targetState = []
"""
Target state. Found by :obj:`basisStates`[:obj:`indexOfCoupledState`].
"""
self.bareEnergies = []
"""
`bareEnergies` is list of energies corresponding to :obj:`basisStates`.
It is calculated in :obj:`defineBasis` in the basis of :obj:`basisStates` in
units of GHz.
"""
self.targetEnergy = None
"""
`targetEnergy` stores the energy of the target state (initial state passed
to :obj:`defineBasis`)
"""
self.n = None
"""
Stores the principle quantum number of the target state
"""
self.l = None
"""
Stores the orbital quantum number of the target state
"""
self.j = None
"""
Stores the total angular momentum number of the target state
"""
self.mj = None
"""
Stores the projection of the total angular moment of the target state
"""
self.s = None
"""
Stores the total spin angular momentum of the target state
"""
self.nMin = None
"""
Stores the minimum n to consider for the basis
"""
self.nMax = None
"""
Stores the maximum n to consider for the basis
"""
self.maxL = None
"""
Stores the max L to consider for the basis
"""
self.Bz = None
"""
Stores the applied magnetic field used to Zeeman shift states in the basis
"""
self.q = None
"""
Stores polarization of electric field for determining dipole coupled states.
"""
# hamiltonian components
self.H = []
"""
Diagonal elements of Stark-matrix. Not to be confused with :obj:`H0` for the
Time-Independant Formulation of the Floquet Hamiltonian. Given in units of
GHz.
"""
self.V = []
"""
off-diagonal elements of Stark-matrix divided by electric
field value. To get off diagonal elemements multiply this matrix
with electric field value. Full DC Stark matrix is obtained as
`fullStarkMatrix` = :obj:np.diag(`bareEnergies`) + :obj:`V` *`eField`. Calculated by
:obj:`defineBasis` in the basis :obj:`basisStates` in units of GHz/(V/m).
"""
# STARK memoization
self.eFieldCouplingSaved = False
def _eFieldCouplingDivE(self, n1, l1, j1, mj1, n2, l2, j2, mj2, s=0.5):
# eFied coupling devided with E (witout actuall multiplication to getE)
# delta(mj1,mj2') delta(l1,l2+-1)
if (abs(mj1 - mj2) > 0.1) or (abs(l1 - l2) != 1):
return 0
# matrix element
result = (
self.atom.getRadialMatrixElement(n1, l1, j1, n2, l2, j2, s=s)
* physical_constants["Bohr radius"][0]
* C_e
)
sumPart = self.eFieldCouplingSaved.getAngular(
l1, j1, mj1, l2, j2, mj2, s=s
)
return result * sumPart
def _eFieldCoupling(self, n1, l1, j1, mj1, n2, l2, j2, mj2, eField, s=0.5):
return (
self._eFieldCouplingDivE(n1, l1, j1, mj1, n2, l2, j2, mj2, s=s)
* eField
)
def _onePhotonCoupling(self, ns, ls, js, mjs, nt, lt, jt, mjt, q, s=0.5):
"""
Tests if state s can be dipole coupled with a single photon
to target state t.
Given ss==st, true only for
Delta-l==+-1 and (Delta-l==Delta-j or
Delta-j=0 and j=l+s for either state) transitions.
Args:
ns (int): principle quantum number of potentially coupled state
ls (int): orbital quantum number of potentially coupled state
js (float): total angular quantum number of potentially coupled state
mjs (float): projection of total angular momentum of potentially coupled state
nt (int): principle quantum number of target state
lt (int): orbital quantum number of target state
jt (float): total angular quantum number of target state
mjt (float): projection of total angular momentum of target state
q (int): polarization of coupling field, must be -1,0,1
s (float, optional): total spin angular momentum quantum number.
Defaults to 1/2, appropriate for alkali atoms.
Returns:
bool: True if transition is electric dipole allowed via a single photon
"""
# ignore the target state
if (ns == nt) and (ls == lt) and (js == jt) and (mjs == mjt):
return False
# transitions that change l by 1
elif (abs(ls - lt) == 1) and (mjs - mjt == q):
if ls - lt == js - jt:
return True
elif (js == jt) and ((js == ls + s) or (jt == lt + s)):
return True
else:
return False
else:
return False
def _twoPhotonCoupling(self, ns, ls, js, mjs, nt, lt, jt, mjt, q, s=0.5):
"""
Tests if states can be dipole coupled with two photons.
Args:
ns (int): principle quantum number of potentially coupled state
ls (int): angular quantum number of potentially coupled state
js (float): total angular quantum number of potentially coupled state
mjs (float): projection of total angular momentum of potentially coupled state
nt (int): principle quantum number of target state
lt (int): angular quantum number of target state
jt (float): total angular quantum number of target state
mjt (float): projection of total angular momentum of target state
q (int): polarization of coupling light, must be -1,0,1
s (float, optional): total spin angular momentum quantum number.
Defaults to 1/2, appropriate for alkali atoms.
Returns:
bool: True if two photon coupling between states
"""
# ignore target state
if (ns == nt) and (ls == lt) and (js == jt) and (mjs == mjt):
return False
# transitions that change l by 2
elif (
(abs(ls - lt) == 2)
and (ls - lt == js - jt)
and ((mjs - mjt) / 2 == q)
):
return True
# transitions that don't change l
elif ((ls - lt) == 0) and (js == jt) and ((mjs - mjt) / 2 == q):
return True
else:
return False
def defineBasis(
self,
n,
l,
j,
mj,
q,
nMin,
nMax,
maxL,
Bz=0,
edN=0,
progressOutput=False,
debugOutput=False,
s=0.5,
):
"""
Initializes basis of states around state of interest
Defines basis of states for further calculation. :math:`n,l,j,m_j`
specify target state whose neighbourhood and shifts we want to explore.
Other parameters specify breadth of basis.
This method stores basis in :obj:`basisStates`,
then calculates the interaction Hamiltonian of the system.
Args:
n (int): principal quantum number of the state
l (int): angular orbital momentum of the state
j (flaot): total angular momentum of the state
mj (float): projection of total angular momentum of the state
q (int): polarization of coupling field is spherical basis.
Must be -1, 0, or 1: corresponding to sigma-, pi, sigma+
nMin (int): *minimal* principal quantum number of the states to
be included in the basis for calculation
nMax (int): *maximal* principal quantum number of the states to
be included in the basis for calculation
maxL (int): *maximal* value of orbital angular momentum for the
states to be included in the basis for calculation
Bz (float, optional): magnetic field directed along z-axis in
units of Tesla. Calculation will be correct only for weak
magnetic fields, where paramagnetic term is much stronger
then diamagnetic term. Diamagnetic term is neglected.
edN (int, optional): Limits the basis
to electric dipole transitions of the provided photon number.
Default of 0 means include all states. Setting to 1 means
only include single-photon dipole-allowed transitions.
Setting to 2 means include up to 2 photon transitions.
Higher numbers not supported.
progressOutput (:obj:`bool`, optional): if True prints the
progress of calculation; Set to false by default.
debugOutput (:obj:`bool`, optional): if True prints additional
information usefull for debuging. Set to false by default.
s (float, optional): Total spin angular momentum for the state.
Default value of 0.5 is correct for Alkaline Atoms, but
value **has to** be specified explicitly for divalent atoms
(e.g. `s=0` or `s=1` for singlet and triplet states,
that have total spin angular momenutum equal to 0 or 1
respectively).
"""
# save calculation details START
self.n = n
self.l = l
self.j = j
self.mj = mj
self.q = q
if edN in [0, 1, 2]:
self.edN = edN
else:
raise ValueError("EN must be 0, 1, or 2")
self.nMin = nMin
self.nMax = nMax
self.maxL = maxL
self.Bz = Bz
self.s = s
# save calculation details END
self._findBasisStates(progressOutput, debugOutput)
self._buildHamiltonian(progressOutput, debugOutput)
    def _findBasisStates(self, progressOutput=False, debugOutput=False):
        """
        Creates the list of basis states we want to include.

        Scans all (n, l, j) combinations within the ranges stored on the
        instance (``nMin``..``nMax``, l up to ``maxL``) and keeps the states
        selected by ``self.edN``: 0 keeps the whole manifold, 1 keeps
        one-photon dipole-coupled states, 2 additionally keeps two-photon
        coupled states. The target state (``self.n/l/j/mj``) is always kept.

        Results saved to class attributes are: :obj:`basisStates`,
        :obj:`indexOfCoupledState`, and :obj:`targetState`.

        Args:
            progressOutput (bool, optional): Whether to print calculation progress.
            debugOutput (bool, optional): Whether to print debug information.
        """
        states = []
        # local aliases of the calculation parameters saved by defineBasis
        n = self.n
        l = self.l
        j = self.j
        mj = self.mj
        q = self.q
        s = self.s
        edN = self.edN
        nMin = self.nMin
        nMax = self.nMax
        maxL = self.maxL
        # track where target state is inserted in this list
        indexOfCoupledState = 0
        index = 0
        for tn in range(nMin, nMax):
            # l must stay below n; also capped by maxL
            for tl in range(min(maxL + 1, tn)):
                # fine-structure values j = l - s ... l + s
                for tj in np.linspace(tl - s, tl + s, round(2 * s + 1)):
                    # ensure we add the target state
                    if (n == tn) and (l == tl) and (j == tj):
                        # NOTE(review): `index` is intentionally not advanced
                        # here, so it lags len(states) afterwards; harmless
                        # because indexOfCoupledState is assigned exactly once
                        # (only one target state), before any drift occurs.
                        states.append([tn, tl, tj, mj])
                        indexOfCoupledState = index
                    # adding all manifold states
                    elif (
                        (edN == 0)
                        # -0.1 is a float-comparison tolerance on half-integers
                        and (abs(mj) + q - 0.1 <= tj)
                        and (
                            tn >= self.atom.groundStateN
                            or [tn, tl, tj] in self.atom.extraLevels
                        )
                    ):
                        states.append([tn, tl, tj, mj + q])
                        index += 1
                    # add states that are electric dipole allowed
                    elif (edN == 1 or edN == 2) and self._onePhotonCoupling(
                        n, l, j, mj, tn, tl, tj, mj + q, q, s
                    ):
                        states.append([tn, tl, tj, mj + q])
                        index += 1
                    # add states that are electric dipole allowed via 2-photon transition
                    elif edN == 2 and self._twoPhotonCoupling(
                        n, l, j, mj, tn, tl, tj, mj + 2 * q, q, s
                    ):
                        states.append([tn, tl, tj, mj + 2 * q])
                        index += 1
        dimension = len(states)
        if progressOutput:
            print("Found ", dimension, " states.")
        if debugOutput:
            print(states)
            print("Index of initial state")
            print(indexOfCoupledState)
            print("Initial state = ")
            print(states[indexOfCoupledState])
        # save info about states
        self.basisStates = states
        self.indexOfCoupledState = indexOfCoupledState
        self.targetState = states[indexOfCoupledState]
    def _buildHamiltonian(self, progressOutput=False, debugOutput=False):
        """
        Creates the base matrices needed to produce the Floquet Hamiltonians.

        Details about calculation are taken from class attributes.
        Matrices correspond to two parts: field dependent and independent.
        Results saved to class attributes are: :obj:`bareEnergies` (diagonal,
        field-independent, in Hz), :obj:`H` (diagonal matrix of
        :obj:`bareEnergies`), and :obj:`V` (symmetric field-coupling matrix,
        to be scaled by the electric field amplitude).

        Args:
            progressOutput (bool, optional): Whether to print calculation progress.
            debugOutput (bool, optional): Whether to print debug information.
        """
        # presumably a flag consumed by the Wigner-symbol routines to enable
        # precalculated tables — TODO confirm where wignerPrecal is read
        global wignerPrecal
        wignerPrecal = True
        # opens the cached dipole-coupling database; closed again at the end
        self.eFieldCouplingSaved = _EFieldCoupling()
        dimension = len(self.basisStates)
        states = self.basisStates
        indexOfCoupledState = self.indexOfCoupledState
        self.bareEnergies = np.zeros((dimension), dtype=np.double)
        self.V = np.zeros((dimension, dimension), dtype=np.double)
        if progressOutput:
            print("Generating matrix...")
        progress = 0.0
        for ii in range(dimension):
            if progressOutput:
                # row ii contributes 1 diagonal + 2*(dimension-ii-1) symmetric
                # off-diagonal entries = 2*(dimension-ii) - 1 of dimension**2
                progress += (dimension - ii) * 2 - 1
                print(f"{progress/dimension**2:.0%}", end="\r")
            # add diagonal element
            self.bareEnergies[ii] = (
                self.atom.getEnergy(
                    states[ii][0], states[ii][1], states[ii][2], s=self.s
                )
                * C_e
                / C_h
                + self.atom.getZeemanEnergyShift(
                    states[ii][1],
                    states[ii][2],
                    states[ii][3],
                    self.Bz,
                    s=self.s,
                )
                / C_h
            )
            # add off-diagonal element
            for jj in range(ii + 1, dimension):
                # factor 0.5: rotating-field convention — TODO confirm against
                # the Floquet construction in defineShirleyHamiltonian
                coupling = (
                    0.5
                    * self._eFieldCouplingDivE(
                        states[ii][0],
                        states[ii][1],
                        states[ii][2],
                        self.mj,
                        states[jj][0],
                        states[jj][1],
                        states[jj][2],
                        self.mj,
                        s=self.s,
                    )
                    / C_h
                )
                # fill both triangles so V stays symmetric
                self.V[jj][ii] = coupling
                self.V[ii][jj] = coupling
        self.H = np.diag(self.bareEnergies)
        if progressOutput:
            print("\nEnergies and Couplings Generated")
        if debugOutput:
            print(np.diag(self.bareEnergies) + self.V)
        # save info about target state
        self.targetEnergy = self.bareEnergies[indexOfCoupledState]
        if debugOutput:
            print("Target State:", self.targetState, self.targetEnergy)
        self.atom.updateDipoleMatrixElementsFile()
        self.eFieldCouplingSaved._closeDatabase()
        self.eFieldCouplingSaved = False
class ShirleyMethod(StarkBasisGenerator):
    """
    Calculates Stark Maps for a single atom in a single oscillating field

    Uses Shirley's Time Independent Floquet Hamiltonian Method [1]_.
    More detail can be found in the review of Semiclassical Floquet Theories
    by Chu [2]_ and its application in Meyer et al [3]_.

    For examples demonstrating basic usage
    see `Shirley Method Examples`_.

    Args:
        atom (:obj:`arc.alkali_atom_functions.AlkaliAtom` or :obj:`arc.divalent_atom_functions.DivalentAtom`): ={
            :obj:`arc.alkali_atom_data.Lithium6`,
            :obj:`arc.alkali_atom_data.Lithium7`,
            :obj:`arc.alkali_atom_data.Sodium`,
            :obj:`arc.alkali_atom_data.Potassium39`,
            :obj:`arc.alkali_atom_data.Potassium40`,
            :obj:`arc.alkali_atom_data.Potassium41`,
            :obj:`arc.alkali_atom_data.Rubidium85`,
            :obj:`arc.alkali_atom_data.Rubidium87`,
            :obj:`arc.alkali_atom_data.Caesium`,
            :obj:`arc.divalent_atom_data.Strontium88`,
            :obj:`arc.divalent_atom_data.Calcium40`
            :obj:`arc.divalent_atom_data.Ytterbium174` }
            Select the alkali metal for energy level
            diagram calculation

    Examples:
        AC Stark Map calculation

        >>> from arc import Rubidium85, ShirleyMethod
        >>> calc = ShirleyMethod(Rubidium85())
        >>> calc.defineBasis(56, 2, 2.5, 0.5, 0, 45, 70, 10)
        >>> calc.defineShirleyHamiltonian(fn=1)
        >>> calc.diagonalise(0.01, np.linspace(1.0e9, 40e9, 402))
        >>> print(calc.targetShifts.shape)
        (402,)

    References:
        .. [1] J. H. Shirley, Physical Review **138**, B979 (1965)
            https://link.aps.org/doi/10.1103/PhysRev.138.B979
        .. [2] Shih-I Chu, "Recent Developments in Semiclassical Floquet Theories for Intense-Field Multiphoton Processes",
            in Adv. At. Mol. Phys., vol. 21 (1985)
            http://www.sciencedirect.com/science/article/pii/S0065219908601438
        .. [3] D. H. Meyer, Z. A. Castillo, K. C. Cox, P. D. Kunz, J. Phys. B: At. Mol. Opt. Phys., **53**, 034001 (2020)
            https://doi.org/10.1088/1361-6455/ab6051

    .. _`Shirley Method Examples`:
        ./AC_Stark_primer.html#Shirley's-Time-Independent-Floquet-Hamiltonian
    """

    def __init__(self, atom):
        UsedModulesARC.ac_stark = True
        super().__init__(atom)

        # Shirley Floquet Hamiltonian components
        self.fn = None
        """
        Saves rank of Floquet Hamiltonian expansion.
        Only fn+1 photon processes are accurately accounted for in the diagonalisation.
        """
        self.H0 = []
        """
        diagonal elements of Floquet-matrix (detuning of states) calculated by
        :obj:`defineShirleyHamiltonian`
        with units GHz relative to ionization energy. It is a 'csr' sparse matrix.
        """
        self.B = []
        """
        off-diagonal elements of Floquet Hamiltonian.
        Get final matrix by multiplying by the electric field amplitude in V/m.
        Calculated by :obj:`defineShirleyHamiltonian`.
        """
        self.dT = []
        """
        diagonal prefactors of frequency elements of Floquet Hamiltonian.
        To get diagonal elements multiply this matrix diagonal by electric field
        frequency. Calculated by :obj:`defineShirleyHamiltonian`
        and is unitless. Multiplying frequency should be in GHz.
        """
        # calculation inputs
        self.eFields = None
        """
        Saves electric field (in units of V/m) for which energy levels vs frequency are calculated

        See also:
            :obj:`diagonalise`
        """
        self.freqs = None
        """
        Saves frequency (in units of Hz) for which energy levels vs electric field are calculated

        See also:
            :obj:`diagonalise`
        """
        # calculation outputs
        self.eigs = []
        """
        Array of eigenValues corresponding to the energies of the atom states for the
        electric field `eField` at the frequency `freq`. In units of Hz.
        """
        self.eigVectors = []
        """
        Array of eigenvectors corresponding to the eigenValues of the solve.
        """
        self.transProbs = []
        """
        Probability to transition from the target state to another state in the basis.
        """
        self.targetShifts = []
        """
        This is the shift of the target state relative to the zero field energy for an applied
        field of :obj:`eField` and :obj:`freq`. Given in units of Hz.
        """

    def defineShirleyHamiltonian(self, fn, debugOutput=False):
        """
        Create the Shirley time-independent Floquet Hamiltonian.

        Uses :obj:`~StarkBasisGenerator.bareEnergies` and
        :obj:`~StarkBasisGenerator.V` from :class:`StarkBasisGenerator` to build.
        Matrix is stored in three parts.
        First part is diagonal electric-field independent part stored in :obj:`H0`,
        while the second part :obj:`B` corresponds to off-diagonal elements
        that are proportional to electric field amplitude.
        The third part is the diagonal Floquet expansion proportional
        to electric field frequency.
        Overall interaction matrix for electric field `eField` and `freq`
        can be then obtained from A B blocks
        ``A`` = :obj:`H0` + :obj:`dT` * ``freq`` and
        ``B`` = :obj:`B` * ``eField``.
        These matrices are saved as sparse CSR to facilitate calculations
        and minimize memory footprint.

        Args:
            fn (int): rank of Floquet Hamiltonian expansion. Only fn+1
                multi-photon processes are accurately accounted for.
            debugOutput (bool, optional): if True prints shapes and corner
                elements of the constructed matrices.

        Raises:
            ValueError: if `fn` is less than 1.
        """
        self.fn = fn
        if not fn >= 1:
            # message corrected: fn=1 is accepted, so the requirement is
            # "at least 1", not "greater than 1"
            raise ValueError(
                "Floquet expansion rank must be at least 1."
                + " Rank of 0 is equivalent to rotating wave approximation"
                + " solution and is not covered by this method."
            )
        dimension = len(self.bareEnergies)
        # create the sparse building blocks for the Floquet Hamiltonian
        # ensure everything is converted to csr format for efficient math
        self.H0 = sp.diags(np.tile(self.bareEnergies, 2 * fn + 1)).tocsr()
        self.dT = sp.block_diag(
            [
                sp.diags([i], 0, shape=(dimension, dimension))
                for i in range(-fn, fn + 1, 1)
            ],
            dtype=np.double,
        ).tocsr()
        # couplings appear only between adjacent Floquet blocks (|i-j| == 1)
        self.B = sp.bmat(
            [
                [
                    self.V if abs(i - j) == 1 else None
                    for i in range(-fn, fn + 1, 1)
                ]
                for j in range(-fn, fn + 1, 1)
            ],
            dtype=np.double,
        ).tocsr()

        if debugOutput:
            print(self.H0.shape, self.dT.shape, self.B.shape)
            print(self.H0[(0, 0)], self.dT[(0, 0)], self.B[(0, 0)])

    def diagonalise(
        self, eFields, freqs, progressOutput=False, debugOutput=False
    ):
        """
        Finds atom eigenstates versus electric field and driving frequency

        Eigenstates are calculated for the outer product `eFields` and `freqs`.
        Inputs are saved in class attributes
        :obj:`eFields`, :obj:`freqs`.
        Resulting sorted eigenvalues, eigenvectors, transition probabilities, and target state shifts
        are saved in the class attributes
        :obj:`eigs`, :obj:`eigVectors`, :obj:`transProbs` and :obj:`targetShifts`.

        Function automatically produces the outer product space of the inputs.
        For example, if `eFields` has two elements and `freqs` has 10,
        the output shifts will have a shape of `(2,10)`.
        If one of the inputs is a single value,
        that dimension is squeezed out.

        Args:
            eFields (float or sequence of floats): electric field strengths (in V/m)
                for which we want to know energy eigenstates
            freqs (float or sequence of floats): driving frequency (in Hz)
                for which we want to know energy eigenstates
            progressOutput (bool, optional): if True prints the
                progress of calculation; Set to false by default.
            debugOutput (bool, optional): if True prints additional
                information useful for debugging. Set to false by default.
        """
        # get basic info about solve structure from class
        dim0 = len(self.basisStates)
        targetEnergy = self.targetEnergy
        # ensure inputs are numpy arrays, if scalars, 0d-arrays
        self.eFields = np.array(eFields, ndmin=1)
        self.freqs = np.array(freqs, ndmin=1)
        # pre-allocation of results array
        eig = np.zeros(
            (*self.eFields.shape, *self.freqs.shape, dim0 * (2 * self.fn + 1)),
            dtype=np.double,
        )
        eigVec = np.zeros(
            (
                *self.eFields.shape,
                *self.freqs.shape,
                dim0 * (2 * self.fn + 1),
                dim0 * (2 * self.fn + 1),
            ),
            dtype=np.complex128,
        )
        transProbs = np.zeros(
            (*self.eFields.shape, *self.freqs.shape, dim0), dtype=np.double
        )
        targetShifts = np.zeros(
            (*self.eFields.shape, *self.freqs.shape), dtype=np.double
        )

        if progressOutput:
            print("Finding eigenvectors...")
        # iterate the outer product of the two inputs via broadcast op_axes
        it = np.nditer(
            [self.eFields, self.freqs],
            flags=["multi_index"],
            op_flags=[["readonly"], ["readonly"]],
            op_axes=[
                list(range(self.eFields.ndim)) + [-1] * self.freqs.ndim,
                [-1] * self.eFields.ndim + list(range(self.freqs.ndim)),
            ],
        )
        with it:
            for field, freq in it:
                if progressOutput:
                    print(f"{(it.iterindex+1)/it.itersize:.0%}", end="\r")
                # define the Shirley Hamiltonian for this combo of field and frequency
                Hf = self.H0 + self.dT * freq + self.B * field
                # convert Hf to dense array to get all eigenvectors
                ev, egvector = eigh(Hf.toarray())
                # save the eigenvalues and eigenvectors
                eig[it.multi_index] = ev
                eigVec[it.multi_index] = egvector
                # get transition probabilities from target state to other basis states
                # index of first basis state in k=0 block diagonal
                refInd = self.fn * dim0
                # index of target state in basis
                tarInd = self.indexOfCoupledState + refInd
                transProbs[it.multi_index] = np.array(
                    [
                        np.sum(
                            [
                                np.abs(
                                    np.conj(egvector[refInd + k * dim0 + i])
                                    * egvector[tarInd]
                                )
                                ** 2
                                for k in range(-self.fn, self.fn + 1, 1)
                            ]
                        )
                        for i in range(0, dim0, 1)
                    ]
                )
                # get the target shift by finding the max overlap with the target state
                # (the extra **2 makes this |c|^4; argmax is unchanged)
                evInd = np.argmax(
                    np.abs(egvector[tarInd].conj() * egvector[tarInd]) ** 2
                )
                if np.count_nonzero(ev == ev[evInd]) > 1:
                    warnings.warn(
                        "Multiple states have same overlap with target. Only saving first one."
                    )
                targetShifts[it.multi_index] = targetEnergy - ev[evInd]

                if debugOutput:
                    print(f"E field {field:.5f} V/m, Freq {freq*1e-9:.3f} GHz")
                    print(
                        f"Eigenvalue with largest overlap of target state {evInd}: {ev[evInd]*1e-9:.3f} GHz"
                    )
                    print(f"Shift: {(targetEnergy-ev[evInd])*1e-9:.3e} GHz")
                    # eigh returns eigenvectors as columns, so select a column
                    # (was egvector[evInd], which printed a row)
                    print(f"Eigenstate: {egvector[:, evInd]}")
        # squeeze out unused dimensions corresponding to single element inputs
        self.eigs = eig.squeeze()
        self.eigVectors = eigVec.squeeze()
        self.transProbs = transProbs.squeeze()
        self.targetShifts = targetShifts.squeeze()
class RWAStarkShift(StarkBasisGenerator):
    """
    Approximately calculates Stark Maps for a single atom in a single oscillating field

    Assumes the rotating wave approximation applies independently for the
    field interaction with all possible dipole transitions.
    Approximation is generally reasonable for weak driving fields such
    that no more than a single resonance contributes significantly
    to the overall Stark shift.
    When field is far-detuned from all transitions,
    error tends to a factor of 2.

    For an example of usage and comparison to other methods
    see `RWAStarkShift Example`_.

    Args:
        atom (:obj:`AlkaliAtom`): ={ :obj:`arc.alkali_atom_data.Lithium6`,
            :obj:`arc.alkali_atom_data.Lithium7`,
            :obj:`arc.alkali_atom_data.Sodium`,
            :obj:`arc.alkali_atom_data.Potassium39`,
            :obj:`arc.alkali_atom_data.Potassium40`,
            :obj:`arc.alkali_atom_data.Potassium41`,
            :obj:`arc.alkali_atom_data.Rubidium85`,
            :obj:`arc.alkali_atom_data.Rubidium87`,
            :obj:`arc.alkali_atom_data.Caesium` }
            Select the alkali metal for energy level
            diagram calculation

    Examples:
        Approximate AC Stark Map calculation

        >>> from arc import Rubidium85, RWAStarkShift
        >>> calc = RWAStarkShift(Rubidium85())
        >>> calc.defineBasis(56, 2, 2.5, 0.5, 0, 45, 70, 10)
        >>> calc.findDipoleCoupledStates()
        >>> calc.makeRWA(0.01, np.linspace(1.0e9, 40e9, 402))
        >>> print(calc.starkShifts.shape)
        (402,)

    .. _`RWAStarkShift Example`:
        ./AC_Stark_primer.html#RWAStarkShift:-Approximating-AC-Stark-Map-Calculations
    """

    def __init__(self, atom):
        UsedModulesARC.ac_stark = True
        super().__init__(atom)

        self.dipoleCoupledStates = []
        """
        List of basis states that are dipole coupled to the target state.
        This is a subset of :obj:`~StarkBasisGenerator.basisStates`.
        """
        self.dipoleCoupledFreqs = []
        """
        Transition frequencies in Hz between :obj:`targetState` and :obj:`dipoleCoupledStates`.
        """
        self.starkShifts = []
        """
        Saves results of :obj:`makeRWA` calculations.
        """

    def findDipoleCoupledStates(self, debugOutput=False):
        r"""
        Finds the states in :obj:`basisStates` that directly couple to
        :obj:`targetState` via single photon electric dipole transitions.

        Polarization of the coupling light is taken from ``self.q``
        (set by :obj:`~StarkBasisGenerator.defineBasis`).
        Saves the states and their detunings relative to :obj:`targetState`
        to :obj:`dipoleCoupledStates` and :obj:`dipoleCoupledFreqs`.

        Args:
            debugOutput (bool, optional): if True prints the number of
                coupled states found and the smallest detuning.
        """
        coupledStates = []
        coupledFreqs = []
        for i, st in enumerate(self.basisStates):
            if self._onePhotonCoupling(
                self.n,
                self.l,
                self.j,
                self.mj,
                st[0],
                st[1],
                st[2],
                self.mj + self.q,
                self.q,
                self.s,
            ):
                coupledStates.append(st)
                coupledFreqs.append(self.targetEnergy - self.bareEnergies[i])

        self.dipoleCoupledStates = coupledStates
        self.dipoleCoupledFreqs = np.array(coupledFreqs)

        if debugOutput:
            print(f"Found {len(coupledStates):d} dipole coupled states")
            print(
                f"Nearest dipole coupled state is detuned by: {np.abs(self.dipoleCoupledFreqs).min()*1e-9:.3f} GHz"
            )

    def _getRabiFrequency2_broadcast(
        self, n1, l1, j1, mj1, n2, l2, j2, q, electricFieldAmplitude, s=0.5
    ):
        # broadcast the scalar atom.getRabiFrequency2 call over a sequence
        # (or scalar) of field amplitudes; returns angular frequencies
        eFields = np.array(electricFieldAmplitude, ndmin=1)
        rabis = np.array(
            [
                self.atom.getRabiFrequency2(
                    n1, l1, j1, mj1, n2, l2, j2, q, eField, s
                )
                for eField in eFields
            ]
        )
        return rabis

    def makeRWA(self, efields, freqs, maxRes=0.0, zip_inputs=False):
        """
        Calculates the total Rotating-Wave Approximation AC stark shift

        Interaction is between :obj:`targetState` with each :obj:`dipoleCoupledStates` ``[i]``.
        Resulting shifts are saved in Hz to :obj:`starkShifts` .

        Function automatically produces the outer product space of the inputs.
        For example, if `efields` has two elements and `freqs` has 10,
        the output shifts will have a shape of `(2,10)`.
        If one of the inputs is a single value,
        that dimension is squeezed out.

        :obj:`findDipoleCoupledStates` must be run first.

        Args:
            efields (float or sequence of floats): electric field amplitude in V/m
            freqs (float or sequence of floats): electric field frequency in Hz
            maxRes (float, optional): only include dipole transitions with frequencies
                less than this. Specified in Hz.
            zip_inputs (bool, optional): Causes the calculation to zip the inputs
                instead of an outer product. Inputs must be of equal shape when `True`.
                Default is `False`.
        """
        # ensure inputs are numpy arrays, even if single values
        eFields = np.array(efields, ndmin=1)
        Freqs = np.array(freqs, ndmin=1)

        if zip_inputs:
            # compare the normalized arrays: the raw `freqs` argument may be a
            # scalar or list with no .shape attribute (was: freqs.shape)
            if Freqs.shape != eFields.shape:
                raise ValueError("Zipped inputs must have same shape")
            delta_slice = np.s_[:]
            Omega_slice = np.s_[:]
            starkShift = np.zeros(Freqs.shape, dtype=np.double)
        else:
            delta_slice = np.s_[np.newaxis, :]
            Omega_slice = np.s_[:, np.newaxis]
            starkShift = np.zeros(
                (*eFields.shape, *Freqs.shape), dtype=np.double
            )

        if maxRes != 0.0:
            # keep only near-resonant transitions within +/- maxRes
            inds = np.where(
                (self.dipoleCoupledFreqs > -maxRes)
                & (self.dipoleCoupledFreqs < maxRes)
            )
            states = [self.dipoleCoupledStates[i] for i in inds[0]]
        else:
            states = self.dipoleCoupledStates

        print(
            f"Calculating RWA Stark Shift approximation with {len(states):d} levels"
        )

        for st in states:
            # Rabi frequency (linear, not angular) for each field amplitude
            Omega = (
                self._getRabiFrequency2_broadcast(
                    *self.targetState, *st[:-1], self.q, eFields
                )
                / 2
                / np.pi
            )
            trans = self.atom.getTransitionFrequency(
                *self.targetState[:-1], *st[:-1]
            )
            # detuning from the transition; use the ndmin=1 array so scalar
            # and list inputs work too (was: raw `freqs`)
            if trans > 0.0:
                delta = -(trans - Freqs)
            else:
                delta = -(trans + Freqs)

            starkShiftplus = 0.5 * (
                delta[delta_slice]
                + np.sqrt(delta[delta_slice] ** 2 + Omega[Omega_slice] ** 2)
            )
            starkShiftminus = 0.5 * (
                delta[delta_slice]
                - np.sqrt(delta[delta_slice] ** 2 + Omega[Omega_slice] ** 2)
            )
            # choose the dressed-state branch adiabatically connected to the
            # bare target state on each side of the resonance
            starkShift += np.where(delta < 0.0, starkShiftplus, starkShiftminus)
self.starkShifts = starkShift.squeeze() | ARC-Alkali-Rydberg-Calculator | /ARC_Alkali_Rydberg_Calculator-3.3.0-cp311-cp311-win_amd64.whl/arc/calculations_atom_single.py | calculations_atom_single.py |
from __future__ import division, print_function, absolute_import
__version__ = "3.3.0"
__all__ = [
"AlkaliAtom",
"printState",
"printStateString",
"printStateStringLatex",
"printStateLetter",
"formatNumberSI",
"Hydrogen",
"Caesium",
"Cesium",
"Rubidium85",
"Rubidium",
"Rubidium87",
"Lithium6",
"Lithium7",
"Sodium",
"Potassium",
"Potassium39",
"Potassium40",
"Potassium41",
"Strontium88",
"Calcium40",
"Ytterbium174",
"Ylm",
"Wavefunction",
"StarkMap",
"LevelPlot",
"AtomSurfaceVdW",
"OpticalLattice1D",
"DynamicPolarizability",
"StarkBasisGenerator",
"ShirleyMethod",
"RWAStarkShift",
"PairStateInteractions",
"StarkMapResonances",
"Wigner3j",
"Wigner6j",
"TriaCoeff",
"CG",
"WignerDmatrix",
"C_k",
"C_c",
"C_h",
"C_e",
"C_m_e",
"getCitationForARC",
]
from arc.alkali_atom_functions import (
AlkaliAtom,
printState,
printStateString,
printStateStringLatex,
printStateLetter,
formatNumberSI,
)
from arc.alkali_atom_data import (
Hydrogen,
Caesium,
Cesium,
Rubidium85,
Rubidium,
Rubidium87,
Lithium6,
Lithium7,
Sodium,
Potassium,
Potassium39,
Potassium40,
Potassium41,
)
from arc.divalent_atom_data import Strontium88, Calcium40, Ytterbium174
from arc.calculations_atom_single import (
Ylm,
Wavefunction,
StarkMap,
LevelPlot,
AtomSurfaceVdW,
OpticalLattice1D,
DynamicPolarizability,
StarkBasisGenerator,
ShirleyMethod,
RWAStarkShift,
)
from arc.calculations_atom_pairstate import (
PairStateInteractions,
StarkMapResonances,
)
from arc.wigner import Wigner3j, Wigner6j, TriaCoeff, CG, WignerDmatrix
from arc._database import getCitationForARC
from scipy.constants import k as C_k
from scipy.constants import c as C_c
from scipy.constants import h as C_h
from scipy.constants import e as C_e
from scipy.constants import m_e as C_m_e | ARC-Alkali-Rydberg-Calculator | /ARC_Alkali_Rydberg_Calculator-3.3.0-cp311-cp311-win_amd64.whl/arc/__init__.py | __init__.py |
from __future__ import print_function, absolute_import
import sys
# Python 2 compatibility shim: on Python 3 alias xrange to range so the
# module body can keep using xrange unchanged.
if sys.version_info > (2,):
    xrange = range
import numpy as np
from .alkali_atom_functions import printStateString, C_e, C_h, pi
def htmlLiteratureOutput(v, ref):
    """
    Print an HTML snippet reporting a literature dipole matrix element.

    Args:
        v (float): radial part of the dipole matrix element.
        ref (sequence): literature record where ``ref[0]`` flags a
            theoretical value (1) vs experimental, ``ref[2]`` is the year,
            ``ref[3]`` the source title and ``ref[4]`` the DOI.
    """
    header = (
        "<div class='lit'><p>Literature values<p>Radial part of dipole matrix element: %.3f</p>"
        % v
    )
    print(header)

    # ref[0] == 1 marks a theoretical rather than experimental source
    typeOfSource = "theoretical value" if ref[0] == 1 else "experimental value"
    sourceLine = (
        "<p>Source: <a class='link' target='_blank' href='http://dx.doi.org/%s'>%s</a>, %s (%s) </p>"
        % (ref[4], ref[3], typeOfSource, ref[2])
    )
    print(sourceLine)
    print("</div>")
def rabiFrequencyWidget(atom, n1, l1, j1, n2, l2, j2, laserPower, laserWaist):
    """
    Build an interactive HTML/JavaScript widget showing Rabi frequencies.

    Precomputes the Rabi frequency of the n1,l1,j1 -> n2,l2,j2 transition
    for every initial mj and every polarization (sigma-, pi, sigma+), then
    returns HTML form controls plus a script that displays the selected
    value (rendered with MathJax).

    Args:
        atom: ARC atom object providing ``getRabiFrequency``.
        n1, l1, j1: quantum numbers of the initial state.
        n2, l2, j2: quantum numbers of the final state.
        laserPower (float): driving laser power (units as expected by
            ``atom.getRabiFrequency``).
        laserWaist (float): driving laser beam waist.

    Returns:
        str: HTML + ``<script>`` markup ready for embedding in a web page.
    """
    # sol[mjIndex][q+1] holds the formatted Rabi frequency (or "not coupled")
    sol = []
    inputMj = '<p>Rabi frequency $=$ <span id="rabival">0</span><p><form id="polarization" onchange="myFunction()">'
    inputMj += '<p>for driving from <select id="mj" onchange="myFunction()">'
    index = 0
    for mj1 in np.linspace(-j1, j1, int(round(2 * j1 + 1))):
        # option label shows mj as a half-integer fraction (2*mj)/2
        inputMj += '<option value="%d">m_j = %d/2 ' % (
            index,
            int(round(2.0 * mj1)),
        )
        arr = []
        for q in [-1, 0, 1]:
            # -0.1 is a float tolerance on the |mj'| <= j2 selection rule
            if abs(mj1 + q) - 0.1 < j2:
                # convert angular frequency to ordinary frequency (Hz)
                rabiFreq = atom.getRabiFrequency(
                    n1, l1, j1, mj1, n2, l2, j2, q, laserPower, laserWaist
                ) / (2 * pi)
                arr.append(
                    "$2 \\pi \\times$"
                    + printValueString(rabiFreq, "Hz", decimalPlaces=2)
                )
            else:
                arr.append("not coupled")
        sol.append(arr)
        index += 1
    # raw string: the trailing backslash-newlines stay inside the HTML text
    inputMj += r'</select>\
    <input type="radio" name="colors" id="sigma-" value="0" >$\sigma^-$ | \
    <input type="radio" name="colors" id="pi" value="1" checked>$\pi$ |\
    <input type="radio" name="colors" id="sigma+" value="2" >$\sigma^+$\
    transition</p></form>'

    # embed the precomputed table as a JS array and wire up the selector
    script = "<script id='returnscript' type='text/javascript'>"
    script = script + "var rabiFreq =" + str(sol) + "; "
    script += 'function myFunction() {\
        var mj = document.getElementById("mj").value;\
        var p = 0;\
        if (document.getElementById("sigma-").checked){\
        p=0;\
        }\
        if (document.getElementById("pi").checked){\
        p=1; \
        }\
        if (document.getElementById("sigma+").checked){\
        p=2; \
        }\
        document.getElementById("rabival").innerHTML = rabiFreq[mj][p] ;\
        MathJax.Hub.Queue(["Typeset",MathJax.Hub,"rabival"]);\
        }\
        document.getElementById("polarization").addEventListener("click", myFunction);\
        myFunction();\
        </script>'
    return inputMj + script
def printValueString(value, unit, decimalPlaces=3):
    """
    Format a numeric value with an SI prefix, e.g. ``1234.0 -> "1.234 kHz"``.

    Scales the magnitude into [1, 1000) and picks the matching SI prefix
    between femto (f) and tera (T); the sign is preserved.

    Args:
        value (float): quantity to format (any sign).
        unit (str): unit symbol appended after the prefix.
        decimalPlaces (int, optional): digits after the decimal point.
            Defaults to 3.

    Returns:
        str: formatted string ``"<number> <prefix><unit>"``.
    """
    prefix = ["f", "p", "n", "$\\mu$", "m", "", "k", "M", "G", "T"]
    i = 5  # index of the empty (unity) prefix
    sg = 1.0
    if value < 0:
        sg = -1.0
        value = abs(value)
    formatString = "%%.%df %%s%%s" % decimalPlaces
    if value == 0:
        # short-circuit: previously 0 was scaled down to the femto prefix
        # ("0.000 f<unit>"); report it with no prefix instead
        return formatString % (0.0, "", unit)
    if value > 1000:
        while (value > 1000) and (i < 9):
            value = value * 1.0e-3
            i += 1
        return formatString % (sg * value, prefix[i], unit)
    elif value < 1:
        while (value < 1) and (i > 0):
            value = value * 1.0e3
            i -= 1
        return formatString % (sg * value, prefix[i], unit)
    else:
        return formatString % (sg * value, "", unit)
def plotStarkMap(calc, units=1, xlim=[], ylim=[], filename=""):
    """
    Render a StarkMap calculation as a Plotly scatter plot (HTML output).

    Points are coloured by the overlap with the original target state and
    sorted so the most-coupled points are drawn on top.

    Args:
        calc: a :obj:`StarkMap`-like object exposing ``basisStates``,
            ``indexOfCoupledState``, ``eFieldList``, ``y`` and ``highlight``.
        units (int, optional): 1 plots energy in cm^-1, any other value in GHz.
        xlim (list, optional): [min, max] field axis limits in V/cm.
        ylim (list, optional): [min, max] energy axis limits (plot units).
        filename (str, optional): write HTML to this file instead of stdout.

    Returns:
        int: 0 on completion.
    """
    originalState = calc.basisStates[calc.indexOfCoupledState]
    n = originalState[0]
    l = originalState[1]
    j = originalState[2]
    ax = webPlot()
    x = []
    y = []
    yState = []
    ax.xlabel = "E field (V/cm)"
    coeff = 1.0
    ax.ylabel = "Energy/h (GHz)"
    if units == 1:
        # in cm^{-1}
        coeff = 0.03336  # conversion factor from GHz to cm^{-1}
        ax.ylabel = "Energy/(h c) (cm^{-1})"
    if ylim == []:
        # default window: +/-10 plot-units around the zero-field target energy
        ylim = [
            calc.atom.getEnergy(n, l, j) * C_e / C_h * 1e-9 * coeff - 10,
            calc.atom.getEnergy(n, l, j) * C_e / C_h * 1e-9 * coeff + 10,
        ]
    # flatten all branches, keeping only points inside the energy window
    for br in xrange(len(calc.y)):
        for i in xrange(len(calc.y[br])):
            yt = calc.y[br][i] * coeff
            if yt < ylim[1] and ylim[0] < yt:
                x.append(calc.eFieldList[i])
                y.append(yt)
                yState.append(calc.highlight[br][i])
    yState = np.array(yState)
    # draw weakly-highlighted points first so strong overlaps end up on top
    sortOrder = yState.argsort(kind="heapsort")
    x = np.array(x)
    y = np.array(y)
    x = x[sortOrder]
    y = y[sortOrder]
    yState = yState[sortOrder]

    ct = "|< %s | \\mu > |^2" % printStateString(n, l, j)
    # x is in V/m; divide by 100 to plot V/cm
    ax.scatter(x / 100.0, y, c=yState, cmin=0, cmax=1, ctitle=ct)
    if xlim == []:
        xlim = [min(x) / 100.0, max(x) / 100.0]
    ax.printPlot(
        xlim=xlim, ylim=ylim, filename=filename, name="starkdiv1", height=600
    )
    return 0
def plotInteractionLevels(calc, xlim=[], ylim=[], filename=""):
    """
    Render a pair-state interaction (spaghetti) diagram as a Plotly plot.

    Args:
        calc: a :obj:`PairStateInteractions`-like object exposing ``r``,
            ``y``, ``highlight``, ``drivingFromState`` and the pair-state
            quantum numbers (``n, l, j, m1, nn, ll, jj``).
        xlim (list, optional): [min, max] distance axis limits (micrometres).
        ylim (list, optional): [min, max] energy axis limits (GHz).
        filename (str, optional): write HTML to this file instead of stdout.
    """
    ax = webPlot()
    ax.xlabel = r"R (\mu m)"
    ax.ylabel = r"\Delta E (GHz)"

    if calc.drivingFromState[0] == 0:
        # colouring is based on the contribution of the original pair state here
        ct = r"|< %s %.1f , %s %.1f | \mu > |^2$" % (
            printStateString(calc.n, calc.l, calc.j),
            calc.m1,
            printStateString(calc.nn, calc.ll, calc.jj),
            calc.m1,
        )
    else:
        # colouring is based on the coupling to different states
        ct = r"\Omega_\mu/\Omega"

    x = []
    y = []
    yState = []
    # flatten all branches into a single point cloud
    for br in xrange(len(calc.y)):
        for i in xrange(len(calc.y[br])):
            x.append(calc.r[i])
            y.append(calc.y[br][i])
            yState.append(calc.highlight[br][i])
    yState = np.array(yState)
    # draw weakly-highlighted points first so strong overlaps end up on top
    sortOrder = yState.argsort(kind="heapsort")
    x = np.array(x)
    y = np.array(y)
    x = x[sortOrder]
    y = y[sortOrder]
    yState = yState[sortOrder]

    ax.scatter(x, y, c=yState, cmin=0, cmax=1, ctitle=ct)
    ax.printPlot(xlim=xlim, ylim=ylim, filename=filename, name="levelintdiv")
    return
class webPlot:
    """
    Minimal matplotlib-like plotting front end that emits Plotly.js markup.

    Traces are accumulated as literal JavaScript object strings via
    :meth:`plot` / :meth:`scatter` (with :meth:`semilogx` / :meth:`semilogy`
    switching an axis to log scale), and :meth:`printPlot` renders the whole
    figure as an HTML ``<div>`` plus ``<script>``, either to stdout (for
    embedding in a generated page) or to a file.
    """

    def __init__(self):
        # accumulated trace definitions, each a JS object literal string
        self.traces = []
        self.layout = []
        self.traceNo = 0
        self.xlabel = ""
        self.ylabel = ""
        # extra per-axis layout fragments (e.g. log-scale settings)
        self.layoutx = ""
        self.layouty = ""
        self.title = ""

    def plot(self, x, y, type, name=""):
        """Add a line ('-') or marker ('.') trace from numpy arrays x, y."""
        # raise the threshold so array2string never elides values with "..."
        np.set_printoptions(threshold=1e10)
        self.traceNo += 1
        temp = "{ x:" + np.array2string(x, separator=",") + ",\n"
        temp = temp + "y: " + np.array2string(y, separator=",") + ",\n"
        if type == ".":
            temp += "mode: 'markers',\n marker: {size:5},\n"
        elif type == "-":
            temp += "mode: 'lines',\n"
        temp += "name: '%s'" % name
        temp += "}"
        self.traces.append(temp)

    def semilogx(self, x, y, type, name=""):
        """Like :meth:`plot`, but with a logarithmic x axis."""
        self.layoutx = "type:'log' ,\n\
            tickformat :'.1e',\n "
        self.plot(x, y, type, name)

    def semilogy(self, x, y, type, name=""):
        """Like :meth:`plot`, but with a logarithmic y axis."""
        self.layouty = "type:'log' ,\n\
            tickformat :'.1e',\n "
        self.plot(x, y, type, name)

    def scatter(self, x, y, c=[], cmin=0, cmax=1, ctitle="", name=""):
        """
        Add a marker trace; optional array `c` colours points with a colorbar
        spanning [cmin, cmax] titled `ctitle`.
        """
        np.set_printoptions(threshold=1e10)
        self.traceNo += 1
        temp = (
            "{ x:"
            + np.array2string(
                x,
                separator=",",
            )
            + ",\n"
        )
        temp = temp + "y: " + np.array2string(y, separator=",") + ",\n"
        temp += "name: '%s',\n" % name
        # NOTE(review): `c` is typically a numpy array here; `c != []` relies
        # on NumPy's mismatched-shape comparison falling back to True —
        # `len(c) != 0` would be more robust, confirm before changing.
        if c != []:
            temp = temp + " text: " + np.array2string(c, separator=",") + ",\n"
        temp += "mode: 'markers',\n"
        if c != []:
            temp = (
                temp
                + "marker:{\n\
                color:"
                + np.array2string(c, separator=",")
                + ",\n\
                cmin:%f,\n\
                cmax:%f,\n\
                showscale: true,\n\
                colorbar:{\n\
                title:'"
                % (cmin, cmax)
                + str(ctitle)
                + "',\n\
                },\n\
                size:5\n\
                },\n"
            )
        else:
            temp = (
                temp
                + "marker:{\n\
                size:5\n\
                },\n"
            )
        temp += "}"
        self.traces.append(temp)

    def printPlot(
        self,
        name="",
        width=600,
        height=363,
        xlim=[],
        ylim=[],
        filename="",
        scriptName="returnscript",
    ):
        """
        Emit the accumulated traces as a Plotly <div> + <script>.

        Writes to stdout when `filename` is empty, otherwise to that file.
        `name` is the div id; `scriptName` the script element id.
        """
        # join all trace literals into a JS data array
        d = ""
        i = 0
        while i < self.traceNo:
            if i != 0:
                d += ","
            d += self.traces[i]
            i += 1
        d = "data=[" + d + "];\n"

        xLimData = ""
        if not xlim == []:
            xLimData = "range: [%.2E,%.2E],\n" % (xlim[0], xlim[1])
        yLimData = ""
        if not ylim == []:
            yLimData = "range: [%.2E,%.2E],\n" % (ylim[0], ylim[1])

        # now layout
        l = (
            "layout = {\n\
            hovermode: 'closest',\n\
            xaxis:{\n\
            zeroline:false,\n\
            "
            + self.layoutx
            + "\
            "
            + xLimData
            + "\
            title: '"
            + self.xlabel
            + "',\n\
            ticks: 'inside',\n\
            showline: true\n\
            },\n\
            yaxis:{\n\
            zeroline:false,\n\
            "
            + self.layouty
            + "\
            "
            + yLimData
            + "\
            title: '"
            + self.ylabel
            + "',\n\
            ticks: 'inside' ,\n\
            showline: true \n\
            }\n\
            };\n"
        )

        if filename == "":
            # emit directly to stdout for embedding in a generated page
            if name == "":
                name = "plotdiv"
            if self.title != "":
                print("<p>" + self.title + "</p>")
            print(
                "<div id='"
                + name
                + "' style='width:%dpx;height:%dpx;'></div>\n" % (width, height)
            )
            print("<script id='" + scriptName + "' type='text/javascript'>\n")
            print("plotarea = document.getElementById('" + name + "');\n")
            print(d)
            print(l)
            print("Plotly.plot(plotarea, data, layout);\n")
            print("</script>\n")
        else:
            # write the same markup into the requested file
            f = open(filename, "w")
            if name == "":
                name = "plotdiv"
            if self.title != "":
                f.write("<p>" + self.title + "</p>")
            f.write(
                "<div id='"
                + name
                + "' style='width:%dpx;height:%dpx;'></div>\n" % (width, height)
            )
            f.write("<script id='" + scriptName + "' type='text/javascript'>\n")
            f.write("plotarea = document.getElementById('" + name + "')\n")
            f.write(d)
            f.write(l)
            f.write("Plotly.plot(plotarea, data, layout);\n")
            f.write("</script>\n")
f.close() | ARC-Alkali-Rydberg-Calculator | /ARC_Alkali_Rydberg_Calculator-3.3.0-cp311-cp311-win_amd64.whl/arc/web_functionality.py | web_functionality.py |
from __future__ import division, print_function, absolute_import
from arc._database import sqlite3
from arc.wigner import Wigner6j, CG, WignerDmatrix
from arc.alkali_atom_functions import (
_atomLightAtomCoupling,
singleAtomState,
compositeState,
)
from scipy.constants import physical_constants, pi
import gzip
import sys
import os
import datetime
import matplotlib
from matplotlib.colors import LinearSegmentedColormap
from arc.calculations_atom_single import StarkMap
from arc.alkali_atom_functions import (
printStateStringLatex,
printStateString,
printStateLetter,
)
from arc.divalent_atom_functions import DivalentAtom
from scipy.special import factorial
from scipy.sparse.linalg import eigsh
from scipy.sparse import csr_matrix
from scipy.optimize import curve_fit
from scipy.constants import e as C_e
from scipy.constants import h as C_h
from scipy.constants import c as C_c
import numpy as np
from math import exp, sqrt
import matplotlib.pyplot as plt
import matplotlib as mpl
# Global Matplotlib defaults for every figure produced by this module:
# inward-pointing major and minor ticks on all four sides, serif fonts.
mpl.rcParams["xtick.minor.visible"] = True
mpl.rcParams["ytick.minor.visible"] = True
mpl.rcParams["xtick.major.size"] = 8
mpl.rcParams["ytick.major.size"] = 8
mpl.rcParams["xtick.minor.size"] = 4
mpl.rcParams["ytick.minor.size"] = 4
mpl.rcParams["xtick.direction"] = "in"
mpl.rcParams["ytick.direction"] = "in"
mpl.rcParams["xtick.top"] = True
mpl.rcParams["ytick.right"] = True
mpl.rcParams["font.family"] = "serif"
# Python 2/3 compatibility: Python 3 has no xrange builtin, so alias it to
# range (the loops below were originally written against xrange).
if sys.version_info > (2,):
    xrange = range
# Folder where precalculated data (memoization database, saved angular
# matrices) is cached between sessions.
DPATH = os.path.join(os.path.expanduser("~"), ".arc-data")
# Public API of this module.
__all__ = ["PairStateInteractions", "StarkMapResonances"]
class PairStateInteractions:
"""
Calculates Rydberg level diagram (spaghetti) for the given pair state
Initializes Rydberg level spaghetti calculation for the given atom
species (or for two atoms of different species) in the vicinity
of the given pair state. For details of calculation see
Ref. [1]_. For a quick start point example see
`interactions example snippet`_.
For inter-species calculations see
`inter-species interaction calculation snippet`_.
.. _`interactions example snippet`:
./Rydberg_atoms_a_primer.html#Short-range-interactions
.. _`inter-species interaction calculation snippet`:
./ARC_3_0_introduction.html#Inter-species-pair-state-calculations
Parameters:
atom (:obj:`arc.alkali_atom_functions.AlkaliAtom` or :obj:`arc.divalent_atom_functions.DivalentAtom`):
= {
:obj:`arc.alkali_atom_data.Lithium6`,
:obj:`arc.alkali_atom_data.Lithium7`,
:obj:`arc.alkali_atom_data.Sodium`,
:obj:`arc.alkali_atom_data.Potassium39`,
:obj:`arc.alkali_atom_data.Potassium40`,
:obj:`arc.alkali_atom_data.Potassium41`,
:obj:`arc.alkali_atom_data.Rubidium85`,
:obj:`arc.alkali_atom_data.Rubidium87`,
:obj:`arc.alkali_atom_data.Caesium`,
:obj:`arc.divalent_atom_data.Strontium88`,
:obj:`arc.divalent_atom_data.Calcium40`
:obj:`arc.divalent_atom_data.Ytterbium174` }
Select the alkali metal for energy level
diagram calculation
n (int):
principal quantum number for the *first* atom
l (int):
orbital angular momentum for the *first* atom
j (float):
total angular momentum for the *first* atom
nn (int):
principal quantum number for the *second* atom
ll (int):
orbital angular momentum for the *second* atom
jj (float):
total angular momentum for the *second* atom
m1 (float):
projection of the total angular momentum on z-axis
for the *first* atom
m2 (float):
projection of the total angular momentum on z-axis
for the *second* atom
interactionsUpTo (int):
Optional. If set to 1, includes only
dipole-dipole interactions. If set to 2 includes interactions
up to quadrupole-quadrupole. Default value is 1.
s (float):
optional, spin state of the first atom. Default value
of 0.5 is correct for :obj:`arc.alkali_atom_functions.AlkaliAtom`
but for :obj:`arc.divalent_atom_functions.DivalentAtom`
it has to be explicitly set to 0 or 1 for
singlet and triplet states respectively.
**If `s2` is not specified, it is assumed that the second
atom is in the same spin state.**
s2 (float):
optinal, spin state of the second atom. If not
specified (left to default value None) it will assume spin
state of the first atom.
atom2 (:obj:`arc.alkali_atom_functions.AlkaliAtom` or :obj:`arc.divalent_atom_functions.DivalentAtom`):
optional,
specifies atomic species for the second atom, enabeling
calculation of **inter-species pair-state interactions**.
If not specified (left to default value None) it will assume
spin state of the first atom.
References:
.. [1] T. G Walker, M. Saffman, PRA **77**, 032723 (2008)
https://doi.org/10.1103/PhysRevA.77.032723
Examples:
**Advanced interfacing of pair-state interactions calculations
(PairStateInteractions class).** This
is an advanced example intended for building up extensions to the
existing code. If you want to directly access the pair-state
interaction matrix, constructed by :obj:`defineBasis`,
you can assemble it easily from diagonal part
(stored in :obj:`matDiagonal` ) and off-diagonal matrices whose
spatial dependence is :math:`R^{-3},R^{-4},R^{-5}` stored in that
order in :obj:`matR`. Basis states are stored in :obj:`basisStates`
array.
>>> from arc import *
>>> calc = PairStateInteractions(Rubidium(), 60,0,0.5, \
60,0,0.5, 0.5,0.5,interactionsUpTo = 1)
>>> # theta=0, phi = 0, range of pqn, range of l, deltaE = 25e9
>>> calc.defineBasis(0 ,0 , 5, 5, 25e9, progressOutput=True)
>>> # now calc stores interaction matrix and relevant basis
>>> # we can access this directly and generate interaction matrix
>>> # at distance rval :
>>> rval = 4 # in mum
>>> matrix = calc.matDiagonal
>>> rX = (rval*1.e-6)**3
>>> for matRX in calc.matR:
>>> matrix = matrix + matRX/rX
>>> rX *= (rval*1.e-6)
>>> # matrix variable now holds full interaction matrix for
>>> # interacting atoms at distance rval calculated in
>>> # pair-state basis states can be accessed as
>>> basisStates = calc.basisStates
"""
dataFolder = DPATH
# =============================== Methods ===============================
def __init__(
    self,
    atom,
    n,
    l,
    j,
    nn,
    ll,
    jj,
    m1,
    m2,
    interactionsUpTo=1,
    s=0.5,
    s2=None,
    atom2=None,
):
    """
    Store the pair-state definition and initialise all containers that
    are later filled in by :obj:`defineBasis` and the diagonalisation
    routines. See the class docstring for the meaning of all parameters.

    Raises:
        ValueError: if the requested spin values are inconsistent with
            the atomic species (alkali vs. divalent atom), or with the
            requested angular momentum projections m1, m2.
    """
    # alkali atom type, principal quantum number, orbital angular momentum,
    # total angular momentum projections of the angular momentum on z axis
    self.atom1 = atom  #: the first atom type (isotope)
    if atom2 is None:
        # single-species calculation: both atoms are the same isotope
        self.atom2 = atom  #: the second atom type (isotope)
    else:
        self.atom2 = atom2  #: the second atom type (isotope)
    self.n = n  # : pair-state definition: principal quantum number of the first atom
    self.l = l  # : pair-state definition: orbital angular momentum of the first atom
    self.j = j  # : pair-state definition: total angular momentum of the first atom
    self.nn = nn  # : pair-state definition: principal quantum number of the second atom
    self.ll = ll  # : pair-state definition: orbital angular momentum of the second atom
    self.jj = jj  # : pair-state definition: total angular momentum of the second atom
    self.m1 = m1  # : pair-state definition: projection of the total ang. momentum for the *first* atom
    self.m2 = m2  # : pair-state definition: projection of the total angular momentum for the *second* atom
    self.interactionsUpTo = interactionsUpTo
    """ Specifies up to which approximation we include in pair-state interactions.
        By default value is 1, corresponding to pair-state interactions up to
        dipole-dipole coupling. Value of 2 is also supported, corresponding
        to pair-state interactions up to quadrupole-quadrupole coupling.
    """

    # divalent atoms have singlet (s=0) and triplet (s=1) series; the spin
    # must therefore be given explicitly for them
    if issubclass(type(atom), DivalentAtom) and not (s == 0 or s == 1):
        raise ValueError(
            "total angular spin s has to be defined explicitly "
            "for calculations, and value has to be 0 or 1 "
            "for singlet and tripplet states respectively."
        )
    self.s1 = s  #: total spin angular momentum, optional (default 0.5)

    if s2 is None:
        # second atom defaults to the same spin state as the first
        self.s2 = s
    else:
        self.s2 = s2

    # check that values of spin states are valid for entered atomic species
    if issubclass(type(self.atom1), DivalentAtom):
        if abs(self.s1) > 0.1 and abs(self.s1 - 1) > 0.1:
            raise ValueError(
                "atom1 is DivalentAtom and its spin has to be "
                "s=0 or s=1 (for singlet and triplet states "
                "respectively)"
            )
    elif abs(self.s1 - 0.5) > 0.1:
        raise ValueError(
            "atom1 is AlkaliAtom and its spin has to be " "s=0.5"
        )
    if issubclass(type(self.atom2), DivalentAtom):
        if abs(self.s2) > 0.1 and abs(self.s2 - 1) > 0.1:
            raise ValueError(
                "atom2 is DivalentAtom and its spin has to be "
                "s=0 or s=1 (for singlet and triplet states "
                "respectively)"
            )
    elif abs(self.s2 - 0.5) > 0.1:
        # we have divalent atom
        raise ValueError(
            "atom2 is AlkaliAtom and its spin has to be " "s=0.5"
        )

    # m projections must differ from the spin by an integer
    if abs((self.s1 - self.m1) % 1) > 0.1:
        raise ValueError(
            "atom1 with spin s = %.1d cannot have m1 = %.1d"
            % (self.s1, self.m1)
        )
    if abs((self.s2 - self.m2) % 1) > 0.1:
        raise ValueError(
            "atom2 with spin s = %.1d cannot have m2 = %.1d"
            % (self.s2, self.m2)
        )

    # ====================== J basis (not resolving mj) ===================

    self.coupling = []
    """
        List of matrices defineing coupling strengths between the states in
        J basis (not resolving :math:`m_j` ). Basis is given by
        :obj:`PairStateInteractions.channel`. Used as intermediary for full
        interaction matrix calculation by
        :obj:`PairStateInteractions.defineBasis`.
    """
    self.channel = []
    """
        states relevant for calculation, defined in J basis (not resolving
        :math:`m_j`. Used as intermediary for full interaction matrix
        calculation by :obj:`PairStateInteractions.defineBasis`.
    """

    # ======================= Full basis (resolving mj) ===================

    self.basisStates = []
    """
        List of pair-states for calculation. In the form
        [[n1,l1,j1,mj1,n2,l2,j2,mj2], ...].
        Each state is an array [n1,l1,j1,mj1,n2,l2,j2,mj2] corresponding to
        :math:`|n_1,l_1,j_1,m_{j1},n_2,l_2,j_2,m_{j2}\\rangle` state.
        Calculated by :obj:`PairStateInteractions.defineBasis`.
    """
    self.matrixElement = []
    """
        `matrixElement[i]` gives index of state in
        :obj:`PairStateInteractions.channel` basis
        (that doesn't resolve :math:`m_j` states), for the given index `i`
        of the state in :obj:`PairStateInteractions.basisStates`
        ( :math:`m_j` resolving) basis.
    """

    # variuos parts of interaction matrix in pair-state basis
    self.matDiagonal = []
    """
        Part of interaction matrix in pair-state basis that doesn't depend
        on inter-atomic distance. E.g. diagonal elements of the interaction
        matrix, that describe energies of the pair states in unperturbed
        basis, will be stored here. Basis states are stored in
        :obj:`PairStateInteractions.basisStates`. Calculated by
        :obj:`PairStateInteractions.defineBasis`.
    """
    self.matR = []
    """
        Stores interaction matrices in pair-state basis
        that scale as :math:`1/R^3`, :math:`1/R^4` and :math:`1/R^5`
        with distance in :obj:`matR[0]`, :obj:`matR[1]` and :obj:`matR[2]`
        respectively. These matrices correspond to dipole-dipole
        ( :math:`C_3`), dipole-quadrupole ( :math:`C_4`) and
        quadrupole-quadrupole ( :math:`C_5`) interactions
        coefficients. Basis states are stored in
        :obj:`PairStateInteractions.basisStates`.
        Calculated by :obj:`PairStateInteractions.defineBasis`.
    """
    self.originalPairStateIndex = 0
    """
        index of the original n,l,j,m1,nn,ll,jj,m2 pair-state in the
        :obj:`PairStateInteractions.basisStates` basis.
    """

    self.matE = []
    self.matB_1 = []
    self.matB_2 = []

    # ===================== Eigen states and plotting =====================

    # finding perturbed energy levels
    self.r = []  # detuning scale
    self.y = []  # energy levels
    self.highlight = []

    # pointers towards figure
    self.fig = 0
    self.ax = 0

    # for normalization of the maximum coupling later
    self.maxCoupling = 0.0

    # n,l,j,mj, drive polarization q
    self.drivingFromState = [0, 0, 0, 0, 0]

    # sam = saved angular matrix metadata
    self.angularMatrixFile = "angularMatrix.npy"
    self.angularMatrixFile_meta = "angularMatrix_meta.npy"
    # self.sam = []
    self.savedAngularMatrix_matrix = []

    # initialize precalculated values for factorial term
    # in __getAngularMatrix_M
    def fcoef(l1, l2, m):
        # factorial prefactor used in the sum over polarisations
        return (
            factorial(l1 + l2)
            / (
                factorial(l1 + m)
                * factorial(l1 - m)
                * factorial(l2 + m)
                * factorial(l2 - m)
            )
            ** 0.5
        )

    x = self.interactionsUpTo
    self.fcp = np.zeros((x + 1, x + 1, 2 * x + 1))
    for c1 in range(1, x + 1):
        for c2 in range(1, x + 1):
            for p in range(-min(c1, c2), min(c1, c2) + 1):
                self.fcp[c1, c2, p + x] = fcoef(c1, c2, p)

    # SQLite connection used for memoization of angular matrix elements;
    # opened on demand by __initializeDatabaseForMemoization
    self.conn = False
def __getAngularMatrix_M(self, l, j, ll, jj, l1, j1, l2, j2):
    """
    Angular part of the multipole coupling between two pair-states.

    Returns a matrix whose element [index1, index2] couples the
    projection |j1, m1; j2, m2> of the coupled pair-state (columns of
    index1 run over m1, m2) to the projection |j, m; jj, mm> of the
    original pair-state (index2 runs over m, mm). Results are memoized:
    the eight quantum numbers (with j values doubled so they stay
    integer) are looked up in the pair_angularMatrix SQL table, and the
    matrices themselves are kept in savedAngularMatrix_matrix.
    """
    # did we already calculated this matrix?
    c = self.conn.cursor()
    c.execute(
        """SELECT ind FROM pair_angularMatrix WHERE
         l1 = ? AND j1_x2 = ? AND
         l2 = ? AND j2_x2 = ? AND
         l3 = ? AND j3_x2 = ? AND
         l4 = ? AND j4_x2 = ?
         """,
        (l, j * 2, ll, jj * 2, l1, j1 * 2, l2, j2 * 2),
    )
    index = c.fetchone()
    if index:
        # cache hit: reuse the previously calculated matrix
        return self.savedAngularMatrix_matrix[index[0]]

    # determine coupling order for the first atom
    dl = abs(l - l1)
    dj = abs(j - j1)
    c1 = 0
    if dl == 1 and (dj < 1.1):
        c1 = 1  # dipole coupling
    elif dl == 0 or dl == 2 or dl == 1:
        c1 = 2  # quadrupole coupling
    else:
        raise ValueError("error in __getAngularMatrix_M")
    # determine coupling order for the second atom
    dl = abs(ll - l2)
    dj = abs(jj - j2)
    c2 = 0
    if dl == 1 and (dj < 1.1):
        c2 = 1  # dipole coupling
    elif dl == 0 or dl == 2 or dl == 1:
        c2 = 2  # quadrupole coupling
    else:
        raise ValueError("error in __getAngularMatrix_M")

    am = np.zeros(
        (
            round((2 * j1 + 1) * (2 * j2 + 1)),
            round((2 * j + 1) * (2 * jj + 1)),
        ),
        dtype=np.float64,
    )
    # coupling order beyond the requested approximation: zero matrix
    if (c1 > self.interactionsUpTo) or (c2 > self.interactionsUpTo):
        return am

    j1range = np.linspace(-j1, j1, round(2 * j1) + 1)
    j2range = np.linspace(-j2, j2, round(2 * j2) + 1)
    jrange = np.linspace(-j, j, round(2 * j) + 1)
    jjrange = np.linspace(-jj, jj, round(2 * jj) + 1)

    for m1 in j1range:
        for m2 in j2range:
            # we have chosen the first index
            index1 = round(
                m1 * (2.0 * j2 + 1.0) + m2 + (j1 * (2.0 * j2 + 1.0) + j2)
            )
            for m in jrange:
                for mm in jjrange:
                    # we have chosen the second index
                    index2 = round(
                        m * (2.0 * jj + 1.0)
                        + mm
                        + (j * (2.0 * jj + 1.0) + jj)
                    )

                    # angular matrix element from Sassmannshausen, Heiner,
                    # Merkt, Frederic, Deiglmayr, Johannes
                    # PRA 92: 032505 (2015)
                    elem = (
                        (-1.0) ** (j + jj + self.s1 + self.s2 + l1 + l2)
                        * CG(l, 0, c1, 0, l1, 0)
                        * CG(ll, 0, c2, 0, l2, 0)
                    )
                    elem = (
                        elem
                        * sqrt((2.0 * l + 1.0) * (2.0 * ll + 1.0))
                        * sqrt((2.0 * j + 1.0) * (2.0 * jj + 1.0))
                    )
                    elem = (
                        elem
                        * Wigner6j(l, self.s1, j, j1, c1, l1)
                        * Wigner6j(ll, self.s2, jj, j2, c2, l2)
                    )

                    sumPol = 0.0  # sum over polarisations
                    limit = min(c1, c2)
                    for p in xrange(-limit, limit + 1):
                        # fcp holds precalculated factorial prefactors
                        # (set up in __init__)
                        sumPol = sumPol + self.fcp[
                            c1, c2, p + self.interactionsUpTo
                        ] * CG(j, m, c1, p, j1, m1) * CG(
                            jj, mm, c2, -p, j2, m2
                        )
                    am[index1, index2] = elem * sumPol

    # store in the memoization database so it is never recalculated
    index = len(self.savedAngularMatrix_matrix)
    c.execute(
        """ INSERT INTO pair_angularMatrix
                        VALUES (?,?, ?,?, ?,?, ?,?, ?)""",
        (l, j * 2, ll, jj * 2, l1, j1 * 2, l2, j2 * 2, index),
    )
    self.conn.commit()

    self.savedAngularMatrix_matrix.append(am)
    self.savedAngularMatrixChanged = True

    return am
def __updateAngularMatrixElementsFile(self):
if not (self.savedAngularMatrixChanged):
return
try:
c = self.conn.cursor()
c.execute("""SELECT * FROM pair_angularMatrix """)
data = []
for v in c.fetchall():
data.append(v)
data = np.array(data, dtype=np.float32)
data[:, 1] /= 2.0 # 2 r j1 -> j1
data[:, 3] /= 2.0 # 2 r j2 -> j2
data[:, 5] /= 2.0 # 2 r j3 -> j3
data[:, 7] /= 2.0 # 2 r j4 -> j4
fileHandle = gzip.GzipFile(
os.path.join(self.dataFolder, self.angularMatrixFile_meta), "wb"
)
np.save(fileHandle, data)
fileHandle.close()
except IOError:
print(
"Error while updating angularMatrix \
data meta (description) File "
+ self.angularMatrixFile_meta
)
try:
fileHandle = gzip.GzipFile(
os.path.join(self.dataFolder, self.angularMatrixFile), "wb"
)
np.save(fileHandle, self.savedAngularMatrix_matrix)
fileHandle.close()
except IOError as e:
print(
"Error while updating angularMatrix \
data File "
+ self.angularMatrixFile
)
print(e)
def __loadAngularMatrixElementsFile(self):
try:
fileHandle = gzip.GzipFile(
os.path.join(self.dataFolder, self.angularMatrixFile_meta), "rb"
)
data = np.load(fileHandle, encoding="latin1", allow_pickle=True)
fileHandle.close()
except Exception as ex:
print(ex)
print("Note: No saved angular matrix metadata files to be loaded.")
print(sys.exc_info())
return
data[:, 1] *= 2 # j1 -> 2 r j1
data[:, 3] *= 2 # j2 -> 2 r j2
data[:, 5] *= 2 # j3 -> 2 r j3
data[:, 7] *= 2 # j4 -> 2 r j4
data = np.array(np.rint(data), dtype=np.int)
try:
c = self.conn.cursor()
c.executemany(
"""INSERT INTO pair_angularMatrix
(l1, j1_x2 ,
l2 , j2_x2 ,
l3, j3_x2,
l4 , j4_x2 ,
ind)
VALUES (?,?,?,?,?,?,?,?,?)""",
data,
)
self.conn.commit()
except sqlite3.Error as e:
print("Error while loading precalculated values into the database!")
print(e)
exit()
if len(data) == 0:
print("error")
return
try:
fileHandle = gzip.GzipFile(
os.path.join(self.dataFolder, self.angularMatrixFile), "rb"
)
self.savedAngularMatrix_matrix = np.load(
fileHandle, encoding="latin1", allow_pickle=True
).tolist()
fileHandle.close()
except Exception as ex:
print(ex)
print("Note: No saved angular matrix files to be loaded.")
print(sys.exc_info())
def __isCoupled(self, n, l, j, nn, ll, jj, n1, l1, j1, n2, l2, j2, limit):
    """
    Check whether two pair-states are coupled by multipole interactions.

    Returns False when the pair-state |n1,l1,j1; n2,l2,j2> is identical
    to |n,l,j; nn,ll,jj>, violates the dipole/quadrupole selection
    rules, or its energy defect exceeds `limit` (in Hz). Otherwise
    returns c1 + c2, where c1 and c2 (each 1 for dipole, 2 for
    quadrupole) label the coupling order of atom 1 and atom 2: i.e. 2 =
    dipole-dipole, 3 = dipole-quadrupole, 4 = quadrupole-quadrupole.
    The falsy False / truthy integer >= 2 convention lets callers use
    the result both as a boolean and as an index offset.
    """
    if (
        (
            abs(
                self.__getEnergyDefect(
                    n, l, j, nn, ll, jj, n1, l1, j1, n2, l2, j2
                )
            )
            / C_h
            < limit
        )
        and not (
            n == n1
            and nn == n2
            and l == l1
            and ll == l2
            and j == j1
            and jj == j2
        )
        and not (
            (
                abs(l1 - l) != 1
                and (
                    (
                        abs(j - 0.5) < 0.1 and abs(j1 - 0.5) < 0.1
                    )  # j = 1/2 and j'=1/2 forbidden
                    or (
                        abs(j) < 0.1 and abs(j1 - 1) < 0.1
                    )  # j = 0 and j'=1 forbidden
                    or (
                        abs(j - 1) < 0.1 and abs(j1) < 0.1
                    )  # j = 1 and j'=0 forbidden
                )
            )
            or (
                abs(l2 - ll) != 1
                and (
                    (
                        abs(jj - 0.5) < 0.1 and abs(j2 - 0.5) < 0.1
                    )  # j = 1/2 and j'=1/2 forbidden
                    or (
                        abs(jj) < 0.1 and abs(j2 - 1) < 0.1
                    )  # j = 0 and j'=1 forbidden
                    or (
                        abs(jj - 1) < 0.1 and abs(j2) < 0.1
                    )  # j = 1 and j'=0 forbidden
                )
            )
        )
        and not (abs(j) < 0.1 and abs(j1) < 0.1)  # j = 0 and j'=0 forbiden
        and not (abs(jj) < 0.1 and abs(j2) < 0.1)
        and not (
            abs(l) < 0.1 and abs(l1) < 0.1
        )  # l = 0 and l' = 0 is forbiden
        and not (abs(ll) < 0.1 and abs(l2) < 0.1)
    ):
        # determine coupling order for the first atom
        dl = abs(l - l1)
        dj = abs(j - j1)
        c1 = 0
        if dl == 1 and (dj < 1.1):
            c1 = 1  # dipole coupling
        elif (
            (dl == 0 or dl == 2 or dl == 1)
            and (dj < 2.1)
            and (2 <= self.interactionsUpTo)
        ):
            c1 = 2  # quadrupole coupling
        else:
            return False
        # determine coupling order for the second atom
        dl = abs(ll - l2)
        dj = abs(jj - j2)
        c2 = 0
        if dl == 1 and (dj < 1.1):
            c2 = 1  # dipole coupling
        elif (
            (dl == 0 or dl == 2 or dl == 1)
            and (dj < 2.1)
            and (2 <= self.interactionsUpTo)
        ):
            c2 = 2  # quadrupole coupling
        else:
            return False
        return c1 + c2
    else:
        return False
def __getEnergyDefect(self, n, l, j, nn, ll, jj, n1, l1, j1, n2, l2, j2):
"""
Energy defect between |n,l,j>x|nn,ll,jj> state and |n1,l1,j1>x|n1,l1,j1>
state of atom1 and atom2 in respective spins states s1 and s2
Takes spin vales s1 and s2 as the one defined when defining calculation.
Parameters:
n (int): principal quantum number
l (int): orbital angular momenutum
j (float): total angular momentum
nn (int): principal quantum number
ll (int): orbital angular momenutum
jj (float): total angular momentum
n1 (int): principal quantum number
l1 (int): orbital angular momentum
j1 (float): total angular momentum
n2 (int): principal quantum number
l2 (int): orbital angular momentum
j2 (float): total angular momentum
Returns:
float: energy defect (SI units: J)
"""
return C_e * (
self.atom1.getEnergy(n1, l1, j1, s=self.s1)
+ self.atom2.getEnergy(n2, l2, j2, s=self.s2)
- self.atom1.getEnergy(n, l, j, s=self.s1)
- self.atom2.getEnergy(nn, ll, jj, s=self.s2)
)
def __makeRawMatrix2(
self,
n,
l,
j,
nn,
ll,
jj,
k,
lrange,
limit,
limitBasisToMj,
progressOutput=False,
debugOutput=False,
):
# limit = limit in Hz on energy defect
# k defines range of n' = [n-k, n+k]
dimension = 0
# which states/channels contribute significantly in the second order perturbation?
states = []
# original pairstate index
opi = 0
# this numbers are conserved if we use only dipole-dipole interactions
Lmod2 = (l + ll) % 2
l1start = max(l - self.interactionsUpTo, 0)
l2start = max(ll - self.interactionsUpTo, 0)
if debugOutput:
print("\n ======= Relevant states =======\n")
for n1 in xrange(max(n - k, 1), n + k + 1):
for n2 in xrange(max(nn - k, 1), nn + k + 1):
l1max = max(l + self.interactionsUpTo, lrange) + 1
l1max = min(l1max, n1 - 1)
for l1 in xrange(l1start, l1max):
l2max = max(ll + self.interactionsUpTo, lrange) + 1
l2max = min(l2max, n2 - 1)
for l2 in xrange(l2start, l2max):
j1 = l1 - self.s1
while j1 < -0.1:
j1 += 2 * self.s1
while j1 <= l1 + self.s1 + 0.1:
j2 = l2 - self.s2
while j2 < -0.1:
j2 += 2 * self.s2
while j2 <= l2 + self.s2 + 0.1:
ed = (
self.__getEnergyDefect(
n,
l,
j,
nn,
ll,
jj,
n1,
l1,
j1,
n2,
l2,
j2,
)
/ C_h
)
if (
abs(ed) < limit
and (
not (self.interactionsUpTo == 1)
or (Lmod2 == ((l1 + l2) % 2))
)
and (
(not limitBasisToMj)
or (j1 + j2 + 0.1 > self.m1 + self.m2)
)
and (
n1 >= self.atom1.groundStateN
or [n1, l1, j1]
in self.atom1.extraLevels
)
and (
n2 >= self.atom2.groundStateN
or [n2, l2, j2]
in self.atom2.extraLevels
)
):
if debugOutput:
pairState = (
"|"
+ printStateString(
n1, l1, j1, s=self.s1
)
+ ","
+ printStateString(
n2, l2, j2, s=self.s2
)
+ ">"
)
print(
pairState
+ (
"\t EnergyDefect = %.3f GHz"
% (ed * 1.0e-9)
)
)
states.append([n1, l1, j1, n2, l2, j2])
if (
n == n1
and nn == n2
and l == l1
and ll == l2
and j == j1
and jj == j2
):
opi = dimension
dimension = dimension + 1
j2 = j2 + 1.0
j1 = j1 + 1.0
if debugOutput:
print("\tMatrix dimension\t=\t", dimension)
# mat_value, mat_row, mat_column for each sparce matrix describing
# dipole-dipole, dipole-quadrupole (and quad-dipole) and quadrupole-quadrupole
couplingMatConstructor = [
[[], [], []] for i in xrange(2 * self.interactionsUpTo - 1)
]
# original pair-state (i.e. target pair state) Zeeman Shift
opZeemanShift = (
(
self.atom1.getZeemanEnergyShift(
self.l, self.j, self.m1, self.Bz, s=self.s1
)
+ self.atom2.getZeemanEnergyShift(
self.ll, self.jj, self.m2, self.Bz, s=self.s2
)
)
/ C_h
* 1.0e-9
) # in GHz
if debugOutput:
print("\n ======= Coupling strengths (radial part only) =======\n")
maxCoupling = "quadrupole-quadrupole"
if self.interactionsUpTo == 1:
maxCoupling = "dipole-dipole"
if debugOutput:
print(
"Calculating coupling (up to ",
maxCoupling,
") between the pair states",
)
for i in xrange(dimension):
ed = (
self.__getEnergyDefect(
states[opi][0],
states[opi][1],
states[opi][2],
states[opi][3],
states[opi][4],
states[opi][5],
states[i][0],
states[i][1],
states[i][2],
states[i][3],
states[i][4],
states[i][5],
)
/ C_h
* 1.0e-9
- opZeemanShift
)
pairState1 = (
"|"
+ printStateString(
states[i][0], states[i][1], states[i][2], s=self.s1
)
+ ","
+ printStateString(
states[i][3], states[i][4], states[i][5], s=self.s2
)
+ ">"
)
states[i].append(ed) # energy defect of given state
for j in xrange(i + 1, dimension):
coupled = self.__isCoupled(
states[i][0],
states[i][1],
states[i][2],
states[i][3],
states[i][4],
states[i][5],
states[j][0],
states[j][1],
states[j][2],
states[j][3],
states[j][4],
states[j][5],
limit,
)
if states[i][0] == 24 and states[j][0] == 18:
print("\n")
print(states[i])
print(states[j])
print(coupled)
if coupled and (
abs(states[i][0] - states[j][0]) <= k
and abs(states[i][3] - states[j][3]) <= k
):
if debugOutput:
pairState2 = (
"|"
+ printStateString(
states[j][0],
states[j][1],
states[j][2],
s=self.s1,
)
+ ","
+ printStateString(
states[j][3],
states[j][4],
states[j][5],
s=self.s2,
)
+ ">"
)
print(pairState1 + " <---> " + pairState2)
couplingStregth = (
_atomLightAtomCoupling(
states[i][0],
states[i][1],
states[i][2],
states[i][3],
states[i][4],
states[i][5],
states[j][0],
states[j][1],
states[j][2],
states[j][3],
states[j][4],
states[j][5],
self.atom1,
atom2=self.atom2,
s=self.s1,
s2=self.s2,
)
/ C_h
* 1.0e-9
)
couplingMatConstructor[coupled - 2][0].append(
couplingStregth
)
couplingMatConstructor[coupled - 2][1].append(i)
couplingMatConstructor[coupled - 2][2].append(j)
exponent = coupled + 1
if debugOutput:
print(
(
"\tcoupling (C_%d/R^%d) = %.5f"
% (
exponent,
exponent,
couplingStregth * (1e6) ** (exponent),
)
),
"/R^",
exponent,
" GHz (mu m)^",
exponent,
"\n",
)
# coupling = [1,1] dipole-dipole, [2,1] quadrupole dipole, [2,2] quadrupole quadrupole
couplingMatArray = [
csr_matrix(
(
couplingMatConstructor[i][0],
(
couplingMatConstructor[i][1],
couplingMatConstructor[i][2],
),
),
shape=(dimension, dimension),
)
for i in xrange(len(couplingMatConstructor))
]
return states, couplingMatArray
def __initializeDatabaseForMemoization(self):
# memoization of angular parts
self.conn = sqlite3.connect(
os.path.join(self.dataFolder, "precalculated_pair.db")
)
c = self.conn.cursor()
# ANGULAR PARTS
c.execute("""DROP TABLE IF EXISTS pair_angularMatrix""")
c.execute(
"""SELECT COUNT(*) FROM sqlite_master
WHERE type='table' AND name='pair_angularMatrix';"""
)
if c.fetchone()[0] == 0:
# create table
try:
c.execute(
"""CREATE TABLE IF NOT EXISTS pair_angularMatrix
(l1 TINYINT UNSIGNED, j1_x2 TINYINT UNSIGNED,
l2 TINYINT UNSIGNED, j2_x2 TINYINT UNSIGNED,
l3 TINYINT UNSIGNED, j3_x2 TINYINT UNSIGNED,
l4 TINYINT UNSIGNED, j4_x2 TINYINT UNSIGNED,
ind INTEGER,
PRIMARY KEY (l1,j1_x2, l2,j2_x2, l3,j3_x2, l4,j4_x2)
) """
)
except sqlite3.Error as e:
print(e)
self.conn.commit()
self.__loadAngularMatrixElementsFile()
self.savedAngularMatrixChanged = False
def __closeDatabaseForMemoization(self):
self.conn.commit()
self.conn.close()
self.conn = False
def getLeRoyRadius(self):
"""
Returns Le Roy radius for initial pair-state.
Le Roy radius [#leroy]_ is defined as
:math:`2(\\langle r_1^2 \\rangle^{1/2} + \\langle r_2^2 \\rangle^{1/2})`,
where :math:`r_1` and :math:`r_2` are electron coordinates for the
first and the second atom in the initial pair-state.
Below this radius, calculations are not valid since electron
wavefunctions start to overlap.
Returns:
float: LeRoy radius measured in :math:`\\mu m`
References:
.. [#leroy] R.J. Le Roy, Can. J. Phys. **52**, 246 (1974)
http://www.nrcresearchpress.com/doi/abs/10.1139/p74-035
"""
step = 0.001
r1, psi1_r1 = self.atom1.radialWavefunction(
self.l,
0.5,
self.j,
self.atom1.getEnergy(self.n, self.l, self.j, s=self.s1) / 27.211,
self.atom1.alphaC ** (1 / 3.0),
2.0 * self.n * (self.n + 15.0),
step,
)
sqrt_r1_on2 = np.trapz(
np.multiply(np.multiply(psi1_r1, psi1_r1), np.multiply(r1, r1)),
x=r1,
)
r2, psi2_r2 = self.atom2.radialWavefunction(
self.ll,
0.5,
self.jj,
self.atom2.getEnergy(self.nn, self.ll, self.jj, s=self.s2) / 27.211,
self.atom2.alphaC ** (1 / 3.0),
2.0 * self.nn * (self.nn + 15.0),
step,
)
sqrt_r2_on2 = np.trapz(
np.multiply(np.multiply(psi2_r2, psi2_r2), np.multiply(r2, r2)),
x=r2,
)
return (
2.0
* (sqrt(sqrt_r1_on2) + sqrt(sqrt_r2_on2))
* (physical_constants["Bohr radius"][0] * 1.0e6)
)
def getC6perturbatively(
    self, theta, phi, nRange, energyDelta, degeneratePerturbation=False
):
    r"""
    Calculates :math:`C_6` from second order perturbation theory.

    Calculates
    :math:`C_6=\sum_{\rm r',r''}|\langle {\rm r',r''}|V|\
    {\rm r1,r2}\rangle|^2/\Delta_{\rm r',r''}`, where
    :math:`\Delta_{\rm r',r''}\equiv E({\rm r',r''})-E({\rm r1, r2})`

    When second order perturbation couples to multiple energy degenerate
    states, users should use **degenerate perturbation calculations** by
    setting `degeneratePerturbation=True` .

    This calculation is faster then full diagonalization, but it is valid
    only far from the so called spaghetti region that occurs when atoms
    are close to each other. In that region multiple levels are strongly
    coupled, and one needs to use full diagonalization. In region where
    perturbative calculation is correct, energy level shift can be
    obtained as :math:`V(R)=-C_6/R^6`

    See `perturbative C6 calculations example snippet`_ and for
    degenerate perturbation calculation see
    `degenerate pertubation C6 calculation example snippet`_

    .. _`perturbative C6 calculations example snippet`:
        ./Rydberg_atoms_a_primer.html#Dispersion-Coefficients

    .. _`degenerate pertubation C6 calculation example snippet`:
        ./ARC_3_0_introduction.html#Pertubative-C6-calculation-in-the-manifold-of-degenerate-states

    Parameters:
        theta (float):
            orientation of inter-atomic axis with respect
            to quantization axis (:math:`z`) in Euler coordinates
            (measured in units of radian)
        phi (float):
            orientation of inter-atomic axis with respect
            to quantization axis (:math:`z`) in Euler coordinates
            (measured in units of radian)
        nRange (int):
            how much below and above the given principal quantum number
            of the pair state we should be looking
        energyDelta (float):
            what is maximum energy difference ( :math:`\Delta E/h` in Hz)
            between the original pair state and the other pair states that we are including in
            calculation
        degeneratePerturbation (bool):
            optional, default False. Should one
            use degenerate perturbation theory. This should be used whenever
            angle between quantisation and interatomic axis is non-zero,
            as well as when one considers non-stretched states.

    Returns:
        float: if **degeneratePerturbation=False**, returns
        :math:`C_6` measured in :math:`\text{GHz }\mu\text{m}^6`;
        if **degeneratePerturbation=True**, returns array of
        :math:`C_6` measured in :math:`\text{GHz }\mu\text{m}^6`
        AND array of corresponding eigenvectors in
        :math:`\{m_{j_1}=-j_1, \ldots, m_{j_1} = +j1\}\bigotimes \
        \{ m_{j_2}=-j_2, \ldots, m_{j_2} = +j2\}`
        basis

    Example:
        If we want to quickly calculate :math:`C_6` for two Rubidium
        atoms in state :math:`62 D_{3/2} m_j=3/2`, positioned in space
        along the shared quantization axis::

            from arc import *
            calculation = PairStateInteractions(Rubidium(), 62, 2, 1.5, 62, 2, 1.5, 1.5, 1.5)
            c6 = calculation.getC6perturbatively(0,0, 5, 25e9)
            print("C_6 = %.0f GHz (mu m)^6" % c6)

        Which returns::

            C_6 = 767 GHz (mu m)^6

        Quick calculation of angular anisotropy for Rubidium
        :math:`D_{5/2},m_j=5/2` states::

            # Rb 60 D_{5/2}, mj=2.5 , 60 D_{5/2}, mj=2.5 pair state
            calculation1 = PairStateInteractions(Rubidium(), 60, 2, 2.5, 60, 2, 2.5, 2.5, 2.5)
            # list of atom orientations
            thetaList = np.linspace(0,pi,30)
            # do calculation of C6 perturbatively for all atom orientations
            c6 = []
            for theta in thetaList:
                value = calculation1.getC6perturbatively(theta,0,5,25e9)
                c6.append(value)
                print ("theta = %.2f * pi \tC6 = %.2f GHz mum^6" % (theta/pi,value))
            # plot results
            plot(thetaList/pi,c6,"b-")
            title("Rb, pairstate 60 $D_{5/2},m_j = 5/2$, 60 $D_{5/2},m_j = 5/2$")
            xlabel(r"$\Theta /\pi$")
            ylabel(r"$C_6$ (GHz $\mu$m${}^6$")
            show()
    """
    self.__initializeDatabaseForMemoization()

    # ========= START OF THE MAIN CODE ===========

    # wigner D matrix allows calculations with arbitrary orientation of
    # the two atoms
    wgd = WignerDmatrix(theta, phi)

    # any conservation?
    # this number is conserved if we use only dipole-dipole interactions
    Lmod2 = (self.l + self.ll) % 2

    # find nearby states
    lmin1 = self.l - 1
    if lmin1 < -0.1:
        lmin1 = 1
    lmin2 = self.ll - 1
    if lmin2 < -0.1:
        lmin2 = 1

    # np.complex was removed in NumPy 1.24; use the builtin complex type
    interactionMatrix = np.zeros(
        (
            round((2 * self.j + 1) * (2 * self.jj + 1)),
            round((2 * self.j + 1) * (2 * self.jj + 1)),
        ),
        dtype=complex,
    )

    for n1 in range(max(self.n - nRange, 1), self.n + nRange + 1):
        for n2 in range(max(self.nn - nRange, 1), self.nn + nRange + 1):
            lmax1 = min(self.l + 2, n1)
            for l1 in range(lmin1, lmax1, 2):
                lmax2 = min(self.ll + 2, n2)
                for l2 in range(lmin2, lmax2, 2):
                    if (l1 + l2) % 2 == Lmod2:
                        j1 = l1 - self.s1
                        while j1 < -0.1:
                            j1 += 2 * self.s1
                        while j1 <= l1 + self.s1 + 0.1:
                            j2 = l2 - self.s2
                            while j2 < -0.1:
                                j2 += 2 * self.s2
                            while j2 <= l2 + self.s2 + 0.1:
                                coupled = self.__isCoupled(
                                    self.n,
                                    self.l,
                                    self.j,
                                    self.nn,
                                    self.ll,
                                    self.jj,
                                    n1,
                                    l1,
                                    j1,
                                    n2,
                                    l2,
                                    j2,
                                    energyDelta,
                                )
                                if (
                                    coupled
                                    and (
                                        not (self.interactionsUpTo == 1)
                                        or (Lmod2 == ((l1 + l2) % 2))
                                    )
                                    and (
                                        n1 >= self.atom1.groundStateN
                                        or [n1, l1, j1]
                                        in self.atom1.extraLevels
                                    )
                                    and (
                                        n2 >= self.atom2.groundStateN
                                        or [n2, l2, j2]
                                        in self.atom2.extraLevels
                                    )
                                ):
                                    energyDefect = (
                                        self.__getEnergyDefect(
                                            self.n,
                                            self.l,
                                            self.j,
                                            self.nn,
                                            self.ll,
                                            self.jj,
                                            n1,
                                            l1,
                                            j1,
                                            n2,
                                            l2,
                                            j2,
                                        )
                                        / C_h
                                    )
                                    energyDefect = (
                                        energyDefect * 1.0e-9
                                    )  # GHz
                                    if abs(energyDefect) < 1e-10:
                                        raise ValueError(
                                            "The requested pair-state "
                                            "is dipole coupled resonatly "
                                            "(energy defect = 0)"
                                            "to other pair-states"
                                            "Aborting pertubative "
                                            "calculation."
                                            "(This usually happens for "
                                            "high-L states for which "
                                            "identical quantum defects give "
                                            "raise to degeneracies, making "
                                            "total L ultimately not "
                                            "conserved quantum number) "
                                        )

                                    # calculate radial part
                                    couplingStregth = (
                                        _atomLightAtomCoupling(
                                            self.n,
                                            self.l,
                                            self.j,
                                            self.nn,
                                            self.ll,
                                            self.jj,
                                            n1,
                                            l1,
                                            j1,
                                            n2,
                                            l2,
                                            j2,
                                            self.atom1,
                                            atom2=self.atom2,
                                            s=self.s1,
                                            s2=self.s2,
                                        )
                                        * (1.0e-9 * (1.0e6) ** 3 / C_h)
                                    )  # GHz / mum^3

                                    d = self.__getAngularMatrix_M(
                                        self.l,
                                        self.j,
                                        self.ll,
                                        self.jj,
                                        l1,
                                        j1,
                                        l2,
                                        j2,
                                    )

                                    # second-order perturbation
                                    # contribution of this channel
                                    interactionMatrix += (
                                        d.conj().T.dot(d)
                                        * abs(couplingStregth) ** 2
                                        / energyDefect
                                    )

                                j2 = j2 + 1.0
                            j1 = j1 + 1.0

    # rotate the interaction matrix to the frame set by theta, phi
    rotationMatrix = np.kron(
        wgd.get(self.j).toarray(), wgd.get(self.jj).toarray()
    )
    interactionMatrix = rotationMatrix.dot(
        interactionMatrix.dot(rotationMatrix.conj().T)
    )
    # ========= END OF THE MAIN CODE ===========
    self.__closeDatabaseForMemoization()

    value, vectors = np.linalg.eigh(interactionMatrix)
    vectors = vectors.T
    stateCom = compositeState(
        singleAtomState(self.j, self.m1), singleAtomState(self.jj, self.m2)
    ).T

    if not degeneratePerturbation:
        for i, v in enumerate(vectors):
            if abs(np.vdot(v, stateCom)) > 1 - 1e-9:
                return value[i]
        # The requested state is not an eigenstate of the interaction
        # (this happens for non-zero theta, or for non-stretched states);
        # return its diagonal expectation value. For the full treatment
        # call this method with degeneratePerturbation=True, which
        # returns all eigenvalues AND eigenvectors in the
        # {mj1 = -j1 ... +j1} x {mj2 = -j2 ... +j2} product basis.
        return np.real(
            stateCom.conj().dot(interactionMatrix.dot(stateCom.T))[0][0]
        )
    return np.real(value), vectors
def defineBasis(
    self,
    theta,
    phi,
    nRange,
    lrange,
    energyDelta,
    Bz=0,
    progressOutput=False,
    debugOutput=False,
):
    r"""
    Finds relevant states in the vicinity of the given pair-state

    Finds relevant pair-state basis and calculates interaction matrix.
    Pair-state basis is saved in :obj:`basisStates`.
    Interaction matrix is saved in parts depending on the scaling with
    distance. Diagonal elements :obj:`matDiagonal`, correponding to
    relative energy defects of the pair-states, don't change with
    interatomic separation. Off diagonal elements can depend
    on distance as :math:`R^{-3}, R^{-4}` or :math:`R^{-5}`,
    corresponding to dipole-dipole (:math:`C_3` ), dipole-qudrupole
    (:math:`C_4` ) and quadrupole-quadrupole coupling (:math:`C_5` )
    respectively. These parts of the matrix are stored in
    :obj:`PairStateInteractions.matR`
    in that order. I.e. :obj:`matR[0]`
    stores dipole-dipole coupling
    (:math:`\propto R^{-3}`),
    :obj:`matR[1]` stores dipole-quadrupole
    couplings etc.

    Parameters:
        theta (float): relative orientation of the two atoms
            (see figure on top of the page), range 0 to :math:`\pi`
        phi (float): relative orientation of the two atoms (see figure
            on top of the page), range 0 to :math:`2\pi`
        nRange (int): how much below and above the given principal
            quantum number of the pair state we should be looking?
        lrange (int): what is the maximum angular orbital momentum
            state that we are including in calculation
        energyDelta (float): what is maximum energy difference (
            :math:`\Delta E/h` in Hz)
            between the original pair state and the other pair states
            that we are including in calculation
        Bz (float): optional, magnetic field directed along z-axis in
            units of Tesla. Calculation will be correct only for weak
            magnetic fields, where paramagnetic term is much stronger
            then diamagnetic term. Diamagnetic term is neglected.
        progressOutput (bool): optional, False by default. If true,
            prints information about the progress of the calculation.
        debugOutput (bool): optional, False by default. If true,
            similarly to progressOutput=True, this will print
            information about the progress of calculations, but with
            more verbose output.

    See also:
        :obj:`arc.alkali_atom_functions.saveCalculation` and
        :obj:`arc.alkali_atom_functions.loadSavedCalculation` for
        information on saving intermediate results of calculation for
        later use.
    """
    self.__initializeDatabaseForMemoization()

    # save call parameters
    self.theta = theta
    self.phi = phi
    self.nRange = nRange
    self.lrange = lrange
    self.energyDelta = energyDelta
    self.Bz = Bz

    self.basisStates = []

    # wignerDmatrix used to rotate single-atom states from the
    # interatomic axis frame into the laboratory frame
    wgd = WignerDmatrix(theta, phi)

    limitBasisToMj = False
    if theta < 0.001:
        # for atoms aligned along z, total Mj = m1 + m2 is conserved,
        # so the basis can be restricted to the original Mj
        limitBasisToMj = True

    originalMj = self.m1 + self.m2

    # channel[i] holds the coupled state [n1, l1, j1, n2, l2, j2, defect];
    # coupling holds the radial matrix elements between channels
    self.channel, self.coupling = self.__makeRawMatrix2(
        self.n,
        self.l,
        self.j,
        self.nn,
        self.ll,
        self.jj,
        nRange,
        lrange,
        energyDelta,
        limitBasisToMj,
        progressOutput=progressOutput,
        debugOutput=debugOutput,
    )

    self.atom1.updateDipoleMatrixElementsFile()
    self.atom2.updateDipoleMatrixElementsFile()

    # generate all the states (with mj principal quantum number)

    # opi = original pairstate index
    opi = 0

    # NEW FOR SPACE MATRIX
    # index[i] is the position in basisStates where states of channel i
    # begin (CSR-like layout); index[-1] is the total number of states.
    # NOTE(review): dtype np.int16 caps the basis at 32767 states —
    # larger bases would silently overflow; confirm upstream limits.
    self.index = np.zeros(len(self.channel) + 1, dtype=np.int16)

    for i in xrange(len(self.channel)):
        self.index[i] = len(self.basisStates)
        stateCoupled = self.channel[i]
        # expand each coupled channel into all allowed (m1, m2) sublevels
        for m1c in np.linspace(
            stateCoupled[2],
            -stateCoupled[2],
            round(1 + 2 * stateCoupled[2]),
        ):
            for m2c in np.linspace(
                stateCoupled[5],
                -stateCoupled[5],
                round(1 + 2 * stateCoupled[5]),
            ):
                if (not limitBasisToMj) or (
                    abs(originalMj - m1c - m2c) < 0.1
                ):
                    # basis state layout: [n1, l1, j1, m1, n2, l2, j2, m2]
                    self.basisStates.append(
                        [
                            stateCoupled[0],
                            stateCoupled[1],
                            stateCoupled[2],
                            m1c,
                            stateCoupled[3],
                            stateCoupled[4],
                            stateCoupled[5],
                            m2c,
                        ]
                    )
                    # NOTE(review): matrixElement is appended to, not reset
                    # here — presumably cleared in __init__; verify before
                    # calling defineBasis twice on the same object.
                    self.matrixElement.append(i)

                    # remember where the original (unperturbed) pair state
                    # ended up in the flattened basis
                    if (
                        abs(stateCoupled[0] - self.n) < 0.1
                        and abs(stateCoupled[1] - self.l) < 0.1
                        and abs(stateCoupled[2] - self.j) < 0.1
                        and abs(m1c - self.m1) < 0.1
                        and abs(stateCoupled[3] - self.nn) < 0.1
                        and abs(stateCoupled[4] - self.ll) < 0.1
                        and abs(stateCoupled[5] - self.jj) < 0.1
                        and abs(m2c - self.m2) < 0.1
                    ):
                        opi = len(self.basisStates) - 1
        # a channel that produced no basis states indicates a problem;
        # print it for diagnostics
        if self.index[i] == len(self.basisStates):
            print(stateCoupled)

    self.index[-1] = len(self.basisStates)

    if progressOutput or debugOutput:
        print("\nCalculating Hamiltonian matrix...\n")

    dimension = len(self.basisStates)
    if progressOutput or debugOutput:
        print("\n\tmatrix (dimension ", dimension, ")\n")

    # INITIALIZING MATICES
    # all (sparce) matrices will be saved in csr format
    # value, row, column
    matDiagonalConstructor = [[], [], []]

    # one constructor per power of R: R^-3 (dipole-dipole) up to
    # R^-(2*interactionsUpTo+1)
    matRConstructor = [
        [[], [], []] for i in xrange(self.interactionsUpTo * 2 - 1)
    ]

    matRIndex = 0
    for c in self.coupling:
        progress = 0.0
        for ii in xrange(len(self.channel)):
            if progressOutput:
                dim = len(self.channel)
                progress += (dim - ii) * 2 - 1
                sys.stdout.write(
                    "\rMatrix R%d %.1f %% (state %d of %d)"
                    % (
                        matRIndex + 3,
                        float(progress) / float(dim**2) * 100.0,
                        ii + 1,
                        len(self.channel),
                    )
                )
                sys.stdout.flush()

            # ed = energy defect of this channel relative to the original
            # pair state (GHz)
            ed = self.channel[ii][6]

            # solves problems with exactly degenerate basisStates
            degeneracyOffset = 0.00000001

            i = self.index[ii]
            dMatrix1 = wgd.get(self.basisStates[i][2])
            dMatrix2 = wgd.get(self.basisStates[i][6])

            for i in xrange(self.index[ii], self.index[ii + 1]):
                statePart1 = singleAtomState(
                    self.basisStates[i][2], self.basisStates[i][3]
                )
                statePart2 = singleAtomState(
                    self.basisStates[i][6], self.basisStates[i][7]
                )
                # rotate individual states
                statePart1 = dMatrix1.T.conjugate().dot(statePart1)
                statePart2 = dMatrix2.T.conjugate().dot(statePart2)

                stateCom = compositeState(statePart1, statePart2)

                # diagonal part (energy defect + Zeeman shift) is filled
                # only once, during the first (dipole-dipole) pass
                if matRIndex == 0:
                    zeemanShift = (
                        (
                            self.atom1.getZeemanEnergyShift(
                                self.basisStates[i][1],
                                self.basisStates[i][2],
                                self.basisStates[i][3],
                                self.Bz,
                                s=self.s1,
                            )
                            + self.atom2.getZeemanEnergyShift(
                                self.basisStates[i][5],
                                self.basisStates[i][6],
                                self.basisStates[i][7],
                                self.Bz,
                                s=self.s2,
                            )
                        )
                        / C_h
                        * 1.0e-9
                    )  # in GHz
                    matDiagonalConstructor[0].append(
                        ed + zeemanShift + degeneracyOffset
                    )
                    degeneracyOffset += 0.00000001
                    matDiagonalConstructor[1].append(i)
                    matDiagonalConstructor[2].append(i)

                # iterate over channels coupled to channel ii (CSR sparse
                # structure of the radial coupling matrix c)
                for dataIndex in xrange(c.indptr[ii], c.indptr[ii + 1]):
                    jj = c.indices[dataIndex]
                    radialPart = c.data[dataIndex]

                    j = self.index[jj]
                    dMatrix3 = wgd.get(self.basisStates[j][2])
                    dMatrix4 = wgd.get(self.basisStates[j][6])

                    if self.index[jj] != self.index[jj + 1]:
                        d = self.__getAngularMatrix_M(
                            self.basisStates[i][1],
                            self.basisStates[i][2],
                            self.basisStates[i][5],
                            self.basisStates[i][6],
                            self.basisStates[j][1],
                            self.basisStates[j][2],
                            self.basisStates[j][5],
                            self.basisStates[j][6],
                        )
                        secondPart = d.dot(stateCom)
                    else:
                        # coupled channel contributed no basis states;
                        # print it for diagnostics (loop below is empty)
                        print(" - - - ", self.channel[jj])

                    for j in xrange(self.index[jj], self.index[jj + 1]):
                        statePart1 = singleAtomState(
                            self.basisStates[j][2], self.basisStates[j][3]
                        )
                        statePart2 = singleAtomState(
                            self.basisStates[j][6], self.basisStates[j][7]
                        )
                        # rotate individual states
                        statePart1 = dMatrix3.T.conjugate().dot(statePart1)
                        statePart2 = dMatrix4.T.conjugate().dot(statePart2)
                        # composite state of two atoms
                        stateCom2 = compositeState(statePart1, statePart2)

                        angularFactor = stateCom2.T.conjugate().dot(
                            secondPart
                        )
                        # for phi = 0 matrix elements are real
                        if abs(self.phi) < 1e-9:
                            angularFactor = angularFactor[0, 0].real
                        else:
                            angularFactor = angularFactor[0, 0]

                        # store both (i, j) and (j, i) entries so the
                        # resulting matrix is Hermitian
                        if abs(angularFactor) > 1.0e-5:
                            matRConstructor[matRIndex][0].append(
                                (radialPart * angularFactor).conj()
                            )
                            matRConstructor[matRIndex][1].append(i)
                            matRConstructor[matRIndex][2].append(j)

                            matRConstructor[matRIndex][0].append(
                                radialPart * angularFactor
                            )
                            matRConstructor[matRIndex][1].append(j)
                            matRConstructor[matRIndex][2].append(i)
        matRIndex += 1
        if progressOutput or debugOutput:
            print("\n")

    self.matDiagonal = csr_matrix(
        (
            matDiagonalConstructor[0],
            (matDiagonalConstructor[1], matDiagonalConstructor[2]),
        ),
        shape=(dimension, dimension),
    )

    self.matR = [
        csr_matrix(
            (
                matRConstructor[i][0],
                (matRConstructor[i][1], matRConstructor[i][2]),
            ),
            shape=(dimension, dimension),
        )
        for i in xrange(self.interactionsUpTo * 2 - 1)
    ]

    self.originalPairStateIndex = opi

    self.__updateAngularMatrixElementsFile()
    self.__closeDatabaseForMemoization()
def diagonalise(
    self,
    rangeR,
    noOfEigenvectors,
    drivingFromState=[0, 0, 0, 0, 0],
    eigenstateDetuning=0.0,
    sortEigenvectors=False,
    progressOutput=False,
    debugOutput=False,
):
    r"""
    Finds eigenstates in atom pair basis.

    ARPACK ( :obj:`scipy.sparse.linalg.eigsh`) calculation of the
    `noOfEigenvectors` eigenvectors closest to the original state. If
    `drivingFromState` is specified as `[n,l,j,mj,q]` coupling between
    the pair-states and the situation where one of the atoms in the
    pair state basis is in :math:`|n,l,j,m_j\rangle` state due to
    driving with a laser field that drives :math:`q` transition
    (+1,0,-1 for :math:`\sigma^-`, :math:`\pi` and :math:`\sigma^+`
    transitions respectively) is calculated and marked by the
    colourmaping these values on the obtained eigenvectors.

    Parameters:
        rangeR ( :obj:`array`): Array of values for distance between
            the atoms (in :math:`\mu` m) for which we want to calculate
            eigenstates.
        noOfEigenvectors (int): number of eigen vectors closest to the
            energy of the original (unperturbed) pair state. Has to be
            smaller then the total number of states.
        eigenstateDetuning (float, optional): Default is 0. This
            specifies detuning from the initial pair-state (in Hz)
            around which we want to find `noOfEigenvectors`
            eigenvectors. This is useful when looking only for couple
            of off-resonant features.
        drivingFromState ([int,int,float,float,int]): Optional. State
            of one of the atoms from the original pair-state basis
            from which we try to drive to the excited pair-basis
            manifold, **assuming that the first of the two atoms is
            already excited to the specified Rydberg state**.
            By default, program will calculate just
            contribution of the original pair-state in the eigenstates
            obtained by diagonalization, and will highlight it's
            admixure by colour mapping the obtained eigenstates plot.
            State is specified as :math:`[n,\ell,j,mj, d]`
            where :math:`d` is +1, 0 or
            -1 for driving :math:`\sigma^-` , :math:`\pi`
            and :math:`\sigma^+` transitions respectively.
        sortEigenvectors(bool): optional, False by default. Tries to
            sort eigenvectors so that given eigen vector index
            corresponds to adiabatically changing eigenstate, as
            detirmined by maximising overlap between old and new
            eigenvectors.
        progressOutput (bool): optional, False by default. If true,
            prints information about the progress of the calculation.
        debugOutput (bool): optional, False by default. If true,
            similarly to progressOutput=True, this will print
            information about the progress of calculations, but with
            more verbose output.
    """
    # NOTE: drivingFromState has a mutable default list, but it is only
    # read, never mutated, so the shared default is harmless here.
    self.r = np.sort(rangeR)
    dimension = len(self.basisStates)

    self.noOfEigenvectors = noOfEigenvectors

    # energy of the state - to be calculated
    self.y = []
    # how much original state is contained in this eigenvector
    self.highlight = []
    # what are the dominant contributing states?
    self.composition = []

    # ARPACK cannot return all eigenpairs; cap the request at dim-1
    if noOfEigenvectors >= dimension - 1:
        noOfEigenvectors = dimension - 1
        print(
            "Warning: Requested number of eigenvectors >=dimension-1\n \
            ARPACK can only find up to dimension-1 eigenvectors, where\
            dimension is matrix dimension.\n"
        )
        if noOfEigenvectors < 1:
            return

    coupling = []
    self.maxCoupling = 0.0
    self.maxCoupledStateIndex = 0
    if drivingFromState[0] != 0:
        self.drivingFromState = drivingFromState
        if progressOutput:
            print("Finding coupling strengths")
        # get first what was the state we are calculating coupling with
        state1 = drivingFromState
        n1 = round(state1[0])
        l1 = round(state1[1])
        j1 = state1[2]
        m1 = state1[3]
        q = state1[4]

        for i in xrange(dimension):
            thisCoupling = 0.0
            # the driven transition is on the SECOND atom: the first
            # atom of basis state i must match the original pair state,
            # and the second atom's l must differ from l1 by 1 (E1 rule)
            if (
                round(abs(self.basisStates[i][5] - l1)) == 1
                and abs(
                    self.basisStates[i][0]
                    - self.basisStates[self.originalPairStateIndex][0]
                )
                < 0.1
                and abs(
                    self.basisStates[i][1]
                    - self.basisStates[self.originalPairStateIndex][1]
                )
                < 0.1
                and abs(
                    self.basisStates[i][2]
                    - self.basisStates[self.originalPairStateIndex][2]
                )
                < 0.1
                and abs(
                    self.basisStates[i][3]
                    - self.basisStates[self.originalPairStateIndex][3]
                )
                < 0.1
            ):
                state2 = self.basisStates[i]
                n2 = round(state2[0 + 4])
                l2 = round(state2[1 + 4])
                j2 = state2[2 + 4]
                m2 = state2[3 + 4]
                if debugOutput:
                    print(
                        n1,
                        " ",
                        l1,
                        " ",
                        j1,
                        " ",
                        m1,
                        " ",
                        n2,
                        " ",
                        l2,
                        " ",
                        j2,
                        " ",
                        m2,
                        " q=",
                        q,
                    )
                    print(self.basisStates[i])
                dme = self.atom2.getDipoleMatrixElement(
                    n1, l1, j1, m1, n2, l2, j2, m2, q, s=self.s2
                )
                thisCoupling += dme

            thisCoupling = abs(thisCoupling) ** 2
            if thisCoupling > self.maxCoupling:
                self.maxCoupling = thisCoupling
                self.maxCoupledStateIndex = i
            if (thisCoupling > 0.000001) and debugOutput:
                print(
                    "original pairstate index = ",
                    self.originalPairStateIndex,
                )
                print("this pairstate index = ", i)
                print("state itself ", self.basisStates[i])
                print("coupling = ", thisCoupling)
            coupling.append(thisCoupling)

        print("Maximal coupling from a state")
        print("is to a state ", self.basisStates[self.maxCoupledStateIndex])
        print("is equal to %.3e a_0 e" % self.maxCoupling)

    if progressOutput:
        print("\n\nDiagonalizing interaction matrix...\n")

    rvalIndex = 0.0
    previousEigenvectors = []

    for rval in self.r:
        if progressOutput:
            # NOTE(review): `len(self.r - 1)` subtracts 1 element-wise
            # before taking len (same as len(self.r)); the intent was
            # probably `len(self.r) - 1`. Display-only.
            sys.stdout.write(
                "\r%d%%" % (rvalIndex / len(self.r - 1) * 100.0)
            )
            sys.stdout.flush()
        rvalIndex += 1.0

        # calculate interaction matrix: diagonal defects plus each
        # multipole term divided by its power of R (R^-3, R^-4, ...)
        m = self.matDiagonal

        rX = (rval * 1.0e-6) ** 3
        for matRX in self.matR:
            m = m + matRX / rX
            rX *= rval * 1.0e-6

        # uses ARPACK algorithm to find only noOfEigenvectors eigenvectors
        # sigma specifies center frequency (in GHz)
        ev, egvector = eigsh(
            m,
            noOfEigenvectors,
            sigma=eigenstateDetuning * 1.0e-9,
            which="LM",
            tol=1e-6,
        )

        if sortEigenvectors:
            # Find which eigenvectors overlap most with eigenvectors from
            # previous diagonalisatoin, in order to find "adiabatic"
            # continuation for the respective states
            if previousEigenvectors == []:
                previousEigenvectors = np.copy(egvector)
            rowPicked = [False for i in range(len(ev))]
            columnPicked = [False for i in range(len(ev))]

            stateOverlap = np.zeros((len(ev), len(ev)))
            for i in range(len(ev)):
                for j in range(len(ev)):
                    stateOverlap[i, j] = (
                        np.vdot(egvector[:, i], previousEigenvectors[:, j])
                        ** 2
                    )

            # greedy assignment: walk overlaps from largest to smallest,
            # pairing each new eigenvector with an unused previous one
            sortedOverlap = np.dstack(
                np.unravel_index(
                    np.argsort(stateOverlap.ravel()), (len(ev), len(ev))
                )
            )[0]

            sortedEigenvaluesOrder = np.zeros(len(ev), dtype=np.int32)
            j = len(ev) ** 2 - 1
            for i in range(len(ev)):
                while (
                    rowPicked[sortedOverlap[j, 0]]
                    or columnPicked[sortedOverlap[j, 1]]
                ):
                    j -= 1
                rowPicked[sortedOverlap[j, 0]] = True
                columnPicked[sortedOverlap[j, 1]] = True
                sortedEigenvaluesOrder[sortedOverlap[j, 1]] = sortedOverlap[
                    j, 0
                ]
            egvector = egvector[:, sortedEigenvaluesOrder]
            ev = ev[sortedEigenvaluesOrder]
            previousEigenvectors = np.copy(egvector)

        self.y.append(ev)

        if drivingFromState[0] < 0.1:
            # no driving state requested: highlight the admixture of the
            # original pair state in each eigenvector
            sh = []
            comp = []
            for i in xrange(len(ev)):
                sh.append(
                    abs(egvector[self.originalPairStateIndex, i]) ** 2
                )
                comp.append(self._stateComposition(egvector[:, i]))
            self.highlight.append(sh)
            self.composition.append(comp)
        else:
            # driving requested: highlight by total (normalised) driving
            # strength into each eigenvector
            sh = []
            comp = []
            for i in xrange(len(ev)):
                sumCoupledStates = 0.0
                for j in xrange(dimension):
                    sumCoupledStates += (
                        abs(coupling[j] / self.maxCoupling)
                        * abs(egvector[j, i]) ** 2
                    )
                comp.append(self._stateComposition(egvector[:, i]))
                sh.append(sumCoupledStates)
            self.highlight.append(sh)
            self.composition.append(comp)

    # end of FOR loop over inter-atomic dinstaces
def exportData(self, fileBase, exportFormat="csv"):
    """
    Exports PairStateInteractions calculation data.

    Only supported format (selected by default) is .csv in a
    human-readable form with a header that saves details of calculation.
    Function saves three files: 1) `fileBase` _r.csv;
    2) `fileBase` _energyLevels
    3) `fileBase` _highlight

    For more details on the format, see header of the saved files.

    Parameters:
        fileBase (string): filebase for the names of the saved files
            without format extension. Add as a prefix a directory path
            if necessary (e.g. saving outside the current working directory)
        exportFormat (string): optional. Format of the exported file. Currently
            only .csv is supported but this can be extended in the future.

    Raises:
        ValueError: if `exportFormat` is anything other than "csv".
    """
    fmt = "on %Y-%m-%d @ %H:%M:%S"
    ts = datetime.datetime.now().strftime(fmt)
    commonHeader = "Export from Alkali Rydberg Calculator (ARC) %s.\n" % ts
    # m_j values are stored as half-integers; 2*m_j is printed over /2
    commonHeader += (
        "\n *** Pair State interactions for %s %s m_j = %d/2 , %s %s m_j = %d/2 pair-state. ***\n\n"
        % (
            self.atom1.elementName,
            printStateString(self.n, self.l, self.j),
            round(2.0 * self.m1),
            self.atom2.elementName,
            printStateString(self.nn, self.ll, self.jj),
            round(2.0 * self.m2),
        )
    )
    if self.interactionsUpTo == 1:
        commonHeader += " - Pair-state interactions included up to dipole-dipole coupling.\n"
    elif self.interactionsUpTo == 2:
        commonHeader += " - Pair-state interactions included up to quadrupole-quadrupole coupling.\n"
    commonHeader += (
        " - Pair-state interactions calculated for manifold with spin angular momentum s1 = %.1d s2 = %.1d .\n"
        % (self.s1, self.s2)
    )

    # basis/orientation details exist only after defineBasis() was called
    if hasattr(self, "theta"):
        commonHeader += " - Atom orientation:\n"
        commonHeader += "     theta (polar angle) = %.5f x pi\n" % (
            self.theta / pi
        )
        commonHeader += "     phi (azimuthal angle) = %.5f x pi\n" % (
            self.phi / pi
        )
        commonHeader += " - Calculation basis includes:\n"
        commonHeader += (
            "      States with principal quantum number in range [%d-%d]x[%d-%d],\n"
            % (
                self.n - self.nRange,
                self.n + self.nRange,
                self.nn - self.nRange,
                self.nn + self.nRange,
            )
        )
        commonHeader += (
            "      AND whose orbital angular momentum (l) is in range [%d-%d] (i.e. %s-%s),\n"
            % (
                0,
                self.lrange,
                printStateLetter(0),
                printStateLetter(self.lrange),
            )
        )
        commonHeader += (
            "      AND whose pair-state energy difference is at most %.3f GHz\n"
            % (self.energyDelta / 1.0e9)
        )
        commonHeader += "      (energy difference is measured relative to original pair-state).\n"
    else:
        commonHeader += " ! Atom orientation and basis not yet set (this is set in defineBasis method).\n"

    # energy-level details exist only after diagonalise() was called
    if hasattr(self, "noOfEigenvectors"):
        commonHeader += (
            " - Finding %d eigenvectors closest to the given pair-state\n"
            % self.noOfEigenvectors
        )

        # diagonalise() sets drivingFromState only when driving was
        # requested; treat a missing or empty attribute as "no driving"
        # instead of raising AttributeError/IndexError
        drivingState = getattr(self, "drivingFromState", [0])
        if len(drivingState) == 0 or drivingState[0] < 0.1:
            commonHeader += (
                " - State highlighting based on the relative contribution \n"
                + "   of the original pair-state in the eigenstates obtained by diagonalization.\n"
            )
        else:
            commonHeader += (
                " - State highlighting based on the relative driving strength \n"
                + "   to a given energy eigenstate (energy level) from state\n"
                + "   %s m_j =%d/2 with polarization q=%d.\n"
                % (
                    printStateString(*drivingState[0:3]),
                    round(2.0 * drivingState[3]),
                    drivingState[4],
                )
            )
    else:
        commonHeader += " ! Energy levels not yet found (this is done by calling diagonalise method).\n"

    if exportFormat == "csv":
        print("Exporting StarkMap calculation results as .csv ...")

        commonHeader += " - Export consists of three (3) files:\n"
        commonHeader += "       1) %s,\n" % (
            fileBase + "_r." + exportFormat
        )
        commonHeader += "       2) %s,\n" % (
            fileBase + "_energyLevels." + exportFormat
        )
        commonHeader += "       3) %s.\n\n" % (
            fileBase + "_highlight." + exportFormat
        )

        filename = fileBase + "_r." + exportFormat
        np.savetxt(
            filename,
            self.r,
            fmt="%.18e",
            delimiter=", ",
            newline="\n",
            header=(
                commonHeader
                + " - - - Interatomic distance, r (\\mu m) - - -"
            ),
            comments="# ",
        )
        print("   Interatomic distances (\\mu m) saved in %s" % filename)

        filename = fileBase + "_energyLevels." + exportFormat
        headerDetails = " NOTE : Each row corresponds to eigenstates for a single specified interatomic distance"
        np.savetxt(
            filename,
            self.y,
            fmt="%.18e",
            delimiter=", ",
            newline="\n",
            header=(
                commonHeader + " - - - Energy (GHz) - - -\n" + headerDetails
            ),
            comments="# ",
        )
        print(
            "   Lists of energies (in GHz relative to the original pair-state energy)"
            + (" saved in %s" % filename)
        )

        filename = fileBase + "_highlight." + exportFormat
        np.savetxt(
            filename,
            self.highlight,
            fmt="%.18e",
            delimiter=", ",
            newline="\n",
            header=(
                commonHeader
                + " - - - Highlight value (rel.units) - - -\n"
                + headerDetails
            ),
            comments="# ",
        )
        print("   Highlight values saved in %s" % filename)

        print("... data export finished!")
    else:
        # BUGFIX: the original interpolated the builtin `format` function
        # instead of the `exportFormat` argument into this message
        raise ValueError("Unsupported export format (.%s)." % exportFormat)
def _stateComposition(self, stateVector):
contribution = np.absolute(stateVector)
order = np.argsort(contribution, kind="heapsort")
index = -1
totalContribution = 0
value = "$"
while (index > -5) and (totalContribution < 0.95):
i = order[index]
if index != -1 and (
stateVector[i].real > 0 or abs(stateVector[i].imag) > 1e-9
):
value += "+"
if abs(self.phi) < 1e-9:
value = (
value
+ ("%.2f" % stateVector[i])
+ self._addState(*self.basisStates[i])
)
else:
value = (
value
+ (
"(%.2f+i%.2f)"
% (stateVector[i].real, stateVector[i].imag)
)
+ self._addState(*self.basisStates[i])
)
totalContribution += contribution[i] ** 2
index -= 1
if totalContribution < 0.999:
value += "+\\ldots"
return value + "$"
def _addState(self, n1, l1, j1, mj1, n2, l2, j2, mj2):
    """Return LaTeX ket ``|state1, state2>`` for one pair-basis state.

    For spin-1/2 (alkali) atoms m_j is printed as a half-integer
    fraction (``%d/2``); for other spins (divalent atoms) it is
    printed as an integer.
    """
    if abs(self.s1 - 0.5) < 0.1:
        # Alkali atom
        firstAtom = "|%s %d/2" % (
            printStateStringLatex(n1, l1, j1, s=self.s1),
            round(2 * mj1),
        )
    else:
        # divalent atoms
        firstAtom = "|%s %d" % (
            printStateStringLatex(n1, l1, j1, s=self.s1),
            round(mj1),
        )
    if abs(self.s2 - 0.5) < 0.1:
        # Alkali atom
        secondAtom = ",%s %d/2\\rangle" % (
            printStateStringLatex(n2, l2, j2, s=self.s2),
            round(2 * mj2),
        )
    else:
        # divalent atom
        secondAtom = ",%s %d\\rangle" % (
            printStateStringLatex(n2, l2, j2, s=self.s2),
            round(mj2),
        )
    return firstAtom + secondAtom
def plotLevelDiagram(
    self, highlightColor="red", highlightScale="linear", units="GHz"
):
    """
    Plots pair state level diagram

    Call :obj:`showPlot` if you want to display a plot afterwards.

    Parameters:
        highlightColor (string): optional, specifies the colour used
            for state highlighting
        highlightScale (string): optional, specifies scaling of
            state highlighting. Default is 'linear'. Use 'log-2' or
            'log-3' for logarithmic scale going down to 1e-2 and 1e-3
            respectively. Logarithmic scale is useful for spotting
            weakly admixed states.
        units (:obj:`char`,optional): possible values {'*GHz*','cm','eV'};
            [case insensitive] if value is 'GHz' (default), diagram will
            be plotted as energy :math:`/h` in units of GHz; if the
            string contains 'cm' diagram will be plotted in energy units
            cm :math:`{}^{-1}`; if the value is 'eV', diagram
            will be plotted as energy in units eV.

    Raises:
        ValueError: if `units` or `highlightScale` is not one of the
            supported values.
    """
    rvb = LinearSegmentedColormap.from_list(
        "mymap", ["0.9", highlightColor]
    )

    if units.lower() == "ev":
        self.units = "eV"
        self.scaleFactor = 1e9 * C_h / C_e
        eLabel = ""
    elif units.lower() == "ghz":
        self.units = "GHz"
        self.scaleFactor = 1
        eLabel = "/h"
    elif "cm" in units.lower():
        self.units = "cm$^{-1}$"
        self.scaleFactor = 1e9 / (C_c * 100)
        eLabel = "/(h c)"
    else:
        # BUGFIX: the original silently fell through here, leaving
        # eLabel/self.scaleFactor unset and failing later with NameError
        raise ValueError(
            "Only 'GHz', 'cm' and 'eV' are valid inputs for units"
        )

    if highlightScale == "linear":
        cNorm = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)
    elif highlightScale == "log-2":
        cNorm = matplotlib.colors.LogNorm(vmin=1e-2, vmax=1)
    elif highlightScale == "log-3":
        cNorm = matplotlib.colors.LogNorm(vmin=1e-3, vmax=1)
    else:
        raise ValueError(
            "Only 'linear', 'log-2' and 'log-3' are valid "
            "inputs for highlightScale"
        )

    print(" Now we are plotting...")
    self.fig, self.ax = plt.subplots(1, 1, figsize=(11.5, 5.0))

    self.y = np.array(self.y)
    self.highlight = np.array(self.highlight)

    colorfulX = []
    colorfulY = []
    colorfulState = []

    # flatten (distance, energy, highlight) triples for scatter plotting
    for i in xrange(len(self.r)):
        for j in xrange(len(self.y[i])):
            colorfulX.append(self.r[i])
            colorfulY.append(self.y[i][j])
            colorfulState.append(self.highlight[i][j])

    colorfulState = np.array(colorfulState)
    # draw weakly highlighted points first so strongly highlighted
    # states end up on top
    sortOrder = colorfulState.argsort(kind="heapsort")
    colorfulX = np.array(colorfulX)
    colorfulY = np.array(colorfulY)

    colorfulX = colorfulX[sortOrder]
    colorfulY = colorfulY[sortOrder]
    colorfulState = colorfulState[sortOrder]

    self.ax.scatter(
        colorfulX,
        colorfulY * self.scaleFactor,
        s=10,
        c=colorfulState,
        linewidth=0,
        norm=cNorm,
        cmap=rvb,
        zorder=2,
        picker=5,
    )
    cax = self.fig.add_axes([0.91, 0.1, 0.02, 0.8])
    cb = matplotlib.colorbar.ColorbarBase(cax, cmap=rvb, norm=cNorm)

    if self.drivingFromState[0] == 0:
        # colouring is based on the contribution of the original pair state here
        label = ""
        if abs(self.s1 - 0.5) < 0.1:
            # Alkali atom
            label += r"$|\langle %s m_j=%d/2 " % (
                printStateStringLatex(self.n, self.l, self.j),
                round(2.0 * self.m1),
            )
        else:
            # divalent atom
            label += r"$|\langle %s m_j=%d " % (
                printStateStringLatex(self.n, self.l, self.j, s=self.s1),
                round(self.m1),
            )
        if abs(self.s2 - 0.5) < 0.1:
            # Alkali atom
            label += r", %s m_j=%d/2 | \mu \rangle |^2$" % (
                printStateStringLatex(self.nn, self.ll, self.jj),
                round(2.0 * self.m2),
            )
        else:
            # divalent atom
            label += r", %s m_j=%d | \mu \rangle |^2$" % (
                printStateStringLatex(self.nn, self.ll, self.jj, s=self.s2),
                round(self.m2, 0),
            )
        cb.set_label(label)
    else:
        # colouring is based on the coupling to different states
        cb.set_label(r"$(\Omega_\mu/\Omega)^2$")

    self.ax.set_xlabel(r"Interatomic distance, $R$ ($\mu$m)")
    self.ax.set_ylabel(
        r"Pair-state relative energy, $\Delta E %s$ (%s)"
        % (eLabel, self.units)
    )
def savePlot(self, filename="PairStateInteractions.pdf"):
    """
    Saves plot made by :obj:`PairStateInteractions.plotLevelDiagram`

    Args:
        filename (:obj:`str`, optional): file location where the plot
            should be saved
    """
    # guard clause: nothing to save until plotLevelDiagram has run
    if self.fig == 0:
        print("Error while saving a plot: nothing is plotted yet")
        return 0
    self.fig.savefig(filename, bbox_inches="tight")
    return 0
def showPlot(self, interactive=True):
    """
    Shows level diagram printed by
    :obj:`PairStateInteractions.plotLevelDiagram`

    By default, it will output interactive plot, which means that
    clicking on the state will show the composition of the clicked
    state in original basis (dominant elements)

    Args:
        interactive (bool): optional, by default it is True. If true,
            plotted graph will be interactive, i.e. users can click
            on the state to identify the state composition

    Note:
        interactive=True has effect if the graphs are explored in usual
        matplotlib pop-up windows. It doesn't have effect on inline
        plots in Jupyter (IPython) notebooks.
    """
    if interactive:
        # wire up click-to-inspect: clicking a plotted state displays
        # its basis-state composition in the figure title
        self.ax.set_title("Click on state to see state composition")
        self.clickedPoint = 0
        canvas = self.fig.canvas
        canvas.draw()
        canvas.mpl_connect("pick_event", self._onPick)
    plt.show()
    return 0
def _onPick(self, event):
    """Matplotlib pick-event callback for interactive level diagrams.

    Locates the plotted state nearest to the click (preferring more
    strongly highlighted states in the vicinity), marks it with a blue
    square and shows its composition in the axes title.
    """
    if isinstance(event.artist, matplotlib.collections.PathCollection):
        x = event.mouseevent.xdata
        # convert clicked y back to internal (GHz) energy units
        y = event.mouseevent.ydata / self.scaleFactor

        # nearest interatomic-distance column to the click
        i = np.searchsorted(self.r, x)
        if i == len(self.r):
            i -= 1
        if (i > 0) and (abs(self.r[i - 1] - x) < abs(self.r[i] - x)):
            i -= 1

        # nearest energy level within that column
        j = 0
        for jj in xrange(len(self.y[i])):
            if abs(self.y[i][jj] - y) < abs(self.y[i][j] - y):
                j = jj

        # now choose the most higlighted state in this area
        # (within 1.5x the distance of the nearest level)
        distance = abs(self.y[i][j] - y) * 1.5
        for jj in xrange(len(self.y[i])):
            if abs(self.y[i][jj] - y) < distance and (
                abs(self.highlight[i][jj]) > abs(self.highlight[i][j])
            ):
                j = jj

        # remove previous selection marker, if any
        if self.clickedPoint != 0:
            self.clickedPoint.remove()

        (self.clickedPoint,) = self.ax.plot(
            [self.r[i]],
            [self.y[i][j] * self.scaleFactor],
            "bs",
            linewidth=0,
            zorder=3,
        )

        self.ax.set_title(
            "State = "
            + self.composition[i][j]
            + (" Colourbar = %.2f" % self.highlight[i][j]),
            fontsize=11,
        )
        event.canvas.draw()
def getC6fromLevelDiagram(
    self, rStart, rStop, showPlot=False, minStateContribution=0.0
):
    """
    Finds :math:`C_6` coefficient for original pair state.

    Function first finds for each distance in the range
    [ `rStart` , `rStop` ] the eigen state with highest contribution of
    the original state. One can set optional parameter
    `minStateContribution` to value in the range [0,1), so that function
    finds only states if they have contribution of the original state
    that is bigger then `minStateContribution`.

    Once original pair-state is found in the range of interatomic
    distances, from smallest `rStart` to the biggest `rStop`, function
    will try to perform fitting of the corresponding state energy
    :math:`E(R)` at distance :math:`R` to the function
    :math:`A+C_6/R^6` where :math:`A` is some offset.

    Args:
        rStart (float): smallest inter-atomic distance to be used for fitting
        rStop (float): maximum inter-atomic distance to be used for fitting
        showPlot (bool): If set to true, it will print the plot showing
            fitted energy level and the obtained best fit. Default is
            False
        minStateContribution (float): valid values are in the range [0,1).
            It specifies minimum amount of the original state in the given
            energy state necessary for the state to be considered for
            the adiabatic continuation of the original unperturbed
            pair state.

    Returns:
        float:
            :math:`C_6` measured in :math:`\\text{GHz }\\mu\\text{m}^6`
            on success; If unsuccessful returns False.

    Note:
        In order to use this functions, highlighting in
        :obj:`diagonalise` should be based on the original pair
        state contribution of the eigenvectors (that this,
        `drivingFromState` parameter should not be set, which
        corresponds to `drivingFromState` = [0,0,0,0,0]).
    """
    initialStateDetuning = []
    initialStateDetuningX = []

    # find index range [fromRindex, toRindex] of self.r covering
    # the requested [rStart, rStop] window
    fromRindex = -1
    toRindex = -1
    for br in xrange(len(self.r)):
        if (fromRindex == -1) and (self.r[br] >= rStart):
            fromRindex = br
        if self.r[br] > rStop:
            toRindex = br - 1
            break
    if (fromRindex != -1) and (toRindex == -1):
        toRindex = len(self.r) - 1
    if fromRindex == -1:
        print(
            "\nERROR: could not find data for energy levels for interatomic"
        )
        # NOTE(review): "%2.f" is presumably a typo for "%.2f"
        # (string literal left unchanged here)
        print("distances between %2.f and %.2f mu m.\n\n" % (rStart, rStop))
        return 0

    # at each distance, pick the eigenstate with the largest admixture
    # of the original pair state (above minStateContribution)
    for br in xrange(fromRindex, toRindex + 1):
        index = -1
        maxPortion = minStateContribution
        for br2 in xrange(len(self.y[br])):
            if abs(self.highlight[br][br2]) > maxPortion:
                index = br2
                maxPortion = abs(self.highlight[br][br2])
        if index != -1:
            initialStateDetuning.append(abs(self.y[br][index]))
            initialStateDetuningX.append(self.r[br])

    # fit is performed in log space to balance the many orders of
    # magnitude spanned by |E(R)|
    initialStateDetuning = np.log(np.array(initialStateDetuning))
    initialStateDetuningX = np.array(initialStateDetuningX)

    def c6fit(r, c6, offset):
        # model: log(|C6 / R^6 + offset|-free form); offset absorbs
        # residual energy shift
        return np.log(c6 / r**6 + offset)

    try:
        popt, pcov = curve_fit(
            c6fit, initialStateDetuningX, initialStateDetuning, [1, 0]
        )
    except Exception as ex:
        print(ex)
        print("ERROR: unable to find a fit for C6.")
        return False
    print("c6 = ", popt[0], " GHz /R^6 (mu m)^6")
    print("offset = ", popt[1])

    y_fit = []
    for val in initialStateDetuningX:
        y_fit.append(c6fit(val, popt[0], popt[1]))
    y_fit = np.array(y_fit)

    if showPlot:
        fig, ax = plt.subplots(1, 1, figsize=(8.0, 5.0))
        ax.loglog(
            initialStateDetuningX,
            np.exp(initialStateDetuning),
            "b-",
            lw=2,
            zorder=1,
        )
        ax.loglog(
            initialStateDetuningX, np.exp(y_fit), "r--", lw=2, zorder=2
        )

        ax.legend(
            ("calculated energy level", "fitted model function"),
            loc=1,
            fontsize=10,
        )

        ax.set_xlim(np.min(self.r), np.max(self.r))
        ymin = np.min(initialStateDetuning)
        ymax = np.max(initialStateDetuning)
        ax.set_ylim(exp(ymin), exp(ymax))

        minorLocator = mpl.ticker.MultipleLocator(1)
        minorFormatter = mpl.ticker.FormatStrFormatter("%d")
        ax.xaxis.set_minor_locator(minorLocator)
        ax.xaxis.set_minor_formatter(minorFormatter)
        ax.xaxis.set_major_formatter(plt.NullFormatter())
        ax.set_xlabel(r"Interatomic distance, $r$ ($\mu$m)")
        ax.set_ylabel(r"Pair-state energy, $|E|$ (GHz)")
        ax.set_title(r"$C_6$ fit")
        plt.show()

    # expose fitted data for later inspection
    self.fitX = initialStateDetuningX
    self.fitY = initialStateDetuning
    self.fittedCurveY = y_fit

    return popt[0]
def getC3fromLevelDiagram(
    self,
    rStart,
    rStop,
    showPlot=False,
    minStateContribution=0.0,
    resonantBranch=+1,
):
    """
    Finds :math:`C_3` coefficient for original pair state.

    Function first finds for each distance in the range
    [`rStart` , `rStop`] the eigen state with highest contribution of
    the original state. One can set optional parameter
    `minStateContribution` to value in the range [0,1), so that function
    finds only states if they have contribution of the original state
    that is bigger than `minStateContribution`.

    Once original pair-state is found in the range of interatomic
    distances, from smallest `rStart` to the biggest `rStop`, function
    will try to perform fitting of the corresponding state energy
    :math:`E(R)` at distance :math:`R` to the function
    :math:`A+C_3/R^3` where :math:`A` is some offset.

    Args:
        rStart (float): smallest inter-atomic distance to be used for fitting
        rStop (float): maximum inter-atomic distance to be used for fitting
        showPlot (bool): If set to true, it will print the plot showing
            fitted energy level and the obtained best fit. Default is
            False
        minStateContribution (float): valid values are in the range [0,1).
            It specifies minimum amount of the original state in the given
            energy state necessary for the state to be considered for
            the adiabatic continuation of the original unperturbed
            pair state.
        resonantBranch (int): optional, default +1. For resonant
            interactions we have two branches with identical
            state contributions. In this case, we will select only
            positively detuned branch (for resonantBranch = +1)
            or negatively detuned branch (for resonantBranch = -1)
            depending on the value of resonantBranch optional parameter

    Returns:
        float:
            :math:`C_3` measured in :math:`\\text{GHz }\\mu\\text{m}^3`
            on success; If unsuccessful returns False.

    Note:
        In order to use this functions, highlighting in
        :obj:`diagonalise` should be based on the original pair
        state contribution of the eigenvectors (that is,
        `drivingFromState` parameter should not be set, which
        corresponds to `drivingFromState` = [0,0,0,0,0]).
    """
    # Branch selection only applies to resonant (|dl| == 1) pair states,
    # where two branches with equal original-state contribution exist
    # and differ by the sign of the detuning.
    selectBranch = False
    if abs(self.l - self.ll) == 1:
        selectBranch = True
        resonantBranch = float(resonantBranch)

    initialStateDetuning = []
    initialStateDetuningX = []

    # Find indices into self.r covering the requested [rStart, rStop].
    fromRindex = -1
    toRindex = -1
    for br in xrange(len(self.r)):
        if (fromRindex == -1) and (self.r[br] >= rStart):
            fromRindex = br
        if self.r[br] > rStop:
            toRindex = br - 1
            break
    if (fromRindex != -1) and (toRindex == -1):
        toRindex = len(self.r) - 1
    if fromRindex == -1:
        print(
            "\nERROR: could not find data for energy levels for interatomic"
        )
        print("distances between %2.f and %.2f mu m.\n\n" % (rStart, rStop))
        return False

    # Walk from largest to smallest separation, adiabatically following
    # the eigenstate with the highest original-state contribution.
    discontinuityDetected = False
    for br in xrange(toRindex, fromRindex - 1, -1):
        index = -1
        maxPortion = minStateContribution
        for br2 in xrange(len(self.y[br])):
            # BUGFIX: the sign test previously multiplied by
            # `selectBranch` (always True when this branch of the
            # condition is reached), which silently ignored the
            # `resonantBranch` argument; multiplying by
            # `resonantBranch` makes resonantBranch = -1 actually
            # select the negatively detuned branch.
            if (abs(self.highlight[br][br2]) > maxPortion) and (
                not selectBranch
                or (self.y[br][br2] * resonantBranch > 0.0)
            ):
                index = br2
                maxPortion = abs(self.highlight[br][br2])
        # Stop following the state once the local energy slope jumps by
        # more than a factor of 3 — indicates a level crossing where the
        # adiabatic continuation was lost.
        if len(initialStateDetuningX) > 2:
            slope1 = (
                initialStateDetuning[-1] - initialStateDetuning[-2]
            ) / (initialStateDetuningX[-1] - initialStateDetuningX[-2])
            slope2 = (abs(self.y[br][index]) - initialStateDetuning[-1]) / (
                self.r[br] - initialStateDetuningX[-1]
            )
            if abs(slope2) > 3.0 * abs(slope1):
                discontinuityDetected = True
        if (index != -1) and (not discontinuityDetected):
            initialStateDetuning.append(abs(self.y[br][index]))
            initialStateDetuningX.append(self.r[br])

    # Fit in log space so the many orders of magnitude spanned by |E(R)|
    # are weighted evenly.
    initialStateDetuning = np.log(np.array(initialStateDetuning))  # *1e9
    initialStateDetuningX = np.array(initialStateDetuningX)

    def c3fit(r, c3, offset):
        # Model: log|E| = log(C3 / r^3 + offset)
        return np.log(c3 / r**3 + offset)

    try:
        popt, pcov = curve_fit(
            c3fit, initialStateDetuningX, initialStateDetuning, [1, 0]
        )
    except Exception as ex:
        print(ex)
        print("ERROR: unable to find a fit for C3.")
        return False
    print("c3 = ", popt[0], " GHz /R^3 (mu m)^3")
    print("offset = ", popt[1])

    y_fit = []
    for val in initialStateDetuningX:
        y_fit.append(c3fit(val, popt[0], popt[1]))
    y_fit = np.array(y_fit)

    if showPlot:
        fig, ax = plt.subplots(1, 1, figsize=(8.0, 5.0))
        ax.loglog(
            initialStateDetuningX,
            np.exp(initialStateDetuning),
            "b-",
            lw=2,
            zorder=1,
        )
        ax.loglog(
            initialStateDetuningX, np.exp(y_fit), "r--", lw=2, zorder=2
        )
        ax.legend(
            ("calculated energy level", "fitted model function"),
            loc=1,
            fontsize=10,
        )
        ax.set_xlim(np.min(self.r), np.max(self.r))
        ymin = np.min(initialStateDetuning)
        ymax = np.max(initialStateDetuning)
        ax.set_ylim(exp(ymin), exp(ymax))
        minorLocator = mpl.ticker.MultipleLocator(1)
        minorFormatter = mpl.ticker.FormatStrFormatter("%d")
        ax.xaxis.set_minor_locator(minorLocator)
        ax.xaxis.set_minor_formatter(minorFormatter)
        ax.xaxis.set_major_formatter(plt.NullFormatter())
        ax.set_xlabel(r"Interatomic distance, $r$ ($\mu$m)")
        ax.set_ylabel(r"Pair-state energy, $|E|$ (GHz)")
        # Choose a tick step smaller than the plotted (log) energy range.
        locatorStep = 1.0
        while (locatorStep > (ymax - ymin)) and locatorStep > 1.0e-4:
            locatorStep /= 10.0
        ax.yaxis.set_major_locator(mpl.ticker.MultipleLocator(locatorStep))
        ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter("%.3f"))
        ax.yaxis.set_minor_locator(
            mpl.ticker.MultipleLocator(locatorStep / 10.0)
        )
        ax.yaxis.set_minor_formatter(plt.NullFormatter())
        # ax.yaxis.set_minor_formatter(mpl.ticker.FormatStrFormatter('%.3f'))
        ax.set_title(r"$C_3$ fit")
        plt.show()

    # Expose fit data for further inspection by the caller.
    self.fitX = initialStateDetuningX
    self.fitY = initialStateDetuning
    self.fittedCurveY = y_fit
    return popt[0]
def getVdwFromLevelDiagram(
    self, rStart, rStop, showPlot=False, minStateContribution=0.0
):
    """
    Finds :math:`r_{\\rm vdW}` coefficient for original pair state.

    Function first finds for each distance in the range [`rStart`,`rStop`]
    the eigen state with highest contribution of the original state.
    One can set optional parameter `minStateContribution` to value in
    the range [0,1), so that function finds only states if they have
    contribution of the original state that is bigger than
    `minStateContribution`.

    Once original pair-state is found in the range of interatomic
    distances, from smallest `rStart` to the biggest `rStop`, function
    will try to perform fitting of the corresponding state energy
    :math:`E(R)` at distance :math:`R` to the function
    :math:`A+B\\frac{1-\\sqrt{1+(r_{\\rm vdW}/r)^6}}{1-\\sqrt{1+r_{\\rm vdW}^6}}`
    where :math:`A` and :math:`B` are some offset.

    Args:
        rStart (float): smallest inter-atomic distance to be used for fitting
        rStop (float): maximum inter-atomic distance to be used for fitting
        showPlot (bool): If set to true, it will print the plot showing
            fitted energy level and the obtained best fit. Default is
            False
        minStateContribution (float): valid values are in the range [0,1).
            It specifies minimum amount of the original state in the given
            energy state necessary for the state to be considered for
            the adiabatic continuation of the original unperturbed
            pair state.

    Returns:
        float: :math:`r_{\\rm vdW}` measured in :math:`\\mu\\text{m}`
            on success; If unsuccessful returns False.

    Note:
        In order to use this functions, highlighting in
        :obj:`diagonalise` should be based on the original pair
        state contribution of the eigenvectors (that is,
        `drivingFromState` parameter should not be set, which
        corresponds to `drivingFromState` = [0,0,0,0,0]).
    """
    initialStateDetuning = []
    initialStateDetuningX = []

    # Find indices into self.r covering the requested [rStart, rStop].
    fromRindex = -1
    toRindex = -1
    for br in xrange(len(self.r)):
        if (fromRindex == -1) and (self.r[br] >= rStart):
            fromRindex = br
        if self.r[br] > rStop:
            toRindex = br - 1
            break
    if (fromRindex != -1) and (toRindex == -1):
        toRindex = len(self.r) - 1
    if fromRindex == -1:
        print(
            "\nERROR: could not find data for energy levels for interatomic"
        )
        print("distances between %2.f and %.2f mu m.\n\n" % (rStart, rStop))
        return False

    # Walk from largest to smallest separation, adiabatically following
    # the eigenstate with the highest original-state contribution.
    discontinuityDetected = False
    for br in xrange(toRindex, fromRindex - 1, -1):
        index = -1
        maxPortion = minStateContribution
        for br2 in xrange(len(self.y[br])):
            if abs(self.highlight[br][br2]) > maxPortion:
                index = br2
                maxPortion = abs(self.highlight[br][br2])
        # Discontinuity check: a sudden factor-of-3 jump in the local
        # slope means the adiabatic continuation was lost.
        # NOTE(review): if no state passed the threshold above, `index`
        # is still -1 here and self.y[br][-1] (last eigenvalue) is used
        # in slope2 — presumably harmless since such points are later
        # discarded, but verify.
        if len(initialStateDetuningX) > 2:
            slope1 = (
                initialStateDetuning[-1] - initialStateDetuning[-2]
            ) / (initialStateDetuningX[-1] - initialStateDetuningX[-2])
            slope2 = (abs(self.y[br][index]) - initialStateDetuning[-1]) / (
                self.r[br] - initialStateDetuningX[-1]
            )
            if abs(slope2) > 3.0 * abs(slope1):
                discontinuityDetected = True
        if (index != -1) and (not discontinuityDetected):
            initialStateDetuning.append(abs(self.y[br][index]))
            initialStateDetuningX.append(self.r[br])

    # Fit in log space to weight all magnitudes of |E(R)| evenly.
    initialStateDetuning = np.log(abs(np.array(initialStateDetuning)))
    initialStateDetuningX = np.array(initialStateDetuningX)

    def vdwFit(r, offset, scale, vdw):
        # Model: log|A + B * (1 - sqrt(1+(rvdw/r)^6)) / (1 - sqrt(1+rvdw^6))|
        return np.log(
            abs(
                offset
                + scale
                * (1.0 - np.sqrt(1.0 + (vdw / r) ** 6))
                / (1.0 - np.sqrt(1 + vdw**6))
            )
        )

    noOfPoints = len(initialStateDetuningX)
    print("Data points to fit = ", noOfPoints)

    try:
        # Initial guess: mid-range point for scale and vdw radius.
        popt, pcov = curve_fit(
            vdwFit,
            initialStateDetuningX,
            initialStateDetuning,
            [
                0,
                initialStateDetuning[noOfPoints // 2],
                initialStateDetuningX[noOfPoints // 2],
            ],
        )
    except Exception as ex:
        print(ex)
        print("ERROR: unable to find a fit for van der Waals distance.")
        return False

    # initialStateDetuningX is ordered from largest to smallest r, so
    # [0] is the upper and [-1] the lower edge of the fitted range.
    if (initialStateDetuningX[0] < popt[2]) or (
        popt[2] < initialStateDetuningX[-1]
    ):
        print("WARNING: vdw radius seems to be outside the fitting range!")
        print(
            "It's estimated to be around %.2f mu m from the current fit."
            % popt[2]
        )
    print("Rvdw = ", popt[2], " mu m")
    print("offset = ", popt[0], "\n scale = ", popt[1])

    y_fit = []
    for val in initialStateDetuningX:
        y_fit.append(vdwFit(val, popt[0], popt[1], popt[2]))
    y_fit = np.array(y_fit)

    if showPlot:
        fig, ax = plt.subplots(1, 1, figsize=(8.0, 5.0))
        ax.loglog(
            initialStateDetuningX,
            np.exp(initialStateDetuning),
            "b-",
            lw=2,
            zorder=1,
        )
        ax.loglog(
            initialStateDetuningX, np.exp(y_fit), "r--", lw=2, zorder=2
        )
        ax.set_xlim(np.min(self.r), np.max(self.r))
        ymin = np.min(initialStateDetuning)
        ymax = np.max(initialStateDetuning)
        ax.set_ylim(exp(ymin), exp(ymax))
        # Mark the fitted van der Waals radius on the plot.
        ax.axvline(x=popt[2], color="k")
        ax.text(
            popt[2],
            exp((ymin + ymax) / 2.0),
            r"$R_{vdw} = %.1f$ $\mu$m" % popt[2],
        )
        minorLocator = mpl.ticker.MultipleLocator(1)
        minorFormatter = mpl.ticker.FormatStrFormatter("%d")
        ax.xaxis.set_minor_locator(minorLocator)
        ax.xaxis.set_minor_formatter(minorFormatter)
        ax.xaxis.set_major_formatter(plt.NullFormatter())
        ax.set_xlabel(r"Interatomic distance, $r$ ($\mu$m)")
        ax.set_ylabel(r"Pair-state energy, $|E|$ (GHz)")
        ax.legend(
            ("calculated energy level", "fitted model function"),
            loc=1,
            fontsize=10,
        )
        plt.show()

    # Expose fit data for further inspection by the caller.
    self.fitX = initialStateDetuningX
    self.fitY = initialStateDetuning
    self.fittedCurveY = y_fit
    return popt[2]
class StarkMapResonances:
    """
    Calculates pair state Stark maps for finding resonances

    Tool for finding conditions for Foster resonances. For a given pair
    state, in a given range of the electric fields, looks for the pair-state
    that are close in energy and coupled via dipole-dipole interactions
    to the original pair-state.

    See `Stark resonances example snippet`_.

    .. _`Stark resonances example snippet`:
        ././Rydberg_atoms_a_primer.html#Tuning-the-interaction-strength-with-electric-fields

    Parameters:
        atom1 (:obj:`arc.alkali_atom_functions.AlkaliAtom` or :obj:`arc.divalent_atom_functions.DivalentAtom`):
            ={ :obj:`arc.alkali_atom_data.Lithium6`,
            :obj:`arc.alkali_atom_data.Lithium7`,
            :obj:`arc.alkali_atom_data.Sodium`,
            :obj:`arc.alkali_atom_data.Potassium39`,
            :obj:`arc.alkali_atom_data.Potassium40`,
            :obj:`arc.alkali_atom_data.Potassium41`,
            :obj:`arc.alkali_atom_data.Rubidium85`,
            :obj:`arc.alkali_atom_data.Rubidium87`,
            :obj:`arc.alkali_atom_data.Caesium`,
            :obj:`arc.divalent_atom_data.Strontium88`,
            :obj:`arc.divalent_atom_data.Calcium40`
            :obj:`arc.divalent_atom_data.Ytterbium174` }
            the first atom in the pair-state
        state1 ([int,int,float,float,(float)]): specification of the state
            of the first state as an array of values :math:`[n,l,j,m_j]`.
            For :obj:`arc.divalent_atom_functions.DivalentAtom` and other divalent atoms, 5th value
            should be added specifying total spin angular momentum `s`.
            Full definition of state then has format
            :math:`[n,l,j,m_j,s]`.
        atom2 (:obj:`arc.alkali_atom_functions.AlkaliAtom` or :obj:`arc.divalent_atom_functions.DivalentAtom`):
            ={ :obj:`arc.alkali_atom_data.Lithium6`,
            :obj:`arc.alkali_atom_data.Lithium7`,
            :obj:`arc.alkali_atom_data.Sodium`,
            :obj:`arc.alkali_atom_data.Potassium39`,
            :obj:`arc.alkali_atom_data.Potassium40`,
            :obj:`arc.alkali_atom_data.Potassium41`,
            :obj:`arc.alkali_atom_data.Rubidium85`,
            :obj:`arc.alkali_atom_data.Rubidium87`,
            :obj:`arc.alkali_atom_data.Caesium`,
            :obj:`arc.divalent_atom_data.Strontium88`,
            :obj:`arc.divalent_atom_data.Calcium40`
            :obj:`arc.divalent_atom_data.Ytterbium174` }
            the second atom in the pair-state
        state2 ([int,int,float,float,(float)]): specification of the state
            of the second state as an array of values :math:`[n,l,j,m_j]`,
            For :obj:`arc.divalent_atom_functions.DivalentAtom` and other divalent atoms, 5th value
            should be added specifying total spin angular momentum `s`.
            Full definition of state then has format
            :math:`[n,l,j,m_j,s]`.

    Note:
        In checking if certain state is dipole coupled to the original
        state, only the highest contributing state is checked for dipole
        coupling. This should be fine if one is interested in resonances
        in weak fields. For stronger fields, one might want to include
        effect of coupling to other contributing base states.
    """

    def __init__(self, atom1, state1, atom2, state2):
        self.atom1 = atom1
        if issubclass(type(self.atom1), DivalentAtom) and (
            len(state1) != 5 or (state1[4] != 0 and state1[4] != 1)
        ):
            raise ValueError(
                "For divalent atoms state specification has to "
                "include total spin angular momentum s as the last "
                "number in the state specification [n,l,j,m_j,s]."
            )
        self.state1 = state1
        # add explicitly total spin of the state for alkali atoms
        if len(self.state1) == 4:
            self.state1.append(0.5)

        self.atom2 = atom2
        # BUGFIX: this check previously validated `state1` again
        # (copy-paste error), so a malformed `state2` for a divalent
        # atom2 slipped through; it now validates `state2`.
        if issubclass(type(self.atom2), DivalentAtom) and (
            len(state2) != 5 or (state2[4] != 0 and state2[4] != 1)
        ):
            raise ValueError(
                "For divalent atoms state specification has to "
                "include total spin angular momentum s as the last "
                "number in the state specification [n,l,j,m_j,s]."
            )
        self.state2 = state2
        # add explicitly total spin of the state for alkali atoms
        if len(self.state2) == 4:
            self.state2.append(0.5)

        # Unperturbed pair-state energy, converted to GHz.
        self.pairStateEnergy = (
            (
                atom1.getEnergy(
                    self.state1[0],
                    self.state1[1],
                    self.state1[2],
                    s=self.state1[4],
                )
                + atom2.getEnergy(
                    self.state2[0],
                    self.state2[1],
                    self.state2[2],
                    s=self.state2[4],
                )
            )
            * C_e
            / C_h
            * 1e-9
        )

    def findResonances(
        self,
        nMin,
        nMax,
        maxL,
        eFieldList,
        energyRange=[-5.0e9, +5.0e9],
        Bz=0,
        progressOutput=False,
    ):
        r"""
        Finds near-resonant dipole-coupled pair-states

        For states in range of principal quantum numbers [`nMin`,`nMax`]
        and orbital angular momentum [0,`maxL`], for a range of electric fields
        given by `eFieldList` function will find near-resonant pair states.

        Only states that are in the range given by `energyRange` will be
        extracted from the pair-state Stark maps.

        Args:
            nMin (int): minimal principal quantum number of the state to be
                included in the StarkMap calculation
            nMax (int): maximal principal quantum number of the state to be
                included in the StarkMap calculation
            maxL (int): maximum value of orbital angular momentum for the states
                to be included in the calculation
            eFieldList ([float]): list of the electric fields (in V/m) for
                which to calculate level diagram (StarkMap)
            Bz (float): optional, magnetic field directed along z-axis in
                units of Tesla. Calculation will be correct only for weak
                magnetic fields, where paramagnetic term is much stronger
                then diamagnetic term. Diamagnetic term is neglected.
            energyRange ([float,float]): optional argument. Minimal and maximal
                energy of that some dipole-coupled state should have in order
                to keep it in the plot (in units of Hz). By default it finds
                states that are :math:`\pm 5` GHz
            progressOutput (:obj:`bool`, optional): if True prints the
                progress of calculation; Set to false by default.
        """
        self.eFieldList = eFieldList
        self.Bz = Bz
        eMin = energyRange[0] * 1.0e-9  # in GHz
        eMax = energyRange[1] * 1.0e-9

        # Stark map of the first atom's original state.
        sm1 = StarkMap(self.atom1)
        sm1.defineBasis(
            self.state1[0],
            self.state1[1],
            self.state1[2],
            self.state1[3],
            nMin,
            nMax,
            maxL,
            Bz=self.Bz,
            progressOutput=progressOutput,
            s=self.state1[4],
        )
        sm1.diagonalise(eFieldList, progressOutput=progressOutput)
        # Reuse sm1 when both atoms and states are identical.
        if (
            (self.atom2 is self.atom1)
            and (self.state1[0] == self.state2[0])
            and (self.state1[1] == self.state2[1])
            and (abs(self.state1[2] - self.state2[2]) < 0.1)
            and (abs(self.state1[3] - self.state2[3]) < 0.1)
            and (abs(self.state1[4] - self.state2[4]) < 0.1)
        ):
            sm2 = sm1
        else:
            sm2 = StarkMap(self.atom2)
            sm2.defineBasis(
                self.state2[0],
                self.state2[1],
                self.state2[2],
                self.state2[3],
                nMin,
                nMax,
                maxL,
                Bz=self.Bz,
                progressOutput=progressOutput,
                s=self.state2[4],
            )
            sm2.diagonalise(eFieldList, progressOutput=progressOutput)

        # Track the original pair-state energy and its contribution at
        # each field: pick the eigenstate with maximal highlight value.
        self.originalStateY = []
        self.originalStateContribution = []
        for i in xrange(len(sm1.eFieldList)):
            jmax1 = 0
            jmax2 = 0
            for j in xrange(len(sm1.highlight[i])):
                if sm1.highlight[i][j] > sm1.highlight[i][jmax1]:
                    jmax1 = j
            for j in xrange(len(sm2.highlight[i])):
                if sm2.highlight[i][j] > sm2.highlight[i][jmax2]:
                    jmax2 = j
            self.originalStateY.append(
                sm1.y[i][jmax1] + sm2.y[i][jmax2] - self.pairStateEnergy
            )
            self.originalStateContribution.append(
                (sm1.highlight[i][jmax1] + sm2.highlight[i][jmax2]) / 2.0
            )

        # M = mj1 + mj2 is conserved with dipole-dipole interaction
        dmlist1 = [1, 0]
        if self.state1[3] != 0.5:
            dmlist1.append(-1)
        dmlist2 = [1, 0]
        if self.state2[3] != 0.5:
            dmlist2.append(-1)

        # Dipole-coupled states have l' = l + 1, j' = j + 1 as the
        # starting point for the coupled-state Stark maps.
        n1 = self.state1[0]
        l1 = self.state1[1] + 1
        j1 = self.state1[2] + 1
        mj1 = self.state1[3]

        n2 = self.state2[0]
        l2 = self.state2[1] + 1
        j2 = self.state2[2] + 1
        mj2 = self.state2[3]

        self.fig, self.ax = plt.subplots(1, 1, figsize=(9.0, 6))
        cm = LinearSegmentedColormap.from_list("mymap", ["0.9", "red", "black"])
        cNorm = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)

        self.r = []
        self.y = []
        self.composition = []

        for dm1 in dmlist1:
            sm1.defineBasis(
                n1,
                l1,
                j1,
                mj1 + dm1,
                nMin,
                nMax,
                maxL,
                Bz=self.Bz,
                progressOutput=progressOutput,
                s=self.state1[4],
            )
            sm1.diagonalise(eFieldList, progressOutput=progressOutput)

            for dm2 in dmlist2:
                sm2.defineBasis(
                    n2,
                    l2,
                    j2,
                    mj2 + dm2,
                    nMin,
                    nMax,
                    maxL,
                    Bz=self.Bz,
                    progressOutput=progressOutput,
                    s=self.state2[4],
                )
                sm2.diagonalise(eFieldList, progressOutput=progressOutput)

                for i in xrange(len(sm1.eFieldList)):
                    yList = []
                    compositionList = []
                    if progressOutput:
                        sys.stdout.write("\rE=%.2f V/m " % sm1.eFieldList[i])
                        sys.stdout.flush()
                    for j in xrange(len(sm1.y[i])):
                        for jj in xrange(len(sm2.y[i])):
                            energy = (
                                sm1.y[i][j]
                                + sm2.y[i][jj]
                                - self.pairStateEnergy
                            )
                            statec1 = sm1.basisStates[
                                sm1.composition[i][j][0][1]
                            ]
                            statec2 = sm2.basisStates[
                                sm2.composition[i][jj][0][1]
                            ]
                            # Keep the pair state only if it is in the
                            # requested energy window AND both dominant
                            # components are dipole coupled (dl == 1)
                            # to the original states.
                            if (
                                (energy > eMin)
                                and (energy < eMax)
                                and (abs(statec1[1] - self.state1[1]) == 1)
                                and (abs(statec2[1] - self.state2[1]) == 1)
                            ):
                                # add this to PairStateMap
                                yList.append(energy)
                                compositionList.append(
                                    [
                                        sm1._stateComposition(
                                            sm1.composition[i][j]
                                        ),
                                        sm2._stateComposition(
                                            sm2.composition[i][jj]
                                        ),
                                    ]
                                )
                    if len(self.y) <= i:
                        self.y.append(yList)
                        self.composition.append(compositionList)
                    else:
                        self.y[i].extend(yList)
                        self.composition[i].extend(compositionList)
                if progressOutput:
                    print("\n")

        for i in xrange(len(sm1.eFieldList)):
            self.y[i] = np.array(self.y[i])
            self.composition[i] = np.array(self.composition[i])
            self.ax.scatter(
                [sm1.eFieldList[i] / 100.0] * len(self.y[i]),
                self.y[i],
                c="k",
                s=5,
                norm=cNorm,
                cmap=cm,
                lw=0,
                picker=5,
            )
        self.ax.plot(sm1.eFieldList / 100.0, self.originalStateY, "r-", lw=1)
        self.ax.set_ylim(eMin, eMax)
        self.ax.set_xlim(
            min(self.eFieldList) / 100.0, max(self.eFieldList) / 100.0
        )
        self.ax.set_xlabel("Electric field (V/cm)")
        self.ax.set_ylabel(r"Pair-state relative energy, $\Delta E/h$ (GHz)")

    def showPlot(self, interactive=True):
        """
        Plots initial state Stark map and its dipole-coupled resonances

        Args:
            interactive (optional, bool): if True (by default) points on plot
                will be clickable so that one can find the state labels
                and their composition (if they are heavily admixed).

        Note:
            Zero is given by the initial states of the atom given in
            initialisation of calculations, calculated **in absence of
            magnetic field B_z**. In other words, for non-zero magnetic
            field the inital states will have offset from zero even
            for zero electric field due to Zeeman shift.
        """
        if self.fig != 0:
            if interactive:
                self.ax.set_title("Click on state to see state composition")
                self.clickedPoint = 0
                self.fig.canvas.draw()
                self.fig.canvas.mpl_connect("pick_event", self._onPick)
            plt.show()
        else:
            print("Error while showing a plot: nothing is plotted yet")

    def _onPick(self, event):
        # Matplotlib pick-event callback: highlight the clicked pair
        # state and show its composition in the plot title.
        # NOTE: keep in sync with _onPick2, which does the same from
        # raw (xdata, ydata) coordinates.
        if isinstance(event.artist, matplotlib.collections.PathCollection):
            x = event.mouseevent.xdata * 100.0
            y = event.mouseevent.ydata

            # Nearest electric field in the list.
            i = np.searchsorted(self.eFieldList, x)
            if i == len(self.eFieldList):
                i -= 1
            if (i > 0) and (
                abs(self.eFieldList[i - 1] - x) < abs(self.eFieldList[i] - x)
            ):
                i -= 1

            # Nearest pair-state energy at that field.
            j = 0
            for jj in xrange(len(self.y[i])):
                if abs(self.y[i][jj] - y) < abs(self.y[i][j] - y):
                    j = jj

            if self.clickedPoint != 0:
                self.clickedPoint.remove()

            (self.clickedPoint,) = self.ax.plot(
                [self.eFieldList[i] / 100.0],
                [self.y[i][j]],
                "bs",
                linewidth=0,
                zorder=3,
            )

            atom1 = self.atom1.elementName
            atom2 = self.atom2.elementName
            composition1 = str(self.composition[i][j][0])
            composition2 = str(self.composition[i][j][1])
            self.ax.set_title(
                ("[%s,%s]=[" % (atom1, atom2))
                + composition1
                + ","
                + composition2
                + "]",
                fontsize=10,
            )

            event.canvas.draw()

    def _onPick2(self, xdata, ydata):
        # Same as _onPick, but driven directly by data coordinates
        # (used by interactive front-ends without matplotlib events).
        x = xdata * 100.0
        y = ydata

        # Nearest electric field in the list.
        i = np.searchsorted(self.eFieldList, x)
        if i == len(self.eFieldList):
            i -= 1
        if (i > 0) and (
            abs(self.eFieldList[i - 1] - x) < abs(self.eFieldList[i] - x)
        ):
            i -= 1

        # Nearest pair-state energy at that field.
        j = 0
        for jj in xrange(len(self.y[i])):
            if abs(self.y[i][jj] - y) < abs(self.y[i][j] - y):
                j = jj

        if self.clickedPoint != 0:
            self.clickedPoint.remove()

        (self.clickedPoint,) = self.ax.plot(
            [self.eFieldList[i] / 100.0],
            [self.y[i][j]],
            "bs",
            linewidth=0,
            zorder=3,
        )

        atom1 = self.atom1.elementName
        atom2 = self.atom2.elementName
        composition1 = str(self.composition[i][j][0])
        composition2 = str(self.composition[i][j][1])
        self.ax.set_title(
            ("[%s,%s]=[" % (atom1, atom2))
            + composition1
            + ","
            + composition2
            + "]",
            fontsize=10,
        )
from __future__ import division, print_function, absolute_import
from arc.alkali_atom_functions import AlkaliAtom, printStateLetter
from arc.wigner import Wigner3j, Wigner6j
from scipy.constants import physical_constants
import csv
import os
import numpy as np
from math import sqrt
from arc._database import sqlite3, UsedModulesARC
# Teach sqlite3 how to bind NumPy scalar types as query parameters by
# adapting them to the corresponding built-in Python types; without these
# adapters, inserting np.float64/np.int64 values would raise
# InterfaceError on some sqlite3 versions.
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.int32, int)
class DivalentAtom(AlkaliAtom):
    """
    Implements general calculations for Alkaline Earths, and other divalent
    atoms.

    This class inherits :obj:`arc.alkali_atom_functions.AlkaliAtom` .
    Most of the methods can be directly used from there, and the source
    for them is provided in the base class. Few methods that are
    implemented differently for Alkaline Earths are defined here.

    Args:
        preferQuantumDefects (bool):
            Use quantum defects for energy level calculations. If False,
            uses NIST ASD values where available. If True, uses quantum
            defects for energy calculations for principal quantum numbers
            within the range specified in :obj:`defectFittingRange` which
            is specified for each element and series separately.
            For principal quantum numbers below this value, NIST ASD
            values are used if available, since quantum defects are not
            reliable outside their fitting range. Default is True.
        cpp_numerov (bool):
            This switch for Alkaline Earths at the moment doesn't have
            any effect since wavefunction calculation function is not
            implemented (d.m.e. and quadrupole matrix elements are
            calculated directly semiclassically)
    """

    modelPotential_coef = dict()
    """
    Model potential parameters fitted from experimental observations for
    different l (electron angular momentum)
    """

    # Modified Rydberg-Ritz coefficients; all-zero placeholders here,
    # overridden by concrete element subclasses.
    quantumDefect = [
        [
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
        [
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ],
    ]
    """ Contains list of modified Rydberg-Ritz coefficients for calculating
        quantum defects for
        [[ :math:`^1S_{0},^1P_{1},^1D_{2},^1F_{3}`],
        [ :math:`^3S_{0},^3P_{0},^3D_{1},^3F_{2}`],
        [ :math:`^3S_{0},^3P_{1},^3D_{2},^3F_{3}`],
        [ :math:`^3S_{1},^3P_{2},^3D_{3},^3F_{4}`]]."""

    #: file with .csv data, each row is
    #: `[n, l, s, j, energy, source, absolute uncertanty]`
    levelDataFromNIST = ""

    #: Not used with DivalentAtom, see :obj:`defectFittingRange` instead.
    minQuantumDefectN = None

    #: Used for AlkalineEarths to define minimum and maximum principal quantum
    #: number for which quantum defects are valid. Ranges are stored under
    #: keys defined as state terms ({'stateLabel':[minN, maxN]}, e.g. '1S0').
    #: Dictionary returns array
    #: stating minimal and maximal principal quantum number for which quantum
    #: defects were fitted. For example::
    #:     limits = self.defectFittingRange['1S0']
    #:     print("Minimal n = %d" % limits[0])
    #:     print("Maximal n = %d" % limits[1])
    defectFittingRange = {}

    #: flag that is turned to True if the energy levels of this atom were
    #: calculated by extrapolating with quantum defects values outside the
    #: quantum defect fitting range.
    energyLevelsExtrapolated = False
def __init__(self, preferQuantumDefects=True, cpp_numerov=True):
    """
    Opens the local precalculated-values database and populates it.

    Loads previously calculated dipole and quadrupole matrix elements
    from .npy files into SQLite tables (creating the tables on first
    use), and imports NIST ASD energy levels from
    :obj:`levelDataFromNIST` when that file is specified.

    Args:
        preferQuantumDefects (bool): if False, the "NIST_"-prefixed
            matrix-element files are used instead of the defaults.
        cpp_numerov (bool): if True, loads the compiled Numerov
            wavefunction routine (currently unused for divalent atoms).
    """
    # Record module usage for ARC usage statistics.
    UsedModulesARC.divalent_atoms = True
    self.cpp_numerov = cpp_numerov
    self.preferQuantumDefects = preferQuantumDefects

    # Opens self.conn and (re)creates the energyLevel table.
    self._databaseInit()
    c = self.conn.cursor()

    if self.cpp_numerov:
        from .arc_c_extensions import NumerovWavefunction

        self.NumerovWavefunction = NumerovWavefunction

    # load dipole matrix elements previously calculated
    data = []
    if self.dipoleMatrixElementFile != "":
        if preferQuantumDefects is False:
            # NIST-based matrix elements live in a separately named file.
            self.dipoleMatrixElementFile = (
                "NIST_" + self.dipoleMatrixElementFile
            )

        try:
            data = np.load(
                os.path.join(self.dataFolder, self.dipoleMatrixElementFile),
                encoding="latin1",
                allow_pickle=True,
            )
        except IOError as e:
            # Missing cache file is not fatal; matrix elements will be
            # recalculated on demand.
            print(
                "Error reading dipoleMatrixElement File "
                + os.path.join(
                    self.dataFolder, self.dipoleMatrixElementFile
                )
            )
            print(e)
    # save to SQLite database
    try:
        c.execute(
            """SELECT COUNT(*) FROM sqlite_master
                        WHERE type='table' AND name='dipoleME';"""
        )
        if c.fetchone()[0] == 0:
            # create table
            c.execute(
                """CREATE TABLE IF NOT EXISTS dipoleME
                 (n1 TINYINT UNSIGNED, l1 TINYINT UNSIGNED,
                 j1 TINYINT UNSIGNED,
                 n2 TINYINT UNSIGNED, l2 TINYINT UNSIGNED,
                 j2 TINYINT UNSIGNED, s TINYINT UNSIGNED,
                 dme DOUBLE,
                 PRIMARY KEY (n1,l1,j1,n2,l2,j2,s)
                ) """
            )
            if len(data) > 0:
                c.executemany(
                    "INSERT INTO dipoleME VALUES (?,?,?,?,?,?,?,?)", data
                )
            self.conn.commit()
    except sqlite3.Error as e:
        print("Error while loading precalculated values into the database")
        print(e)
        exit()

    # load quadrupole matrix elements previously calculated
    data = []
    if self.quadrupoleMatrixElementFile != "":
        if preferQuantumDefects is False:
            self.quadrupoleMatrixElementFile = (
                "NIST_" + self.quadrupoleMatrixElementFile
            )
        try:
            data = np.load(
                os.path.join(
                    self.dataFolder, self.quadrupoleMatrixElementFile
                ),
                encoding="latin1",
                allow_pickle=True,
            )
        except IOError as e:
            print(
                "Error reading quadrupoleMatrixElementFile File "
                + os.path.join(
                    self.dataFolder, self.quadrupoleMatrixElementFile
                )
            )
            print(e)
    # save to SQLite database
    try:
        c.execute(
            """SELECT COUNT(*) FROM sqlite_master
                        WHERE type='table' AND name='quadrupoleME';"""
        )
        if c.fetchone()[0] == 0:
            # create table
            c.execute(
                """CREATE TABLE IF NOT EXISTS quadrupoleME
                 (n1 TINYINT UNSIGNED, l1 TINYINT UNSIGNED,
                 j1 TINYINT UNSIGNED,
                 n2 TINYINT UNSIGNED, l2 TINYINT UNSIGNED,
                 j2 TINYINT UNSIGNED, s TINYINT UNSIGNED,
                 qme DOUBLE,
                 PRIMARY KEY (n1,l1,j1,n2,l2,j2,s)
                ) """
            )
            if len(data) > 0:
                c.executemany(
                    "INSERT INTO quadrupoleME VALUES (?,?,?,?,?,?,?,?)",
                    data,
                )
            self.conn.commit()
    except sqlite3.Error as e:
        print("Error while loading precalculated values into the database")
        print(e)
        exit()

    if self.levelDataFromNIST == "":
        print(
            "NIST level data file not specified."
            " Only quantum defects will be used."
        )
    else:
        # Import measured energy levels row by row into the fresh
        # energyLevel table created by _databaseInit().
        levels = self._parseLevelsFromNIST(
            os.path.join(self.dataFolder, self.levelDataFromNIST)
        )
        br = 0
        while br < len(levels):
            self._addEnergy(*levels[br])
            br = br + 1
        try:
            self.conn.commit()
        except sqlite3.Error as e:
            print(
                "Error while loading precalculated values"
                "into the database"
            )
            print(e)
            exit()

    self._readLiteratureValues()
def _parseLevelsFromNIST(self, fileData):
data = np.loadtxt(fileData, delimiter=",", usecols=(0, 1, 3, 2, 4))
return data
def _addEnergy(self, n, l, j, s, energy):
    """
    Adds energy level, stored relative to the ionisation threshold.

    The input energy (cm^-1 relative to the ground state) is converted
    to eV and shifted by :obj:`ionisationEnergy` before being saved,
    so the stored value is negative for bound states.

    NOTE:
        Requres changes to be commited to the sql database afterwards!

    Args:
        n: principal quantum number
        l: orbital angular momentum quantum number
        j: total angular momentum quantum number
        s: spin quantum number
        energy: energy in cm^-1 relative to the ground state
    """
    c = self.conn.cursor()
    c.execute(
        "INSERT INTO energyLevel VALUES (?,?,?,?,?)",
        (
            round(n),
            round(l),
            round(j),
            round(s),
            # cm^-1 -> m^-1 -> eV, then referenced to the ionisation
            # threshold; saves energy in eV
            energy
            * 1.0e2
            * physical_constants[
                "inverse meter-electron volt relationship"
            ][0]
            - self.ionisationEnergy,
        ),
    )
    # Track the highest n for which measured data exists.
    self.NISTdataLevels = max(self.NISTdataLevels, round(n))
def _databaseInit(self):
    """
    Opens the precalculated-values SQLite database for this atom.

    Stores the connection in :obj:`self.conn` and recreates the
    `energyLevel` table from scratch (any previously stored levels are
    dropped so they can be re-imported from the NIST data file).
    """
    self.conn = sqlite3.connect(
        os.path.join(self.dataFolder, self.precalculatedDB)
    )
    c = self.conn.cursor()

    # create space for storing NIST/literature energy levels
    c.execute(
        """SELECT COUNT(*) FROM sqlite_master
                    WHERE type='table' AND name='energyLevel';"""
    )
    if c.fetchone()[0] != 0:
        # Drop stale data — levels are reloaded on every init.
        c.execute("""DROP TABLE energyLevel""")
    # create fresh table
    c.execute(
        """CREATE TABLE IF NOT EXISTS energyLevel
         (n TINYINT UNSIGNED, l TINYINT UNSIGNED, j TINYINT UNSIGNED,
         s TINYINT UNSIGNED,
         energy DOUBLE,
         PRIMARY KEY (n, l, j, s)
        ) """
    )
    self.conn.commit()
def getEnergy(self, n, l, j, s=None):
    """
    State energy in eV, relative to the ionisation threshold.

    Overrides the alkali-atom implementation: for divalent atoms the
    total spin `s` must be passed explicitly, and quantum defects are
    only trusted inside the per-series fitting range given by
    :obj:`defectFittingRange`. Outside that range, a stored measured
    (NIST ASD) value is preferred when available; if the quantum-defect
    formula is used outside the fitting range anyway,
    :obj:`energyLevelsExtrapolated` is set to flag the extrapolation.

    Args:
        n (int): principal quantum number
        l (int): orbital angular momentum quantum number
        j (float): total angular momentum quantum number
        s (int): required keyword argument; total spin angular momentum
            (`s=0` for singlet, `s=1` for triplet series)

    Returns:
        float: state energy (eV); negative for bound states

    Raises:
        ValueError: if `s` is not specified, or if `l >= n`
    """
    if s is None:
        raise ValueError(
            "Spin state for DivalentAtom has to be "
            "explicitly defined as a keyword argument "
            "s=0 or s=1"
        )
    if l >= n:
        raise ValueError(
            "Requested energy for state l=%d >= n=%d !" % (l, n)
        )

    # Series label, e.g. '1S0' or '3P1', used to key defectFittingRange.
    stateLabel = "%d%s%d" % (round(2 * s + 1), printStateLetter(l), j)
    minQuantumDefectN = 100000
    maxQuantumDefectN = 0
    if stateLabel in self.defectFittingRange.keys():
        minQuantumDefectN = self.defectFittingRange[stateLabel][0]
        maxQuantumDefectN = self.defectFittingRange[stateLabel][1]

    # use NIST data ?
    if not self.preferQuantumDefects or n < minQuantumDefectN:
        savedEnergy = self._getSavedEnergy(n, l, j, s=s)
        if abs(savedEnergy) > 1e-8:
            return savedEnergy
        else:
            # No measurement stored; fall through to quantum defects
            # and record that we are extrapolating if outside range.
            if n < minQuantumDefectN or n > maxQuantumDefectN:
                self.energyLevelsExtrapolated = True

    # else, use quantum defects
    defect = self.getQuantumDefect(n, l, j, s=s)
    return -self.scaledRydbergConstant / ((n - defect) ** 2)
def _getSavedEnergy(self, n, l, j, s=0):
    """
    Looks up a stored measured energy level in the local database.

    Returns:
        float: stored energy (eV, relative to the ionisation
            threshold), or 0 when no measurement is saved for the
            requested state.
    """
    c = self.conn.cursor()
    c.execute(
        """SELECT energy FROM energyLevel WHERE
        n= ? AND l = ? AND j = ? AND
        s = ? """,
        (n, l, j, s),
    )
    energy = c.fetchone()
    if energy:
        return energy[0]
    else:
        return 0  # there is no saved energy level measurement
def getRadialMatrixElement(
self, n1, l1, j1, n2, l2, j2, s=None, useLiterature=True
):
"""
Radial part of the dipole matrix element
Calculates :math:`\\int \\mathbf{d}r~R_{n_1,l_1,j_1}(r)\\cdot \
R_{n_1,l_1,j_1}(r) \\cdot r^3`.
Args:
n1 (int): principal quantum number of state 1
l1 (int): orbital angular momentum of state 1
j1 (float): total angular momentum of state 1
n2 (int): principal quantum number of state 2
l2 (int): orbital angular momentum of state 2
j2 (float): total angular momentum of state 2
s (float): is required argument, total spin angular momentum of
state. Specify `s=0` for singlet state or `s=1` for
triplet state.
useLiterature (bool): optional, should literature values for
dipole matrix element be used if existing? If true,
compiled values stored in `literatureDMEfilename` variable
for a given atom (file is stored locally at ~/.arc-data/),
will be checked, and if the value is found, selects the
value with smallest error estimate (if there are multiple
entries). If no value is found, it will default to numerical
integration of wavefunctions. By default True.
Returns:
float: dipole matrix element (:math:`a_0 e`).
"""
if s is None:
raise ValueError(
"You must specify total angular momentum s " "explicitly."
)
dl = abs(l1 - l2)
dj = abs(j2 - j2)
if not (dl == 1 and (dj < 1.1)):
return 0
if self.getEnergy(n1, l1, j1, s=s) > self.getEnergy(n2, l2, j2, s=s):
temp = n1
n1 = n2
n2 = temp
temp = l1
l1 = l2
l2 = temp
temp = j1
j1 = j2
j2 = temp
n1 = round(n1)
n2 = round(n2)
l1 = round(l1)
l2 = round(l2)
c = self.conn.cursor()
if useLiterature:
# is there literature value for this DME? If there is,
# use the best one (smalles error)
c.execute(
"""SELECT dme FROM literatureDME WHERE
n1= ? AND l1 = ? AND j1 = ? AND
n2 = ? AND l2 = ? AND j2 = ? AND s = ?
ORDER BY errorEstimate ASC""",
(n1, l1, j1, n2, l2, j2, s),
)
answer = c.fetchone()
if answer:
# we did found literature value
return answer[0]
# was this calculated before? If it was, retrieve from memory
c.execute(
"""SELECT dme FROM dipoleME WHERE
n1= ? AND l1 = ? AND j1 = ? AND
n2 = ? AND l2 = ? AND j2 = ? AND s = ?""",
(n1, l1, j1, n2, l2, j2, s),
)
dme = c.fetchone()
if dme:
return dme[0]
dipoleElement = self._getRadialDipoleSemiClassical(
n1, l1, j1, n2, l2, j2, s=s
)
c.execute(
""" INSERT INTO dipoleME VALUES (?,?,?, ?,?,?, ?, ?)""",
[n1, l1, j1, n2, l2, j2, s, dipoleElement],
)
self.conn.commit()
return dipoleElement
def _readLiteratureValues(self):
# clear previously saved results, since literature file
# might have been updated in the meantime
c = self.conn.cursor()
c.execute("""DROP TABLE IF EXISTS literatureDME""")
c.execute(
"""SELECT COUNT(*) FROM sqlite_master
WHERE type='table' AND name='literatureDME';"""
)
if c.fetchone()[0] == 0:
# create table
c.execute(
"""CREATE TABLE IF NOT EXISTS literatureDME
(n1 TINYINT UNSIGNED, l1 TINYINT UNSIGNED, j1 TINYINT UNSIGNED,
n2 TINYINT UNSIGNED, l2 TINYINT UNSIGNED, j2 TINYINT UNSIGNED,
s TINYINT UNSIGNED,
dme DOUBLE,
typeOfSource TINYINT,
errorEstimate DOUBLE,
comment TINYTEXT,
ref TINYTEXT,
refdoi TINYTEXT
);"""
)
c.execute(
"""CREATE INDEX compositeIndex
ON literatureDME (n1,l1,j1,n2,l2,j2,s); """
)
self.conn.commit()
if self.literatureDMEfilename == "":
return 0 # no file specified for literature values
try:
fn = open(
os.path.join(self.dataFolder, self.literatureDMEfilename), "r"
)
dialect = csv.Sniffer().sniff(fn.read(2024), delimiters=";,\t")
fn.seek(0)
data = csv.reader(fn, dialect, quotechar='"')
literatureDME = []
# i=0 is header
i = 0
for row in data:
if i != 0:
n1 = round(row[0])
l1 = round(row[1])
j1 = round(row[2])
s1 = round(row[3])
n2 = round(row[4])
l2 = round(row[5])
j2 = round(row[6])
s2 = round(row[7])
if s1 != s2:
raise ValueError(
"Error reading litearture: database "
"cannot accept spin changing "
"transitions"
)
s = s1
if self.getEnergy(n1, l1, j1, s=s) > self.getEnergy(
n2, l2, j2, s=s
):
temp = n1
n1 = n2
n2 = temp
temp = l1
l1 = l2
l2 = temp
temp = j1
j1 = j2
j2 = temp
# convered from reduced DME in J basis (symmetric notation)
# to radial part of dme as it is saved for calculated
# values
# To-DO : see in what notation are Strontium literature elements saved
print(
"To-do (_readLiteratureValues): see in what notation are Sr literature saved (angular part)"
)
dme = float(row[8]) / (
(-1) ** (round(l1 + s + j2 + 1.0))
* sqrt((2.0 * j1 + 1.0) * (2.0 * j2 + 1.0))
* Wigner6j(j1, 1.0, j2, l2, s, l1)
* (-1) ** l1
* sqrt((2.0 * l1 + 1.0) * (2.0 * l2 + 1.0))
* Wigner3j(l1, 1, l2, 0, 0, 0)
)
comment = row[9]
typeOfSource = round(row[10]) # 0 = experiment; 1 = theory
errorEstimate = float(row[11])
ref = row[12]
refdoi = row[13]
literatureDME.append(
[
n1,
l1,
j1,
n2,
l2,
j2,
s,
dme,
typeOfSource,
errorEstimate,
comment,
ref,
refdoi,
]
)
i += 1
fn.close()
try:
if i > 1:
c.executemany(
"""INSERT INTO literatureDME
VALUES (?,?,?, ?,?,?,?
?,?,?,?,?,?)""",
literatureDME,
)
self.conn.commit()
except sqlite3.Error as e:
print(
"Error while loading precalculated values "
"into the database"
)
print(e)
print(literatureDME)
exit()
except IOError as e:
print(
"Error reading literature values File "
+ self.literatureDMEfilename
)
print(e)
def getLiteratureDME(self, n1, l1, j1, n2, l2, j2, s=0):
"""
Returns literature information on requested transition.
Args:
n1,l1,j1: one of the states we are coupling
n2,l2,j2: the other state to which we are coupling
s: (optional) spin of the state. Default s=0.
Returns:
bool, float, [int,float,string,string,string]:
hasLiteratureValue?, dme, referenceInformation
**If Boolean value is True**, a literature value for
dipole matrix element was found and reduced DME in J basis
is returned as the number. The third returned argument
(array) contains additional information about the
literature value in the following order [ typeOfSource,
errorEstimate , comment , reference, reference DOI]
upon success to find a literature value for dipole matrix
element:
* typeOfSource=1 if the value is theoretical\
calculation; otherwise, if it is experimentally \
obtained value typeOfSource=0
* comment details where within the publication the \
value can be found
* errorEstimate is absolute error estimate
* reference is human-readable formatted reference
* reference DOI provides link to the publication.
**Boolean value is False**, followed by zero and an empty
array if no literature value for dipole matrix element is
found.
Note:
The literature values are stored in /data folder in
<element name>_literature_dme.csv files as a ; separated
values. Each row in the file consists of one literature entry,
that has information in the following order:
* n1
* l1
* j1
* n2
* l2
* j2
* s
* dipole matrix element reduced l basis (a.u.)
* comment (e.g. where in the paper value appears?)
* value origin: 1 for theoretical; 0 for experimental values
* accuracy
* source (human readable formatted citation)
* doi number (e.g. 10.1103/RevModPhys.82.2313 )
If there are several values for a given transition, program
outputs the value that has smallest error (under column
accuracy). The list of values can be expanded - every time
program runs this file is read and the list is parsed again
for use in calculations.
"""
if self.getEnergy(n1, l1, j1, s=s) > self.getEnergy(n2, l2, j2, s=s):
temp = n1
n1 = n2
n2 = temp
temp = l1
l1 = l2
l2 = temp
temp = j1
j1 = j2
j2 = temp
# is there literature value for this DME? If there is,
# use the best one (wit the smallest error)
c = self.conn.cursor()
c.execute(
"""SELECT dme, typeOfSource,
errorEstimate ,
comment ,
ref,
refdoi FROM literatureDME WHERE
n1= ? AND l1 = ? AND j1 = ? AND
n2 = ? AND l2 = ? AND j2 = ? AND s = ?
ORDER BY errorEstimate ASC""",
(n1, l1, j1, n2, l2, j2, s),
)
answer = c.fetchone()
if answer:
# we did found literature value
return (
True,
answer[0],
[answer[1], answer[2], answer[3], answer[4], answer[5]],
)
# if we are here, we were unsucessfull in literature search
# for this value
return False, 0, []
def getQuadrupoleMatrixElement(self, n1, l1, j1, n2, l2, j2, s=0.5):
"""
Radial part of the quadrupole matrix element
Calculates :math:`\\int \\mathbf{d}r~R_{n_1,l_1,j_1}(r)\\cdot \
R_{n_1,l_1,j_1}(r) \\cdot r^4`.
See `Quadrupole calculation example snippet`_ .
.. _`Quadrupole calculation example snippet`:
./Rydberg_atoms_a_primer.html#Quadrupole-matrix-elements
Args:
n1 (int): principal quantum number of state 1
l1 (int): orbital angular momentum of state 1
j1 (float): total angular momentum of state 1
n2 (int): principal quantum number of state 2
l2 (int): orbital angular momentum of state 2
j2 (float): total angular momentum of state 2
s (float): optional. Spin of the state. Default 0.5 is for
Alkali
Returns:
float: quadrupole matrix element (:math:`a_0^2 e`).
"""
dl = abs(l1 - l2)
dj = abs(j1 - j2)
if not ((dl == 0 or dl == 2 or dl == 1) and (dj < 2.1)):
return 0
if self.getEnergy(n1, l1, j1, s=s) > self.getEnergy(n2, l2, j2, s=s):
temp = n1
n1 = n2
n2 = temp
temp = l1
l1 = l2
l2 = temp
temp = j1
j1 = j2
j2 = temp
n1 = round(n1)
n2 = round(n2)
l1 = round(l1)
l2 = round(l2)
# was this calculated before? If yes, retrieve from memory.
c = self.conn.cursor()
c.execute(
"""SELECT qme FROM quadrupoleME WHERE
n1= ? AND l1 = ? AND j1 = ? AND
n2 = ? AND l2 = ? AND j2 = ? AND s= ?""",
(n1, l1, j1, n2, l2, j2, s),
)
qme = c.fetchone()
if qme:
return qme[0]
# if it wasn't, calculate now
quadrupoleElement = self._getRadialQuadrupoleSemiClassical(
n1, l1, j1, n2, l2, j2, s=s
)
c.execute(
""" INSERT INTO quadrupoleME VALUES (?,?,?, ?,?,?,?, ?)""",
[n1, l1, j1, n2, l2, j2, s, quadrupoleElement],
)
self.conn.commit()
return quadrupoleElement
def radialWavefunction(
self, l, s, j, stateEnergy, innerLimit, outerLimit, step
):
"""
Not implemented for Alkaline earths
"""
raise NotImplementedError(
"radialWavefunction calculation for alkaline"
" earths has not been implemented yet."
)
def effectiveCharge(self, l, r):
"""
Not implemented for Alkaline earths
"""
raise NotImplementedError(
"effectiveCharge calculation for alkaline"
" earths has not been implemented yet."
)
def corePotential(self, l, r):
"""
Not implemented for Alkaline earths
"""
raise NotImplementedError(
"corePotential calculation for alkaline"
" earths has not been implemented yet."
)
def potential(self, l, s, j, r):
"""
Not implemented for Alkaline earths
"""
raise NotImplementedError(
"potential calculation for alkaline"
" earths has not been implemented yet."
)
def getStateLifetime(
self, n, l, j, temperature=0, includeLevelsUpTo=0, s=0
):
print(
"WARNING: For AlkalineEarths, lifetimes are observed to be "
"significantly modified by inter-electron correlations that are "
"not included in this code (see Vaillant et al., J. Phys B 47 "
"155001 (2015) for examples). Use with caution."
)
# after waring user, call method from the parent class
# (parent of DivalentAtom is AlkaliAtom)
return super(DivalentAtom, self).getStateLifetime(
n,
l,
j,
temperature=temperature,
includeLevelsUpTo=includeLevelsUpTo,
s=s,
) | ARC-Alkali-Rydberg-Calculator | /ARC_Alkali_Rydberg_Calculator-3.3.0-cp311-cp311-win_amd64.whl/arc/divalent_atom_functions.py | divalent_atom_functions.py |
from scipy.integrate import odeint
from lmfit import minimize, Parameters, report_fit
import matplotlib.pyplot as plt
import numpy as np
import sys
from arc._database import UsedModulesARC
"""
**Contributors:**
getPopulationLifetime - written by Alessandro Greco,
Dipartimento di Fisica *E. Fermi*, Università di Pisa,
Largo Bruno Pontecorvo 3, 56127 Pisa, Italy (alessandrogreco08 at gmail dot com),
the simulations have been compared with experimental data [#greco2019]_
"""
def getPopulationLifetime(
atom,
n,
l,
j,
temperature=0,
includeLevelsUpTo=0,
period=1,
plotting=1,
thresholdState=False,
detailedOutput=False,
):
r"""
Calculates lifetime of atomic **population** taking into account
redistribution of population to other states under spontaneous and
black body induced transitions.
It simulates the time evolution of a system in which all the states,
from the fundamental one to the highest state which you want to include,
are taken into account.
The orbital angular momenta taken into account are only S,P,D,F.
This function is based on getStateLifetime but it takes into account
the re-population processess due to BBR-induced transitions.
For this reason lifetimes of Rydberg states are slightly longer
than those returned by getStateLifetime up to 5-10%.
This function creates a .txt file, plots the time evolution of the
population of the Rydberg states and yields the lifetime values by using
the fitting method from Ref. [#fit]_ .
**Contributed by:** Alessandro Greco (alessandrogreco08 at gmail dot com),
Dipartimento di Fisica *E. Fermi*, Università di Pisa, Largo Bruno Pontecorvo 3, 56127 Pisa, Italy.
The simulations have been compared with experimental data [#greco2019]_ .
**Please cite as:** `original ARC paper`_ and paper introducing
extension [#greco2019]_
.. _`original ARC paper`:
https://doi.org/10.1016/j.cpc.2017.06.015
References:
.. [#fit] https://people.duke.edu/~ccc14/sta-663/CalibratingODEs.html
.. [#greco2019] M. Archimi, C. Simonelli, L. Di Virgilio, A. Greco,
M. Ceccanti, E. Arimondo, D. Ciampini, I. I. Ryabtsev, I. I. Beterov,
and O. Morsch, *Phys. Rev. A* **100**, 030501(R) (2019)
https://doi.org/10.1103/PhysRevA.100.030501
**Some definitions:**
What are the **ensemble**, the **support**, the **ground**?
According to https://arxiv.org/abs/1907.01254
The sum of the populations of every state which is detected as Rydberg state
(above the threshold state which must be set\) is called **ensemble**
The sum of the populations of every state which is detected as Rydberg state,
without the target state, is called **support**
The sum of the populations of every state which cannot be detected as Rydberg state
(under the threshold state which must be set) is called **ground**
**gammaTargetSpont** is the rate which describes transitions
from Target State towards all the levels under the threshold
state, i.e. Ground State
**gammaTargetBBR** is the rate which describes transitions towards all
the levels above the threshold state, i.e. Support State
**gammaSupporSpont** is the rate which describes transitions from the
Support State towards all the levels under the threshold state,
i.e. Ground State
**gammaSupportBBR** is the rate which describes transitions from
upport State towards all the levels above the threshold state,
i.e. Target State)
Args:
n (int): principal quantum number of the state whose population lifetime
we are calculating, it's called the *Target* state and its color is
green in the plot.
l (int): orbital angular momentum number of the state whose population lifetime
we are calculating, it's called the *Target* state and its color is
green in the plot.
j (float): total angular momentum of the state whose population lifetime
we are calculating, it's called the *Target* state and its color is
green in the plot.
temperature (float): Temperature at which the atom environment
is, measured in K. If this parameter is non-zero, user has
to specify transitions up to which state (due to black-body
decay) should be included in calculation.
includeLevelsUpTo (int): At non zero temperatures,
this specifies maximum principal quantum number of the state
to which black-body induced transitions will be included.
Minimal value of the parameter in that case is =`n+1
period: Specifies the period that you want to consider for
the time evolution, in microseconds.
plotting (int): optional. It is set to 1 by default. The options are
(see also image at the bottom of documentation):
**plotting=0** no plot;
**plotting=1** plots the population of the target (n,l,j) state
with its fit and it yields the value of the target lifetime
in microseconds;
**plotting=2** plots the whole system (Ensemble, Support, Target),
no fit;
**plotting=3** plots the whole system (Ensemble, Support, Target)
and it fits the Ensemble and Target curves, it yields the values
of the Ensemble lifetime and Target lifetime in microseconds;
**plotting=4** it plots the whole system (Ensemble, Support, Target) +
the Ground (which is the complementary of the ensemble).
It considers the whole system like a three-level model (Ground
State, Support State, Target State) and yields four transition
rates.
thresholdState (int): optional. It specifies the principal quantum
number n of the lowest state (it's referred to S state!) which is
detectable by your experimental apparatus, it directly modifies
the *Ensemble* and the *Support* (whose colors are red and blue
respectively in the plot). It is necessary to define a threshold
state if plotting = 2, 3 or 4 has been selected. It is not necessary
to define a threshold state if plotting = 0 or 1 has been selected.
detailedOutput=True: optional. It writes a .txt file with the time
evolution of all the states. It is set to false by default.
(The first column is the time, the other are the population of all
the states. The order is time, nS, nP0.5, nP1.5, nD1.5, nD2.5,
nF2.5, nF3.5, and n is ordered from the lowest state to the highest one.
For example: time, 4S, 5S ,6S ,ecc... includeLevelsUpToS, 4P0.5,
5P0.5, 6P0.5, ecc... includeLevelsUpToP0.5, 4P1.5, 5P1.5, 6P1.5, ecc...)
Returns:
Plots and a .txt file.
**plotting = 0,1** create a .txt file with two coloumns
(time \t target population);
**plotting = 2,3,4** create a .txt file with four coloumns
(time \t ensemble population \t support population \t target population)
Example:
>>> from arc import *
>>> from arc.advanced.population_lifetime import getPopulationLifetime
>>> atom = Rubidium()
>>> getPopulationLifetime(atom, 10, 1, 1.5, temperature =300,
includeLevelsUpTo=15, detailedOutput=True, plotting=1)
"""
UsedModulesARC.advanced_getPopulationTime = True
if l > 3:
print("Error: this function takes into account only S, P, D, F states.")
return
if plotting > 4:
print("Error: plotting must be equal to 0, 1, 2, 3 or 4.")
return
if ((not thresholdState) and (plotting > 1)) or (thresholdState):
print(
"Error: you need to specify the principal quantum number of the "
"thresholdState if you use plotting=2, 3 or 4."
)
return
if (plotting == 0) or (plotting == 1):
thresholdState = False
import time
start = time.time()
# What state do you want to excite?
STATE = n
L = l
J = j
# Which states do you want to consider for the BBR width?
if includeLevelsUpTo - STATE < 0:
raise ValueError("Error: includeLevelsUpTo must be >= n")
WidthBBR = includeLevelsUpTo - STATE
# What is the temperature?
if temperature == 0:
raise ValueError(
"Error: if you don't want BBR-induced transition, use getStateLifetime"
)
TEMP_BBR = temperature
# What is the critical state for the ionization?
if thresholdState - STATE >= 0:
raise ValueError("Error: thresholdState must be < n")
CState = thresholdState
# It creates the references for the ensemble population
cutoffs = int(
atom.getQuantumDefect(STATE, 0, 0.5)
- atom.getQuantumDefect(STATE, 0, 0.5)
)
cutoffp05 = int(
atom.getQuantumDefect(STATE, 0, 0.5)
- atom.getQuantumDefect(STATE, 1, 0.5)
)
cutoffp15 = int(
atom.getQuantumDefect(STATE, 0, 0.5)
- atom.getQuantumDefect(STATE, 1, 1.5)
)
cutoffd15 = int(
atom.getQuantumDefect(STATE, 0, 0.5)
- atom.getQuantumDefect(STATE, 2, 1.5)
)
cutoffd25 = int(
atom.getQuantumDefect(STATE, 0, 0.5)
- atom.getQuantumDefect(STATE, 2, 2.5)
)
cutofff25 = int(
atom.getQuantumDefect(STATE, 0, 0.5)
- atom.getQuantumDefect(STATE, 3, 2.5)
)
cutofff35 = int(
atom.getQuantumDefect(STATE, 0, 0.5)
- atom.getQuantumDefect(STATE, 3, 3.5)
)
# Total time of the dynamics
totaltime = period * 1e-6
# Parts of gammamax that you take for time step
partg = 2.0
#########################################################
# It takes into account of the extra levels
extraL = atom.extraLevels[1][:]
# It creates the references for the matrix
riftot = (STATE + WidthBBR - extraL[0] + 1) * 7
rifs = ((STATE + WidthBBR - extraL[0] + 1) * 0) - extraL[0]
rifp05 = ((STATE + WidthBBR - extraL[0] + 1) * 1) - extraL[0]
rifp15 = ((STATE + WidthBBR - extraL[0] + 1) * 2) - extraL[0]
rifd15 = ((STATE + WidthBBR - extraL[0] + 1) * 3) - extraL[0]
rifd25 = ((STATE + WidthBBR - extraL[0] + 1) * 4) - extraL[0]
riff25 = ((STATE + WidthBBR - extraL[0] + 1) * 5) - extraL[0]
riff35 = ((STATE + WidthBBR - extraL[0] + 1) * 6) - extraL[0]
# It creates the matrix of the rates
c = np.zeros(shape=(riftot, riftot))
print("Creating the rates matrix:")
for pqn in range(extraL[0], STATE + WidthBBR + 1):
for fpqn in range(extraL[0], STATE + WidthBBR + 1):
# rate from s
c[pqn + rifs, fpqn + rifp05] = atom.getTransitionRate(
pqn, 0, 0.5, fpqn, 1, 0.5, TEMP_BBR
) # rate s -> p0.5
c[pqn + rifs, fpqn + rifp15] = atom.getTransitionRate(
pqn, 0, 0.5, fpqn, 1, 1.5, TEMP_BBR
) # rate s -> p1.5
# rate from p0.5
c[pqn + rifp05, fpqn + rifs] = atom.getTransitionRate(
pqn, 1, 0.5, fpqn, 0, 0.5, TEMP_BBR
) # rate p0.5 -> s
c[pqn + rifp05, fpqn + rifd15] = atom.getTransitionRate(
pqn, 1, 0.5, fpqn, 2, 1.5, TEMP_BBR
) # rate p0.5 -> d1.5
# rate from p1.5
c[pqn + rifp15, fpqn + rifs] = atom.getTransitionRate(
pqn, 1, 1.5, fpqn, 0, 0.5, TEMP_BBR
) # rate p1.5 -> s
c[pqn + rifp15, fpqn + rifd15] = atom.getTransitionRate(
pqn, 1, 1.5, fpqn, 2, 1.5, TEMP_BBR
) # rate p1.5 -> d1.5
c[pqn + rifp15, fpqn + rifd25] = atom.getTransitionRate(
pqn, 1, 1.5, fpqn, 2, 2.5, TEMP_BBR
) # rate p1.5 -> d2.5
# rate from d1.5
c[pqn + rifd15, fpqn + rifp05] = atom.getTransitionRate(
pqn, 2, 1.5, fpqn, 1, 0.5, TEMP_BBR
) # rate d1.5 -> p0.5
c[pqn + rifd15, fpqn + rifp15] = atom.getTransitionRate(
pqn, 2, 1.5, fpqn, 1, 1.5, TEMP_BBR
) # rate d1.5 -> p1.5
c[pqn + rifd15, fpqn + riff25] = atom.getTransitionRate(
pqn, 2, 1.5, fpqn, 3, 2.5, TEMP_BBR
) # rate d1.5 -> f2.5
# rate from d2.5
c[pqn + rifd25, fpqn + rifp15] = atom.getTransitionRate(
pqn, 2, 2.5, fpqn, 1, 1.5, TEMP_BBR
) # rate d2.5 -> p1.5
c[pqn + rifd25, fpqn + riff25] = atom.getTransitionRate(
pqn, 2, 2.5, fpqn, 3, 2.5, TEMP_BBR
) # rate d2.5 -> f2.5
c[pqn + rifd25, fpqn + riff35] = atom.getTransitionRate(
pqn, 2, 2.5, fpqn, 3, 3.5, TEMP_BBR
) # rate d2.5 -> f3.5
# rate from f2.5
c[pqn + riff25, fpqn + rifd15] = atom.getTransitionRate(
pqn, 3, 2.5, fpqn, 2, 1.5, TEMP_BBR
) # rate f2.5 -> d1.5
c[pqn + riff25, fpqn + rifd25] = atom.getTransitionRate(
pqn, 3, 2.5, fpqn, 2, 2.5, TEMP_BBR
) # rate f2.5 -> d2.5
# rate from f3.5
c[pqn + riff35, fpqn + rifd25] = atom.getTransitionRate(
pqn, 3, 3.5, fpqn, 2, 2.5, TEMP_BBR
) # rate f3.5 -> d2.5
print(pqn, end=" ")
# It deletes all the gammas for states under the ground state which are not the extra levels
if extraL[1] > 2:
c[extraL[0] + rifd15, :] = 0
c[:, extraL[0] + rifd15] = 0
c[extraL[0] + rifd25, :] = 0
c[:, extraL[0] + rifd25] = 0
if extraL[1] > 3:
c[extraL[0] + riff25, :] = 0
c[:, extraL[0] + riff25] = 0
c[extraL[0] + riff35, :] = 0
c[:, extraL[0] + riff35] = 0
c[extraL[0] + rifs, :] = 0
c[:, extraL[0] + rifs] = 0
c[extraL[0] + rifp05, :] = 0
c[:, extraL[0] + rifp05] = 0
c[extraL[0] + rifp15, :] = 0
c[:, extraL[0] + rifp15] = 0
# It finds the maximum rate in the matrix
gammamax = c.max() # is from the 5P1.5 towards the 5S0.5
# It defines Dtmin
Dtmin = round(1 / (partg * gammamax), 9)
print("\n", Dtmin)
#########################################################
# It inizialites the population and the auxiliry population vectors
pop = np.zeros(shape=(1, riftot))
popaus = np.zeros(shape=(1, riftot))
# It inizializes the reference for the population vector
if L == 0:
rifinitial = rifs
if L == 1:
if J == 0.5:
rifinitial = rifp05
if J == 1.5:
rifinitial = rifp15
if L == 2:
if J == 1.5:
rifinitial = rifd15
if J == 2.5:
rifinitial = rifd25
if L == 3:
if J == 2.5:
rifinitial = riff25
if J == 3.5:
rifinitial = riff35
pop[0, (rifinitial + STATE)] = 1
#########################################################
# It inizializes the time and the time step
t = 0.0
Dt = 0.0
#########################################################
# References for the name of the .txt file
if L == 0:
StrL = "S"
elif L == 1:
StrL = "P"
elif L == 2:
StrL = "D"
elif L == 3:
StrL = "F"
if J == 0.5:
StrJ = "05"
elif J == 1.5:
StrJ = "15"
elif J == 2.5:
StrJ = "25"
elif J == 3.5:
StrJ = "35"
# It creates the file for the three curves
with open("Lifetime" + str(STATE) + StrL + StrJ + ".txt", "w") as fi:
fi.writelines("")
if detailedOutput:
# It creates the file for the all states
with open(
"Lifetime" + str(STATE) + StrL + StrJ + "All.txt", "w"
) as fiall:
fiall.writelines("")
#########################################################
# It creates four lists to quickly write the results to the file
ListTime = []
if thresholdState:
ListRed = []
ListBlue = []
ListGreen = []
# The core of the program starts
while t < (totaltime):
if detailedOutput:
ListStates = []
ListStates.append(t * 1e6)
for a in range(0, riftot):
popaus[0, a] = 0.0
for b in range(0, riftot):
popaus[0, a] += -c[a, b] * pop[0, a] + c[b, a] * pop[0, b]
popaus[0, a] = popaus[0, a] * Dt
pop += popaus
if t == 0:
Dt = Dtmin
if detailedOutput:
ListStates.extend(pop[0, :])
with open(
"Lifetime" + str(STATE) + StrL + StrJ + "All.txt", "a"
) as fall:
fall.writelines(
"%.5f \t" % (ListStates[ind])
for ind in range(0, len(ListStates))
)
fall.writelines("\n")
ListTime.append(t * 1e6)
if thresholdState:
popall = 0.0
for k in range(0, riftot):
if (
(CState + rifs - cutoffs <= k < rifp05 + extraL[0])
or (CState + rifp05 - cutoffp05 <= k < rifp15 + extraL[0])
or (CState + rifp15 - cutoffp15 <= k < rifd15 + extraL[0])
or (CState + rifd15 - cutoffd15 <= k < rifd25 + extraL[0])
or (CState + rifd25 - cutoffd25 <= k < riff25 + extraL[0])
or (CState + riff25 - cutofff25 <= k < riff35 + extraL[0])
or (CState + riff35 - cutofff35 <= k < riftot)
):
# above the threshold state
popall += pop[0, k]
ListRed.append(popall)
ListBlue.append(popall - pop[0, (rifinitial + STATE)])
ListGreen.append(pop[0, (rifinitial + STATE)])
sys.stdout.write("\rProgress: %d%%" % ((t / totaltime) * 100))
sys.stdout.flush()
t = t + Dt
if not thresholdState:
with open("Lifetime" + str(STATE) + StrL + StrJ + ".txt", "a") as f:
f.writelines(
"%.4f \t %.5f \n" % (ListTime[index], ListGreen[index])
for index in range(0, len(ListTime))
)
else:
with open("Lifetime" + str(STATE) + StrL + StrJ + ".txt", "a") as f:
f.writelines(
"%.4f \t %.5f \t %.5f \t %.5f \n"
% (
ListTime[index],
ListRed[index],
ListBlue[index],
ListGreen[index],
)
for index in range(0, len(ListTime))
)
#########################################################
if plotting == 1:
def f(xs, t, ps):
"""Lotka-Volterra predator-prey model."""
try:
gammaTarget = ps["gammaTarget"].value
except Exception:
gammaTarget = ps
x, y = xs
return [-gammaTarget * x, -gammaTarget * y]
def g(t, x0, ps):
"""
Solution to the ODE x'(t) = f(t,x,k) with initial condition x(0) = x0
"""
x = odeint(f, x0, t, args=(ps,))
return x
def residual(ps, ts, data):
x0 = ps["x0"].value, ps["y0"].value
model = g(ts, x0, ps)
return (model - data).ravel()
t = np.array(ListTime)
x0 = np.array([0, 0])
data = np.zeros(shape=(len(t), 2))
data[:, 0] = np.array(ListGreen)
data[:, 1] = np.array(ListGreen)
# set parameters incluing bounds
params = Parameters()
params.add("x0", value=1, vary=False)
params.add("y0", value=1, vary=False)
params.add("gammaTarget", value=0.01, min=0, max=1)
# fit model and find predicted values
result = minimize(residual, params, args=(t, data), method="leastsq")
final = data + result.residual.reshape(data.shape)
LifetimeTarget = 1.0 / (result.params["gammaTarget"].value)
# Grafico
fig, axes = plt.subplots(1, 1, figsize=(10, 6))
axes.plot(t, data[:, 0], "g*", label=r"Target")
axes.plot(t, final[:, 0], "k-", linewidth=2, label=r"Fit Target")
axes.set_ylim(0, max(ListGreen))
axes.set_xlim(0, max(ListTime))
axes.legend(loc=0, fontsize=12)
axes.set_ylabel("Number of Rydberg atoms", fontsize=12)
axes.set_xlabel(r"Time, $\mu s$", fontsize=12)
axes.grid()
plt.legend
plt.show()
# display fitted statistics
print("\n")
report_fit(result)
print("\n")
print("Lifetime Target: %.6f us" % (LifetimeTarget))
if plotting == 2:
# Make the plot of the three curves
fig, axes = plt.subplots(1, 1, figsize=(10, 6))
axes.plot(ListTime, ListRed, "r.", label=r"Ensemble")
axes.plot(ListTime, ListBlue, "b.", label=r"Other")
axes.plot(ListTime, ListGreen, "g.", label=r"Target")
axes.set_ylim(0, 1)
axes.set_xlim(0, ListTime[-1])
axes.legend(loc=0, fontsize=12)
axes.set_ylabel("Number of Rydberg atoms", fontsize=12)
axes.set_xlabel(r"Time [$\mu s$]", fontsize=12)
axes.grid()
plt.legend
plt.show()
if plotting == 3:
def f(xs, t, ps):
"""Lotka-Volterra predator-prey model."""
try:
gammaEnsemble = ps["gammaEnsemble"].value
gammaTarget = ps["gammaTarget"].value
except Exception:
gammaEnsemble, gammaTarget = ps
x, y = xs
return [-gammaEnsemble * x, -gammaTarget * y]
def g(t, x0, ps): # noqa: F821, F811
"""
Solution to the ODE x'(t) = f(t,x,k) with initial condition x(0) = x0
"""
x = odeint(f, x0, t, args=(ps,))
return x
def residual(ps, ts, data):
x0 = ps["x0"].value, ps["y0"].value
model = g(ts, x0, ps)
return (model - data).ravel()
t = np.array(ListTime)
x0 = np.array([0, 0]) # noqa: F841
dataAll = np.zeros(shape=(len(t), 3))
dataAll[:, 0] = np.array(ListRed)
dataAll[:, 1] = np.array(ListBlue)
dataAll[:, 2] = np.array(ListGreen)
data = np.zeros(shape=(len(t), 2))
data[:, 0] = dataAll[:, 0]
data[:, 1] = dataAll[:, 2]
# set parameters incluing bounds
params = Parameters()
params.add("x0", value=max(ListRed), vary=False)
params.add("y0", value=max(ListGreen), vary=False)
params.add("gammaEnsemble", value=0.005, min=0.0, max=1.0)
params.add("gammaTarget", value=0.01, min=0.0, max=1.0)
# fit model and find predicted values
result = minimize(residual, params, args=(t, data), method="leastsq")
final = data + result.residual.reshape(data.shape)
LifetimeEnsemble = 1.0 / (result.params["gammaEnsemble"].value)
LifetimeTarget = 1.0 / (result.params["gammaTarget"].value)
# Grafico
fig, axes = plt.subplots(1, 1, figsize=(10, 6))
axes.plot(t, dataAll[:, 0], "r*", label=r"Ensemble")
axes.plot(t, dataAll[:, 1], "b*", label=r"Support")
axes.plot(t, dataAll[:, 2], "g*", label=r"Target")
axes.plot(t, final[:, 0], "k-", linewidth=2, label=r"Fit Ensemble")
axes.plot(t, final[:, 1], "k-", linewidth=2, label=r"Fit Target")
axes.set_ylim(0, max(ListRed))
axes.set_xlim(0, max(ListTime))
axes.legend(loc=0, fontsize=12)
axes.set_ylabel("Number of Rydberg atoms", fontsize=12)
axes.set_xlabel(r"Time, $\mu s$", fontsize=12)
axes.grid()
plt.legend
plt.show()
# display fitted statistics
print("\n")
report_fit(result)
print("\n")
print(
"Lifetime Ensemble: %.6f us \nLifetime Target: %.6f us"
% (LifetimeEnsemble, LifetimeTarget)
)
if plotting == 4:
def f(xs, t, ps):
"""Lotka-Volterra predator-prey model."""
try:
gammaTargetSpont = ps["gammaTargetSpont"].value
gammaTargetBBR = ps["gammaTargetBBR"].value
gammaSupportSpont = ps["gammaSupportSpont"].value
gammaSupportBBR = ps["gammaSupportBBR"].value
except Exception:
(
gammaTargetSpont,
gammaTargetBBR,
gammaSupportSpont,
gammaSupportBBR,
) = ps
x, y, z = xs
return [
+gammaTargetSpont * z + gammaSupportSpont * y,
-gammaSupportSpont * y
- gammaSupportBBR * y
+ gammaTargetBBR * z,
-gammaTargetSpont * z
- gammaTargetBBR * z
+ gammaSupportBBR * y,
]
def g(t, x0, ps): # noqa: F821, F811
"""
Solution to the ODE x'(t) = f(t,x,k) with initial condition x(0) = x0
"""
x = odeint(f, x0, t, args=(ps,))
return x
def residual(ps, ts, data):
    """Flattened difference between the integrated model and the data.

    `ps` must be an lmfit.Parameters with x0/y0/z0 (initial
    populations) plus the rate parameters consumed by `f`.
    """
    initial_state = (ps["x0"].value, ps["y0"].value, ps["z0"].value)
    return (g(ts, initial_state, ps) - data).ravel()
ListRedAus = np.zeros(shape=(len(ListRed)))
for i in range(0, len(ListRed)):
ListRedAus[i] = max(ListRed) - ListRed[i]
t = np.array(ListTime)
data = np.zeros(shape=(len(t), 3))
data[:, 0] = np.array(ListRedAus)
data[:, 1] = np.array(ListBlue)
data[:, 2] = np.array(ListGreen)
# set parameters incluing bounds
params = Parameters()
params.add("x0", value=0, vary=False)
params.add("y0", value=0, vary=False)
params.add("z0", value=max(ListGreen), vary=False)
params.add("gammaTargetSpont", value=0.02, min=0.0, max=1.0)
params.add("gammaTargetBBR", value=0.02, min=0.0, max=1.0)
params.add("gammaSupportSpont", value=0.02, min=0.0, max=1.0)
params.add("gammaSupportBBR", value=0.001, min=0.0, max=1.0)
# fit model and find predicted values
result = minimize(residual, params, args=(t, data), method="leastsq")
final = data + result.residual.reshape(data.shape)
# Grafico
fig, axes = plt.subplots(1, 1, figsize=(10, 6))
axes.plot(t, data[:, 0], "m*", label=r"Ground")
axes.plot(t, ListRed, "r*", label=r"Ensemble")
axes.plot(t, data[:, 1], "b*", label=r"Support")
axes.plot(t, data[:, 2], "g*", label=r"Target")
axes.plot(t, final[:, 0], "k-", linewidth=2, label=r"Fit Ground")
axes.plot(t, final[:, 1], "k-", linewidth=2, label=r"Fit Support")
axes.plot(t, final[:, 2], "k-", linewidth=2, label=r"Fit Target")
axes.set_ylim(0, max(ListRed))
axes.set_xlim(0, max(ListTime))
axes.legend(loc=0, fontsize=12)
axes.set_ylabel("Number of Rydberg atoms", fontsize=12)
axes.set_xlabel(r"Time, $\mu s$", fontsize=12)
axes.grid()
plt.legend()
plt.show()
print("\n")
# display fitted statistics
report_fit(result)
# It returns the time elapsed
print("\nIt took", time.time() - start, "seconds.")
return | ARC-Alkali-Rydberg-Calculator | /ARC_Alkali_Rydberg_Calculator-3.3.0-cp311-cp311-win_amd64.whl/arc/advanced/population_lifetime.py | population_lifetime.py |
# ARCCSSive
ARCCSS Data Access Tools
[](https://readthedocs.org/projects/arccssive/?badge=latest)
[](https://travis-ci.org/coecms/ARCCSSive)
[](https://circleci.com/gh/coecms/ARCCSSive)
[](http://codecov.io/github/coecms/ARCCSSive?branch=master)
[](https://landscape.io/github/coecms/ARCCSSive/master)
[](https://codeclimate.com/github/coecms/ARCCSSive)
[](https://pypi.python.org/pypi/ARCCSSive)
[](https://anaconda.org/coecms/arccssive)
For full documentation please see http://arccssive.readthedocs.org/en/stable
Installing
==========
### Raijin
The stable version of ARCCSSive is available on Rajin in the `analysis27` Anaconda environment:
raijin $ module use /g/data3/hh5/public/modules
raijin $ module load conda/analysis27
and is also available as a module:
raijin $ module use ~access/modules
raijin $ module load pythonlib/ARCCSSive
### NCI Virtual Desktops
NCI's virtual desktops allow you to use ARCCSSive from a Jupyter notebook. For
details on how to use virtual desktops see http://vdi.nci.org.au/help
ARCCSSive can be accessed on VDI using the Anaconda environments:
vdi $ module use /g/data3/hh5/public/modules
vdi $ module load conda/analysis27
### Local Install
You can install ARCCSSive locally using either Anaconda or Pip. You will need
to copy the database file from Raijin
$ pip install ARCCSSive
# or
$ conda install -c coecms arccssive
$ scp raijin:/g/data1/ua6/unofficial-ESG-replica/tmp/tree/cmip5_raijin_latest.db $PWD/cmip5.db
$ export CMIP5_DB=sqlite:///$PWD/cmip5.db
### Development Version
To install the current development version with a test database:
$ pip install --user git+https://github.com/coecms/ARCCSSive.git
$ export CMIP5_DB=sqlite:///$HOME/cmip5.db
CMIP5
=====
Query and access the CMIP5 data from Raijin
```python
from ARCCSSive import CMIP5
cmip = CMIP5.DB.connect()
for output in cmip.outputs(model='ACCESS1-0'):
variable = output.variable
files = output.filenames()
```
Uses
[SQLAlchemy](http://docs.sqlalchemy.org/en/rel_1_0/orm/tutorial.html#querying)
to filter and sort the data files.
| ARCCSSive | /ARCCSSive-0.3.3-py3-none-any.whl/ARCCSSive-0.3.3.dist-info/DESCRIPTION.rst | DESCRIPTION.rst |
README for AREM 1.0.1, based on MACS 1.4.0rc2
Time-stamp: <2011-03-01 18:21:42 Jake Biesinger>
* Introduction
High-throughput sequencing coupled to chromatin immuno-
precipitation (ChIP-Seq) is widely used in characterizing genome-wide
binding patterns of transcription factors, cofactors, chromatin modifiers,
and other DNA binding proteins. A key step in ChIP-Seq data analysis
is to map short reads from high-throughput sequencing to a reference
genome and identify peak regions enriched with short reads. Although
several methods have been proposed for ChIP-Seq analysis, most ex-
isting methods only consider reads that can be uniquely placed in the
reference genome, and therefore have low power for detecting peaks lo-
cated within repeat sequences. Here we introduce a probabilistic ap-
proach for ChIP-Seq data analysis which utilizes all reads, providing a
truly genome-wide view of binding patterns. Reads are modeled using a
mixture model corresponding to K enriched regions and a null genomic
background. We use maximum likelihood to estimate the locations of the
enriched regions, and implement an expectation-maximization (E-M) al-
gorithm, called AREM, to update the alignment probabilities of each
read to different genomic locations.
For additional information, see our paper in RECOMB 2011 or visit our website:
http://cbcl.ics.uci.edu/AREM
AREM is based on the popular MACS peak caller, as described below:
With the improvement of sequencing techniques, chromatin
immunoprecipitation followed by high throughput sequencing (ChIP-Seq)
is getting popular to study genome-wide protein-DNA interactions. To
address the lack of powerful ChIP-Seq analysis method, we present a
novel algorithm, named Model-based Analysis of ChIP-Seq (MACS), for
identifying transcript factor binding sites. MACS captures the
influence of genome complexity to evaluate the significance of
enriched ChIP regions, and MACS improves the spatial resolution of
binding sites through combining the information of both sequencing tag
position and orientation. MACS can be easily used for ChIP-Seq data
alone, or with control sample with the increase of specificity.
The original MACS package is available at: http://liulab.dfci.harvard.edu/MACS/
* Install
Please check the file 'INSTALL' in the distribution.
* Usage
Usage: arem <-t tfile> [-n name] [-g genomesize] [options]
Example: arem -t ChIP.bam -c Control.bam -f BAM -g h -n test -w --call-subpeaks
arem -- Aligning Reads by Expectation-Maximization, based on Model-based Analysis for ChIP-Sequencing (MACS)
Options:
--version show program's version number and exit
-h, --help show this help message and exit.
-t TFILE, --treatment=TFILE
ChIP-seq treatment files. REQUIRED. When ELANDMULTIPET
is selected, you must provide two files separated by
comma, e.g.
s_1_1_eland_multi.txt,s_1_2_eland_multi.txt
-c CFILE, --control=CFILE
Control files. When ELANDMULTIPET is selected, you
must provide two files separated by comma, e.g.
s_2_1_eland_multi.txt,s_2_2_eland_multi.txt
-n NAME, --name=NAME Experiment name, which will be used to generate output
file names. DEFAULT: "NA"
-f FORMAT, --format=FORMAT
Format of tag file, "AUTO", "BED" or "ELAND" or
"ELANDMULTI" or "ELANDMULTIPET" or "ELANDEXPORT" or
"SAM" or "BAM" or "BOWTIE". The default AUTO option
will let MACS decide which format the file is. Please
check the definition in 00README file if you choose EL
AND/ELANDMULTI/ELANDMULTIPET/ELANDEXPORT/SAM/BAM/BOWTI
E. DEFAULT: "AUTO"
--petdist=PETDIST Best distance between Pair-End Tags. Only available
when format is 'ELANDMULTIPET'. DEFAULT: 200
-g GSIZE, --gsize=GSIZE
Effective genome size. It can be 1.0e+9 or 1000000000,
or shortcuts:'hs' for human (2.7e9), 'mm' for mouse
(1.87e9), 'ce' for C. elegans (9e7) and 'dm' for
fruitfly (1.2e8), Default:hs
-s TSIZE, --tsize=TSIZE
Tag size. This will overide the auto detected tag
size. DEFAULT: 25
--bw=BW Band width. This value is only used while building the
shifting model. DEFAULT: 300
-p PVALUE, --pvalue=PVALUE
Pvalue cutoff for peak detection. DEFAULT: 1e-5
-m MFOLD, --mfold=MFOLD
Select the regions within MFOLD range of high-
confidence enrichment ratio against background to
build model. The regions must be lower than upper
limit, and higher than the lower limit. DEFAULT:10,30
--nolambda If True, MACS will use fixed background lambda as
local lambda for every peak region. Normally, MACS
calculates a dynamic local lambda to reflect the local
bias due to potential chromatin structure.
--slocal=SMALLLOCAL The small nearby region in basepairs to calculate
dynamic lambda. This is used to capture the bias near
the peak summit region. Invalid if there is no control
data. DEFAULT: 1000
--llocal=LARGELOCAL The large nearby region in basepairs to calculate
dynamic lambda. This is used to capture the surround
bias. DEFAULT: 10000.
--off-auto Whether turn off the auto pair model process. If not
set, when MACS failed to build paired model, it will
use the nomodel settings, the '--shiftsize' parameter
to shift and extend each tags. DEFAULT: False
--nomodel Whether or not to build the shifting model. If True,
MACS will not build model. by default it means
shifting size = 100, try to set shiftsize to change
it. DEFAULT: False
--shiftsize=SHIFTSIZE
The arbitrary shift size in bp. When nomodel is true,
MACS will use this value as 1/2 of fragment size.
DEFAULT: 100
--keep-dup=KEEPDUPLICATES
It controls the MACS behavior towards duplicate tags
at the exact same location -- the same coordination
and the same strand. The default 'auto' option makes
MACS calculate the maximum tags at the exact same
location based on binomal distribution using 1e-5 as
pvalue cutoff; and the 'all' option keeps every tags.
If an integer is given, at most this number of tags
will be kept at the same location. Default: auto
--to-small When set, scale the larger dataset down to the smaller
dataset, by default, the smaller dataset will be
scaled towards the larger dataset. DEFAULT: False
-w, --wig Whether or not to save extended fragment pileup at
every WIGEXTEND bps into a wiggle file. When --single-
profile is on, only one file for the whole genome is
saved. WARNING: this process is time/space consuming!!
-B, --bdg Whether or not to save extended fragment pileup at
every bp into a bedGraph file. When it's on, -w,
--space and --call-subpeaks will be ignored. When
--single-profile is on, only one file for the whole
genome is saved. WARNING: this process is time/space
consuming!!
-S, --single-profile When set, a single wiggle file will be saved for
treatment and input. Default: False
--space=SPACE The resoluation for saving wiggle files, by default,
MACS will save the raw tag count every 10 bps. Usable
only with '--wig' option.
--call-subpeaks If set, MACS will invoke Mali Salmon's PeakSplitter
soft through system call. If PeakSplitter can't be
found, an instruction will be shown for downloading
and installing the PeakSplitter package. -w option
needs to be on and -B should be off to let it work.
DEFAULT: False
--verbose=VERBOSE Set verbose level. 0: only show critical message, 1:
show additional warning message, 2: show process
information, 3: show debug messages. DEFAULT:2
--diag Whether or not to produce a diagnosis report. It's up
to 9X time consuming. Please check 00README file for
detail. DEFAULT: False
--fe-min=FEMIN For diagnostics, min fold enrichment to consider.
DEFAULT: 0
--fe-max=FEMAX For diagnostics, max fold enrichment to consider.
DEFAULT: maximum fold enrichment
--fe-step=FESTEP For diagnostics, fold enrichment step. DEFAULT: 20
--no-EM Do NOT iteratively align multi-reads by E-M. Multi-
read probabilities will be based on quality scores or
uniform (if --no-quals) DEFAULT : FALSE
--EM-converge-diff=MIN_CHANGE
The minimum entropy change between iterations before
halting E-M steps. DEFAULT : 1e-05
--EM-min-score=MIN_SCORE
Minimum enrichment score. Windows below this threshold
will all look the same to the aligner. DEFAULT : 1.5
--EM-max-score=MAX_SCORE
Maximum enrichment score. Windows above this threshold
will all look the same to the aligner, DEFAULT : No
Maximum
--EM-show-graphs generate diagnostic graphs for E-M. (requires
MATPLOTLIB). DEFAULT : FALSE
--quality-scale=QUAL_SCALE
Initial alignment probabilities are determined by read
quality and mismatches. Each possible alignment is
assigned a probability from the product over all bases
of either 1-p(ReadError_base) when there is no
mismatch, or p(ReadError_base) when the called base
disagrees with the reference. You may also select a
uniform initialization. Read quality scale is the must
be one of ['auto', 'sanger+33', 'illumina+64'].
DEFAULT : auto
--random-multi Convert all multi reads to unique reads by selecting
one alignment at random for each read. DEFAULT : False
--no-multi Throw away all reads that have more than one alignment
--no-greedy-caller Use AREM default peak caller instead of the greedy
caller. This normally results in wider, less enriched
peaks, especially with multi-reads. DEFAULT : False
--no-map-quals Do not use mapping probabilities as priors in each
update step; just use relative enrichment. DEFAULT :
False
--prior-snp=PRIOR_PROB_SNP
Prior probability that a SNP occurs at any base in the
genome. DEFAULT : 0.001
--write-read-probs Write out all final reads, including their alignment
probabilities as a BED file. DEFAULT : FALSE
** Parameters:
*** -t/--treatment FILENAME
This is the only REQUIRED parameter for MACS. If the format is
ELANDMULTIPET, user must provide two treatment files separated by
comma, e.g. s_1_1_eland_multi.txt,s_1_2_eland_multi.txt.
*** -c/--control
The control or mock data file in either BED format or any ELAND output
format specified by --format option. Please follow the same direction
as for -t/--treatment.
*** -n/--name
The name string of the experiment. MACS will use this string NAME to
create output files like 'NAME_peaks.xls', 'NAME_negative_peaks.xls',
'NAME_peaks.bed' , 'NAME_summits.bed', 'NAME_model.r' and so on. So
please avoid any confliction between these filenames and your existing
files.
*** -f/--format FORMAT
Format of tag file, can be "ELAND", "BED", "ELANDMULTI",
"ELANDEXPORT", "ELANDMULTIPET" (for pair-end tags), "SAM", "BAM" or
"BOWTIE". Default is "AUTO" which will allow MACS to decide the format
automatically. Please use "AUTO" only when you combine different
formats of files.
The BED format is defined in "http://genome.ucsc.edu/FAQ/FAQformat#format1".
If the format is ELAND, the file must be ELAND result output file,
each line MUST represents only ONE tag, with fields of:
1. Sequence name (derived from file name and line number if format is not Fasta)
2. Sequence
3. Type of match:
NM - no match found.
QC - no matching done: QC failure (too many Ns basically).
RM - no matching done: repeat masked (may be seen if repeatFile.txt was specified).
U0 - Best match found was a unique exact match.
U1 - Best match found was a unique 1-error match.
U2 - Best match found was a unique 2-error match.
R0 - Multiple exact matches found.
R1 - Multiple 1-error matches found, no exact matches.
R2 - Multiple 2-error matches found, no exact or 1-error matches.
4. Number of exact matches found.
5. Number of 1-error matches found.
6. Number of 2-error matches found.
Rest of fields are only seen if a unique best match was found (i.e. the match code in field 3 begins with "U").
7. Genome file in which match was found.
8. Position of match (bases in file are numbered starting at 1).
9. Direction of match (F=forward strand, R=reverse).
10. How N characters in read were interpreted: ("."=not applicable, "D"=deletion, "I"=insertion).
Rest of fields are only seen in the case of a unique inexact match (i.e. the match code was U1 or U2).
11. Position and type of first substitution error (e.g. 12A: base 12 was A, not whatever is was in read).
12. Position and type of first substitution error, as above.
If the format is ELANDMULTI, the file must be ELAND output file from
multiple-match mode, each line MUST represents only ONE tag, with
fields of:
1. Sequence name
2. Sequence
3. Either NM, QC, RM (as described above) or the following:
4. x:y:z where x, y, and z are the number of exact, single-error, and 2-error matches found
5. Blank, if no matches found or if too many matches found, or the following:
BAC_plus_vector.fa:163022R1,170128F2,E_coli.fa:3909847R1 This says
there are two matches to BAC_plus_vector.fa: one in the reverse
direction starting at position 160322 with one error, one in the
forward direction starting at position 170128 with two
errors. There is also a single-error match to E_coli.fa.
If the data is from Pair-End sequencing. You can sepecify the format
as ELANDMULTIPET ( stands for ELAND Multiple-match Pair-End Tags),
then the --treat (and --control if needed) parameter must be two file
names separated by comma. Each file must be in ELAND multiple-match
format described above. e.g.
macs14 --format ELANDMULTIPET -t s_1_1_eland_multi.txt,s_2_1_eland_multi.txt ...
If you use ELANDMULTIPET, you may need to modify --petdist parameter.
If the format is BAM/SAM, please check the definition in
(http://samtools.sourceforge.net/samtools.shtml). Pair-end mapping
results can be saved in a single BAM file, if so, MACS will
automatically keep the left mate(5' end) tag.
If the format is BOWTIE, you need to provide the ASCII bowtie output
file with the suffix '.map'. Please note that, you need to make sure
that in the bowtie output, you only keep one location for one
read. Check the bowtie manual for detail if you want at
(http://bowtie-bio.sourceforge.net/manual.shtml)
Here is the definition for Bowtie output in ASCII characters I copied
from the above webpage:
1. Name of read that aligned
2. Orientation of read in the alignment, - for reverse complement,
+ otherwise
3. Name of reference sequence where alignment occurs, or ordinal ID
if no name was provided
4. 0-based offset into the forward reference strand where leftmost
character of the alignment occurs
5. Read sequence (reverse-complemented if orientation is -)
6. ASCII-encoded read qualities (reversed if orientation is -). The
encoded quality values are on the Phred scale and the encoding is
ASCII-offset by 33 (ASCII char !).
7. Number of other instances where the same read aligns against the
same reference characters as were aligned against in this
alignment. This is not the number of other places the read aligns
with the same number of mismatches. The number in this column is
generally not a good proxy for that number (e.g., the number in
this column may be '0' while the number of other alignments with
the same number of mismatches might be large). This column was
previously described as "Reserved".
8. Comma-separated list of mismatch descriptors. If there are no
mismatches in the alignment, this field is empty. A single
descriptor has the format offset:reference-base>read-base. The
offset is expressed as a 0-based offset from the high-quality (5')
end of the read.
Notes:
1) For BED format, the 6th column of strand information is required by
MACS. And please pay attention that the coordinates in BED format is
zero-based and half-open
(http://genome.ucsc.edu/FAQ/FAQtracks#tracks1).
2) For plain ELAND format, only matches with match type U0, U1 or U2 is
accepted by MACS, i.e. only the unique match for a sequence with less
than 3 errors is involed in calculation. If multiple hits of a single
tag are included in your raw ELAND file, please remove the redundancy
to keep the best hit for that sequencing tag.
3) For the experiment with several replicates, it is recommended to
concatenate several ChIP-seq treatment files into a single file. To do
this, under Unix/Mac or Cygwin (for windows OS), type:
$ cat replicate1.bed replicate2.bed replicate3.bed > all_replicates.bed
4) ELAND export format support sometimes may not work on your
datasets, because people may mislabel the 11th and 12th column. MACS
uses 11th column as the sequence name which should be the chromosome
names.
** --petdist=PETDIST
Best distance between Pair-End Tags. Only available when format is
'ELANDMULTIPE'. Default is 200bps. When MACS reads mapped positions
for 5' tag and 3' tag, it will decide the best pairing for them using
this best distance parameter. A simple scoring system is used as following,
score = abs(abs(p5-p3)-200)+e5+e5
Where p5 is one of the position of 5' tag, and e5 is the
mismatch/error for this mapped position of 5' tag. p3 and e3 are for
3' tag. Then the lowest scored paring is regarded as the best
pairing. The 5' tag position of the pair is kept in model building and
peak calling.
*** -g/--gsize
PLEASE assign this parameter to fit your needs!
It's the mappable genome size or effective genome size which is
defined as the genome size which can be sequenced. Because of the
repetitive features on the chromsomes, the actual mappable genome size
will be smaller than the original size, about 90% or 70% of the genome
size. The default hs -- 2.7e9 is recommended for UCSC human hg18
assembly. Here are all precompiled parameters for effective genome size:
-g hs = -g 2.7e9
-g mm = -g 1.87e9
-g ce = -g 9e7
-g dm = -g 1.2e8
*** -s/--tsize
The size of sequencing tags. If you don't specify it, MACS will try to
use the first 10 sequences from your input treatment file to determine
the tag size. Specifying it will override the automatic determined tag
size.
*** --bw
The band width which is used to scan the genome for model
building. You can set this parameter as the sonication fragment size
expected from wet experiment. The previous side effect on the peak
detection process has been removed. So this parameter only affects the
model building.
*** -p/--pvalue
The pvalue cutoff. Default is 1e-5.
*** -m/--mfold
This parameter is used to select the regions within MFOLD range of
high-confidence enrichment ratio against background to build
model. The regions must be lower than upper limit, and higher than the
lower limit of fold enrichment. DEFAULT:10,30 means using all regions
not too low (>10) and not too high (<30) to build paired-peaks
model. If MACS can not find more than 100 regions to build model, it
will use the --shiftsize parameter to continue the peak detection.
Check related *--off-auto* and *--shiftsize* for detail.
** --nolambda
With this flag on, MACS will use the background lambda as local
lambda. This means MACS will not consider the local bias at peak
candidate regions.
** --slocal, --llocal
These two parameters control which two levels of regions will be
checked around the peak regions to calculate the maximum lambda as
local lambda. By default, MACS considers 1000bp for small local
region(--slocal), and 10000bps for large local region(--llocal)
which captures the bias from a long range effect like an open
chromatin domain. You can tweak these according to your
project. Remember that if the region is set too small, a sharp spike
in the input data may kill the significant peak.
** --off-auto
Whether turn off the auto paired-peak model process. If not set, when
MACS failed to build paired model, it will use the nomodel settings,
the '--shiftsize' parameter to shift and extend each tags. If set,
MACS will be terminated if paried-peak model is failed.
** --nomodel
While on, MACS will bypass building the shifting model.
** --shiftsize
While '--nomodel' is set, MACS uses this parameter to shift tags to
their midpoint. For example, if the size of binding region for your
transcription factor is 200 bp, and you want to bypass the model
building by MACS, this parameter can be set as 100. This option is
only valid when --nomodel is set or when MACS fails to build
paired-peak model.
** --keep-dup
It controls the MACS behavior towards duplicate tags at the exact same
location -- the same coordination and the same strand. The default
'auto' option makes MACS calculate the maximum tags at the exact same
location based on binomal distribution using 1e-5 as pvalue cutoff;
and the 'all' option keeps every tags. If an integer is given, at
most this number of tags will be kept at the same location. Default:
auto
** --to-small
When set scale the larger dataset down to the smaller dataset, by
default, the smaller dataset will be scaled towards the larger
dataset.
** -w/--wig
If this flag is on, MACS will store the fragment pileup in wiggle
format for every chromosome. The gzipped wiggle files will be stored
in subdirectories named NAME+'_MACS_wiggle/treat' for treatment data
and NAME+'_MACS_wiggle/control' for control data. --single-profile
option can be combined to generate a single wig file for the whole
genome.
** -B/--bdg
If this flag is on, MACS will store the fragment pileup in bedGraph
format for every chromosome. The bedGraph file is in general much
smaller than wiggle file. However, The process will take a little bit
longer than -w option, since theoratically 1bp resolution data will be
saved. The bedGraph files will be gzipped and stored in subdirectories
named NAME+'_MACS_bedGraph/treat' for treatment and
NAME+'_MACS_bedGraph/control' for control data. --single-profile
option can be combined to generate a single bedGraph file for the
whole genome.
** -S/--single-profile (formerly --single-wig)
If this flag is on, MACS will store the fragment pileup in wiggle or
bedGraph format for the whole genome instead of for every
chromosomes. The gzipped wiggle files will be stored in subdirectories
named EXPERIMENT_NAME+'_MACS_wiggle'+'_MACS_wiggle/treat/'
+EXPERIMENT_NAME+'treat_afterfiting_all.wig.gz' or
'treat_afterfiting_all.bdg.gz' for treatment data, and
EXPERIMENT_NAME+'_MACS_wiggle'+'_MACS_wiggle/control/'
+EXPERIMENT_NAME+'control_afterfiting_all.wig.gz' or
'control_afterfiting_all.bdg.gz' for control data.
** --space=SPACE
By default, the resoluation for saving wiggle files is 10 bps,i.e.,
MACS will save the raw tag count every 10 bps. You can change it along
with '--wig' option.
Note this option doesn't work if -B/--bdg is on.
** --call-subpeaks
If set, MACS will invoke Mali Salmon's PeakSplitter software through
system call. If PeakSplitter can't be found, an instruction will be
shown for downloading and installing the PeakSplitter package. The
PeakSplitter can refine the MACS peaks and split the wide peaks into
smaller subpeaks. For more information, please check the following URL:
http://www.ebi.ac.uk/bertone/software/PeakSplitter_Cpp_usage.txt
Note this option doesn't work if -B/--bdg is on.
*** --verbose
If you don't want to see any message during the running of MACS, set
it to 0. But the CRITICAL messages will never be hidden. If you want
to see rich information like how many peaks are called for every
chromosome, you can set it to 3 or larger than 3.
** --diag
A diagnosis report can be generated through this option. This report
can help you get an assumption about the sequencing saturation. This
funtion is only in beta stage.
** --fe-min, --fe-max & --fe-step
For diagnostics, FEMIN and FEMAX are the minimum and maximum fold
enrichment to consider, and FESTEP is the interval of fold
enrichment. For example, "--fe-min 0 --fe-max 40 --fe-step 10" will
let MACS choose the following fold enrichment ranges to consider:
[0,10), [10,20), [20,30) and [30,40).
* Output files
1. NAME_peaks.xls is a tabular file which contains information about
called peaks. You can open it in excel and sort/filter using excel
functions. Information include: chromosome name, start position of
peak, end position of peak, length of peak region, peak summit
position related to the start position of peak region, number of tags
in peak region, -10*log10(pvalue) for the peak region (e.g. pvalue
=1e-10, then this value should be 100), fold enrichment for this
region against random Poisson distribution with local lambda, FDR in
percentage. Coordinates in XLS is 1-based which is different with BED
format.
2. NAME_peaks.bed is BED format file which contains the peak
locations. You can load it to UCSC genome browser or Affymetrix IGB
software.
3. NAME_summits.bed is in BED format, which contains the peak summits
locations for every peaks. The 5th column in this file is the summit
height of fragment pileup. If you want to find the motifs at the
binding sites, this file is recommended.
4. NAME_negative_peaks.xls is a tabular file which contains
information about negative peaks. Negative peaks are called by
swapping the ChIP-seq and control channel.
5. NAME_model.r is an R script which you can use to produce a PDF
image about the model based on your data. Load it to R by:
$ R --vanilla < NAME_model.r
Then a pdf file NAME_model.pdf will be generated in your current
directory. Note, R is required to draw this figure.
6. NAME_treat/control_afterfiting.wig.gz files in NAME_MACS_wiggle
directory are wiggle format files which can be imported to UCSC
genome browser/GMOD/Affy IGB. The .bdg.gz files are in bedGraph
format which can also be imported to UCSC genome browser or be
converted into even smaller bigWig files.
7. NAME_diag.xls is the diagnosis report. First column is for various
fold_enrichment ranges; the second column is number of peaks for that fc
range; after 3rd columns are the percentage of peaks covered after
sampling 90%, 80%, 70% ... and 20% of the total tags.
8. NAME_peaks.subpeaks.bed is a text file which IS NOT in BED
format. This file is generated by PeakSplitter
(<http://www.ebi.ac.uk/bertone/software/PeakSplitter_Cpp_usage.txt>)
when --call-subpeaks option is set.
* Other useful links
Cistrome web server for ChIP-chip/seq analysis: http://cistrome.org/ap/
bedTools -- a super useful toolkits for genome annotation files: http://code.google.com/p/bedtools/
UCSC toolkits: http://hgdownload.cse.ucsc.edu/admin/exe/ | AREM | /AREM-1.0.1.tar.gz/AREM-1.0.1/README | README |
from pydoc import plain
import sys
import math
from os.path import exists
testKey = "PeSgVkYp3s6v9y$B&E)H@McQfTjWmZq4t7w!z%C*F-JaNdRgUkXp2r5u8x/A?D(G+KbPeShVmYq3t6v9y$B&E)H@McQfTjWnZr4u7x!z%C*F-JaNdRgUkXp2s5v8y/B?D(G+KbPeShVmYq3t6w9z$C&F)H@McQfTjWnZr4u7x!A%D*G-KaNdRgUkXp2s5v8y/B?E(H+MbQeThVmYq3t6w9z$C&F)J@NcRfUjXnZr4u7x!A%D*G-KaPdSgVkYp3s5v8y/B?E(H+MbQeThWmZq4t7w9z$C&F)J@NcRfUjXn2r5u8x/A%D*G-KaPdSgVkYp3s6v9y$B&E(H+MbQeThWmZq4t7w!z%C*F-J@NcRfUjXn2r5u8x/A?D(G+KbPdSgVkYp3s6v9y$B&E)H@McQfThWmZq4t7!z%C*F-JaNdRgUkXn2r5u8x/A?D(G+KbPeShVmYq3s6v9y$B&E)H@McQfTjWnZr4u7w!z%C*F-JaNdRgUkXp2s5v8y/A?D(G+Kb"
def encrypt(message, key):
# Layer 1: Exponential Encoding
messageArray = []
for i in message:
messageArray.append(i)
keyArray = []
for i in key:
keyArray.append(i)
encryptedPreBinary = []
for i in range(len(message)):
encryptedPreBinary.append(math.floor(math.exp(ord(messageArray[i]))))
keyEncrypted = []
for i in range(len(keyArray)):
keyEncrypted.append(ord(keyArray[i]))
# Layer 2: XOR Encryption
binaryKey = ""
for i in range(len(keyEncrypted)):
binaryKey += f'{keyEncrypted[i]:08b}'
binaryMessage = ""
for i in range(len(encryptedPreBinary)):
binaryMessage += f'{encryptedPreBinary[i]:0190b}'
xor = ""
for i in range(len(binaryMessage)):
if binaryMessage[i] == binaryKey[i]:
xor += "0"
else:
xor += "1"
# Layer 3: Random Character Substitution
# split xor into 8 bit chunks
xorArray = []
for i in range(0, len(xor), 8):
xorArray.append(xor[i:i+8])
# convert each 8 bit chunk to decimal
xordecimal = []
for i in range(len(xorArray)):
# if (int(xorArray[i], 2) >= 100) or (int(xorArray[i], 2) < 20) or (chr(int(xorArray[i], 2)) == '0') or (chr(int(xorArray[i], 2)) == '1'):
if (chr(int(xorArray[i], 2)) == '0') or (chr(int(xorArray[i], 2)) == '1'):
xordecimal.append(xorArray[i])
elif (int(xorArray[i], 2)) < 32 or (int(xorArray[i], 2)) > 126:
xordecimal.append(xorArray[i])
else:
xordecimal.append(chr(int(xorArray[i], 2)))
# join xordecimal
xordecimal = "".join(str(x) for x in xordecimal)
return xordecimal
def decrypt(encrypted, key):
    """Reverse encrypt(): recover the plaintext from *encrypted* and *key*.

    Layer 1 turns the mixed character/bit-chunk stream back into bits,
    layer 2 XORs it against the key's bit stream, and layer 3 undoes the
    exponential encoding in 190-bit blocks via int(log(value)).
    """
    # Layer 1: undo the character substitution, rebuilding the XOR stream.
    # Literal '0'/'1' characters mark an 8-bit raw chunk; anything else is
    # a substituted character whose code supplies 8 bits.
    pieces = []
    pos = 0
    while pos < len(encrypted):
        if encrypted[pos] in ('0', '1'):
            pieces.append(encrypted[pos:pos + 8])
            pos += 8
        else:
            pieces.append(f'{ord(encrypted[pos]):08b}')
            pos += 1
    rcs = "".join(pieces)

    # Layer 2: XOR against the key bit stream (IndexError if key too short,
    # matching the original behavior).
    key_bits = "".join(f'{ord(ch):08b}' for ch in key)
    xor_bits = "".join(
        "0" if rcs[i] == key_bits[i] else "1" for i in range(len(rcs))
    )

    # Layer 3: exponential decoding, one character per 190-bit block.
    chars = []
    for i in range(0, len(xor_bits), 190):
        value = int(xor_bits[i:i + 190], 2)
        code = int(math.log(value))
        # log truncation maps the space character (32) down to 31.
        chars.append(' ' if code == 31 else chr(code))
    return "".join(chars)
def generateKeyPair(name, passphrase, testKey):
    """Derive a (public, private) key pair from *name* and *passphrase*.

    The passphrase is repeatedly doubled until it is at least 20x the name
    length, and testKey until it is at least 32x the passphrase length.
    The public key encrypts the name under the scheduled passphrase; the
    private key encrypts the passphrase under testKey.
    """
    while len(passphrase) < len(name) * 20:
        passphrase += passphrase
    while len(testKey) < len(passphrase) * 32:
        testKey += testKey
    public_key = encrypt(name, keySchedule(passphrase))
    private_key = encrypt(passphrase, testKey)
    return public_key, private_key
def pubKeyEncrypt(plaintext, pubKey):
    """Encrypt *plaintext* under *pubKey*; thin wrapper around encrypt()."""
    return encrypt(plaintext, pubKey)
def privKeyDecrypt(encrypted, privKey, name, testKey):
    """Interactively verify the user's passphrase and decrypt *encrypted*.

    Prompts for the passphrase on stdin, re-derives the private key the
    same way generateKeyPair() does, and only decrypts when it matches
    *privKey*.  Returns the plaintext on success; implicitly returns None
    when the check fails (the CLI treats None as "invalid private key").
    """
    passphrase = input("What is your password?")
    # Stretch passphrase/testKey exactly as generateKeyPair() does so the
    # re-derived private key is comparable to the stored one.
    while (len(passphrase) < len(name) * 20):
        passphrase += passphrase
    while (len(testKey) < len(passphrase) * 32):
        testKey += testKey
    expectedPrivKey = encrypt(passphrase, testKey)
    if (privKey == expectedPrivKey):
        # Rebuild the public key from the passphrase and use it to decrypt.
        passphrase2 = keySchedule(passphrase)
        pubKey = encrypt(name, passphrase2)
        return (decrypt(encrypted, pubKey))
def keySchedule(key):
    """Expand *key* into a concatenated bit-string of chained sub-keys.

    Each character is converted to its 8-bit binary form, then two XOR
    chaining passes are applied: block i is XORed with the previously
    chained block, with position 0 seeded from index 15 of the prior
    round.  The result is the concatenation of all 8-bit blocks.

    Note: requires len(key) >= 16, because position 0 of each pass reads
    index 15 (IndexError otherwise, same as the original behavior).
    """
    def _xor(a, b):
        # Bitwise XOR of two equal-length bit strings.
        return "".join("1" if x != y else "0" for x, y in zip(a, b))

    keyBinary = [f'{ord(ch):08b}' for ch in key]

    # First chaining pass: build the sub-key list.
    subKeys = []
    for i, block in enumerate(keyBinary):
        prev = keyBinary[15] if i == 0 else subKeys[i - 1]
        subKeys.append(_xor(block, prev))

    # Second chaining pass: rewrite the sub-keys in place.  Position 0 is
    # seeded with the (still first-round) value at index 15.
    for i in range(len(subKeys)):
        prev = subKeys[15] if i == 0 else subKeys[i - 1]
        subKeys[i] = _xor(subKeys[i], prev)

    return "".join(subKeys)
def _write_file(path, content):
    """Write *content* to *path*, creating the file if it does not exist."""
    # "x" (exclusive create) when the file is absent mirrors the original
    # exists()-guarded branching; "w" truncates an existing file.
    mode = "w" if exists(path) else "x"
    with open(path, mode) as handle:
        handle.write(content)


def _read_file(path, description):
    """Return the text contents of *path*, or exit if it is missing."""
    if not exists(path):
        print(f'{description} does not exist')
        sys.exit()
    with open(path, "r") as handle:
        return handle.read()


def _main():
    """Dispatch the CLI sub-commands: -gen, -pubEnc, -privDec, -help/-h."""
    if sys.argv[1] == '-gen':
        if len(sys.argv) == 6:
            keys = generateKeyPair(sys.argv[2], sys.argv[3], testKey)
            _write_file(sys.argv[4], keys[0])
            _write_file(sys.argv[5], keys[1])
        else:
            print('Too many or too few arguments')
    elif sys.argv[1] == '-pubEnc':
        if len(sys.argv) == 5:
            plaintext = _read_file(sys.argv[2], 'Plaintext file')
            pubKey = _read_file(sys.argv[3], 'Public key file')
            encrypted = pubKeyEncrypt(plaintext, pubKey)
            _write_file(sys.argv[4], encrypted)
        else:
            print('Too many or too few arguments')
    elif sys.argv[1] == '-privDec':
        if len(sys.argv) == 6:
            encrypted = _read_file(sys.argv[2], 'Encrypted file')
            privKey = _read_file(sys.argv[3], 'Private key file')
            decrypted = privKeyDecrypt(encrypted, privKey, sys.argv[4], testKey)
            # privKeyDecrypt returns None when the passphrase check fails.
            if decrypted is None:
                print('Invalid private key')
            else:
                _write_file(sys.argv[5], decrypted)
        else:
            print('Too many or too few arguments')
    elif (sys.argv[1] == '-help') or (sys.argv[1] == '-h'):
        print('-gen [name] [passphrase] [save pubKey file path] [save privKey file path]: Generates a public and private key pair')
        print('-pubEnc [plaintext file path] [pubKey file path] [save encrypted file path]: Encrypts a plaintext with a public key')
        print('-privDec [encrypted file path] [privKey file path] [name] [save decrypted file path]: Decrypts an encrypted message with a private key')
        print('-help: Prints this message')
    else:
        print('Invalid command')


# Guard so the module can be imported without side effects; behavior when
# run as a script is unchanged.
if __name__ == "__main__":
    _main()
# ARGs_OAP
Python wrapper of [ARGs_OAP](https://github.com/biofuture/Ublastx_stageone)
+ linux only
+ no modification of the source code and binaries, pure wrapper
## install
+ if both python2 and python3 are installed, please explicitly specify python3
```
git clone https://github.com/xinehc/ARGs_OAP
cd ARGs_OAP
python setup.py install
```
## example
```
# git clone https://github.com/xinehc/ARGs_OAP.git
ARGs_OAP stage_one -i ARGs_OAP/example/inputfqs -m ARGs_OAP/example/meta-data.txt -o ARGs_OAP/example/output -f 'fa' -n 8
ARGs_OAP stage_two -i ARGs_OAP/example/output/extracted.fa -m ARGs_OAP/example/output/meta_data_online.txt -o ARGs_OAP/example/output -n 8
```
| ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/README.md | README.md |
import sys
from argparse import ArgumentParser
import subprocess
from .stage_one import stage_one
from .stage_two import stage_two
def parse_options(argv):
    """Build the ARGs_OAP command-line parser and parse *argv*.

    Two sub-commands are defined, ``stage_one`` and ``stage_two``; the
    matching handler is attached to the parsed namespace as ``func`` via
    ``set_defaults``.  When no mode (or an unknown mode) is supplied, the
    relevant help text is printed and the process exits with status 1.
    """
    parser = ArgumentParser(prog='ARGs_OAP')
    subparsers = parser.add_subparsers(help='Running modes', metavar='{stage_one, stage_two}')

    ## stage_one parameters
    parser_stage_one = subparsers.add_parser('stage_one', help='run stage_one')
    mandatory_one = parser_stage_one.add_argument_group('MANDATORY')
    mandatory_one.add_argument('-i','--i', help='input files directory', metavar='DIR', required=True)
    mandatory_one.add_argument('-o','--o', help='output files directory', metavar='DIR', required=True)
    mandatory_one.add_argument('-m','--m', help='meta data file', metavar='FILE', required=True)

    optional_one = parser_stage_one.add_argument_group('OPTIONAL')
    optional_one.add_argument('-n', '--n', help='number of threads, default 1', default=1)
    optional_one.add_argument('-f', '--f', help='the format of processed files, default fq', default='fq')
    optional_one.add_argument('-q', '--q', help='quality control of fastq sequences default not take effect, set to 1, then will do QC with fastp', default=0)
    optional_one.add_argument('-z', '--z', help='whether the fq files were .gz format, if -z, then firstly gzip -d, default(none) ', default=False, action='store_true')
    optional_one.add_argument('-x', '--x', help='evalue for searching 16S in usearch default 1e-10', default=1e-10)
    optional_one.add_argument('-y', '--y', help='evalue for searching universal single copy marker gene default 3', default=3)
    optional_one.add_argument('-v', '--v', help='the identity value for diamond to search the USCMGs default 0.45', default=0.45)
    parser_stage_one.set_defaults(func=stage_one)

    ## stage_two parameters
    parser_stage_two = subparsers.add_parser('stage_two', help='run stage_two')
    mandatory_two = parser_stage_two.add_argument_group('MANDATORY')
    mandatory_two.add_argument('-i','--i', help='the potential arg reads from stage one', required=True)
    mandatory_two.add_argument('-o','--o', help='Output prefix', required=True)
    mandatory_two.add_argument('-m','--m', help='meta data online from stage one', required=True)

    optional_two = parser_stage_two.add_argument_group('OPTIONAL')
    optional_two.add_argument('-n', '--n', help='number of threads used for blastx, default 1', default=1)
    optional_two.add_argument('-b', '--b', help='if set then process the blastx results directly [default off], useful if users want to accelerate stage two by running blastx in parallel', default=False, action='store_true')
    optional_two.add_argument('-l', '--l', help='length cutoff, default 25', default=25)
    optional_two.add_argument('-e', '--e', help='evalue cutoff, default 1e-7', default=1e-7)
    optional_two.add_argument('-d', '--d', help='identity cutoff, default 80', default=80)
    parser_stage_two.set_defaults(func=stage_two)

    # No sub-command at all: show the top-level help and exit.
    if len(argv) < 2:
        parser.print_help()
        sys.exit(1)

    # A mode name but nothing else: show that mode's own help and exit.
    if len(argv) < 3:
        if argv[1] == 'stage_one':
            parser_stage_one.print_help()
        elif argv[1] == 'stage_two':
            parser_stage_two.print_help()
        else:
            parser.print_help()
        sys.exit(1)

    return parser.parse_args(argv[1:])
def main(argv=sys.argv):
    """Entry point: parse command-line options and run the chosen stage."""
    parsed = parse_options(argv)
    # parse_options attaches the stage handler as .func via set_defaults.
    parsed.func(parsed)
# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main(sys.argv)
===============
ARG_OAP_v2.0 manual
===============
**We have simplified the running process of ublastx_stage_one. We have made a step by step video about how to use ARGs-OAP platform, hopefully, this video will guid new users to go through the process within ten minutes. The address is: https://www.youtube.com/watch?v=PCr1ctXvZPk**
**A mirror site was added in Shenzhen China for mainland China users to solve the slow data uploading problem [SUSTC-MIRROR-ARGS-OAP](http://smile.sustc.edu.cn:8080/)**
New release of ublastx verion 2.0
The change log of this version (2019.12.07) includes:
1. database modification:
Added polymyxin resistance genotypes mcr-1,mcr-1.2,mcr-1.3,mcr-1.4,mcr-1.5,mcr-1.6,mcr-1.7,mcr-1.8,mcr-1.9,mcr-2.1,mcr-2.2,mcr-3,mcr-4,mcr-5,eptA,icr-Mo,icr-Mc
Added sulfonamide resistance genotypes sul4
Added quinolone resistance genotypes abaQ
Added beta-lactam resistance genotypes NDM-1,NDM-9,NDM-10,NDM-11,NDM-12,NDM-13,NDM-16,NDM-14,NDM-15,NDM-17,NDM-20,NDM-18,NDM-19,vim-48,OXA-232
Added multidrug resistance genotype qacEdelta1
tetracycline resistance genotypes tetK,tetX,tetX1,tetX2,tetX3,tetX4
2. pipeline modification:
remove usearch (users need to download usearch by themselves. the .udb file is generated from usearch bit32, if users have their own usearch bit64, a suitable udb can be generated follow two steps:firstly, retrieve fasta “usearch -udb2fasta db.udb -output db.fasta", secondly, generate udb "usearch -makeudb_ublast db.fasta -output db.udb".
However, we are working in progress to replace usearch by minimap2 and diamond. A follow up update is coming soon after parameter optimization and validation.
3. pipeline modification 2:
The calculation is copy of ARGs divided by copies of 16S
The copy of ARGs has been changed to Number-of-ARG- like-sequence × Length-of-hit-length / Length-of-ARG-reference-sequence
The previous calculation method of copy of ARGs was Number-of-ARG- like-sequence × Length-of-reads (i.e. 100 or 150) / Length-of-ARG-reference-sequence
Novel features
1. The SARG database was expanded about three times in the version 2.0
2. SARGfam was supplied
3. Cell number estimation with universial single-copy marker gene was added
New release of Ublastx Version 1.2
**1. adding a method to obtain microbial community structure from the shotgun metagenomics data set.**
**2. adding copy number correction using Copyrighter database and normalize ARGs abundance by cell number.**
Detail introduction of copy number correction can be referred to [Transform ARGs abundance against cell number](https://github.com/biofuture/Ublastx_stageone/wiki/Transform-ARGs-abundance-against-cell-number)
There are some questions raised by users, please refer to the [FAQ](https://github.com/biofuture/Ublastx_stageone/wiki/FAQ) for details.
To run Ublastx, users should download the stage one source code into local computer system (Unix/Linux) and upload the generated files for stage two onto our Galaxy analysis platform (http://smile.hku.hk/SARGs).
What does Ublastx do:
=====================
1. Fast environmental searching of antibiotic resistant gene in multiple metagenomics data sets; the ARGs abundance can be normalized to cell number
2. Generate mother table of type and sub-type level ARGs of users' samples and a merged sub-type level mother table
3. Generate a PcoA of users samples with other typical environment samples such as human gut, ocean and sediment to show the relationship of user concerned samples with already sequenced environment.
clone source code into local computer
=====================================
git clone https://github.com/biofuture/Ublastx_stageone.git
Prepare the meta-data file of your samples
==========================================
To run the stage one pipeline, users need to prepare relative meta-data.txt file and put all the pair-end fastq file into one directory
Example of meta-data file **meta-data.txt** Tips:
* You need keep the first and second column's name as SampleID and Name
* The SampleID are required to be numbers counting from 1 to 2 to 3 etc.
* Category is the classification of your samples into groups and we will colored your samples in PcoA by this informaton
* The meta-data table should be separated by tabular for each of the items
* The Name of each sample should be the fastq file names for your pair-end Illumina sequencing data, your fastq files will automatically be recognized by Name_1.fq and Name_2.fq, so you need to keep the name consistent with your fq file name. (if you files are end with .fastq or .fasta, you need to change them to end with .fq or .fa)
**Please make sure the meta-data file is pure txt format, if you edit the file under windows, using nodepad++ and check the end of each line by cliking View-> Show Symbol -> Show All Characters. If the line is end up with CRLF, please remove the CR by replace \r to nothing in the replace dialogue frame**
SampleID | Name | Category
---------|------|-----------
1 | STAS | ST
2 | SWHAS104 | SWH
Prepare database and usearch
============================
SARG Database and 32 bit usearch is avaliable in DB/ and bin/ directory, respectively. **Users donot need to download CARD and ARDB anymore!!**
Stage one pipeline
==================
When meta-data.txt and database files are prepared, then put all your fastq files into one directory in your local system (notice the name of your fastq files should be Name_1.fq and Name_2.fq). your can give -h to show the help information. Examples could be found in source directory example, in example directory run test:
`nohup ../ublastx_stage_one -i inputfqs -o testoutdir -m meta-data.txt -n 2`
Usage: ./ublastx_stage_one -i <Fq input dir> -m <Metadata_map.txt> -o <output dir> -n [number of threads] -f [fa|fq] -z -h -c
-i Input files directory, required
-m meta data file, required
-o Output files directory, default current directory
-n number of threads used for usearch, default 1
-f the format of processed files, default fq
-z whether the fq files were .gz format, if -z, then firstly gzip -d, default(none)
-c This option fulfills copy number correction by the Copyrighter database to transform 16S information into cell number [ direct searching hyper variable region database by usearch; default 1]
-h print this help information
This step will search reads against SARG database and 16S greengene non-redundant 85 OTUs database to identify potential ARG reads and 16S reads. This step will generate searching results files for each fastq. This step also obtains the microbial community structure information of samples by searching against hyper-variable region database, and then performs copy number correction using Copyrighter copy number database (release date) to finally estimate the cell number of samples.
The results are in testoutdir/, it looks like this:
extracted.fa STAS_2.16s SWHAS104.16s_hyperout.txt
meta_data_online.txt STAS_2.us SWHAS104_1.us
STAS_1.16s STAS.extract_1.fa SWHAS104_2.16s
STAS.16s_1v6.us STAS.extract_2.fa SWHAS104_2.us
STAS.16s_2v6.us SWHAS104_1.16s SWHAS104.extract_1.fa
STAS.16s_hvr_community.txt SWHAS104.16s_1v6.us SWHAS104.extract_2.fa
STAS.16s_hvr_normal.copy.txt SWHAS104.16s_2v6.us ublastx_bash_Mon-Feb-1-16:20:59-2016.sh
STAS.16s_hyperout.txt SWHAS104.16s_hvr_community.txt
STAS_1.us SWHAS104.16s_hvr_normal.copy.txt
The **extracted.fa** and **meta_data_online.txt** are two files needed for ublastx_stage_two analysis. The STAS.16s_hvr_community.txt is the microbial community of sample STAS and STAS.16s_hvr_normal.copy.txt is the averagely copy number of the microbial community after CopyRighter database correction.
The meta-data-online.txt looks like this
SampleID | Name | Category | #ofreads | #of16S| **#ofCell**
---------|------|-----------|----------|-------|--------
1 | STAS | ST |200000 | 10.1 | 4.9
2 | SWHAS104 | SWH |200000 | 9.7 | 4.1
Stage two pipeline on Galaxy system and download results
========================================================
Go to http://smile.hku.hk/SARGs and using the module ARG_OAP.
1. Using **ARG_OAP** -> **Upload Files** module to upload the extracted fasta file and meta_data_online.txt file generated in stage one into Galaxy
2. Click **ARG_OAP** and **Ublast_stagetwo**, select your uploaded files
3. For \"Column in Metadata:\" choose the column you want to classify your samples (default: 3)
Click **Execute** and you can find four output files for your information
After a while or so, you will notice that there are four files generated for your information.
**File 1 and 2**: PcoA figures of your samples and other environment samples generated by ARGs abundance matrix normalization to 16s reads number and cell number
**File 3 and 4**: Other tabular mother tables which include the profile of ARGs type and subtype information, along with the mother tables of other environment samples. File 3 gives ARGs abundance normalized against 16S reads number; File 4 gives ARGs abundance normalized against cell number
------------------------------------------------------------------------------------------------------------------------
**Notice:**
This tools only provide the required scripts for ARGs-OAP pipeline (Bioinformatics (2016) doi: 10.1093/bioinformatics/btw136).
This pipeline is distributed in the hope to achieve the aim of management of antibiotic resistant genes in envrionment, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.This pipeline is only allowed to be used for non-commercial and academic purpose.
The copyrights of the following tools/databases which could be used in this pipeline belong to their original developers. The user of this pipeline should follow the guideline and regulations of these tools/database which could be found at the websites of their developers.
1) Usearch: (http://www.drive5.com/usearch/)
2) Copyrighter: (http://microbiomejournal.biomedcentral.com/articles/10.1186/2049-2618-2-11)
3) Greengenes: (http://greengenes.lbl.gov/cgi-bin/nph-index.cgi)
Please check the above websites for details of these tools/databases.
| ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/index.rst | index.rst |
ARGs_OAP manual
==========================================
The change log of this version (2020.07.15) includes:
1. pipeline modification
no usearch now (if users prefer usearch, please go to previous version)
Now, we use minimap2 and diamond.
2. pipeline modification 2
In stage one, we removed parameter "-s" and "-c"
More importantly, you need to state you Read Length in the meta-data.txt!!!!
Prepare compulsory command
============================
1. bbmap
a. download packages from here (https://sourceforge.net/projects/bbmap/)
b. install bbmap
c. copy the whole folder "bbmap" into the subfoler "bin" under "Ublastx_stageone"
2. samtools
a. download packages from here (http://www.htslib.org/download/)
b. install samtools
c. copy executable "samtools" into the subfoler "bin" under "Ublastx_stageone"
3. minimap2
a. download packages from here (https://github.com/lh3/minimap2)
b. install minimap2
c. copy executable "mimimap2" into the subfoler "bin" under "Ublastx_stageone"
Prepare the meta-data file of your samples
==========================================
To run the stage one pipeline, users need to prepare relative meta-data.txt file and put all the pair-end fastq file into one directory
Example of meta-data file **meta-data.txt** Tips:
* You need keep the first and second column's name as SampleID and Name
* The SampleID are required to be unique numbers counting from 1 to 2 to 3 etc.
* Category is the classification of your samples into groups and we will colored your samples in PcoA by this informaton
* The meta-data table should be separated by tabular for each of the items
* The Name of each sample should be the fastq file names for your pair-end Illumina sequencing data, your fastq files will automatically be recognized by Name_1.fq and Name_2.fq, so you need to keep the name consistent with your fq file name. (if you files are end with .fastq or .fasta, you need to change them to end with .fq or .fa)
**Please make sure the meta-data file is pure txt format, if you edit the file under windows, using nodepad++ and check the end of each line by cliking View-> Show Symbol -> Show All Characters. If the line is end up with CRLF, please remove the CR by replace \r to nothing in the replace dialogue frame**
SampleID | Name | Category |ReadLength
---------|------|-----------
1 | STAS | ST |100
2 | SWHAS104 | SWH |100
Stage one
==================
Put all your fastq files into one directory in your local system (notice the name of your fastq files should be Name_1.fq and Name_2.fq). your can give -h to show the help information. Examples could be found in source directory example, in example directory run test:
nohup ./argoap_pipeline_stageone_version2.3 -i inputfqs -o testoutdir -m meta-data.txt -n 8
./argoap_pipeline_stageone_version2.3 -h
The results are in testoutdir/
The **extracted.fa** and **meta_data_online.txt** are two files needed for ublastx_stage_two analysis.
The meta-data-online.txt looks like this
SampleID | Name | Category | ReadLength |#ofreads | #of16S| **#ofCell**
---------|------|-----------|----------|-------|----|----
1 | STAS | ST | 100| 200000 | 10.1 | 4.9
2 | SWHAS104 | SWH | 100|200000 | 9.7 | 4.1
Stage two
========================================================
Normally, juse run
nohup perl argoap_pipeline_stagetwo_version2 -i extracted.fa -m meta_data_online.txt -o testout -l 25 -d 80 -e 1e-5
For users who have very large datasets and prefer a more complex workflow:
1. users run locally by themselves to get the blastx outfmt 6 format resutls by alighment against SARG2.2.
**A typical scene is that users can paralelly run the blastx on clusters by multi-nodes, and then merge the blastx output as the input for the -b option.**
2. Prerequest
a. download the whole fold of this repo.
b. install R packages **vegan, labdsv, ggplot2 and scales** (Enter R and use install.packages(pkgs="vegan") to install these packages).
3. use -b option for the stage two script:
perl argoap_pipeline_stagetwo_version2 -i extracted.fa -m meta_data_online.txt -o testout -b merge_blastx.out.txt
Stage two pipeline on Galaxy system and download results
========================================================
Go to http://smile.hku.hk/SARGs and using the module ARG_OAP.
1. Using **ARG_OAP** -> **Upload Files** module to upload the extracted fasta file and meta_data_online.txt file generated in stage one into Galaxy
2. Click **ARG_OAP** and **Ublast_stagetwo**, select your uploaded files
3. For \"Column in Metadata:\" choose the column you want to classify your samples (default: 3)
Click **Execute** and you can find four output files for your information
After a while or so, you will notice that there are four files generated for your information.
**File 1 and 2**: PcoA figures of your samples and other environment samples generated by ARGs abundance matrix normalization to 16s reads number and cell number
**File 3 and 4**: Other tabular mother tables which include the profile of ARGs type and subtype information, along with the mother tables of other environment samples. File 3 gives ARGs abundance normalized against 16S reads number; File 4 gives ARGs abundance normalized against cell number
There are some questions raised by users, please refer to the [FAQ](https://github.com/biofuture/Ublastx_stageone/wiki/FAQ) for details. To run ARG OAP locally, users should download the source code into local computer system (Unix/Linux). Users can upload the generated files for stage two onto our Galaxy analysis platform (http://smile.hku.hk/SARGs) or use the local version of stage two script.
------------------------------------------------------------------------------------------------------------------------
**Notice:**
This tools only provide the required scripts for ARGs-OAP1.0/2.0 pipeline
This pipeline is distributed in the hope to achieve the aim of management of antibiotic resistant genes in envrionment, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.This pipeline is only allowed to be used for non-commercial and academic purpose.
**The SARG database is distributed only freely used for academic prupose, any commercial use should require the agreement from the developer team.**
| ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/README.md | README.md |
# Print the help/usage text for this wrapper to stdout.
usage(){
echo "
Written by Brian Bushnell
Last modified March 24, 2020
Description: Estimates cardinality of unique kmers in sequence data.
See also kmercountmulti.sh.
Usage: loglog.sh in=<file> k=<31>
Parameters:
in=<file> (in1) Input file, or comma-delimited list of files.
in2=<file> Optional second file for paired reads.
k=31 Use this kmer length for counting.
buckets=2048 Use this many buckets for counting; higher decreases
variance, for large datasets. Must be a power of 2.
seed=-1 Use this seed for hash functions. A negative number forces
a random seed.
minprob=0 Set to a value between 0 and 1 to exclude kmers with a lower
probability of being correct.
Shortcuts:
The # symbol will be substituted for 1 and 2.
For example:
loglog.sh in=read#.fq
...is equivalent to:
loglog.sh in1=read1.fq in2=read2.fq
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Supported input formats are fastq, fasta, scarf, sam, and bam.
Supported compression formats are gzip and bz2.
To read from stdin, set 'in=stdin'. The format should be specified with an extension, like 'in=stdin.fq.gz'
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
# Follow symlinks until DIR points at the real script file.
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath: the compiled BBTools classes live next to this script.
CP="$DIR""current/"
# Default JVM heap; a user-supplied -Xmx flag overrides it via parseXmx.
z="-Xmx200m"
set=0
# With no arguments (or -h/--help), print usage and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Delegate environment setup and -Xmx parsing to the shared calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
# Build the java command, echo it to stderr, then run it.
function loglog() {
local CMD="java $EA $EOOM $z -cp $CP cardinality.LogLogWrapper $@"
echo $CMD >&2
eval $CMD
}
loglog "$@"
# Print the help/usage text for this wrapper to stdout.
usage(){
echo "
Written by Brian Bushnell
Last modified May 18, 2016
Description: Filters NCBI assembly summaries according to their taxonomy.
The specific files are available here:
ftp://ftp.ncbi.nlm.nih.gov/genomes/ASSEMBLY_REPORTS/assembly_summary_genbank.txt
or ftp://ftp.ncbi.nlm.nih.gov/genomes/genbank/assembly_summary_genbank.txt
ftp://ftp.ncbi.nlm.nih.gov/genomes/ASSEMBLY_REPORTS/assembly_summary_refseq.txt
or ftp://ftp.ncbi.nlm.nih.gov/genomes/refseq/assembly_summary_refseq.txt
Usage: filterassemblysummary.sh in=<input file> out=<output file> tree=<tree file> table=<table file> ids=<numbers> level=<name or number>
Standard parameters:
in=<file> Primary input.
out=<file> Primary output.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
Processing parameters:
level= Taxonomic level, such as phylum. Filtering will operate on
sequences within the same taxonomic level as specified ids.
reqlevel= Require nodes to have ancestors at these levels. For example,
reqlevel=species,genus would ban nodes that are not defined
at both the species and genus levels.
ids= Comma-delimited list of NCBI numeric IDs.
names= Alternately, a list of names (such as 'Homo sapiens').
Note that spaces need special handling.
include=f 'f' will discard filtered sequences, 't' will keep them.
tree= A taxonomic tree made by TaxTree, such as tree.taxtree.gz.
table= A table translating gi numbers to NCBI taxIDs.
Only needed if gi numbers will be used.
* Note *
Tree and table files are in /global/projectb/sandbox/gaag/bbtools/tax
For non-Genepool users, or to make new ones, use taxtree.sh and gitable.sh
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
# Follow symlinks until DIR points at the real script file.
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath: the compiled BBTools classes live next to this script.
CP="$DIR""current/"
# Default JVM heap settings; recalculated below unless the user passed -Xmx.
z="-Xmx4g"
z2="-Xms4g"
set=0
# With no arguments (or -h/--help), print usage and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Pick the heap size: honor a user -Xmx if present ($set becomes 1),
# otherwise derive it from free RAM via calcmem.sh's freeRam helper.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 1000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Build the java command, echo it to stderr, then run it.
filterassemblysummary() {
local CMD="java $EA $EOOM $z -cp $CP driver.FilterAssemblySummary $@"
echo $CMD >&2
eval $CMD
}
filterassemblysummary "$@"
# usage: prints the icecreamfinder.sh help text (shown for -h/--help or no arguments).
# The quoted block below is runtime output and must not be edited casually.
# NOTE(review): 'arshist=' is listed twice below; the second entry (inverted-repeat
# histogram) presumably should be a different flag name — confirm against icecream.IceCreamFinder.
usage(){
echo "
Written by Brian Bushnell
Last modified May 4, 2020
Description: Finds PacBio reads containing inverted repeats.
These are candidate triangle reads (ice cream cones).
Either ice cream cones only, or all inverted repeats, can be filtered.
Usage: icecreamfinder.sh in=<input file> out=<output file> outb=<bad reads>
File I/O parameters:
in=<file> Primary input.
out=<file> (outgood) Output for good reads.
outa=<file> (outambig) Output for with inverted repeats, but it is unclear
whether that is natural or artifactual.
outb=<file> (outbad) Output for reads suspected as chimeric.
outj=<file> (outjunction) Output for junctions in inverted repeat reads.
stats=<file> Print screen output here instead of to the screen.
json=f Print stats as json.
arshist=<file> Adapter alignment score ratio histogram.
arshist=<file> Inverted repeat alignment score ratio histogram.
ambig= Determine where ambiguous reads are sent. They will ALWAYS
be sent to outa if specified. If not, they will be sent
to outg (good) unless overridden by this flag. Options:
ambig=good: Send ambiguous reads to outg.
ambig=bad: Send ambiguous reads to outb.
ambig=good,bad: Send ambiguous reads to outg and outb.
ambig=null: Do not send to outg or outb.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression
level; lower compression is faster.
Processing parameters:
alignrc=t Align the reverse-complement of the read to itself to look
for inverted repeats.
alignadapter=t Align adapter sequence to reads.
adapter= default: ATCTCTCTCAACAACAACAACGGAGGAGGAGGAAAAGAGAGAGAT
icecreamonly=t (ico) Only remove suspected triangle reads. Otherwise, all
inverted repeats are removed.
ksr=t (keepshortreads) Keep non-triangle reads from triangle ZMWs.
kzt=f (keepzmwstogether) Send all reads from a ZMW to the same file.
targetqlen=352 (qlen) Make queries of this length from a read tip.
qlenfraction=0.15 Try to make queries at most this fraction of read length.
For short reads this will override targetqlen.
minlen=40 Do not output reads shorter than this, after trimming.
minqlen=100 Do not make queries shorter than this. For very short
reads this will override qlenfraction.
shortfraction=0.4 Only declare a read to be a triangle if the short half
of the repeat is at least this fraction of read length.
ccs=f Input reads are CCS, meaning they are all full-pass.
In this case you should increase minratio.
trim=t Trim adapter sequence from read tips.
trimpolya=f Trim terminal poly-A and poly-T sequences, for some isoseq
libraries.
minpolymer=5 Don't trim poly-A sequence shorter than this.
polyerror=0.2 Max error rate for trimming poly-A.
Speed and sensitivity:
jni=f Enable C code for higher speed and identical results.
minratio= Fraction of maximal alignment score to consider as matching.
Higher is more stringent; lower allows more sequencing errors.
This is VERY SENSITIVE. For error-corrected reads it should
be set higher. It is roughly the expected identity of one
read to another (double the per-read error rate).
minratio1=0.59 Set minratio for the first alignment pass only.
minratio2=0.64 Set minratio for the second alignment pass only.
adapterratio=0.18 Initial adapter detection sensitivity; affects speed.
adapterratio2=.325 Final adapter detection sensitivity.
minscore=-800 Exit alignment early if score drops below this.
Entropy parameters (recommended setting is 'entropy=t'):
minentropy=-1 Set to 0.4 or above to remove low-entropy reads;
range is 0-1, recommended value is 0.55. 0.7 is too high.
Negative numbers disable this function.
entropyk=3 Kmer length for entropy calculation.
entropylen=450 Reads with entropy below cutoff for at least this many
consecutive bases will be removed.
entropyfraction=0.5 Alternative minimum length for short reads; the shorter
of entropylen and entfraction*readlength will be used.
entropywindow=50 Window size used for entropy calculation.
maxmonomerfraction=0.75 (mmf) Also require this fraction of bases in each
window to be the same base.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath and JNI native-library path, relative to the resolved script dir.
CP="$DIR""current/"
JNI="-Djava.library.path=""$DIR""jni/"
#JNI=""
# Default JVM heap/stack sizes; heap may be replaced by calcXmx below.
z="-Xmx2g"
z2="-Xms2g"
z3="-Xss16m"
set=0
# Show help when invoked with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Size the JVM heap from available RAM via calcmem.sh helpers,
# unless the caller passed an explicit -Xmx (parseXmx sets set=1).
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 2000m 42
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Build and run the Java command line for icecream.IceCreamFinder.
icecream() {
local CMD="java $EA $EOOM $z $z2 $z3 $JNI -cp $CP icecream.IceCreamFinder $@"
if [[ $silent != 1 ]]; then
echo $CMD >&2
fi
eval $CMD
}
# NOTE(review): everything after "$@" on the next line ('| ARGs-OAP | ...') is
# dataset-join residue fused onto the invocation, not valid shell — confirm/strip before executing.
icecream "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/icecreamfinder.sh | icecreamfinder.sh
# usage: prints the mergebarcodes.sh help text (shown for -h/--help or no arguments).
# The quoted block below is runtime output and must not be edited casually.
usage(){
echo "
Written by Brian Bushnell
Last modified February 17, 2015
Description: Concatenates barcodes and quality onto read names.
Usage: mergebarcodes.sh in=<file> out=<file> barcode=<file>
Input may be stdin or a fasta or fastq file, raw or gzipped.
If you pipe via stdin/stdout, please include the file type; e.g. for gzipped fasta input, set in=stdin.fa.gz
Optional parameters (and their defaults)
Input parameters:
in=<file> Input reads. 'in=stdin.fq' will pipe from standard in.
bar=<file> File containing barcodes.
int=auto (interleaved) If true, forces fastq input to be paired and interleaved.
qin=auto ASCII offset for input quality. May be 33 (Sanger), 64 (Illumina), or auto.
Output parameters:
out=<file> Write muxed sequences here. 'out=stdout.fa' will pipe to standard out.
overwrite=t (ow) Set to false to force the program to abort rather than overwrite an existing file.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression level; lower compression is faster.
qout=auto ASCII offset for output quality. May be 33 (Sanger), 64 (Illumina), or auto (same as input).
Other parameters:
pigz=t Use pigz to compress. If argument is a number, that will set the number of pigz threads.
unpigz=t Use pigz to decompress.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath points at the 'current' directory next to the (resolved) script.
CP="$DIR""current/"
# Default JVM heap sizes; may be replaced by calcXmx below.
z="-Xmx1g"
z2="-Xms1g"
set=0
# Show help when invoked with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Size the JVM heap from available RAM via calcmem.sh helpers,
# unless the caller passed an explicit -Xmx (parseXmx sets set=1).
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 3200m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Build and run the Java command line for jgi.MergeBarcodes.
mergebarcodes() {
local CMD="java $EA $EOOM $z -cp $CP jgi.MergeBarcodes $@"
echo $CMD >&2
eval $CMD
}
# NOTE(review): everything after "$@" on the next line ('| ARGs-OAP | ...') is
# dataset-join residue fused onto the invocation, not valid shell — confirm/strip before executing.
mergebarcodes "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/mergebarcodes.sh | mergebarcodes.sh
# usage: prints the keepbestcopy.sh help text (shown for -h/--help or no arguments).
# The quoted block below is runtime output and must not be edited casually.
usage(){
echo "
Written by Brian Bushnell
Last modified October 4, 2019
Description: Discards all but the best copy of a ribosomal gene per TaxID.
Gene sequences should be named like this: >tid|123|whatever
Sequences are selected based on the number of fully defined bases.
Usage: keepbest.sh in=<input file> out=<output file> rate=<float>
Input may be fasta or fastq, compressed or uncompressed.
Standard parameters:
in=<file> Input sequences.
out=<file> Output sequences.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression
level; lower compression is faster.
Processing parameters:
maxlen=1600 Prefer sequences shorter than this.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath points at the 'current' directory next to the (resolved) script.
CP="$DIR""current/"
# Default JVM heap sizes; may be replaced by calcXmx below.
z="-Xmx8g"
z2="-Xms8g"
set=0
# Show help when invoked with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Size the JVM heap from available RAM via calcmem.sh helpers,
# unless the caller passed an explicit -Xmx (parseXmx sets set=1).
# NOTE(review): sibling wrappers pass megabyte values (e.g. 8000m) to freeRam;
# '8g' here is presumably accepted by calcmem.sh — confirm.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 8g 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Build and run the Java command line for jgi.KeepBestCopy.
keepbest() {
local CMD="java $EA $EOOM $z -cp $CP jgi.KeepBestCopy $@"
echo $CMD >&2
eval $CMD
}
# NOTE(review): everything after "$@" on the next line ('| ARGs-OAP | ...') is
# dataset-join residue fused onto the invocation, not valid shell — confirm/strip before executing.
keepbest "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/keepbestcopy.sh | keepbestcopy.sh
# usage: prints the makepolymers.sh help text (shown for -h/--help or no arguments).
# The quoted block below is runtime output and must not be edited casually.
usage(){
echo "
Written by Brian Bushnell
Last modified October 3, 2017
Description: Creates polymer sequences.
Can be used in conjunction with mutate.sh to generate low-complexity sequence.
Usage: makepolymers.sh out=<output file> k=<repeat length> minlen=<sequence length>
I/O parameters:
out=<file> Output genome.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
Processing parameters:
k=1 Length of repeating polymeric units.
To generate a sweep of multiple values of k,
specify both mink and maxk.
minlen=31 Ensure sequences are at least this long.
Specifically, minlen=X will ensure sequences are long enough
that all possible kmers of length X are present.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath points at the 'current' directory next to the (resolved) script.
CP="$DIR""current/"
# Default JVM heap sizes; may be replaced by calcXmx below.
z="-Xmx4g"
z2="-Xms4g"
set=0
# Show help when invoked with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Size the JVM heap from available RAM via calcmem.sh helpers,
# unless the caller passed an explicit -Xmx (parseXmx sets set=1).
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Build and run the Java command line for jgi.MakePolymers.
makepolymers() {
local CMD="java $EA $EOOM $z $z2 -cp $CP jgi.MakePolymers $@"
echo $CMD >&2
eval $CMD
}
# NOTE(review): everything after "$@" on the next line ('| ARGs-OAP | ...') is
# dataset-join residue fused onto the invocation, not valid shell — confirm/strip before executing.
makepolymers "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/makepolymers.sh | makepolymers.sh
# usage: prints the comparevcf.sh help text (shown for -h/--help or no arguments).
# The quoted block below is runtime output and must not be edited casually.
# NOTE(review): the 'overwrite=f' entry below appears to be missing its continuation
# line ("overwrite an existing file.") before 'bgzip=f' — confirm against the upstream script.
usage(){
echo "
Written by Brian Bushnell
Last modified April 30, 2019
Description: Performs set operations on VCF files:
Union, intersection, and subtraction.
Usage: comparevcf.sh in=<file,file,...> out=<file>
I/O parameters:
in=<file> Input; must be at least 2 files.
out=<file> Output file.
ref=<file> Reference file; optional. Usually not needed.
shist=<file> (scorehist) Output for variant score histogram.
overwrite=f (ow) Set to false to force the program to abort rather than
bgzip=f Use bgzip for gzip compression.
Processing Mode (choose one only):
subtract=t Subtract all other files from the first file.
union=f Make a union of all files.
intersection=f Make an intersection of all files.
Processing Parameters:
addsamples=t Include all samples in the output lines. (TODO)
splitalleles=f Split multi-allelic lines into multiple lines.
splitsubs=f Split multi-base substitutions into SNPs.
canonize=t Trim variations down to a canonical representation.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath points at the 'current' directory next to the (resolved) script.
CP="$DIR""current/"
# Default JVM heap sizes; may be replaced by calcXmx below.
z="-Xmx4g"
z2="-Xms4g"
set=0
# Show help when invoked with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Size the JVM heap from available RAM via calcmem.sh helpers,
# unless the caller passed an explicit -Xmx (parseXmx sets set=1).
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Build and run the Java command line for var2.CompareVCF.
comparevcf() {
local CMD="java $EA $EOOM $z $z2 -cp $CP var2.CompareVCF $@"
echo $CMD >&2
eval $CMD
}
# NOTE(review): everything after "$@" on the next line ('| ARGs-OAP | ...') is
# dataset-join residue fused onto the invocation, not valid shell — confirm/strip before executing.
comparevcf "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/comparevcf.sh | comparevcf.sh
# usage: prints the sketch.sh help text (shown for -h/--help or no arguments).
# The quoted block below is runtime output and must not be edited casually.
usage(){
echo "
Written by Brian Bushnell
Last modified January 28, 2020
Description: Creates one or more sketches from a fasta file,
optionally annotated with taxonomic information.
Please read bbmap/docs/guides/BBSketchGuide.txt for more information.
Usage: sketch.sh in=<fasta file> out=<sketch file>
Standard parameters:
in=<file> A fasta file containing one or more sequences.
out=<file> Output filename. If multiple files are desired it must
contain the # symbol.
blacklist=<file> Ignore keys in this sketch file. Additionaly, there are
built-in blacklists that can be specified:
nt: Blacklist for nt
refseq: Blacklist for Refseq
silva: Blacklist for Silva
img: Blacklist for IMG
files=1 Number of output sketch files to produce, for parallel
loading. Independent of the number of sketches produced;
sketches will be randomly distributed between files.
k=32,24 Kmer length, 1-32. To maximize sensitivity and
specificity, dual kmer lengths may be used, e.g. k=32,24
Query and reference k must match.
rcomp=t Look at reverse-complement kmers also.
amino=f Use amino acid mode. Input should be amino acids.
translate=f Call genes and translate to proteins. Input should be
nucleotides. Designed for prokaryotes.
mode=single Possible modes:
single: Write one sketch.
sequence: Write one sketch per sequence.
taxa: Write one sketch per taxonomic unit.
Requires more memory, and taxonomic annotation.
img: Write one sketch per IMG id.
delta=t Delta-compress sketches.
a48=t Encode sketches as ASCII-48 rather than hex.
depth=f Track the number of times kmers appear.
Required for the depth2 field in comparisons.
entropy=0.66 Ignore sequence with entropy below this value.
ssu=t Scan for and retain full-length SSU sequence.
Size parameters:
size=10000 Desired size of sketches (if not using autosize).
maxfraction=0.01 (mgf) Max fraction of genomic kmers to use.
minsize=100 Do not generate sketches for genomes smaller than this.
autosize=t Use flexible sizing instead of fixed-length. This is
nonlinear; a human sketch is only ~6x a bacterial sketch.
sizemult=1 Multiply the autosized size of sketches by this factor.
Normally a bacterial-size genome will get a sketch size
of around 10000; if autosizefactor=2, it would be ~20000.
density= If this flag is set (to a number between 0 and 1),
autosize and sizemult are ignored, and this fraction of
genomic kmers are used. For example, at density=0.001,
a 4.5Mbp bacteria will get a 4500-kmer sketch.
Metadata flags (optional; intended for single-sketch mode):
taxid=-1 Set the NCBI taxid.
imgid=-1 Set the IMG id.
spid=-1 Set the JGI sequencing project id.
name= Set the name (taxname).
name0= Set name0 (normally the first sequence header).
fname= Set fname (normally the file name).
meta_= Set an arbitrary metadata field.
For example, meta_Month=March.
Taxonomy-specific flags:
tree= Specify a taxtree file. On Genepool, use 'auto'.
gi= Specify a gitable file. On Genepool, use 'auto'.
accession= Specify one or more comma-delimited NCBI accession to
taxid files. On Genepool, use 'auto'.
imgdump= Specify an IMG dump file containing NCBI taxIDs,
for IMG mode.
taxlevel=subspecies Taxa hits below this rank will be promoted and merged
with others.
prefilter=f For huge datasets full of junk like nt, this flag
will save memory by ignoring taxa smaller than minsize.
Requires taxonomic information (tree and gi).
tossjunk=t For taxa mode, discard taxonomically uninformative
sequences. This includes sequences with no taxid,
with a tax level NO_RANK, of parent taxid of LIFE.
silva=f Parse headers using Silva or semicolon-delimited syntax.
Ribosomal flags, which allow SSU sequences to be attached to sketches:
processSSU=t Run gene-calling to detect ribosomal SSU sequences.
16Sfile=<file> Optional file of 16S sequences, annotated with TaxIDs.
18Sfile=<file> Optional file of 18S sequences, annotated with TaxIDs.
preferSSUMap=f Prefer file SSUs over called SSUs.
preferSSUMapEuks=t Prefer file SSUs over called SSUs for Eukaryotes.
SSUMapOnly=f Only use file SSUs.
SSUMapOnlyEuks=f Only use file SSUs for Eukaryotes. This prevents
associating an organism with its mitochondrial or
chloroplast 16S/18S, which is otherwise a problem.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
For more detailed information, please read /bbmap/docs/guides/BBSketchGuide.txt.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath points at the 'current' directory next to the (resolved) script.
CP="$DIR""current/"
# Default JVM heap sizes; may be replaced by calcXmx below.
z="-Xmx4g"
z2="-Xms4g"
set=0
# Show help when invoked with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Size the JVM heap from available RAM via calcmem.sh helpers,
# unless the caller passed an explicit -Xmx (parseXmx sets set=1).
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Build and run the Java command line for sketch.SketchMaker.
sketch() {
local CMD="java $EA $EOOM $z $z2 -cp $CP sketch.SketchMaker $@"
echo $CMD >&2
eval $CMD
}
# NOTE(review): everything after "$@" on the next line ('| ARGs-OAP | ...') is
# dataset-join residue fused onto the invocation, not valid shell — confirm/strip before executing.
sketch "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/sketch.sh | sketch.sh
# usage: prints the lilypad.sh help text (shown for -h/--help or no arguments).
# The quoted block below is runtime output and must not be edited casually.
usage(){
echo "
Written by Brian Bushnell
Last modified September 13, 2019
Description: Uses mapped paired reads to generate scaffolds from contigs.
Designed for use with ordinary paired-end Illumina libraries.
Usage: lilypad.sh in=mapped.sam ref=contigs.fa out=scaffolds.fa
Standard parameters:
in=<file> Reads mapped to the reference; should be sam or bam.
ref=<file> Reference; may be fasta or fastq.
out=<file> Modified reference; should be fasta.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
Processing parameters:
gap=10 Pad gaps with a minimum of this many Ns.
mindepth=4 Minimum spanning read pairs to join contigs.
maxinsert=3000 Maximum allowed insert size for proper pairs.
mincontig=200 Ignore contigs under this length if there is a
longer alternative.
minwr=0.8 (minWeightRatio) Minimum fraction of outgoing edges
pointing to the same contig. Lower values will increase
continuity at a risk of misassemblies.
minsr=0.8 (minStrandRatio) Minimum fraction of outgoing edges
indicating the same orientation. Lower values will increase
continuity at a possible risk of inversions.
passes=8 More passes may increase continuity.
samestrandpairs=f Read pairs map to the same strand. Currently untested.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath points at the 'current' directory next to the (resolved) script.
CP="$DIR""current/"
# Default JVM heap sizes; may be replaced by calcXmx below.
z="-Xmx4g"
z2="-Xms4g"
set=0
# Show help when invoked with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Size the JVM heap from available RAM via calcmem.sh helpers,
# unless the caller passed an explicit -Xmx (parseXmx sets set=1).
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Build and run the Java command line for consensus.Lilypad.
lilypad() {
local CMD="java $EA $EOOM $z -cp $CP consensus.Lilypad $@"
echo $CMD >&2
eval $CMD
}
# NOTE(review): everything after "$@" on the next line ('| ARGs-OAP | ...') is
# dataset-join residue fused onto the invocation, not valid shell — confirm/strip before executing.
lilypad "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/lilypad.sh | lilypad.sh
# usage: prints the stats.sh help text (shown for -h/--help or no arguments).
# The quoted block below is runtime output and must not be edited casually.
usage(){
echo "
Written by Brian Bushnell
Last modified March 3, 2020
Description: Generates basic assembly statistics such as scaffold count,
N50, L50, GC content, gap percent, etc. For multiple files, please use
statswrapper.sh. Works with fasta and fastq only (gzipped is fine).
Please read bbmap/docs/guides/StatsGuide.txt for more information.
Usage: stats.sh in=<file>
Parameters:
in=file Specify the input fasta file, or stdin.
out=stdout Destination of primary output; may be directed to a file.
gc=file Writes ACGTN content per scaffold to a file.
gchist=file Filename to output scaffold gc content histogram.
shist=file Filename to output cumulative scaffold length histogram.
gcbins=200 Number of bins for gc histogram.
n=10 Number of contiguous Ns to signify a break between contigs.
k=13 Estimate memory usage of BBMap with this kmer length.
minscaf=0 Ignore scaffolds shorter than this.
phs=f (printheaderstats) Set to true to print total size of headers.
n90=t (printn90) Print the N/L90 metrics.
extended=f Print additional metrics such as L90, logsum, and score.
pdl=f (printduplicatelines) Set to true to print lines in the
scaffold size table where the counts did not change.
n_=t This flag will prefix the terms 'contigs' and 'scaffolds'
with 'n_' in formats 3-6.
addname=f Adds a column for input file name, for formats 3-6.
Logsum and Powsum:
logoffset=1000 Minimum length for calculating log sum.
logbase=2 Log base for calculating log sum.
logpower=1 Raise the log to a power to increase the weight
of longer scaffolds for log sum.
powsum=0.25 Use this power of the length to increase weight
of longer scaffolds for power sum.
Assembly Score Metric:
score=f Print assembly score.
aligned=0.0 Set the fraction of aligned reads (0-1).
assemblyscoreminlen=2000 Minimum length of scaffolds to include in
assembly score calculation.
assemblyscoremaxlen=50000 Maximum length of scaffolds to get bonus points
for being long.
format=<0-7> Format of the stats information; default 1.
format=0 prints no assembly stats.
format=1 uses variable units like MB and KB, and is designed for compatibility with existing tools.
format=2 uses only whole numbers of bases, with no commas in numbers, and is designed for machine parsing.
format=3 outputs stats in 2 rows of tab-delimited columns: a header row and a data row.
format=4 is like 3 but with scaffold data only.
format=5 is like 3 but with contig data only.
format=6 is like 3 but the header starts with a #.
format=7 is like 1 but only prints contig info.
format=8 is like 3 but in JSON. You can also just use the 'json' flag.
gcformat=<0-5> Select GC output format; default 1.
gcformat=0: (no base content info printed)
gcformat=1: name length A C G T N GC
gcformat=2: name GC
gcformat=4: name length GC
gcformat=5: name length GC logsum powsum
Note that in gcformat 1, A+C+G+T=1 even when N is nonzero.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath points at the 'current' directory next to the (resolved) script.
CP="$DIR""current/"
# Small fixed heap; stats is lightweight, so no RAM autodetection here.
z="-Xmx120m"
set=0
# Show help when invoked with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Only parse -Xmx/-eoom style flags via calcmem.sh helpers; no freeRam sizing.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
# Build and run the Java command line for jgi.AssemblyStats2
# (command echo is deliberately disabled so stdout stays clean for parsing).
stats() {
local CMD="java $EA $EOOM $z -cp $CP jgi.AssemblyStats2 $@"
# echo $CMD >&2
eval $CMD
}
# NOTE(review): everything after "$@" on the next line ('| ARGs-OAP | ...') is
# dataset-join residue fused onto the invocation, not valid shell — confirm/strip before executing.
stats "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/stats.sh | stats.sh
# usage: prints the bbfakereads.sh help text (shown for -h/--help or no arguments).
# The quoted block below is runtime output and must not be edited casually.
usage(){
echo "
Written by Brian Bushnell
Last modified February 17, 2015
Description: Generates fake read pairs from ends of contigs or single reads.
Usage: bbfakereads.sh in=<file> out=<outfile> out2=<outfile2>
Out2 is optional; if there is only one output file, it will be written interleaved.
Standard parameters:
ow=f (overwrite) Overwrites files that already exist.
zl=4 (ziplevel) Set compression level, 1 (low) to 9 (max).
fastawrap=100 Length of lines in fasta output.
tuc=f (touppercase) Change lowercase letters in reads to uppercase.
qin=auto ASCII offset for input quality. May be 33 (Sanger), 64 (Illumina), or auto.
qout=auto ASCII offset for output quality. May be 33 (Sanger), 64 (Illumina), or auto (same as input).
qfin=<.qual file> Read qualities from this qual file, for the reads coming from 'in=<fasta file>'
qfout=<.qual file> Write qualities from this qual file, for the reads going to 'out=<fasta file>'
qfout2=<.qual file> Write qualities from this qual file, for the reads coming from 'out2=<fasta file>'
verifyinterleaved=f (vint) When true, checks a file to see if the names look paired. Prints an error message if not.
tossbrokenreads=f (tbr) Discard reads that have different numbers of bases and qualities. By default this will be detected and cause a crash.
Faking parameters:
length=250 Generate reads of this length.
minlength=1 Don't generate reads shorter than this.
overlap=0 If you set overlap, then reads will by variable length, overlapping by 'overlap' in the middle.
identifier=null (id) Output read names are prefixed with this.
addspace=t Set to false to omit the space before /1 and /2 of paired reads.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath points at the 'current' directory next to the (resolved) script.
CP="$DIR""current/"
# Small fixed heap; no RAM autodetection for this tool.
z="-Xmx600m"
set=0
# Show help when invoked with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Only parse -Xmx/-eoom style flags via calcmem.sh helpers; no freeRam sizing.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
# Build and run the Java command line for jgi.FakeReads.
function fakereads() {
local CMD="java $EA $EOOM $z -cp $CP jgi.FakeReads $@"
echo $CMD >&2
eval $CMD
}
# NOTE(review): everything after "$@" on the next line ('| ARGs-OAP | ...') is
# dataset-join residue fused onto the invocation, not valid shell — confirm/strip before executing.
fakereads "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/bbfakereads.sh | bbfakereads.sh
# usage: prints the representative.sh help text (shown for -h/--help or no arguments).
# The quoted block below is runtime output and must not be edited casually.
usage(){
echo "
Written by Brian Bushnell
Last modified September 4, 2019
Description: Makes a representative set of taxa from all-to-all identity
comparison. Input should be in 3+ column TSV format (first 3 are required):
(query, ref, ANI, qsize, rsize, qbases, rbases)
...as produced by CompareSketch with format=3 and usetaxidname.
Additional columns are allowed and will be ignored.
Usage: representative.sh in=<input file> out=<output file>
Parameters:
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
threshold=0 Ignore edges under threshold value. This also affects the
choice of centroids; a high threshold gives more weight to
higher-value edges.
minratio=0 Ignores edges with a ratio below this value.
invertratio=f Invert the ratio when greater than 1.
printheader=t Print a header line in the output.
printsize=t Print the size of retained nodes.
printclusters=t Print the nodes subsumed by each retained node.
minsize=0 Ignore nodes under this size (in unique kmers).
maxsize=0 If positive, ignore nodes over this size (unique kmers).
minbases=0 Ignore nodes under this size (in total bases).
maxbases=0 If positive, ignore nodes over this size (total bases).
Taxonomy parameters:
level= Taxonomic level, such as phylum. Filtering will operate on
sequences within the same taxonomic level as specified ids.
If not set, only matches to a node or its descendants will
be considered.
ids= Comma-delimited list of NCBI numeric IDs. Can also be a
file with one taxID per line.
names= Alternately, a list of names (such as 'Homo sapiens').
Note that spaces need special handling.
include=f 'f' will discard filtered sequences, 't' will keep them.
tree=<file> Specify a TaxTree file like tree.taxtree.gz.
On Genepool, use 'auto'.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will
specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically around 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath points at the 'current' directory next to the (resolved) script.
CP="$DIR""current/"
# Default JVM heap sizes; may be replaced by calcXmx below.
z="-Xmx4g"
z2="-Xms4g"
set=0
# Show help when invoked with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Size the JVM heap from available RAM via calcmem.sh helpers,
# unless the caller passed an explicit -Xmx (parseXmx sets set=1).
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Build and run the Java command line for jgi.RepresentativeSet.
# (Launcher name 'a_sample_mt' is template boilerplate carried over from another wrapper.)
a_sample_mt() {
local CMD="java $EA $EOOM $z -cp $CP jgi.RepresentativeSet $@"
echo $CMD >&2
eval $CMD
}
# NOTE(review): everything after "$@" on the next line ('| ARGs-OAP | ...') is
# dataset-join residue fused onto the invocation, not valid shell — confirm/strip before executing.
a_sample_mt "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/representative.sh | representative.sh
#Print the embedded help text for countbarcodes.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified October 16, 2015
Description: Counts the number of reads with each barcode.
Usage: countbarcodes.sh in=<file> counts=<file>
Input may be stdin or a fasta or fastq file, raw or gzipped.
If you pipe via stdin/stdout, please include the file type; e.g. for gzipped fasta input, set in=stdin.fa.gz
Input parameters:
in=<file> Input reads, whose names end in a colon then barcode.
counts=<file> Output of counts.
interleaved=auto (int) If true, forces fastq input to be paired and interleaved.
qin=auto ASCII offset for input quality. May be 33 (Sanger), 64 (Illumina), or auto.
unpigz=t Use pigz to decompress.
expected= Comma-delimited list of expected bar codes.
valid= Comma-delimited list of valid bar codes.
countundefined=t Count barcodes that contain non-ACGT symbols.
printheader=t Print a header.
maxrows=-1 Optionally limit the number of rows printed.
Output parameters:
out=<file> Write bar codes and counts here. 'out=stdout' will pipe to standard out.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR is the physical script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#Small fixed heap; this tool does not auto-size from free RAM.
z="-Xmx200m"
set=0
#With no arguments, or -h/--help, print the help text and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Honor an explicit -Xmx argument if the user supplied one.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
#Run the Java tool, echoing the command line to stderr before executing it.
countbarcodes() {
local CMD="java $EA $EOOM $z -cp $CP jgi.CountBarcodes $@"
echo $CMD >&2
eval $CMD
}
countbarcodes "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/countbarcodes.sh | countbarcodes.sh |
#Print the embedded help text for clumpify.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified October 30, 2019
Description: Sorts sequences to put similar reads near each other.
Can be used for increased compression or error correction.
Please read bbmap/docs/guides/ClumpifyGuide.txt for more information.
Usage: clumpify.sh in=<file> out=<file> reorder
Input may be fasta or fastq, compressed or uncompressed. Cannot accept sam.
Parameters and their defaults:
in=<file> Input file.
in2=<file> Optional input for read 2 of twin paired files.
out=<file> Output file. May not be standard out.
out2=<file> Optional output for read 2 of twin paired files.
groups=auto Use this many intermediate files (to save memory).
1 group is fastest. Auto will estimate the number
of groups needed based on the file size, so it should
not ever run out of memory.
lowcomplexity=f For compressed low-complexity libraries such as RNA-seq,
this will more conservatively estimate how much memory
is needed to automatically decide the number of groups.
rcomp=f Give read clumps the same orientation to increase
compression. Should be disabled for paired reads.
overwrite=f (ow) Set to false to force the program to abort rather
than overwrite an existing file.
qin=auto Auto-detect input quality encoding. May be set to:
33: ASCII-33 (Sanger) encoding.
64: ASCII-64 (old Illumina) encoding.
All modern sequence is encoded as ASCII-33.
qout=auto Use input quality encoding as output quality encoding.
changequality=f (cq) If true, fix broken quality scores such as Ns with
Q>0. Default is false to ensure lossless compression.
fastawrap=70 Set to a higher number like 4000 for longer lines in
fasta format, which increases compression.
Compression parameters:
ziplevel=6 (zl) Gzip compression level (1-11). Higher is slower.
Level 11 is only available if pigz is installed and is
extremely slow to compress, but faster to decompress.
Naming the output file to *.bz2 will use bzip2 instead of
gzip for ~9% additional compression, which requires
bzip2, pbzip2, or lbzip2 in the path.
blocksize=128 Size of blocks for pigz, in kb. Higher gives slightly
better compression.
shortname=f Make the names as short as possible. 'shortname=shrink'
will shorten the names where possible, but retain the
flowcell and barcode information.
reorder=f Reorder clumps for additional compression. Only valid
when groups=1, passes=1, and ecc=f. Possible modes:
f: Do not reorder clumps.
c: Reorder using consensus reads. Uses additional
time and memory.
p: Reorder using pair information. Requires paired
reads. Yields the highest compression.
a: Automatically choose between 'c' and 'p'. The
flag reorder with no argument will set 'reorder=a'.
quantize=f Bin the quality scores, like NextSeq. This greatly
increases compression, but information is lost.
Temp file parameters:
compresstemp=auto (ct) Gzip temporary files. By default temp files will be
compressed if the output file is compressed.
deletetemp=t Delete temporary files.
deleteinput=f Delete input upon successful completion.
usetmpdir=f Use tmpdir for temp files.
tmpdir= By default, this is the environment variable TMPDIR.
Hashing parameters:
k=31 Use kmers of this length (1-31). Shorter kmers may
increase compression, but 31 is recommended for error
correction.
mincount=0 Don't use pivot kmers with count less than this.
Setting mincount=2 can increase compression.
Increases time and memory usage.
seed=1 Random number generator seed for hashing.
Set to a negative number to use a random seed.
hashes=4 Use this many masks when hashing. 0 uses raw kmers.
Often hashes=0 increases compression, but it should
not be used with error-correction.
border=1 Do not use kmers within this many bases of read ends.
Deduplication parameters:
dedupe=f Remove duplicate reads. For pairs, both must match.
By default, deduplication does not occur.
If dedupe and markduplicates are both false, none of
the other duplicate-related flags will have any effect.
markduplicates=f Don't remove; just append ' duplicate' to the name.
allduplicates=f Mark or remove all copies of duplicates, instead of
keeping the highest-quality copy.
addcount=f Append the number of copies to the read name.
Mutually exclusive with markduplicates or allduplicates.
subs=2 (s) Maximum substitutions allowed between duplicates.
subrate=0.0 (dsr) If set, the number of substitutions allowed will be
max(subs, subrate*min(length1, length2)) for 2 sequences.
allowns=t No-called bases will not be considered substitutions.
scanlimit=5 (scan) Continue for this many reads after encountering a
nonduplicate. Improves detection of inexact duplicates.
containment=f Allow containments (where one sequence is shorter).
affix=f For containments, require one sequence to be an affix
(prefix or suffix) of the other.
optical=f If true, mark or remove optical duplicates only.
This means they are Illumina reads within a certain
distance on the flowcell. Normal Illumina names needed.
Also for tile-edge and well duplicates.
dupedist=40 (dist) Max distance to consider for optical duplicates.
Higher removes more duplicates but is more likely to
remove PCR rather than optical duplicates.
This is platform-specific; recommendations:
NextSeq 40 (and spany=t)
HiSeq 1T 40
HiSeq 2500 40
HiSeq 3k/4k 2500
Novaseq 12000
spany=f Allow reads to be considered optical duplicates if they
are on different tiles, but are within dupedist in the
y-axis. Should only be enabled when looking for
tile-edge duplicates (as in NextSeq).
spanx=f Like spany, but for the x-axis. Not necessary
for NextSeq.
spantiles=f Set both spanx and spany.
adjacent=f Limit tile-spanning to adjacent tiles (those with
consecutive numbers).
*** Thus, for NextSeq, the recommended deduplication flags are: ***
dedupe optical spany adjacent
Pairing/ordering parameters (for use with error-correction):
unpair=f For paired reads, clump all of them rather than just
read 1. Destroys pairing. Without this flag, for paired
reads, only read 1 will be error-corrected.
repair=f After clumping and error-correction, restore pairing.
If groups>1 this will sort by name which will destroy
clump ordering; with a single group, clumping will
be retained.
Error-correction parameters:
ecc=f Error-correct reads. Requires multiple passes for
complete correction.
ecco=f Error-correct paired reads via overlap before clumping.
passes=1 Use this many error-correction passes. 6 passes are
suggested.
consensus=f Output consensus sequence instead of clumps.
Advanced error-correction parameters:
mincc=4 (mincountcorrect) Do not correct to alleles occuring less
often than this.
minss=4 (minsizesplit) Do not split into new clumps smaller than
this.
minsfs=0.17 (minsizefractionsplit) Do not split on pivot alleles in
areas with local depth less than this fraction of clump size.
minsfc=0.20 (minsizefractioncorrect) Do not correct in areas with local
depth less than this.
minr=30.0 (minratio) Correct to the consensus if the ratio of the
consensus allele to second-most-common allele is >=minr,
for high depth. Actual ratio used is:
min(minr, minro+minorCount*minrm+quality*minrqm).
minro=1.9 (minratiooffset) Base ratio.
minrm=1.8 (minratiomult) Ratio multiplier for secondary allele count.
minrqm=0.08 (minratioqmult) Ratio multiplier for base quality.
minqr=2.8 (minqratio) Do not correct bases when cq*minqr>rqsum.
minaqr=0.70 (minaqratio) Do not correct bases when cq*minaqr>5+rqavg.
minid=0.97 (minidentity) Do not correct reads with identity to
consensus less than this.
maxqadjust=0 Adjust quality scores by at most maxqadjust per pass.
maxqi=-1 (maxqualityincorrect) Do not correct bases with quality
above this (if positive).
maxci=-1 (maxcountincorrect) Do not correct alleles with count
above this (if positive).
findcorrelations=t Look for correlated SNPs in clumps to split into alleles.
maxcorrelations=12 Maximum number of eligible SNPs per clump to consider for
correlations. Increasing this number can reduce false-
positive corrections at the possible expense of speed.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR is the physical script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#Default JVM heap flags; calcXmx below may override them from free RAM.
z="-Xmx2g"
z2="-Xms2g"
set=0
#With no arguments, or -h/--help, print the help text and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Determine the JVM heap size: an explicit -Xmx argument (detected by parseXmx,
#which sets set=1) wins; otherwise size the heap from free RAM.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
#Note that this uses slightly less (82%) of memory than normal to account for multiple pigz instances.
freeRam 2000m 82
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Run the Java tool, echoing the command line to stderr before executing it.
clumpify() {
local CMD="java $EA $EOOM $z $z2 -cp $CP clump.Clumpify $@"
echo $CMD >&2
eval $CMD
}
clumpify "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/clumpify.sh | clumpify.sh |
#Print the embedded help text for analyzesketchresults.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified December 19, 2019
Description: Analyzes sketch results from query, ref, ani format.
Usage: analyzesketchresults.sh in=<file> out=<outfile>
Parameters and their defaults:
in=<file> Required input file of Sketch results in 3column format.
in2=<file> Optional second input file of Sketch results in amino mode.
out=stdout.txt Output file for summary of per-tax-level averages.
outaccuracy=<file> Output file for accuracy results; requires query taxIDs and printcal.
outmap=<file> Output file for ANI vs AAI. Requires in2.
bbsketch Parse BBSketch output format (default).
mash Parse Mash output format. Files should be named like this:
tid_511145_Escherichia_coli_str._K-12_substr._MG1655.fa.gz
blast Parse Blast output format (TODO).
ow=f (overwrite) Overwrites files that already exist.
app=f (append) Append to files that already exist.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR is the physical script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#Default JVM heap flags, referenced as $z and $z2 in the java command below.
z="-Xmx300m"
#Bug fix: this line previously reassigned z ('z="-Xms300m"'), which clobbered
#the -Xmx flag and left $z2 unset. Every sibling wrapper uses z2 for -Xms.
z2="-Xms300m"
set=0
#With no arguments, or -h/--help, print the help text and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Honor an explicit -Xmx argument if the user supplied one.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
#Run the Java tool, echoing the command line to stderr before executing it.
function analyzesketchresults() {
local CMD="java $EA $EOOM $z $z2 -cp $CP sketch.AnalyzeSketchResults $@"
echo $CMD >&2
eval $CMD
}
analyzesketchresults "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/analyzesketchresults.sh | analyzesketchresults.sh |
#Print the embedded help text for removehuman.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified September 17, 2018
This script requires at least 16GB RAM.
It is designed for NERSC and uses hard-coded paths.
Description: Removes all reads that map to the human genome with at least 95% identity after quality trimming.
Removes approximately 98.6% of human 2x150bp reads, with zero false-positives to non-animals.
NOTE! This program uses hard-coded paths and will only run on Nersc systems unless you change the path.
Usage: removehuman.sh in=<input file> outu=<clean output file>
Input may be fasta or fastq, compressed or uncompressed.
Parameters:
threads=auto (t) Set number of threads to use; default is number of logical processors.
overwrite=t (ow) Set to false to force the program to abort rather than overwrite an existing file.
interleaved=auto (int) If true, forces fastq input to be paired and interleaved.
trim=t Trim read ends to remove bases with quality below minq.
Values: t (trim both ends), f (neither end), r (right end only), l (left end only).
untrim=t Undo the trimming after mapping.
minq=4 Trim quality threshold.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression level; lower compression is faster.
outm=<file> File to output the reads that mapped to human.
path= Set the path to an indexed human genome.
***** All BBMap parameters can be used; run bbmap.sh for more details. *****
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR is the physical script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#NOTE(review): JNI is set and then immediately cleared, so the native-library
#path is effectively disabled here — presumably deliberate; confirm before
#removing either line.
JNI="-Djava.library.path=""$DIR""jni/"
JNI=""
#Fixed heap flags; this script does not auto-size from free RAM.
z="-Xmx15000m"
z2="-Xms15000m"
set=0
#With no arguments, or -h/--help, print the help text and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Honor an explicit -Xmx argument if the user supplied one.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
#Run BBMap with preset human-removal parameters (hard-coded NERSC index path),
#forwarding all user arguments after the presets so users can override them.
function removehuman() {
local CMD="java $EA $EOOM $z $z2 $JNI -cp $CP align2.BBMap minratio=0.9 maxindel=3 bwr=0.16 bw=12 quickmatch fast minhits=2 path=/global/projectb/sandbox/gaag/bbtools/hg19 pigz unpigz zl=6 qtrim=r trimq=10 untrim idtag usemodulo printunmappedcount ztd=2 kfilter=25 maxsites=1 k=14 bloomfilter $@"
echo $CMD >&2
eval $CMD
}
removehuman "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/removehuman.sh | removehuman.sh |
#Print the embedded help text for kmerlimit.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified July 31, 2018
Description: Stops producing reads when the unique kmer limit is reached.
This is approximate. If the input has been Clumpified, the order should be
randomized first with shuffle2.sh or sortbyname.sh with the flowcell flag.
Differences between versions:
kmerlimit.sh uses 1 pass and outputs all reads until a limit is hit,
meaning the input reads should be in random order with respect to sequence.
kmerlimit2.sh uses 2 passes and randomly subsamples from the file, so
it works with reads in any order.
Usage: kmerlimit.sh in=<input file> out=<output file> limit=<number>
Standard parameters:
in=<file> Primary input, or read 1 input.
in2=<file> Read 2 input if reads are in two files.
out=<file> Primary output, or read 1 output.
out2=<file> Read 2 output if reads are in two files.
overwrite=t (ow) Set to false to force the program to abort rather than
overwrite an existing file.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression
level; lower compression is faster.
Processing parameters:
k=31 Kmer length, 1-32.
limit= The number of unique kmers to produce.
mincount=1 Ignore kmers seen fewer than this many times.
minqual=0 Ignore bases with quality below this.
minprob=0.2 Ignore kmers with correctness probability below this.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR is the physical script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#Fixed heap flags; z2 is defined but not passed to java below, matching
#several other wrappers in this package that omit -Xms.
z="-Xmx1000m"
z2="-Xms1000m"
set=0
#With no arguments, or -h/--help, print the help text and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Honor an explicit -Xmx argument if the user supplied one.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
#Run the Java tool, echoing the command line to stderr before executing it.
kmerlimit() {
local CMD="java $EA $EOOM $z -cp $CP sketch.KmerLimit $@"
echo $CMD >&2
eval $CMD
}
kmerlimit "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/kmerlimit.sh | kmerlimit.sh |
#Print the embedded help text for plotflowcell.sh.
#Fixes two help-text defects: 'tot he' -> 'to the' and 'to quality trimming'
#-> 'do quality trimming'.
usage(){
echo "
Written by Brian Bushnell
Last modified August 9, 2018
Description: Generates statistics about flowcell positions.
Usage: plotflowcell.sh in=<input> out=<output>
Input parameters:
in=<file> Primary input file.
in2=<file> Second input file for paired reads in two files.
indump=<file> Specify an already-made dump file to use instead of
analyzing the input reads.
reads=-1 Process this number of reads, then quit (-1 means all).
interleaved=auto Set true/false to override autodetection of the
input file as paired interleaved.
Output parameters:
out=<file> Output file for filtered reads.
dump=<file> Write a summary of quality information by coordinates.
Tile parameters:
xsize=500 Initial width of micro-tiles.
ysize=500 Initial height of micro-tiles.
size= Allows setting xsize and ysize to the same value.
target=800 Iteratively increase the size of micro-tiles until they
contain an average of at least this number of reads.
Other parameters:
trimq=-1 If set to a positive number, trim reads to that quality
level instead of filtering them.
qtrim=r If trimq is positive, do quality trimming on this end
of the reads. Values are r, l, and rl for right,
left, and both ends.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 GB of RAM; -Xmx200m will specify
200 MB. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR is the physical script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#Default JVM heap flags; calcXmx below may override them from free RAM.
z="-Xmx8g"
z2="-Xms8g"
set=0
#With no arguments, or -h/--help, print the help text and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Determine the JVM heap size: an explicit -Xmx argument (detected by parseXmx,
#which sets set=1) wins; otherwise size the heap at 84% of free RAM.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 3200m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Run the Java tool, echoing the command line to stderr before executing it.
plotflowcell() {
local CMD="java $EA $EOOM $z $z2 -cp $CP hiseq.PlotFlowCell $@"
echo $CMD >&2
eval $CMD
}
plotflowcell "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/plotflowcell.sh | plotflowcell.sh |
#Print the embedded help text for statswrapper.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified August 1, 2017
Description: Runs stats.sh on multiple assemblies to produce one output line per file.
Usage: statswrapper.sh in=<input file>
Parameters:
in=<file> Specify the input fasta file, or stdin. For multiple files a, b, and c: 'statswrapper.sh in=a,b,c'.
'in=' may be omitted if this is the first arg, and asterisks may be used; e.g. statswrapper.sh *.fa
gc=<file> Writes ACGTN content per scaffold to a file.
gchist=<file> Filename to output scaffold gc content histogram.
gcbins=<200> Number of bins for gc histogram.
n=<10> Number of contiguous Ns to signify a break between contigs.
k=<13> Estimate memory usage of BBMap with this kmer length.
minscaf=<0> Ignore scaffolds shorter than this.
n_=<t> This flag will prefix the terms 'contigs' and 'scaffolds' with 'n_' in formats 3-6.
addname=<t> Adds a column for input file name, for formats 3-6.
format=<1 through 6> Format of the stats information. Default is format=3.
format=1 uses variable units like MB and KB, and is designed for compatibility with existing tools.
format=2 uses only whole numbers of bases, with no commas in numbers, and is designed for machine parsing.
format=3 outputs stats in 2 rows of tab-delimited columns: a header row and a data row.
format=4 is like 3 but with scaffold data only.
format=5 is like 3 but with contig data only.
format=6 is like 3 but the header starts with a #.
gcformat=<1 or 2> Select GC output format.
gcformat=1: name start stop A C G T N GC
gcformat=2: name GC
Note that in gcformat 1, A+C+G+T=1 even when N is nonzero.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR is the physical script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#Small fixed heap; this tool does not auto-size from free RAM.
z="-Xmx200m"
set=0
#With no arguments, or -h/--help, print the help text and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Honor an explicit -Xmx argument if the user supplied one.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
#Run the Java tool with format=3 as the default (user args may override it).
stats() {
local CMD="java $EA $EOOM $z -cp $CP jgi.AssemblyStatsWrapper format=3 $@"
echo $CMD >&2
eval $CMD
}
stats "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/statswrapper.sh | statswrapper.sh |
#Print the embedded help text for splitnextera.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified March 6, 2015
Description: Splits Nextera LMP libraries into subsets based on linker orientation:
LMP, fragment, unknown, and singleton.
Please read bbmap/docs/guides/SplitNexteraGuide.txt for more information.
Usage: splitnextera.sh in=<file> out=<file> outf=<file> outu=<file> outs=<file>
For pairs in two files, use in1, in2, out1, out2, etc.
*** Note ***
For maximal speed, before running splitnextera, the linkers can be replaced with a constant first.
In other words, you can either do this (which is slightly faster):
bbduk.sh in=reads.fq out=replaced.fq ktmask=J k=19 hdist=1 mink=11 hdist2=0 literal=CTGTCTCTTATACACATCTAGATGTGTATAAGAGACAG
splitnextera.sh in=replaced.fq out=longmate.fq outf=frag.fq outu=unknown.fq outs=singleton.fq
Or this:
splitnextera.sh in=reads.fq out=longmate.fq outf=frag.fq outu=unknown.fq outs=singleton.fq mask=t
I/O parameters:
in=<file> Input reads. Set to 'stdin.fq' to read from stdin.
out=<file> Output for pairs with LMP orientation.
outf=<file> Output for pairs with fragment orientation.
outu=<file> Pairs with unknown orientation.
outs=<file> Singleton output.
ow=f (overwrite) Overwrites files that already exist.
app=f (append) Append to files that already exist.
zl=4 (ziplevel) Set compression level, 1 (low) to 9 (max).
int=f (interleaved) Determines whether INPUT file is considered interleaved.
qin=auto ASCII offset for input quality. May be 33 (Sanger), 64 (Illumina), or auto.
qout=auto ASCII offset for output quality. May be 33 (Sanger), 64 (Illumina), or auto (same as input).
Processing Parameters:
mask=f Set to true if you did not already convert junctions to some symbol, and it will be done automatically.
junction=J Look for this symbol to designate the junction bases.
innerlmp=f Generate long mate pairs from the inner pair also, when the junction is found in both reads.
rename=t Rename read 2 of output when using single-ended input.
minlength=40 (ml) Do not output reads shorter than this.
merge=f Attempt to merge overlapping reads before looking for junctions.
testmerge=0.0 If nonzero, only merge reads if at least the fraction of input reads are mergable.
Sampling parameters:
reads=-1 Set to a positive number to only process this many INPUT reads (or pairs), then quit.
samplerate=1 Randomly output only this fraction of reads; 1 means sampling is disabled.
sampleseed=-1 Set to a positive number to use that prng seed for sampling (allowing deterministic sampling).
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR is the physical script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#Small fixed heap; this tool does not auto-size from free RAM.
z="-Xmx200m"
set=0
#With no arguments, or -h/--help, print the help text and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Honor an explicit -Xmx argument if the user supplied one.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
#Run the Java tool, echoing the command line to stderr before executing it.
function splitnextera() {
local CMD="java $EA $EOOM $z -cp $CP jgi.SplitNexteraLMP $@"
echo $CMD >&2
eval $CMD
}
splitnextera "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/splitnextera.sh | splitnextera.sh |
#Print the embedded help text for makecontaminatedgenomes.sh.
#Fixes a help-text copy-paste defect: the 'indelrate' flag previously repeated
#the 'subrate' description ('Rate to add substitutions...').
usage(){
echo "
Written by Brian Bushnell
Last modified August 29, 2017
Description: Generates synthetic contaminated partial genomes from clean genomes.
Output is formatted as (prefix)_bases1_fname1_bases2_fname2_counter_(suffix).
Usage: makecontaminatedgenomes.sh in=<file> out=<pattern>
I/O parameters:
in=<file> A file containing one input file path per line.
out=<pattern> A file name containing a # symbol (or other regex).
The regex will be replaced by source filenames.
Processing Parameters:
count=1 Number of output files to make.
seed=-1 RNG seed; negative for a random seed.
exp1=1 Exponent for genome 1 size fraction.
exp2=1 Exponent for genome 2 size fraction.
subrate=0 Rate to add substitutions to new genomes (0-1).
indelrate=0 Rate to add indels to new genomes (0-1).
regex=# Use this substitution regex for replacement.
delimiter=_ Use this delimiter in the new file names.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR is the physical script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#Default JVM heap flags; calcXmx below may override z from free RAM.
z="-Xmx4g"
z2="-Xms4g"
set=0
#With no arguments, or -h/--help, print the help text and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Determine the JVM heap size: an explicit -Xmx argument (detected by parseXmx,
#which sets set=1) wins; otherwise size the heap at 42% of free RAM.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 4000m 42
z="-Xmx${RAM}m"
}
calcXmx "$@"
#Run the Java tool, echoing the command line to stderr before executing it.
makecontaminatedgenomes() {
local CMD="java $EA $EOOM $z -cp $CP jgi.MakeContaminatedGenomes $@"
echo $CMD >&2
eval $CMD
}
makecontaminatedgenomes "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/makecontaminatedgenomes.sh | makecontaminatedgenomes.sh |
#Print the embedded help text for a_sample_mt.sh (the wrapper template).
usage(){
echo "
Written by Brian Bushnell
Last modified July 31, 2019
Description: Does nothing. Should be fast.
This is a template for making wrappers for new tools.
Usage: a_sample_mt.sh in=<input file> out=<output file>
Input may be fasta or fastq, compressed or uncompressed.
Standard parameters:
in=<file> Primary input, or read 1 input.
in2=<file> Read 2 input if reads are in two files.
out=<file> Primary output, or read 1 output.
out2=<file> Read 2 output if reads are in two files.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
showspeed=t (ss) Set to 'f' to suppress display of processing speed.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression
level; lower compression is faster.
Processing parameters:
None yet!
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR is the physical script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#Default JVM heap flags; calcXmx below may override them from free RAM.
z="-Xmx4g"
z2="-Xms4g"
set=0
#With no arguments, or -h/--help, print the help text and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Determine the JVM heap size: an explicit -Xmx argument (detected by parseXmx,
#which sets set=1) wins; otherwise size the heap at 84% of free RAM.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Run the Java tool, echoing the command line to stderr before executing it.
a_sample_mt() {
local CMD="java $EA $EOOM $z -cp $CP templates.A_SampleMT $@"
echo $CMD >&2
eval $CMD
}
a_sample_mt "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/a_sample_mt.sh | a_sample_mt.sh |
usage(){
echo "
Written by Brian Bushnell
Last modified January 21, 2020
Description: Generates synthetic PacBio reads to mimic the chimeric
inverted repeats from 'triangle reads', aka 'ice cream cones' -
reads missing one adapter.
Usage: icecreammaker.sh in=<file> out=<file> reads=100k minlen=500 maxlen=5000
Standard parameters:
in=<file> A reference genome fasta (optional).
out=<file> Synthetic read output.
idhist=<file> Identity histogram output.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression
level; lower compression is faster.
Length parameters:
NOTE: 'length' parameters dictate subread length (for normal reads).
'Movie' parameters dictate sequence length (with concatenated subreads).
minlen=500 (minlength) Minimum length of genomic sequence molecules.
maxlen=5000 (maxlength) Maximum length of genomic sequence molecules.
len= (length) Set minlen and maxlen to the same number.
minmovie=500 (minmov) Minimum length of movies.
maxmovie=40k (maxmov) Maximum length of movies.
movie= (mov) Set minmov and maxmov to the same number.
Ice cream parameters:
missingrate=0 (missing) Fraction of reads missing an adapter.
hiddenrate=0 (hidden) Fraction of adapters not detected.
bothends=f Allow missing or hidden adapters on both ends.
Other parameters:
zmws (reads) Number of ZMWs to generate. There are actually
multiple subreads per zmw.
ccs=f Make CCS reads (one read per ZMW, full pass only).
You still need to specify the error rate.
gc=0.6 If a random genome is generated, use this GC fraction.
genomesize=10m If a random genome is generated, make it this big.
irrate=0.0 Add inverted repeats until this fraction of the genome
is inverted repeats.
irminlen=500 Minimum length of inverted repeats.
irmaxlen=5000 Maximum length of inverted repeats
irlen= Set minirlen and maxirlen to the same number.
miner=0.05 (minerrorrate) Minimum error rate.
maxer=0.28 (maxerrorrate) Maximum error rate.
er= (errorrate) Set minerrorrate and maxerrorrate.
NOTE: You can alternatively set minid, maxid, or id.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
# Follow symlinks until DIR is the real script location.
while [ -h "$DIR" ]; do
	cd "$(dirname "$DIR")"
	DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null

#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"

# Default JVM heap flags; calcXmx below may override them.
z="-Xmx2g"
z2="-Xms2g"
set=0

# Show usage and quit when called with no arguments or -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
	usage
	exit
fi

# Resolve JVM memory flags: honor a user-supplied -Xmx if present,
# otherwise autodetect from free RAM (helpers defined in calcmem.sh).
calcXmx () {
	source "$DIR""/calcmem.sh"
	setEnvironment
	parseXmx "$@"
	if [[ $set == 1 ]]; then
		return
	fi
	freeRam 2000m 42
	z="-Xmx${RAM}m"
	z2="-Xms${RAM}m"
}
calcXmx "$@"

# Build and run the Java command; the command line is echoed to stderr.
icecreammaker() {
	local CMD="java $EA $EOOM $z -cp $CP icecream.IceCreamMaker $@"
	echo $CMD >&2
	eval $CMD
}

# Fixed: stray text ('| ARGs-OAP | ...') previously appended to this line
# piped output into a nonexistent command; invoke the wrapper directly.
icecreammaker "$@"
# Print this wrapper's help text (shown for -h, --help, or no arguments).
usage(){
echo "
Written by Brian Bushnell
Last modified August 1, 2016
Description: Generates a kmer uniqueness histogram, binned by file position.
There are 3 columns for single reads, 6 columns for paired:
count number of reads or pairs processed
r1_first percent unique 1st kmer of read 1
r1_rand percent unique random kmer of read 1
r2_first percent unique 1st kmer of read 2
r2_rand percent unique random kmer of read 2
pair percent unique concatenated kmer from read 1 and 2
Please read bbmap/docs/guides/CalcUniquenessGuide.txt for more information.
Usage: bbcountunique.sh in=<input> out=<output>
Input parameters:
in2=null Second input file for paired reads
interleaved=auto Set true/false to override autodetection of the input file as paired interleaved.
samplerate=1 Set to below 1 to sample a fraction of input reads.
reads=-1 Only process this number of reads, then quit (-1 means all)
Output parameters:
out=<file> File for output stats
Processing parameters:
k=25 Kmer length (range 1-31).
interval=25000 Print one line to the histogram per this many reads.
cumulative=f Show cumulative numbers rather than per-interval numbers.
percent=t Show percentages of unique reads.
count=f Show raw counts of unique reads.
printlastbin=f (plb) Print a line for the final undersized bin.
minprob=0 Ignore kmers with a probability of correctness below this (based on q-scores).
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
# Follow symlinks until DIR is the real script location.
while [ -h "$DIR" ]; do
	cd "$(dirname "$DIR")"
	DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null

#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"

# Default JVM heap flags; calcXmx below may override them.
z="-Xmx1g"
z2="-Xms1g"
set=0

# Show usage and quit when called with no arguments or -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
	usage
	exit
fi

# Resolve JVM memory flags: honor a user-supplied -Xmx if present,
# otherwise autodetect from free RAM (helpers defined in calcmem.sh).
calcXmx () {
	source "$DIR""/calcmem.sh"
	setEnvironment
	parseXmx "$@"
	if [[ $set == 1 ]]; then
		return
	fi
	freeRam 3200m 84
	z="-Xmx${RAM}m"
	z2="-Xms${RAM}m"
}
calcXmx "$@"

# Build and run the Java command; the command line is echoed to stderr.
bbcountunique() {
	local CMD="java $EA $EOOM $z $z2 -cp $CP jgi.CalcUniqueness $@"
	echo $CMD >&2
	eval $CMD
}

# Fixed: stray text ('| ARGs-OAP | ...') previously appended to this line
# piped output into a nonexistent command; invoke the wrapper directly.
bbcountunique "$@"
# Print this wrapper's help text (shown for -h, --help, or no arguments).
usage(){
echo "
Written by Brian Bushnell
Last modified February 5, 2020
Description: Aligns all to all to produce an identity matrix.
Usage: alltoall.sh in=<input file> out=<output file>
Input may be fasta or fastq, compressed or uncompressed.
Standard parameters:
in=<file> Input sequences.
out=<file> Output data.
t= Set the number of threads; default is logical processors.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
showspeed=t (ss) Set to 'f' to suppress display of processing speed.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression
level; lower compression is faster.
reads=-1 If positive, quit after this many sequences.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
# Follow symlinks until DIR is the real script location.
while [ -h "$DIR" ]; do
	cd "$(dirname "$DIR")"
	DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null

#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"

# Default JVM heap flags; calcXmx below may override them.
z="-Xmx4g"
z2="-Xms4g"
set=0

# Show usage and quit when called with no arguments or -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
	usage
	exit
fi

# Resolve JVM memory flags: honor a user-supplied -Xmx if present,
# otherwise autodetect from free RAM (helpers defined in calcmem.sh).
calcXmx () {
	source "$DIR""/calcmem.sh"
	setEnvironment
	parseXmx "$@"
	if [[ $set == 1 ]]; then
		return
	fi
	freeRam 4000m 84
	z="-Xmx${RAM}m"
	z2="-Xms${RAM}m"
}
calcXmx "$@"

# Build and run the Java command; the command line is echoed to stderr.
alltoall() {
	local CMD="java $EA $EOOM $z -cp $CP aligner.AllToAll $@"
	echo $CMD >&2
	eval $CMD
}

# Fixed: stray text ('| ARGs-OAP | ...') previously appended to this line
# piped output into a nonexistent command; invoke the wrapper directly.
alltoall "$@"
usage(){
echo "
BBMap
Written by Brian Bushnell, from Dec. 2010 - present
Last modified February 13, 2020
Description: Fast and accurate splice-aware read aligner.
Please read bbmap/docs/guides/BBMapGuide.txt for more information.
To index: bbmap.sh ref=<reference fasta>
To map: bbmap.sh in=<reads> out=<output sam>
To map without writing an index:
bbmap.sh ref=<reference fasta> in=<reads> out=<output sam> nodisk
in=stdin will accept reads from standard in, and out=stdout will write to
standard out, but file extensions are still needed to specify the format of the
input and output files e.g. in=stdin.fa.gz will read gzipped fasta from
standard in; out=stdout.sam.gz will write gzipped sam.
Indexing Parameters (required when building the index):
nodisk=f Set to true to build index in memory and write nothing
to disk except output.
ref=<file> Specify the reference sequence. Only do this ONCE,
when building the index (unless using 'nodisk').
build=1 If multiple references are indexed in the same directory,
each needs a unique numeric ID (unless using 'nodisk').
k=13 Kmer length, range 8-15. Longer is faster but uses
more memory. Shorter is more sensitive.
If indexing and mapping are done in two steps, K should
be specified each time.
path=<.> Specify the location to write the index, if you don't
want it in the current working directory.
usemodulo=f Throw away ~80% of kmers based on remainder modulo a
number (reduces RAM by 50% and sensitivity slightly).
Should be enabled both when building the index AND
when mapping.
rebuild=f Force a rebuild of the index (ref= should be set).
Input Parameters:
build=1 Designate index to use. Corresponds to the number
specified when building the index.
in=<file> Primary reads input; required parameter.
in2=<file> For paired reads in two files.
interleaved=auto True forces paired/interleaved input; false forces
single-ended mapping. If not specified, interleaved
status will be autodetected from read names.
fastareadlen=500 Break up FASTA reads longer than this. Max is 500 for
BBMap and 6000 for BBMapPacBio. Only works for FASTA
input (use 'maxlen' for FASTQ input). The default for
bbmap.sh is 500, and for mapPacBio.sh is 6000.
unpigz=f Spawn a pigz (parallel gzip) process for faster
decompression than using Java.
Requires pigz to be installed.
touppercase=t (tuc) Convert lowercase letters in reads to upper case
(otherwise they will not match the reference).
Sampling Parameters:
reads=-1 Set to a positive number N to only process the first N
reads (or pairs), then quit. -1 means use all reads.
samplerate=1 Set to a number from 0 to 1 to randomly select that
fraction of reads for mapping. 1 uses all reads.
skipreads=0 Set to a number N to skip the first N reads (or pairs),
then map the rest.
Mapping Parameters:
fast=f This flag is a macro which sets other parameters to run
faster, at reduced sensitivity. Bad for RNA-seq.
slow=f This flag is a macro which sets other parameters to run
slower, at greater sensitivity. 'vslow' is even slower.
maxindel=16000 Don't look for indels longer than this. Lower is faster.
Set to >=100k for RNAseq with long introns like mammals.
strictmaxindel=f When enabled, do not allow indels longer than 'maxindel'.
By default these are not sought, but may be found anyway.
tipsearch=100 Look this far for read-end deletions with anchors
shorter than K, using brute force.
minid=0.76 Approximate minimum alignment identity to look for.
Higher is faster and less sensitive.
minhits=1 Minimum number of seed hits required for candidate sites.
Higher is faster.
local=f Set to true to use local, rather than global, alignments.
This will soft-clip ugly ends of poor alignments.
perfectmode=f Allow only perfect mappings when set to true (very fast).
semiperfectmode=f Allow only perfect and semiperfect (perfect except for
N's in the reference) mappings.
threads=auto (t) Set to number of threads desired. By default, uses
all cores available.
ambiguous=best (ambig) Set behavior on ambiguously-mapped reads (with
multiple top-scoring mapping locations).
best (use the first best site)
toss (consider unmapped)
random (select one top-scoring site randomly)
all (retain all top-scoring sites)
samestrandpairs=f (ssp) Specify whether paired reads should map to the
same strand or opposite strands.
requirecorrectstrand=t (rcs) Forbid pairing of reads without correct strand
orientation. Set to false for long-mate-pair libraries.
killbadpairs=f (kbp) If a read pair is mapped with an inappropriate
insert size or orientation, the read with the lower
mapping quality is marked unmapped.
pairedonly=f (po) Treat unpaired reads as unmapped. Thus they will
be sent to 'outu' but not 'outm'.
rcomp=f Reverse complement both reads prior to mapping (for LMP
outward-facing libraries).
rcompmate=f Reverse complement read2 prior to mapping.
pairlen=32000 Set max allowed distance between paired reads.
(insert size)=(pairlen)+(read1 length)+(read2 length)
rescuedist=1200 Don't try to rescue paired reads if avg. insert size
greater than this. Lower is faster.
rescuemismatches=32 Maximum mismatches allowed in a rescued read. Lower
is faster.
averagepairdist=100 (apd) Initial average distance between paired reads.
Varies dynamically; does not need to be specified.
deterministic=f Run in deterministic mode. In this case it is good
to set averagepairdist. BBMap is deterministic
without this flag if using single-ended reads,
or run singlethreaded.
bandwidthratio=0 (bwr) If above zero, restrict alignment band to this
fraction of read length. Faster but less accurate.
bandwidth=0 (bw) Set the bandwidth directly.
usejni=f (jni) Do alignments faster, in C code. Requires
compiling the C code; details are in /jni/README.txt.
maxsites2=800 Don't analyze (or print) more than this many alignments
per read.
ignorefrequentkmers=t (ifk) Discard low-information kmers that occur often.
excludefraction=0.03 (ef) Fraction of kmers to ignore. For example, 0.03
will ignore the most common 3% of kmers.
greedy=t Use a greedy algorithm to discard the least-useful
kmers on a per-read basis.
kfilter=0 If positive, potential mapping sites must have at
least this many consecutive exact matches.
Quality and Trimming Parameters:
qin=auto Set to 33 or 64 to specify input quality value ASCII
offset. 33 is Sanger, 64 is old Solexa.
qout=auto Set to 33 or 64 to specify output quality value ASCII
offset (only if output format is fastq).
qtrim=f Quality-trim ends before mapping. Options are:
'f' (false), 'l' (left), 'r' (right), and 'lr' (both).
untrim=f Undo trimming after mapping. Untrimmed bases will be
soft-clipped in cigar strings.
trimq=6 Trim regions with average quality below this
(phred algorithm).
mintrimlength=60 (mintl) Don't trim reads to be shorter than this.
fakefastaquality=-1 (ffq) Set to a positive number 1-50 to generate fake
quality strings for fasta input reads.
ignorebadquality=f (ibq) Keep going, rather than crashing, if a read has
out-of-range quality values.
usequality=t Use quality scores when determining which read kmers
to use as seeds.
minaveragequality=0 (maq) Do not map reads with average quality below this.
maqb=0 If positive, calculate maq from this many initial bases.
Output Parameters:
out=<file> Write all reads to this file.
outu=<file> Write only unmapped reads to this file. Does not
include unmapped paired reads with a mapped mate.
outm=<file> Write only mapped reads to this file. Includes
unmapped paired reads with a mapped mate.
mappedonly=f If true, treats 'out' like 'outm'.
bamscript=<file> (bs) Write a shell script to <file> that will turn
the sam output into a sorted, indexed bam file.
ordered=f Set to true to output reads in same order as input.
Slower and uses more memory.
overwrite=f (ow) Allow process to overwrite existing files.
secondary=f Print secondary alignments.
sssr=0.95 (secondarysitescoreratio) Print only secondary alignments
with score of at least this fraction of primary.
ssao=f (secondarysiteasambiguousonly) Only print secondary
alignments for ambiguously-mapped reads.
maxsites=5 Maximum number of total alignments to print per read.
Only relevant when secondary=t.
quickmatch=f Generate cigar strings more quickly.
trimreaddescriptions=f (trd) Truncate read and ref names at the first whitespace,
assuming that the remainder is a comment or description.
ziplevel=2 (zl) Compression level for zip or gzip output.
pigz=f Spawn a pigz (parallel gzip) process for faster
compression than Java. Requires pigz to be installed.
machineout=f Set to true to output statistics in machine-friendly
'key=value' format.
printunmappedcount=f Print the total number of unmapped reads and bases.
If input is paired, the number will be of pairs
for which both reads are unmapped.
showprogress=0 If positive, print a '.' every X reads.
showprogress2=0 If positive, print the number of seconds since the
last progress update (instead of a '.').
renamebyinsert=f Renames reads based on their mapped insert size.
Bloom-Filtering Parameters (bloomfilter.sh is the standalone version).
bloom=f Use a Bloom filter to ignore reads not sharing kmers
with the reference. This uses more memory, but speeds
mapping when most reads don't match the reference.
bloomhashes=2 Number of hash functions.
bloomminhits=3 Number of consecutive hits to be considered matched.
bloomk=31 Bloom filter kmer length.
bloomserial=t Use the serialized Bloom filter for greater loading
speed, if available. If not, generate and write one.
Post-Filtering Parameters:
idfilter=0 Independent of minid; sets exact minimum identity
allowed for alignments to be printed. Range 0 to 1.
subfilter=-1 Ban alignments with more than this many substitutions.
insfilter=-1 Ban alignments with more than this many insertions.
delfilter=-1 Ban alignments with more than this many deletions.
indelfilter=-1 Ban alignments with more than this many indels.
editfilter=-1 Ban alignments with more than this many edits.
inslenfilter=-1 Ban alignments with an insertion longer than this.
dellenfilter=-1 Ban alignments with a deletion longer than this.
nfilter=-1 Ban alignments with more than this many ns. This
includes nocall, noref, and off scaffold ends.
Sam flags and settings:
noheader=f Disable generation of header lines.
sam=1.4 Set to 1.4 to write Sam version 1.4 cigar strings,
with = and X, or 1.3 to use M.
saa=t (secondaryalignmentasterisks) Use asterisks instead of
bases for sam secondary alignments.
cigar=t Set to 'f' to skip generation of cigar strings (faster).
keepnames=f Keep original names of paired reads, rather than
ensuring both reads have the same name.
intronlen=999999999 Set to a lower number like 10 to change 'D' to 'N' in
cigar strings for deletions of at least that length.
rgid= Set readgroup ID. All other readgroup fields
can be set similarly, with the flag rgXX=
If you set a readgroup flag to the word 'filename',
e.g. rgid=filename, the input file name will be used.
mdtag=f Write MD tags.
nhtag=f Write NH tags.
xmtag=f Write XM tags (may only work correctly with ambig=all).
amtag=f Write AM tags.
nmtag=f Write NM tags.
xstag=f Set to 'xs=fs', 'xs=ss', or 'xs=us' to write XS tags
for RNAseq using firststrand, secondstrand, or
unstranded libraries. Needed by Cufflinks.
JGI mainly uses 'firststrand'.
stoptag=f Write a tag indicating read stop location, prefixed by YS:i:
lengthtag=f Write a tag indicating (query,ref) alignment lengths,
prefixed by YL:Z:
idtag=f Write a tag indicating percent identity, prefixed by YI:f:
inserttag=f Write a tag indicating insert size, prefixed by X8:Z:
scoretag=f Write a tag indicating BBMap's raw score, prefixed by YR:i:
timetag=f Write a tag indicating this read's mapping time, prefixed by X0:i:
boundstag=f Write a tag indicating whether either read in the pair
goes off the end of the reference, prefixed by XB:Z:
notags=f Turn off all optional tags.
Histogram and statistics output parameters:
scafstats=<file> Statistics on how many reads mapped to which scaffold.
refstats=<file> Statistics on how many reads mapped to which reference
file; only for BBSplit.
sortscafs=t Sort scaffolds or references by read count.
bhist=<file> Base composition histogram by position.
qhist=<file> Quality histogram by position.
aqhist=<file> Histogram of average read quality.
bqhist=<file> Quality histogram designed for box plots.
lhist=<file> Read length histogram.
ihist=<file> Write histogram of insert sizes (for paired reads).
ehist=<file> Errors-per-read histogram.
qahist=<file> Quality accuracy histogram of error rates versus
quality score.
indelhist=<file> Indel length histogram.
mhist=<file> Histogram of match, sub, del, and ins rates by
read location.
gchist=<file> Read GC content histogram.
gcbins=100 Number gchist bins. Set to 'auto' to use read length.
gcpairs=t Use average GC of paired reads.
idhist=<file> Histogram of read count versus percent identity.
idbins=100 Number idhist bins. Set to 'auto' to use read length.
statsfile=stderr Mapping statistics are printed here.
Coverage output parameters (these may reduce speed and use more RAM):
covstats=<file> Per-scaffold coverage info.
rpkm=<file> Per-scaffold RPKM/FPKM counts.
covhist=<file> Histogram of # occurrences of each depth level.
basecov=<file> Coverage per base location.
bincov=<file> Print binned coverage per location (one line per X bases).
covbinsize=1000 Set the binsize for binned coverage output.
nzo=t Only print scaffolds with nonzero coverage.
twocolumn=f Change to true to print only ID and Avg_fold instead of
all 6 columns to the 'out=' file.
32bit=f Set to true if you need per-base coverage over 64k.
strandedcov=f Track coverage for plus and minus strand independently.
startcov=f Only track start positions of reads.
secondarycov=t Include coverage of secondary alignments.
physcov=f Calculate physical coverage for paired reads.
This includes the unsequenced bases.
delcoverage=t (delcov) Count bases covered by deletions as covered.
True is faster than false.
covk=0 If positive, calculate kmer coverage statistics.
Java Parameters:
-Xmx This will set Java's memory usage,
overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx800m
will specify 800 megs. The max is typically 85% of
physical memory. The human genome requires around 24g,
or 12g with the 'usemodulo' flag. The index uses
roughly 6 bytes per reference base.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter
any problems, or post at: http://seqanswers.com/forums/showthread.php?t=41057
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
# Follow symlinks until DIR is the real script location.
while [ -h "$DIR" ]; do
	cd "$(dirname "$DIR")"
	DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null

#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"

# JNI acceleration is disabled: the second assignment deliberately clears
# the flag. Remove it to pass -Djava.library.path for the compiled C code.
JNI="-Djava.library.path=""$DIR""jni/"
JNI=""

# Default JVM heap flags; calcXmx below may override them.
z="-Xmx1g"
z2="-Xms1g"
set=0

# Show usage and quit when called with no arguments or -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
	usage
	exit
fi

# Resolve JVM memory flags: honor a user-supplied -Xmx if present,
# otherwise autodetect from free RAM (helpers defined in calcmem.sh).
calcXmx () {
	source "$DIR""/calcmem.sh"
	setEnvironment
	parseXmx "$@"
	if [[ $set == 1 ]]; then
		return
	fi
	freeRam 3200m 84
	z="-Xmx${RAM}m"
	z2="-Xms${RAM}m"
}
calcXmx "$@"

# Build and run the Java command; the command line is echoed to stderr.
bbmap() {
	local CMD="java $EA $EOOM $z $z2 $JNI -cp $CP align2.BBMap build=1 overwrite=true fastareadlen=500 $@"
	echo $CMD >&2
	eval $CMD
}

# Fixed: stray text ('| ARGs-OAP | ...') previously appended to this line
# piped output into a nonexistent command; invoke the wrapper directly.
bbmap "$@"
usage(){
echo "
Written by Brian Bushnell
Last modified October 15, 2019
Description: Cuts out features defined by a gff file, and writes them
to a new fasta. Features are output in their sense strand.
Usage: cutgff.sh in=<fna file> gff=<gff file> out=<fna file>
in= is optional, and gff filenames will be automatically assumed based on
the fasta name if not specified. This allows running on multiple files
like this:
cutgff.sh types=rRNA out=16S.fa minlen=1440 maxlen=1620 attributes=16S bacteria/*.fna.gz
File Parameters:
in=<file> Input FNA (fasta) file.
gff=<file> Input GFF file (optional).
out=<file> Output FNA file.
Other Parameters:
types=CDS Types of features to cut.
invert=false Invert selection: rather outputting the features,
mask them with Ns in the original sequences.
attributes= A comma-delimited list of strings. If present, one of
these strings must be in the gff line attributes.
bannedattributes= A comma-delimited list of banned strings.
banpartial=t Ignore lines with 'partial=true' in attributes.
minlen=1 Ignore lines shorter than this.
maxlen=2147483647 Ignore lines longer than this.
renamebytaxid=f Rename sequences with their taxID. Input sequences
must be named appropriately, e.g. in NCBI format.
taxmode=accession Valid modes are:
accession: Sequence names must start with an accession.
gi: Sequence names must start with gi|number
taxid: Sequence names must start with tid|number
header: Best effort for various header formats.
requirepresent=t Crash if a taxID cannot be found for a sequence.
oneperfile=f Only output one sequence per file.
align=f Align ribosomal sequences to consensus (if available);
discard those with low identity, and flip those
annotated on the wrong strand.
maxns=-1 If non-negative, ignore features with more than this many
undefined bases (Ns or IUPAC symbols).
maxnfraction=-1.0 If non-negative, ignore features with more than this
fraction of undefined bases (Ns or IUPAC symbols).
Should be 0.0 to 1.0.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
# Follow symlinks until DIR is the real script location.
while [ -h "$DIR" ]; do
	cd "$(dirname "$DIR")"
	DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null

#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"

# Small fixed heap; may be overridden by a user-supplied -Xmx.
z="-Xmx200m"
set=0

# Show usage and quit when called with no arguments or -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
	usage
	exit
fi

# Honor a user-supplied -Xmx; unlike most wrappers, no RAM autodetection.
calcXmx () {
	source "$DIR""/calcmem.sh"
	setEnvironment
	parseXmx "$@"
}
calcXmx "$@"

# Build and run the Java command (command echo intentionally disabled).
gff() {
	local CMD="java $EA $EOOM $z -cp $CP gff.CutGff $@"
	# echo $CMD >&2
	eval $CMD
}

# Fixed: stray text ('| ARGs-OAP | ...') previously appended to this line
# piped output into a nonexistent command; invoke the wrapper directly.
gff "$@"
# Print this wrapper's help text (shown for -h, --help, or no arguments).
usage(){
echo "
Written by Brian Bushnell
Last modified April 4, 2020
Description: Deduplicates mapped reads based on pair mapping coordinates.
Usage: dedupebymapping.sh in=<file> out=<file>
Parameters:
in=<file> The 'in=' flag is needed if the input file is not the
first parameter. 'in=stdin' will pipe from standard in.
out=<file> The 'out=' flag is needed if the output file is not the
second parameter. 'out=stdout' will pipe to standard out.
overwrite=t (ow) Set to false to force the program to abort rather
than overwrite an existing file.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change
compression level; lower compression is faster.
keepunmapped=t (ku) Keep unmapped reads. This refers to unmapped
single-ended reads or pairs with both unmapped.
keepsingletons=t (ks) Keep all pairs in which only one read mapped. If
false, duplicate singletons will be discarded.
ignorepairorder=f (ipo) If true, consider reverse-complementary pairs
as duplicates.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
# (A duplicated copy of the comment above was removed.)
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
# Follow symlinks until DIR is the real script location.
while [ -h "$DIR" ]; do
	cd "$(dirname "$DIR")"
	DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null

#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"

# Default JVM heap flags; calcXmx below may override them.
z="-Xmx3g"
z2="-Xms3g"
set=0

# Show usage and quit when called with no arguments or -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
	usage
	exit
fi

# Resolve JVM memory flags: honor a user-supplied -Xmx if present,
# otherwise autodetect from free RAM (helpers defined in calcmem.sh).
calcXmx () {
	source "$DIR""/calcmem.sh"
	setEnvironment
	parseXmx "$@"
	if [[ $set == 1 ]]; then
		return
	fi
	freeRam 3000m 84
	z="-Xmx${RAM}m"
	z2="-Xms${RAM}m"
}
calcXmx "$@"

# Build and run the Java command; the command line is echoed to stderr.
dedupebymapping() {
	local CMD="java $EA $EOOM $z $z2 -cp $CP jgi.DedupeByMapping $@"
	echo $CMD >&2
	eval $CMD
}

# Fixed: stray text ('| ARGs-OAP | ...') previously appended to this line
# piped output into a nonexistent command; invoke the wrapper directly.
dedupebymapping "$@"
# Print this wrapper's help text (shown for -h, --help, or no arguments).
usage(){
echo "
Written by Brian Bushnell
Last modified October 22, 2015
Description: Generates synthetic reads following an MDA-amplified single cell's coverage distribution.
Usage: synthmda.sh in=<reference> out=<reads out file>
Input may be fasta or fastq, compressed or uncompressed.
Parameters:
reads=12000000 Generate this many reads.
paired=t Generate paired reads.
length=150 Reads should be this long.
minlen=4000 Min length of MDA contig.
maxlen=150000 Max length of MDA contig.
cycles=9 Number of MDA cycles; higher is more spiky.
initialratio=1.3 Fraction of genome initially replicated;
lower is more spiky.
ratio=1.7 Fraction of genome replicated per cycle.
refout=null Write MDA'd genome to this file.
perfect=0 This fraction of reads will be error-free.
amp=200 'amp' flag sent to RandomReads (higher is more spiky).
build=7 Index MDA'd genome in this location.
prefix=null Generated reads will start with this prefix.
overwrite=t (ow) Set to false to force the program to abort rather
than overwrite an existing file.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
#Follow symlinks until the real script file is reached.
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
#DIR is now the absolute directory of the real script, with a trailing slash.
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
#Classpath points at the compiled classes under current/.
CP="$DIR""current/"
#Default JVM heap flags; calcXmx below may override them.
z="-Xmx4g"
z2="-Xms4g"
#set is presumably flipped to 1 by parseXmx (in calcmem.sh) when the user passes -Xmx — verify in calcmem.sh.
set=0
#Show help and stop when run with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Determines the JVM heap flags: an explicit user-supplied -Xmx (set=1) takes
#precedence; otherwise z/z2 are sized from free RAM via helpers in calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
#Auto-size only when the user did not specify the heap explicitly.
if [[ $set != 1 ]]; then
freeRam 4000m 80
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
fi
}
#Compute JVM heap settings from the arguments and available memory.
calcXmx "$@"
#Builds and runs the SynthMDA java command; the command line is echoed to stderr
#so runs can be reproduced. Note: only $z (-Xmx) is passed, not $z2 (-Xms),
#matching the original behavior of this launcher.
synthmda() {
local CMD="java $EA $EOOM $z -cp $CP jgi.SynthMDA $@"
echo $CMD >&2
eval $CMD
}
#Fixed: removed archive-path extraction artifact fused onto this line, which
#formed a broken pipeline and would crash the script.
synthmda "$@"
#Prints the help/usage text for rqcfilter2.sh to stdout.
#The body is a single quoted string, so it must not be altered.
usage(){
echo "
Written by Brian Bushnell
Last modified June 26, 2019
Description: RQCFilter2 is a revised version of RQCFilter that uses a common path for all dependencies.
The dependencies are available at http://portal.nersc.gov/dna/microbial/assembly/bushnell/RQCFilterData.tar
Performs quality-trimming, artifact removal, linker-trimming, adapter trimming, and spike-in removal using BBDuk.
Performs human/cat/dog/mouse/microbe removal using BBMap.
It requires 40 GB RAM for mousecatdoghuman, but only 1GB or so without them.
Usage: rqcfilter2.sh in=<input file> path=<output directory> rqcfilterdata=<path to RQCFilterData directory>
Primary I/O parameters:
in=<file> Input reads.
in2=<file> Use this if 2nd read of pairs are in a different file.
path=null Set to the directory to use for all output files.
Reference file paths:
rqcfilterdata= Path to unzipped RQCFilterData directory. Default is /global/projectb/sandbox/gaag/bbtools/RQCFilterData
ref=<file,file> Comma-delimited list of additional reference files for filtering via BBDuk.
Output parameters:
scafstats=scaffoldStats.txt Scaffold stats file name (how many reads matched which reference scaffold) .
kmerstats=kmerStats.txt Kmer stats file name (duk-like output).
log=status.log Progress log file name.
filelist=file-list.txt List of output files.
stats=filterStats.txt Overall stats file name.
stats2=filterStats2.txt Better overall stats file name.
ihist=ihist_merge.txt Insert size histogram name. Set to null to skip merging.
outribo=ribo.fq.gz Output for ribosomal reads, if removeribo=t.
reproduceName=reproduce.sh Name of shellscript to reproduce these results.
usetmpdir=t Write temp files to TMPDIR.
tmpdir= Override TMPDIR.
Adapter trimming parameters:
trimhdist=1 Hamming distance used for trimming.
trimhdist2= Hamming distance used for trimming with short kmers. If unset, trimhdist will be used.
trimk=23 Kmer length for trimming stage.
mink=11 Minimum kmer length for short kmers when trimming.
trimfragadapter=t Trim all known Illumina adapter sequences, including TruSeq and Nextera.
trimrnaadapter=f Trim Illumina TruSeq-RNA adapters.
bisulfite=f Currently, this trims the last 1bp from all reads after the adapter-trimming phase.
findadapters=t For paired-end files, attempt to discover the adapter sequence with BBMerge and use that rather than a set of known adapters.
swift=f Trim Swift sequences: Trailing C/T/N R1, leading G/A/N R2.
Quality trimming parameters:
qtrim=f Trim read ends to remove bases with quality below minq. Performed AFTER looking for kmers.
Values: rl (trim both ends), f (neither end), r (right end only), l (left end only).
trimq=10 Trim quality threshold. Must also set qtrim for direction.
minlength=45 (ml) Reads shorter than this after trimming will be discarded. Pairs will be discarded only if both are shorter.
mlf=0.333 (minlengthfraction) Reads shorter than this fraction of original length after trimming will be discarded.
minavgquality=5 (maq) Reads with average quality (before trimming) below this will be discarded.
maxns=0 Reads with more Ns than this will be discarded.
forcetrimmod=5 (ftm) If positive, right-trim length to be equal to zero, modulo this number.
forcetrimleft=-1 (ftl) If positive, trim bases to the left of this position
(exclusive, 0-based).
forcetrimright=-1 (ftr) If positive, trim bases to the right of this position
(exclusive, 0-based).
forcetrimright2=-1 (ftr2) If positive, trim this many bases on the right end.
Mapping parameters (for vertebrate contaminants):
mapk=14 Kmer length for mapping stage (9-15; longer is faster).
removehuman=f (human) Remove human reads via mapping.
keephuman=f Keep reads that map to human (or cat, dog, mouse) rather than removing them.
removedog=f (dog) Remove dog reads via mapping.
removecat=f (cat) Remove cat reads via mapping.
removemouse=f (mouse) Remove mouse reads via mapping.
aggressivehuman=f Aggressively remove human reads (and cat/dog/mouse) using unmasked references.
aggressivemicrobe=f Aggressively microbial contaminant reads using unmasked references.
aggressive=f Set both aggressivehuman and aggressivemicrobe at once.
mapref= Remove contaminants by mapping to this fasta file (or comma-delimited list).
Bloom filter parameters (for vertebrate mapping):
bloom=t Use a Bloom filter to accelerate mapping.
bloomminreads=4m Disable Bloom filter if there are fewer than this many reads.
bloomk=29 Kmer length for Bloom filter
bloomhashes=1 Number of hashes for the Bloom filter.
bloomminhits=6 Minimum consecutive hits to consider a read as matching.
bloomserial=t Use the serialized Bloom filter for greater loading speed.
This will use the default Bloom filter parameters.
Microbial contaminant removal parameters:
detectmicrobes=f Detect common microbes, but don't remove them. Use this OR removemicrobes, not both.
removemicrobes=f (microbes) Remove common contaminant microbial reads via mapping, and place them in a separate file.
taxlist= (tax) Remove these taxa from the database before filtering. Typically, this would be the organism name or NCBI ID, or a comma-delimited list. Organism names should have underscores instead of spaces, such as Escherichia_coli.
taxlevel=order (level) Level to remove. For example, 'phylum' would remove everything in the same phylum as entries in the taxlist.
taxtree=auto (tree) Override location of the TaxTree file.
gitable=auto Override location of the gitable file.
loadgitable=f Controls whether gi numbers may be used for taxonomy.
microberef= Path to fasta file of microbes.
microbebuild=1 Chooses which masking was used. 1 is most stringent and should be used for bacteria. Eukaryotes should use 3.
Extended microbial contaminant parameters:
detectmicrobes2=f (detectothermicrobes) Detect an extended set of microbes that are currently being screened. This can be used in conjunction with removemicrobes.
Filtering parameters (for artificial and genomic contaminants):
skipfilter=f Skip this phase. Not recommended.
filterpolya=f Remove reads containing poly-A sequence (for RNA-seq).
filterpolyg=0 Remove reads that start with a G polymer at least this long (0 disables).
trimpolyg=0 Trim reads that start or end with a G polymer at least this long (0 disables).
phix=t Remove reads containing phiX kmers.
lambda=f Remove reads containing Lambda phage kmers.
pjet=t Remove reads containing PJET kmers.
maskmiddle=t (mm) Treat the middle base of a kmer as a wildcard, to increase sensitivity in the presence of errors.
maxbadkmers=0 (mbk) Reads with more than this many contaminant kmers will be discarded.
filterhdist=1 Hamming distance used for filtering.
filterqhdist=1 Query hamming distance used for filtering.
copyundefined=f (cu) Match all possible bases for sequences containing degerate IUPAC symbols.
entropy=f Remove low-complexity reads. The threshold can be specified by e.g entropy=0.4; default is 0.42 if enabled.
entropyk=2 Kmer length to use for entropy calculation.
entropywindow=40 Window size to use for entropy calculation.
Spikein removal/quantification parameters:
mtst=f Remove mtst.
kapa=t Remove and quantify kapa.
spikeink=31 Kmer length for spikein removal.
spikeinhdist=0 Hamming distance for spikein removal.
spikeinref= Additional references for spikein removal (comma-delimited list).
Ribosomal filtering parameters:
ribohdist=1 Hamming distance used for rRNA removal.
riboedist=0 Edit distance used for rRNA removal.
removeribo=f (ribo) Remove ribosomal reads via kmer-matching, and place them in a separate file.
Organelle filtering parameters:
chloromap=f Remove chloroplast reads by mapping to this organism's chloroplast.
mitomap=f Remove mitochondrial reads by mapping to this organism's mitochondria.
ribomap=f Remove ribosomal reads by mapping to this organism's ribosomes.
NOTE: organism TaxID should be specified in taxlist, and taxlevel should be set to genus or species.
FilterByTile parameters:
filterbytile=f Run FilterByTile to remove reads from low-quality parts of the flowcell.
Clumpify parameters:
clumpify=f Run clumpify; all deduplication flags require this.
dedupe=f Remove duplicate reads; all deduplication flags require this.
opticaldupes=f Remove optical duplicates (Clumpify optical flag).
edgedupes=f Remove tile-edge duplicates (Clumpify spany and adjacent flags).
dpasses=1 Use this many deduplication passes.
dsubs=2 Allow this many substitutions between duplicates.
ddist=40 Remove optical/edge duplicates within this distance.
lowcomplexity=f Set to true for low-complexity libraries such as RNA-seq to improve estimation of memory requirements.
clumpifytmpdir=f Use TMPDIR for clumpify temp files.
clumpifygroups=-1 If positive, force Clumpify to use this many groups.
*** For NextSeq, the recommended deduplication flags are: clumpify dedupe edgedupes
*** For NovaSeq, the recommended deduplication flags are: clumpify dedupe opticaldupes ddist=12000
*** For HiSeq, the recommended deduplication flags are: clumpify dedupe opticaldupes
Sketch parameters:
sketch=t Run SendSketch on 2M read pairs.
silvalocal=t Use the local flag for Silva (requires running RQCFilter on NERSC).
sketchreads=1m Number of read pairs to sketch.
sketchsamplerate=1 Samplerate for SendSketch.
sketchminprob=0.2 Minprob for SendSketch.
sketchdb=nt,refseq,silva Servers to use for SendSketch.
Other processing parameters:
threads=auto (t) Set number of threads to use; default is number of logical processors.
library=frag Set to 'frag', 'clip', 'lfpe', or 'clrs'.
filterk=31 Kmer length for filtering stage.
rcomp=t Look for reverse-complements of kmers in addition to forward kmers.
nexteralmp=f Split into different files based on Nextera LMP junction sequence. Only for Nextera LMP, not normal Nextera.
extend=f Extend reads during merging to allow insert size estimation of non-overlapping reads.
monitor=f Kill this process if it crashes. monitor=600,0.01 would kill after 600 seconds under 1% usage.
pigz=t Use pigz for compression.
unpigz=t Use pigz for decompression.
khist=f Set to true to generate a kmer-frequency histogram of the output data.
merge=t Set to false to skip generation of insert size histogram.
Header-specific parameters: (NOTE - Be sure to disable these if the reads have improper headers, like SRA data.)
chastityfilter=t Remove reads failing chastity filter.
barcodefilter=crash Crash when improper barcodes are discovered. Set to 'f' to disable or 't' to just remove improper barcodes.
barcodes= A comma-delimited list of barcodes or files of barcodes.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
***** All additional parameters supported by BBDuk may also be used, and will be passed directly to BBDuk *****
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
#Follow symlinks until the real script file is reached.
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
#DIR is now the absolute directory of the real script, with a trailing slash.
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
#Classpath points at the compiled classes under current/.
CP="$DIR""current/"
JNI="-Djava.library.path=""$DIR""jni/"
#JNI is immediately cleared: the native-library path is deliberately disabled for this tool.
JNI=""
#Default JVM heap flags; calcXmx below may override them.
z="-Xmx40g"
z2="-Xms40g"
#set is presumably flipped to 1 by parseXmx (in calcmem.sh) when the user passes -Xmx — verify in calcmem.sh.
set=0
#Timezone pinned to Pacific — presumably a NERSC/JGI logging convention; confirm before changing.
export TZ="America/Los_Angeles"
#Show help and stop when run with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Determines the JVM heap flags: an explicit user-supplied -Xmx (set=1) takes
#precedence; otherwise z/z2 are sized from free RAM via helpers in calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
#Auto-size only when the user did not specify the heap explicitly.
if [[ $set != 1 ]]; then
freeRam 39200m 84
#On 8-slot scheduler nodes, pin RAM to the expected allocation.
[[ $NSLOTS == 8 ]] && RAM=39200
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
fi
}
#Compute JVM heap settings from the arguments and available memory.
calcXmx "$@"
#Loads the appropriate java/pigz environment for the detected NERSC host
#(or none under Shifter), then builds and runs the RQCFilter2 java command.
#The command line is echoed to stderr so runs can be reproduced.
rqcfilter() {
if [[ $SHIFTER_RUNTIME == 1 ]]; then
#Ignore NERSC_HOST
shifter=1
elif [[ $NERSC_HOST == genepool ]]; then
module unload oracle-jdk
module load oracle-jdk/1.8_144_64bit
module load pigz
export TZ="America/Los_Angeles"
elif [[ $NERSC_HOST == denovo ]]; then
module unload java
module load java/1.8.0_144
module load pigz
export TZ="America/Los_Angeles"
elif [[ $NERSC_HOST == cori ]]; then
module use /global/common/software/m342/nersc-builds/denovo/Modules/jgi
module use /global/common/software/m342/nersc-builds/denovo/Modules/usg
module unload java
module load java/1.8.0_144
module load pigz
fi
#Note: jni=t is passed to the java tool even though $JNI (library path) is cleared above.
local CMD="java $EA $EOOM $z $z2 $JNI -cp $CP jgi.RQCFilter2 jni=t $@"
echo $CMD >&2
eval $CMD
}
#Fixed: removed archive-path extraction artifact fused onto this line, which
#formed a broken pipeline and would crash the script.
rqcfilter "$@"
#Prints the help/usage text for callvariants.sh to stdout.
#The body is a single quoted string, so it must not be altered.
usage(){
echo "
Written by Brian Bushnell
Last modified April 17, 2020
Description: Calls variants from sam or bam input.
In default mode, all input files are combined and treated as a single sample.
In multisample mode, each file is treated as an individual sample,
and gets its own column in the VCF file. Unless overridden, input file
names are used as sample names.
Please read bbmap/docs/guides/CallVariantsGuide.txt for more information,
or bbmap/pipelines/variantPipeline.sh for a usage example.
Usage: callvariants.sh in=<file,file,...> ref=<file> vcf=<file>
Input may be sorted or unsorted.
The reference should be fasta.
I/O parameters:
in=<file> Input; may be one file or multiple comma-delimited files.
list=<file> Optional text file containing one input file per line.
Use list or in, but not both.
out=<file> Output variant list in var format. If the name ends
with .vcf then it will be vcf format.
vcf=<file> Output variant list in vcf format.
outgff=<file> Output variant list in gff format.
ref=<file> Reference fasta. Required to display ref alleles.
Variant calling wil be more accurate with the reference.
vcfin=<file> Force calls at these locations, even if allele count is 0.
shist=<file> (scorehist) Output for variant score histogram.
zhist=<file> (zygosityhist) Output for zygosity histogram.
qhist=<file> (qualityhist) Output for variant base quality histogram.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
extended=t Print additional variant statistics columns.
sample= Optional comma-delimited list of sample names.
multisample=f (multi) Set to true if there are multiple sam/bam files,
and each should be tracked as an individual sample.
vcf0= Optional comma-delimited list of per-sample outputs.
Only used in multisample mode.
bgzip=t Use bgzip for gzip compression.
samstreamer=t (ss) Load reads multithreaded to increase speed.
Disable to reduce the number of threads used. The number of
streamer threads can be set with e.g. 'ss=4'; default is 6.
streamermf=8 (ssmf) Allow multiple sam files to be read simultaneously.
Set ssmf=X to specify the maximum number or ssmf=f
to disable.
Processing Parameters:
prefilter=f Use a Bloom filter to exclude variants seen fewer than
minreads times. Doubles the runtime but greatly reduces
memory usage. The results are identical.
coverage=t (cc) Calculate coverage, to better call variants.
ploidy=1 Set the organism's ploidy.
rarity=1.0 Penalize the quality of variants with allele fraction
lower than this. For example, if you are interested in
4% frequency variants, you could set both rarity and
minallelefraction to 0.04. This is affected by ploidy -
a variant with frequency indicating at least one copy
is never penalized.
covpenalty=0.8 (lowcoveragepenalty) A lower penalty will increase the
scores of low-coverage variants, and is useful for
low-coverage datasets.
useidentity=t Include average read identity in score calculation.
usepairing=t Include pairing rate in score calculation.
usebias=t Include strand bias in score calculation.
useedist=t Include read-end distance in score calculation.
homopolymer=t Penalize scores of substitutions matching adjacent bases.
nscan=t Consider the distance of a variant from contig ends when
calculating strand bias.
callsub=t Call substitutions.
calldel=t Call deletions.
callins=t Call insertions.
calljunct=f Call junctions (in development).
nopassdot=f Use . as genotype for variations failing the filter.
Coverage Parameters (these mainly affect speed and memory use):
32bit=f Set to true to allow coverage tracking over depth 65535,
which increases memory use. Variant calls are impacted
where coverage exceeds the maximum.
atomic=auto Increases multithreaded speed; forces 32bit to true.
Defaults to true if there are more than 8 threads.
strandedcov=f (stranded) Tracks per-strand ref coverage to print the MCOV
and DP4 fields. Requires more memory when enabled. Strand
of variant reads is tracked regardless of this flag.
Trimming parameters:
border=5 Trim at least this many bases on both ends of reads.
qtrim=r Quality-trim reads on this end
r: right, l: left, rl: both, f: don't quality-trim.
trimq=10 Quality-trim bases below this score.
Realignment parameters:
realign=f Realign all reads with more than a couple mismatches.
Decreases speed. Recommended for aligners other than BBMap.
unclip=f Convert clip symbols from exceeding the ends of the
realignment zone into matches and substitutitions.
repadding=70 Pad alignment by this much on each end. Typically,
longer is more accurate for long indels, but greatly
reduces speed.
rerows=602 Use this many rows maximum for realignment. Reads longer
than this cannot be realigned.
recols=2000 Reads may not be aligned to reference seqments longer
than this. Needs to be at least read length plus
max deletion length plus twice padding.
msa= Select the aligner. Options:
MultiStateAligner11ts: Default.
MultiStateAligner9PacBio: Use for PacBio reads, or for
Illumina reads mapped to PacBio/Nanopore reads.
Sam-filtering parameters:
minpos= Ignore alignments not overlapping this range.
maxpos= Ignore alignments not overlapping this range.
minreadmapq=4 Ignore alignments with lower mapq.
contigs= Comma-delimited list of contig names to include. These
should have no spaces, or underscores instead of spaces.
secondary=f Include secondary alignments.
supplimentary=f Include supplimentary alignments.
duplicate=f Include reads flagged as duplicates.
invert=f Invert sam filters.
Variant-Calling Cutoffs:
minreads=2 (minad) Ignore variants seen in fewer reads.
maxreads=BIG (maxad) Ignore variants seen in more reads.
mincov=0 Ignore variants in lower-coverage locations.
maxcov=BIG Ignore variants in higher-coverage locations.
minqualitymax=15 Ignore variants with lower max base quality.
minedistmax=20 Ignore variants with lower max distance from read ends.
minmapqmax=0 Ignore variants with lower max mapq.
minidmax=0 Ignore variants with lower max read identity.
minpairingrate=0.1 Ignore variants with lower pairing rate.
minstrandratio=0.1 Ignore variants with lower plus/minus strand ratio.
minquality=12.0 Ignore variants with lower average base quality.
minedist=10.0 Ignore variants with lower average distance from ends.
minavgmapq=0.0 Ignore variants with lower average mapq.
minallelefraction=0.1 Ignore variants with lower allele fraction. This
should be adjusted for high ploidies.
minid=0 Ignore variants with lower average read identity.
minscore=20.0 Ignore variants with lower Phred-scaled score.
clearfilters Clear all filters. Filter flags placed after
the clearfilters flag will still be applied.
There are additionally max filters for score, quality, mapq, allelefraction,
and identity.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
#Follow symlinks until the real script file is reached.
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
#DIR is now the absolute directory of the real script, with a trailing slash.
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
#Classpath points at the compiled classes under current/.
CP="$DIR""current/"
#Default JVM heap flags; calcXmx below may override them.
z="-Xmx4g"
z2="-Xms4g"
#set is presumably flipped to 1 by parseXmx (in calcmem.sh) when the user passes -Xmx — verify in calcmem.sh.
set=0
#Show help and stop when run with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Determines the JVM heap flags: an explicit user-supplied -Xmx (set=1) takes
#precedence; otherwise z/z2 are sized from free RAM via helpers in calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
#Auto-size only when the user did not specify the heap explicitly.
if [[ $set != 1 ]]; then
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
fi
}
#Compute JVM heap settings from the arguments and available memory.
calcXmx "$@"
#Builds and runs the CallVariants java command; the command line is echoed
#to stderr so runs can be reproduced.
callvariants() {
local CMD="java $EA $EOOM $z $z2 -cp $CP var2.CallVariants $@"
echo $CMD >&2
eval $CMD
}
#Fixed: removed archive-path extraction artifact fused onto this line, which
#formed a broken pipeline and would crash the script.
callvariants "$@"
#Prints the help/usage text for translate6frames.sh to stdout.
#The body is a single quoted string, so it must not be altered.
usage(){
echo "
Written by Brian Bushnell
Last modified February 17, 2015
Description: Translates nucleotide sequences to all 6 amino acid frames,
or amino acids to a canonical nucleotide representation.
Input may be fasta or fastq, compressed or uncompressed.
Usage: translate6frames.sh in=<input file> out=<output file>
See also: callgenes.sh
Optional parameters (and their defaults)
Input parameters:
in=<file> Main input. in=stdin.fa will pipe from stdin.
in2=<file> Input for 2nd read of pairs in a different file.
int=auto (interleaved) t/f overrides interleaved autodetection.
qin=auto Input quality offset: 33 (Sanger), 64, or auto.
aain=f False if input is nucleotides, true for amino acids.
reads=-1 If positive, quit after processing X reads or pairs.
Output parameters:
out=<file> Write output here. 'out=stdout.fa' goes to standard out.
out2=<file> Use this to write 2nd read of pairs to a different file.
overwrite=t (ow) Grant permission to overwrite files.
append=f Append to existing files.
ziplevel=2 (zl) Compression level; 1 (min) through 9 (max).
fastawrap=80 Length of lines in fasta output.
qout=auto Output quality offset: 33 (Sanger), 64, or auto.
aaout=t False to output nucleotides, true for amino acids.
tag=t Tag read id with the frame, adding e.g. ' fr1'
frames=6 Only print this many frames.
If you already know the sense, set 'frames=3'.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
#Follow symlinks until the real script file is reached.
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
#DIR is now the absolute directory of the real script, with a trailing slash.
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
#Classpath points at the compiled classes under current/.
CP="$DIR""current/"
#Default JVM heap flags; calcXmx below may override them.
z="-Xmx2g"
z2="-Xms2g"
#set is presumably flipped to 1 by parseXmx (in calcmem.sh) when the user passes -Xmx — verify in calcmem.sh.
set=0
#Show help and stop when run with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Determines the JVM heap flags: an explicit user-supplied -Xmx (set=1) takes
#precedence; otherwise z/z2 are sized from free RAM via helpers in calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
#Auto-size only when the user did not specify the heap explicitly.
if [[ $set != 1 ]]; then
freeRam 2000m 42
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
fi
}
#Compute JVM heap settings from the arguments and available memory.
calcXmx "$@"
#Builds and runs the TranslateSixFrames java command; the command line is
#echoed to stderr so runs can be reproduced. Note: only $z (-Xmx) is passed,
#not $z2 (-Xms), matching the original behavior of this launcher.
translate6frames() {
local CMD="java $EA $EOOM $z -cp $CP jgi.TranslateSixFrames $@"
echo $CMD >&2
eval $CMD
}
#Fixed: removed archive-path extraction artifact fused onto this line, which
#formed a broken pipeline and would crash the script.
translate6frames "$@"
#Prints the help/usage text for bbmerge.sh to stdout.
#The body is a single quoted string, so it must not be altered.
usage(){
echo "
Written by Brian Bushnell and Jonathan Rood
Last modified June 26, 2019
Description: Merges paired reads into single reads by overlap detection.
With sufficient coverage, can merge nonoverlapping reads by kmer extension.
Kmer modes (Tadpole or Bloom Filter) require much more memory, and should
be used with the bbmerge-auto.sh script rather than bbmerge.sh.
Please read bbmap/docs/guides/BBMergeGuide.txt for more information.
Usage for interleaved files: bbmerge.sh in=<reads> out=<merged reads> outu=<unmerged reads>
Usage for paired files: bbmerge.sh in1=<read1> in2=<read2> out=<merged reads> outu1=<unmerged1> outu2=<unmerged2>
Input may be stdin or a file, fasta or fastq, raw or gzipped.
Input parameters:
in=null Primary input. 'in2' will specify a second file.
interleaved=auto May be set to true or false to override autodetection of
whether the input file as interleaved.
reads=-1 Quit after this many read pairs (-1 means all).
Output parameters:
out=<file> File for merged reads. 'out2' will specify a second file.
outu=<file> File for unmerged reads. 'outu2' will specify a second file.
outinsert=<file> (outi) File to write read names and insert sizes.
outadapter=<file> (outa) File to write consensus adapter sequences.
outc=<file> File to write input read kmer cardinality estimate.
ihist=<file> (hist) Insert length histogram output file.
nzo=t Only print histogram bins with nonzero values.
showhiststats=t Print extra header lines with statistical information.
ziplevel=2 Set to 1 (lowest) through 9 (max) to change compression
level; lower compression is faster.
ordered=f Output reads in same order as input.
mix=f Output both the merged (or mergable) and unmerged reads
in the same file (out=). Useful for ecco mode.
Trimming/Filtering parameters:
qtrim=f Trim read ends to remove bases with quality below minq.
Trims BEFORE merging.
Values: t (trim both ends),
f (neither end),
r (right end only),
l (left end only).
qtrim2=f May be specified instead of qtrim to perform trimming
only if merging is unsuccessful, then retry merging.
trimq=10 Trim quality threshold. This may be a comma-delimited
list (ascending) to try multiple values.
minlength=1 (ml) Reads shorter than this after trimming, but before
merging, will be discarded. Pairs will be discarded only
if both are shorter.
maxlength=-1 Reads with longer insert sizes will be discarded.
tbo=f (trimbyoverlap) Trim overlapping reads to remove
rightmost (3') non-overlapping portion, instead of joining.
minavgquality=0 (maq) Reads with average quality below this, after
trimming, will not be attempted to be merged.
maxexpectederrors=0 (mee) If positive, reads with more combined expected
errors than this will not be attempted to be merged.
forcetrimleft=0 (ftl) If nonzero, trim left bases of the read to
this position (exclusive, 0-based).
forcetrimright=0 (ftr) If nonzero, trim right bases of the read
after this position (exclusive, 0-based).
forcetrimright2=0 (ftr2) If positive, trim this many bases on the right end.
forcetrimmod=5 (ftm) If positive, trim length to be equal to
zero modulo this number.
ooi=f Output only incorrectly merged reads, for testing.
trimpolya=t Trim trailing poly-A tail from adapter output. Only
affects outadapter. This also trims poly-A followed
by poly-G, which occurs on NextSeq.
Processing Parameters:
usejni=f (jni) Do overlapping in C code, which is faster. Requires
compiling the C code; details are in /jni/README.txt.
However, the jni path is currently disabled.
merge=t Create merged reads. If set to false, you can still
generate an insert histogram.
ecco=f Error-correct the overlapping part, but don't merge.
trimnonoverlapping=f (tno) Trim all non-overlapping portions, leaving only
consensus sequence. By default, only sequence to the
right of the overlap (adapter sequence) is trimmed.
useoverlap=t Attempt find the insert size using read overlap.
mininsert=35 Minimum insert size to merge reads.
mininsert0=35 Insert sizes less than this will not be considered.
Must be less than or equal to mininsert.
minoverlap=12 Minimum number of overlapping bases to allow merging.
minoverlap0=8 Overlaps shorter than this will not be considered.
Must be less than or equal to minoverlap.
minq=9 Ignore bases with quality below this.
maxq=41 Cap output quality scores at this.
entropy=t Increase the minimum overlap requirement for low-
complexity reads.
efilter=6 Ban overlaps with over this many times the expected
number of errors. Lower is more strict. -1 disables.
pfilter=0.00004 Ban improbable overlaps. Higher is more strict. 0 will
disable the filter; 1 will allow only perfect overlaps.
kfilter=0 Ban overlaps that create kmers with count below
this value (0 disables). If this is used minprob should
probably be set to 0. Requires good coverage.
ouq=f Calculate best overlap using quality values.
owq=t Calculate best overlap without using quality values.
usequality=t If disabled, quality values are completely ignored,
both for overlap detection and filtering. May be useful
for data with inaccurate quality values.
iupacton=f (itn) Change ambiguous IUPAC symbols to N.
adapter= Specify the adapter sequences used for these reads, if
known; this can be a fasta file or a literal sequence.
Read 1 and 2 can have adapters specified independently
with the adapter1 and adapter2 flags. adapter=default
will use a list of common adapter sequences.
Ratio Mode:
ratiomode=t Score overlaps based on the ratio of matching to
mismatching bases.
maxratio=0.09 Max error rate; higher increases merge rate.
ratiomargin=5.5 Lower increases merge rate; min is 1.
ratiooffset=0.55 Lower increases merge rate; min is 0.
maxmismatches=20 Maximum mismatches allowed in overlapping region.
ratiominoverlapreduction=3 This is the difference between minoverlap in
flat mode and minoverlap in ratio mode; generally,
minoverlap should be lower in ratio mode.
minsecondratio=0.1 Cutoff for second-best overlap ratio.
forcemerge=f Disable all filters and just merge everything
(not recommended).
Flat Mode:
flatmode=f Score overlaps based on the total number of mismatching
bases only.
margin=2 The best overlap must have at least 'margin' fewer
mismatches than the second best.
mismatches=3 Do not allow more than this many mismatches.
requireratiomatch=f (rrm) Require the answer from flat mode and ratio mode
to agree, reducing false positives if both are enabled.
trimonfailure=t (tof) If detecting insert size by overlap fails,
the reads will be trimmed and this will be re-attempted.
*** Ratio Mode and Flat Mode may be used alone or simultaneously. ***
*** Ratio Mode is usually more accurate and is the default mode. ***
Strictness (these are mutually exclusive macros that set other parameters):
strict=f Decrease false positive rate and merging rate.
verystrict=f (vstrict) Greatly decrease FP and merging rate.
ultrastrict=f (ustrict) Decrease FP and merging rate even more.
maxstrict=f (xstrict) Maximally decrease FP and merging rate.
loose=f Increase false positive rate and merging rate.
veryloose=f (vloose) Greatly increase FP and merging rate.
ultraloose=f (uloose) Increase FP and merging rate even more.
maxloose=f (xloose) Maximally decrease FP and merging rate.
fast=f Fastest possible mode; less accurate.
Tadpole Parameters (for read extension and error-correction):
*Note: These require more memory and should be run with bbmerge-auto.sh.*
k=31 Kmer length. 31 (or less) is fastest and uses the least
memory, but higher values may be more accurate.
60 tends to work well for 150bp reads.
extend=0 Extend reads to the right this much before merging.
Requires sufficient (>5x) kmer coverage.
extend2=0 Extend reads this much only after a failed merge attempt,
or in rem/rsem mode.
iterations=1 (ei) Iteratively attempt to extend by extend2 distance
and merge up to this many times.
rem=f (requireextensionmatch) Do not merge if the predicted
insert size differs before and after extension.
However, if only the extended reads overlap, then that
insert will be used. Requires setting extend2.
rsem=f (requirestrictextensionmatch) Similar to rem but stricter.
Reads will only merge if the predicted insert size before
and after extension match. Requires setting extend2.
Enables the lowest possible false-positive rate.
ecctadpole=f (ecct) If reads fail to merge, error-correct with Tadpole
and try again. This happens prior to extend2.
reassemble=t If ecct is enabled, use Tadpole's reassemble mode for
error correction. Alternatives are pincer and tail.
removedeadends (shave) Remove kmers leading to dead ends.
removebubbles (rinse) Remove kmers in error bubbles.
mindepthseed=3 (mds) Minimum kmer depth to begin extension.
mindepthextend=2 (mde) Minimum kmer depth continue extension.
branchmult1=20 Min ratio of 1st to 2nd-greatest path depth at high depth.
branchmult2=3 Min ratio of 1st to 2nd-greatest path depth at low depth.
branchlower=3 Max value of 2nd-greatest path depth to be considered low.
ibb=t Ignore backward branches when extending.
extra=<file> A file or comma-delimited list of files of reads to use
for kmer counting, but not for merging or output.
prealloc=f Pre-allocate memory rather than dynamically growing;
faster and more memory-efficient for large datasets.
A float fraction (0-1) may be specified, default 1.
prefilter=0 If set to a positive integer, use a countmin sketch to
ignore kmers with depth of that value or lower, to
reduce memory usage.
filtermem=0 Allows manually specifying prefilter memory in bytes, for
deterministic runs. 0 will set it automatically.
minprob=0.5 Ignore kmers with overall probability of correctness
below this, to reduce memory usage.
minapproxoverlap=26 For rem mode, do not merge reads if the extended reads
indicate that the raw reads should have overlapped by
at least this much, but no overlap was found.
Bloom Filter Parameters (for kmer operations with less memory than Tadpole)
*Note: These require more memory and should be run with bbmerge-auto.sh.*
eccbloom=f (eccb) If reads fail to merge, error-correct with bbcms
and try again.
testmerge=f Test kmer counts around the read merge junctions. If
it appears that the merge created new errors, undo it.
This reduces the false-positive rate, but not as much as
rem or rsem.
Java Parameters:
-Xmx This will set Java's memory usage,
overriding autodetection.
For example, -Xmx400m will specify 400 MB RAM.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR ends up as the physical directory containing this script.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
#Classpath and JNI native-library path, relative to the resolved script directory.
CP="$DIR""current/"
JNI="-Djava.library.path=""$DIR""jni/"
#JNI=""
#Default JVM heap flags; overridden by user-supplied Xmx/Xms via parseXmx.
z="-Xmx1000m"
z2="-Xms1000m"
set=0
#Print help and quit when called with no arguments or an explicit help flag.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Delegate JVM memory/flag parsing to the shared calcmem.sh helpers.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
#Assemble and run the Java command; the full command line is echoed to stderr.
#Arguments pass through unquoted into CMD and are re-evaluated by eval (upstream convention).
function merge() {
local CMD="java $EA $EOOM $z $z2 $JNI -cp $CP jgi.BBMerge $@"
echo $CMD >&2
eval $CMD
}
merge "$@"
#Print the embedded help text for rename.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified April 1, 2020
Description: Renames reads to <prefix>_<number> where you specify the prefix
and the numbers are ordered. There are other renaming modes too.
If reads are paired, pairs should be processed together; if reads are
interleaved, the interleaved flag should be set. This ensures that if a
read number (such as 1: or 2:) is added, it will be added correctly.
Usage: rename.sh in=<file> in2=<file2> out=<outfile> out2=<outfile2> prefix=<>
in2 and out2 are for paired reads and are optional.
If input is paired and there is only one output file, it will be written interleaved.
Parameters:
prefix= The string to prepend to existing read names.
ow=f (overwrite) Overwrites files that already exist.
zl=4 (ziplevel) Set compression level, 1 (low) to 9 (max).
int=f (interleaved) Determines whether INPUT file is considered interleaved.
fastawrap=70 Length of lines in fasta output.
minscaf=1 Ignore fasta reads shorter than this.
qin=auto ASCII offset for input quality. May be 33 (Sanger), 64 (Illumina), or auto.
qout=auto ASCII offset for output quality. May be 33 (Sanger), 64 (Illumina), or auto (same as input).
ignorebadquality=f (ibq) Fix out-of-range quality values instead of crashing with a warning.
Renaming modes (if not default):
renamebyinsert=f Rename the read to indicate its correct insert size.
renamebymapping=f Rename the read to indicate its correct mapping coordinates.
renamebytrim=f Rename the read to indicate its correct post-trimming length.
addprefix=f Rename the read by prepending the prefix to the existing name.
prefixonly=f Only use the prefix; don't add _<number>
addunderscore=t Add an underscore after the prefix (if there is a prefix).
addpairnum=t Add a pairnum (e.g. ' 1:') to paired reads in some modes.
fixsra=f Fixes headers of SRA reads renamed from Illumina.
Specifically, it converts something like this:
SRR17611.11 HWI-ST79:17:D091UACXX:4:1101:210:824 length=75
...into this:
HWI-ST79:17:D091UACXX:4:1101:210:824 1:
Sampling parameters:
reads=-1 Set to a positive number to only process this many INPUT reads (or pairs), then quit.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR ends up as the physical directory containing this script.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
#Classpath relative to the resolved script directory.
CP="$DIR""current/"
#Default JVM heap flag; overridden by user-supplied Xmx via parseXmx.
z="-Xmx1g"
set=0
#Print help and quit when called with no arguments or an explicit help flag.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Delegate JVM memory/flag parsing to the shared calcmem.sh helpers.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
#Assemble and run the Java command; the full command line is echoed to stderr.
function rename() {
local CMD="java $EA $EOOM $z -cp $CP jgi.RenameReads $@"
echo $CMD >&2
eval $CMD
}
rename "$@"
#Print the embedded help text for bloomfilter.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified September 18, 2018
Description: Filters reads potentially sharing a kmer with a reference.
The more memory, the higher the accuracy. Reads going to outu are guaranteed
to not match the reference, but reads going to outm may or may not
match the reference.
Usage: bloomfilter.sh in=<input file> out=<nonmatches> outm=<matches> ref=<reference>
Example:
bloomfilter.sh in=reads.fq outu=nonhuman.fq outm=human.fq k=31 minhits=3 ref=human.fa
Error correction and depth filtering can be done simultaneously.
File parameters:
in=<file> Primary input, or read 1 input.
in2=<file> Read 2 input if reads are in two files.
outm=<file> (out) Primary matched read output.
outm2=<file> (out2) Matched read 2 output if reads are in two files.
outu=<file> Primary unmatched read output.
outu2=<file> Unmatched read 2 output if reads are in two files.
ref=<file> Reference sequence file, or a comma-delimited list.
For depth-based filtering, set this to the same as the input.
overwrite=t (ow) Set to false to force the program to abort rather than
overwrite an existing file.
Hashing parameters:
k=31 Kmer length.
hashes=2 Number of hashes per kmer. Higher generally reduces
false positives at the expense of speed.
minprob=0.5 Ignore reference kmers with probability of being correct
below this (affects fastq references only).
memmult=1.0 Fraction of free memory to use for Bloom filter. 1.0 should
generally work; if the program crashes with an out of memory
error, set this lower. Higher increases specificity.
cells= Option to set the number of cells manually. By default this
will be autoset to use all available memory. The only reason
to set this is to ensure deterministic output.
seed=0 This will change the hash function used.
Reference-matching parameters:
minhits=3 Consecutive kmer hits for a read to be considered matched.
Higher reduces false positives at the expense of sensitivity.
mincount=1 Minimum number of times a read kmer must occur in the
reference to be considered a match.
requireboth=f Require both reads in a pair to match the ref in order to go
to outm. By default, pairs go to outm if either matches.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR ends up as the physical directory containing this script.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
#Classpath relative to the resolved script directory.
CP="$DIR""current/"
#Default JVM heap flags; overridden by user Xmx/Xms or by freeRam autodetection.
z="-Xmx4g"
z2="-Xms4g"
set=0
#Print help and quit when called with no arguments or an explicit help flag.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Delegate JVM memory/flag parsing to the shared calcmem.sh helpers.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
#No explicit Xmx given: autodetect free RAM (4000m floor, 84% cap) and size the heap.
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Assemble and run the Java command; the full command line is echoed to stderr.
bloomfilter() {
local CMD="java $EA $EOOM $z $z2 -cp $CP bloom.BloomFilterWrapper $@"
echo $CMD >&2
eval $CMD
}
bloomfilter "$@"
#Print the embedded help text for kcompress.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified July 16, 2018
Description: Compresses sequence data into a fasta file containing each kmer
exactly once. Allows arbitrary kmer set operations via multiple passes.
Usage: kcompress.sh in=<reads> out=<contigs> min=<1> max=<2147483647>
Input parameters:
in=<file> Primary input file for reads to use as kmer data.
in2=<file> Second input file for paired data.
reads=-1 Only process this number of reads, then quit (-1 means all).
Output parameters:
out=<file> Write contigs (in contig mode).
showstats=t Print assembly statistics after writing contigs.
fuse=0 Fuse output sequences into chunks at least this long,
padded with 1 N between sequences.
Prefiltering parameters:
prefilter=0 If set to a positive integer, use a countmin sketch
to ignore kmers with depth of that value or lower.
prehashes=2 Number of hashes for prefilter.
prefiltersize=0.2 (pff) Fraction of memory to use for prefilter.
minprobprefilter=t (mpp) Use minprob for the prefilter.
prepasses=1 Use this many prefiltering passes; higher may be more thorough
if the filter is very full. Set to 'auto' to iteratively
prefilter until the remaining kmers will fit in memory.
Hashing parameters:
k=31 Kmer length (1 to 31).
prealloc=t Pre-allocate memory rather than dynamically growing;
faster and more memory-efficient. A float fraction (0-1)
may be specified; default is 1.
minprob=0.5 Ignore kmers with overall probability of correctness below this.
minprobmain=t (mpm) Use minprob for the primary kmer counts.
threads=X Spawn X threads (default is number of logical processors).
Assembly parameters:
mincount=1 (min) Only retain kmers that occur at least this many times.
maxcount=BIG (max) Only retain kmers that occur at most this many times.
requiresamecount (rsc) Only build contigs from kmers with exactly the same count.
rcomp=t Store forward and reverse kmers together. Setting this to
false will only use forward kmers.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR ends up as the physical directory containing this script.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
#Classpath relative to the resolved script directory.
CP="$DIR""current/"
#Default JVM heap flags; overridden by user Xmx/Xms or by freeRam autodetection.
z="-Xmx14g"
z2="-Xms14g"
set=0
#Print help and quit when called with no arguments or an explicit help flag.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Delegate JVM memory/flag parsing to the shared calcmem.sh helpers.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
#No explicit Xmx given: autodetect free RAM (15000m floor, 84% cap) and size the heap.
freeRam 15000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Assemble and run the Java command; the full command line is echoed to stderr.
kcompress() {
local CMD="java $EA $EOOM $z $z2 -cp $CP assemble.KmerCompressor $@"
echo $CMD >&2
eval $CMD
}
kcompress "$@"
#Print the embedded help text for filtersam.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified April 17, 2019
Description: Filters a sam file to remove reads with variations unsupported
by other reads (bad vars, aka bad subs). For particularly bad data,
it may be advisable to iteratively re-call variants and re-run FilterSam.
Calling variants may be performed like this:
callvariants.sh in=mapped.sam ref=ref.fa out=vars.vcf clearfilters minreads=2
Usage: filtersam.sh in=<file> out=<file> vcf=<file>
Parameters:
in=<file> Input sam or bam file.
ref=<file> Optional fasta reference file.
out=<file> Output file for good reads.
outb=<file> Output file for bad reads.
vcf=<file> VCF file of variants called from these reads.
vars=<file> Alternatively, variants can be provided in CallVariants'
native output format.
mbv=2 (maxbadvars) Discard reads with more bad vars than this.
mbad=2 (maxbadalleledepth) A var is bad if the allele depth is at
most this much.
mbaf=0.01 (maxbadallelefraction) A var is bad if the allele fraction is at
most this much. The more stringent of mbad or mbaf is used,
so in low depth regions mbad is dominant while in high depth
regions mbaf is more important. Vars are considered bad if
they fail either threshold (meaning ad<=mbad or af<=mbaf).
mbrd=2 (minbadreaddepth) Substitutions may only be considered
bad if the total read depth spanning the variant is
at least this much.
border=5 (minenddist) Ignore vars within this distance of read ends.
sub=t Consider bad substitutions.
ins=f Consider bad insertions.
del=f Consider bad deletions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR ends up as the physical directory containing this script.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
#Classpath relative to the resolved script directory.
CP="$DIR""current/"
#Default JVM heap flags; overridden by user Xmx/Xms or by freeRam autodetection.
z="-Xmx8g"
z2="-Xms8g"
set=0
#Print help and quit when called with no arguments or an explicit help flag.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Delegate JVM memory/flag parsing to the shared calcmem.sh helpers.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
#No explicit Xmx given: autodetect free RAM (3200m floor, 84% cap) and size the heap.
freeRam 3200m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Assemble and run the Java command.
#NOTE(review): unlike sibling scripts, the stderr echo of CMD is commented out here;
#left as-is since that may be deliberate upstream behavior.
filtersam() {
local CMD="java $EA $EOOM $z $z2 -cp $CP var2.FilterSam $@"
# echo $CMD >&2
eval $CMD
}
filtersam "$@"
#Print the embedded help text for muxbyname.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified June 22, 2016
Description: Multiplexes reads from multiple files after renaming them based on their initial file.
Opposite of demuxbyname.
Usage: muxbyname.sh in=<file,file,file...> out=<output file>
Input files may also be given without an in= prefix, so that you can use wildcards:
muxbyname.sh *.fastq out=muxed.fastq
Standard parameters:
in=<file,file> A list of input files.
in2=<file,file> Read 2 input if reads are in paired files.
out=<file> Primary output, or read 1 output.
out2=<file> Read 2 output if reads are in paired files.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
showspeed=t (ss) Set to 'f' to suppress display of processing speed.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression
level; lower compression is faster.
Processing parameters:
None yet!
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR ends up as the physical directory containing this script.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
#Classpath relative to the resolved script directory.
CP="$DIR""current/"
#Default JVM heap flags; overridden by user-supplied Xmx/Xms via parseXmx.
z="-Xmx400m"
z2="-Xms400m"
set=0
#Print help and quit when called with no arguments or an explicit help flag.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Delegate JVM memory/flag parsing to the shared calcmem.sh helpers.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
#Assemble and run the Java command; the full command line is echoed to stderr.
muxbyname() {
local CMD="java $EA $EOOM $z -cp $CP driver.RenameAndMux $@"
echo $CMD >&2
eval $CMD
}
muxbyname "$@"
#Print the embedded help text for adjusthomopolymers.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified October 3, 2019
Description: Shrinks or expands homopolymers.
Usage: adjusthomopolymers.sh in=<input file> out=<output file> rate=<float>
Input may be fasta or fastq, compressed or uncompressed.
Standard parameters:
in=<file> Primary input, or read 1 input.
in2=<file> Read 2 input if reads are in two files.
out=<file> Primary output, or read 1 output.
out2=<file> Read 2 output if reads are in two files.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression
level; lower compression is faster.
Processing parameters:
rate=0.0 0.1 will expand by 10%; -0.1 will shrink by 10%.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR ends up as the physical directory containing this script.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
#Classpath relative to the resolved script directory.
CP="$DIR""current/"
#Default JVM heap flags; overridden by user Xmx/Xms or by freeRam autodetection.
z="-Xmx1g"
z2="-Xms1g"
set=0
#Print help and quit when called with no arguments or an explicit help flag.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Delegate JVM memory/flag parsing to the shared calcmem.sh helpers.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
#No explicit Xmx given: autodetect free RAM (1000m floor, 30% cap) and size the heap.
freeRam 1000m 30
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Assemble and run the Java command; the full command line is echoed to stderr.
adjusthomopolymers() {
local CMD="java $EA $EOOM $z -cp $CP jgi.AdjustHomopolymers $@"
echo $CMD >&2
eval $CMD
}
adjusthomopolymers "$@"
#Print the embedded help text for summarizecontam.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified March 19, 2018
Description: Summarizes monthly contam files into a single file.
This is for internal JGI use.
Usage: summarizecontam.sh <input files> out=<output file>
Parameters:
in=<file,file> Input contam summary files, comma-delimited.
Alternately, file arguments (from a * expansion) will be
considered input files.
out=<file> Output.
tree=auto Taxtree file location (optional).
overwrite=t (ow) Set to false to force the program to abort rather than
overwrite an existing file.
Filter Parameters (passing all required to pass):
minreads=0 Ignore records with fewer reads than this.
minsequnits=0 Ignore records with fewer seq units than this.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR ends up as the physical directory containing this script.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
#Classpath relative to the resolved script directory.
CP="$DIR""current/"
#Default JVM heap flags; overridden by user Xmx/Xms or by freeRam autodetection.
z="-Xmx1g"
z2="-Xms1g"
set=0
#Print help and quit when called with no arguments or an explicit help flag.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Delegate JVM memory/flag parsing to the shared calcmem.sh helpers.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
#No explicit Xmx given: autodetect free RAM (1000m floor, 24% cap) and size the heap.
freeRam 1000m 24
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Assemble and run the Java command; the full command line is echoed to stderr.
process() {
local CMD="java $EA $EOOM $z -cp $CP driver.SummarizeContamReport $@"
echo $CMD >&2
eval $CMD
}
process "$@"
#Print the embedded help text for pileup.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified April 30, 2020
Description: Calculates per-scaffold or per-base coverage information from an unsorted sam or bam file.
Supports SAM/BAM format for reads and FASTA for reference.
Sorting is not needed, so output may be streamed directly from a mapping program.
Requires a minimum of 1 bit per reference base plus 100 bytes per scaffold (even if no reference is specified).
If per-base coverage is needed (including for stdev and median), at least 4 bytes per base is needed.
Usage: pileup.sh in=<input> out=<output>
Input Parameters:
in=<file> The input sam file; this is the only required parameter.
ref=<file> Scans a reference fasta for per-scaffold GC counts, not otherwise needed.
fastaorf=<file> An optional fasta file with ORF header information in PRODIGAL's output format. Must also specify 'outorf'.
unpigz=t Decompress with pigz for faster decompression.
addfromref=t Allow ref scaffolds not present in sam header to be added from the reference.
addfromreads=f Allow ref scaffolds not present in sam header to be added from the reads.
Note that in this case the ref scaffold lengths will be inaccurate.
Output Parameters:
out=<file> (covstats) Per-scaffold coverage info.
rpkm=<file> Per-scaffold RPKM/FPKM counts.
twocolumn=f Change to true to print only ID and Avg_fold instead of all 6 columns.
countgc=t Enable/disable counting of read GC content.
outorf=<file> Per-orf coverage info to this file (only if 'fastaorf' is specified).
outsam=<file> Print the input sam stream to this file (or stdout). Useful for piping data.
hist=<file> Histogram of # occurrences of each depth level.
basecov=<file> Coverage per base location.
bincov=<file> Binned coverage per location (one line per X bases).
binsize=1000 Binsize for binned coverage output.
keepshortbins=t (ksb) Keep residual bins shorter than binsize.
normcov=<file> Normalized coverage by normalized location (X lines per scaffold).
normcovo=<file> Overall normalized coverage by normalized location (X lines for the entire assembly).
normb=-1 If positive, use a fixed number of bins per scaffold; affects 'normcov' and 'normcovo'.
normc=f Normalize coverage to fraction of max per scaffold; affects 'normcov' and 'normcovo'.
delta=f Only print base coverage lines when the coverage differs from the previous base.
nzo=f Only print scaffolds with nonzero coverage.
concise=f Write 'basecov' in a more concise format.
header=t (hdr) Include headers in output files.
headerpound=t (#) Prepend header lines with '#' symbol.
stdev=t Calculate coverage standard deviation.
covminscaf=0 (minscaf) Don't print coverage for scaffolds shorter than this.
covwindow=0 Calculate how many bases are in windows of this size with
low average coverage. Produces an extra stats column.
covwindowavg=5 Average coverage below this will be classified as low.
k=0 If positive, calculate kmer coverage statistics for this kmer length.
keyvalue=f Output statistics to screen as key=value pairs.
mincov=1 When calculating percent covered, ignore bases under this depth.
Processing Parameters:
strandedcov=f Track coverage for plus and minus strand independently.
startcov=f Only track start positions of reads.
stopcov=f Only track stop positions of reads.
secondary=t Use secondary alignments, if present.
softclip=f Include soft-clipped bases in coverage.
minmapq=0 (minq) Ignore alignments with mapq below this.
physical=f (physcov) Calculate physical coverage for paired reads. This includes the unsequenced bases.
tlen=t Track physical coverage from the tlen field rather than recalculating it.
arrays=auto Set to t/f to manually force the use of coverage arrays. Arrays and bitsets are mutually exclusive.
bitsets=auto Set to t/f to manually force the use of coverage bitsets.
32bit=f Set to true if you need per-base coverage over 64k; does not affect per-scaffold coverage precision.
This option will double RAM usage (when calculating per-base coverage).
delcoverage=t (delcov) Count bases covered by deletions or introns as covered.
True is faster than false.
dupecoverage=t (dupes) Include reads flagged as duplicates in coverage.
samstreamer=t (ss) Load reads multithreaded to increase speed.
Trimming Parameters:
** NOTE: These are applied before adding coverage, to allow mimicking **
** tools like CallVariants, which uses 'qtrim=r trimq=10 border=5' **
qtrim=f Quality-trim. May be set to:
f (false): Don't quality-trim.
r (right): Trim right (3') end only.
l (left): Trim left (5') end only.
rl (both): Trim both ends.
trimq=-1 If positive, quality-trim to this threshold.
border=0 Ignore this many bases on the left and right end.
Output format (tab-delimited):
ID, Avg_fold, Length, Ref_GC, Covered_percent, Covered_bases, Plus_reads, Minus_reads, Read_GC, Median_fold, Std_Dev
ID: Scaffold ID
Length: Scaffold length
Ref_GC: GC ratio of reference
Avg_fold: Average fold coverage of this scaffold
Covered_percent: Percent of scaffold with any coverage (only if arrays or bitsets are used)
Covered_bases: Number of bases with any coverage (only if arrays or bitsets are used)
Plus_reads: Number of reads mapped to plus strand
Minus_reads: Number of reads mapped to minus strand
Read_GC: Average GC ratio of reads mapped to this scaffold
Median_fold: Median fold coverage of this scaffold (only if arrays are used)
Std_Dev: Standard deviation of coverage (only if arrays are used)
Java Parameters:
-Xmx This will set Java's memory usage, overriding
autodetection. -Xmx20g will
specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR ends up as the physical directory containing this script.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
#Classpath relative to the resolved script directory.
CP="$DIR""current/"
#Default JVM heap flags; overridden by user Xmx/Xms or by freeRam autodetection.
z="-Xmx1g"
z2="-Xms1g"
set=0
#Print help and quit when called with no arguments or an explicit help flag.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Delegate JVM memory/flag parsing to the shared calcmem.sh helpers.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
#No explicit Xmx given: autodetect free RAM (3200m floor, 84% cap) and size the heap.
freeRam 3200m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Assemble and run the Java command; the full command line is echoed to stderr.
pileup() {
local CMD="java $EA $EOOM $z -cp $CP jgi.CoveragePileup $@"
echo $CMD >&2
eval $CMD
}
pileup "$@"
#Print the embedded help text for fixgaps.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified September 11, 2019
Description: Uses paired read insert sizes to estimate the correct
length of scaffold gaps, and resizes incorrectly-sized gaps.
Usage: fixgaps.sh in=mapped.sam ref=scaffolds.fa out=fixed.fa
Standard parameters:
in=<file> Reads mapped to the reference; should be sam or bam.
ref=<file> Reference; may be fasta or fastq.
out=<file> Modified reference; may be fasta or fastq.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
Processing parameters:
gap=10 Consider any consecutive streak of Ns at least this long to
be a scaffold break. Gaps will not be resized to less than
this.
border=0.4 Ignore the outermost (border*readlen) of an insert (read pair)
when incrementing coverage. A higher value is more accurate
but requires more coverage and/or longer inserts. Range: 0-1.
mindepth=10 Minimum spanning read pairs to correct a gap.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR ends up as the physical directory containing this script.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
#Classpath relative to the resolved script directory.
CP="$DIR""current/"
#Default JVM heap flags; overridden by user Xmx/Xms or by freeRam autodetection.
z="-Xmx4g"
z2="-Xms4g"
set=0
#Print help and quit when called with no arguments or an explicit help flag.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Delegate JVM memory/flag parsing to the shared calcmem.sh helpers.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
#No explicit Xmx given: autodetect free RAM (4000m floor, 84% cap) and size the heap.
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Assemble and run the Java command; the full command line is echoed to stderr.
fixgaps() {
local CMD="java $EA $EOOM $z -cp $CP consensus.FixScaffoldGaps $@"
echo $CMD >&2
eval $CMD
}
fixgaps "$@"
#Print the embedded help text for filterbytaxa.sh.
usage(){
echo "
Written by Brian Bushnell
Last modified June 18, 2018
Description: Filters sequences according to their taxonomy,
as determined by the sequence name. Sequences should
be labeled with a gi number, NCBI taxID, or species name.
Usage: filterbytaxa.sh in=<input file> out=<output file> tree=<tree file> table=<table file> ids=<numbers> level=<name or number>
I/O parameters:
in=<file> Primary input, or read 1 input.
out=<file> Primary output, or read 1 output.
results=<file> Optional; prints a list indicating which taxa were retained.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
showspeed=t (ss) Set to 'f' to suppress display of processing speed.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression
level; lower compression is faster.
Processing parameters:
level= Taxonomic level, such as phylum. Filtering will operate on
sequences within the same taxonomic level as specified ids.
If not set, only matches to a node or its descendants will
be considered.
reqlevel= Require nodes to have ancestors at these levels. For example,
reqlevel=species,genus would ban nodes that are not defined
at both the species and genus levels.
ids= Comma-delimited list of NCBI numeric IDs. Can also be a
file with one taxID per line. Names (like bacteria) are also
acceptable.
include=f 'f' will discard filtered sequences, 't' will keep them.
besteffort=f Intended for include mode. Iteratively increases level
while the input file has no hits to the tax list.
tree=<file> Specify a TaxTree file like tree.taxtree.gz.
On Genepool, use 'auto'.
gi=<file> Specify a gitable file like gitable.int1d.gz. Only needed
if gi numbers will be used. On Genepool, use 'auto'.
accession= Specify one or more comma-delimited NCBI accession to taxid
files. Only needed if accessions will be used; requires ~45GB
of memory. On Genepool, use 'auto'.
printnodes=t Print the names of nodes added to the filter.
requirepresent=t Crash with an error message if a header cannot be resolved
to a taxid.
String-matching parameters:
regex= Filter names matching this Java regular expression.
contains= Filter names containing this substring (case-insensitive).
* Note *
Tree and table files are in /global/projectb/sandbox/gaag/bbtools/tax
For non-Genepool users, or to make new ones, use taxtree.sh and gitable.sh
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
#Resolve symlinks so DIR ends up as the physical directory containing this script.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
#Classpath relative to the resolved script directory.
CP="$DIR""current/"
#Default JVM heap flags; overridden by user Xmx/Xms or by freeRam autodetection.
z="-Xmx4g"
z2="-Xms4g"
set=0
#Print help and quit when called with no arguments or an explicit help flag.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Delegate JVM memory/flag parsing to the shared calcmem.sh helpers.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
#No explicit Xmx given: autodetect free RAM (1000m floor, 84% cap) and size the heap.
freeRam 1000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Assemble and run the Java command; the full command line is echoed to stderr.
filterbytaxa() {
local CMD="java $EA $EOOM $z -cp $CP tax.FilterByTaxa $@"
echo $CMD >&2
eval $CMD
}
filterbytaxa "$@"
#usage(){
# echo "CalcMem v1.15"
# echo "Written by Brian Bushnell, Doug Jacobsen, Alex Copeland, Bryce Foster"
# echo "Calculates available memory in megabytes"
# echo "Last modified December 17, 2019"
#}
#Also parses other Java flags
#Parse JVM-related flags out of the script's argument list, setting the globals:
#  z    (-Xmx heap max),  z2  (-Xms heap min),
#  EA   (enable/disable assertions), EOOM (exit-on-OOM JVM flag),
#  json / silent (presumably read by the calling scripts; not used here - TODO confirm),
#  set  (1 if the user supplied an explicit heap size, else 0).
#Non-memory arguments are ignored and passed through to the Java program by the caller.
function parseXmx () {
local setxmx=0
local setxms=0
for arg in "$@"
do
#Accept Xmx in several spellings: Xmx=4g, -Xmx=4g, -Xmx4g, Xmx4g (lowercase x variants too).
#The substring offsets (4, 5, 2, 1) skip the prefix so only the size value is kept.
if [[ "$arg" == "Xmx="* ]] || [[ "$arg" == "xmx="* ]]; then
z="-Xmx"${arg:4}
setxmx=1
elif [[ "$arg" == "-Xmx="* ]] || [[ "$arg" == "-xmx="* ]]; then
z="-Xmx"${arg:5}
setxmx=1
elif [[ "$arg" == -Xmx* ]] || [[ "$arg" == -xmx* ]]; then
#z="$arg"
z="-X"${arg:2}
setxmx=1
elif [[ "$arg" == Xmx* ]] || [[ "$arg" == xmx* ]]; then
#z="-$arg"
z="-X"${arg:1}
setxmx=1
#Xms is accepted with or without the leading dash.
elif [[ "$arg" == -Xms* ]]; then
z2="$arg"
setxms=1
elif [[ "$arg" == Xms* ]]; then
z2="-$arg"
setxms=1
#Assertion flags: -da/-ea (or bare da/ea) are forwarded to the JVM via EA.
elif [[ "$arg" == -da ]] || [[ "$arg" == -ea ]]; then
EA="$arg"
elif [[ "$arg" == da ]] || [[ "$arg" == ea ]]; then
EA="-$arg"
#eoom (any spelling, with or without dash) enables JVM exit on OutOfMemoryError.
elif [[ "$arg" == ExitOnOutOfMemoryError ]] || [[ "$arg" == exitonoutofmemoryerror ]] || [[ "$arg" == eoom ]]; then
EOOM="-XX:+ExitOnOutOfMemoryError"
elif [[ "$arg" == -ExitOnOutOfMemoryError ]] || [[ "$arg" == -exitonoutofmemoryerror ]] || [[ "$arg" == -eoom ]]; then
EOOM="-XX:+ExitOnOutOfMemoryError"
elif [[ "$arg" == json ]] || [[ "$arg" == "json=t" ]] || [[ "$arg" == "json=true" ]] || [[ "$arg" == "format=json" ]]; then
json=1
elif [[ "$arg" == silent ]] || [[ "$arg" == "silent=t" ]] || [[ "$arg" == "silent=true" ]]; then
silent=1
fi
done
#If only one of Xmx/Xms was given, mirror its size onto the other:
#the size is the text after the literal 'x' (for -Xmx) or 's' (for -Xms).
if [[ $setxmx == 1 ]] && [[ $setxms == 0 ]]; then
local substring=`echo $z| cut -d'x' -f 2`
z2="-Xms$substring"
setxms=1
elif [[ $setxmx == 0 ]] && [[ $setxms == 1 ]]; then
local substring=`echo $z2| cut -d's' -f 2`
z="-Xmx$substring"
setxmx=1
fi
#Callers test 'set' to decide whether to run RAM autodetection (freeRam).
set=$setxmx
}
#Sets the JVM assertion/OOM defaults and prepends site-specific tool
#directories to PATH, choosing a branch from the runtime environment
#(Shifter container, AWS EC2, NERSC hosts, or plain systems).
function setEnvironment(){
EA="-ea"
EOOM=""
if [[ $SHIFTER_RUNTIME == 1 ]]; then
#Ignore NERSC_HOST
shifter=1
elif [ -n "$EC2_HOME" ]; then
#Bugfix: was [ -v "$EC2_HOME" ], which asks whether a variable *named by the
#value* of EC2_HOME is set, so this branch could never trigger; -n correctly
#tests that EC2_HOME itself is set to a non-empty value.
#Let's assume this is the AWS taxonomy server...
PATH=/test1/binaries/bgzip:$PATH
PATH=/test1/binaries/lbzip2/bin:$PATH
PATH=/test1/binaries/sambamba:$PATH
#PATH=/test1/binaries/java/jdk-11.0.2/bin:$PATH
PATH=/test1/binaries/pigz2/pigz-2.4:$PATH
elif [ -z "$NERSC_HOST" ]; then
#Not NERSC; do nothing
:
else
PATH=/global/projectb/sandbox/gaag/bbtools/bgzip:$PATH
PATH=/global/projectb/sandbox/gaag/bbtools/lbzip2/bin:$PATH
PATH=/global/projectb/sandbox/gaag/bbtools/sambamba:$PATH
PATH=/global/projectb/sandbox/gaag/bbtools/java/jdk-11.0.2/bin:$PATH
PATH=/global/projectb/sandbox/gaag/bbtools/pigz2/pigz-2.4:$PATH
if [[ $NERSC_HOST == cori ]]; then
#module unload PrgEnv-intel
#module load PrgEnv-gnu/7.1
PATH=/global/projectb/sandbox/gaag/bbtools/samtools_cori/bin:$PATH
:
fi
fi
}
#Computes usable memory in megabytes into the global RAM, taking the minimum
#of several signals (ulimit -v, /proc/meminfo, SLURM allocation, RQCMEM
#override) and applying a percentage multiplier.
#Args: $1 = default memory with optional g/m/k suffix (used when nothing can
#be detected), $2 = percent of detected memory to use (default 84).
function freeRam(){
RAM=0;
#Memory is in kilobytes.
local defaultMem=3200000
if [ $# -gt 0 ]; then
defaultMem=$1;
#Normalize the g/m/k-suffixed default into kilobytes.
case $defaultMem in
*g)
defaultMem=`echo $defaultMem| cut -d'g' -f 1`
defaultMem=$(( $defaultMem * $(( 1024 * 1024 )) ))
;;
*m)
defaultMem=`echo $defaultMem| cut -d'm' -f 1`
defaultMem=$(( $defaultMem * 1024 ))
;;
*k)
defaultMem=`echo $defaultMem| cut -d'k' -f 1`
;;
esac
fi
local mult=84
if [ $# -gt 1 ]; then
mult=$2;
fi
#echo "mult = $mult" # percent of memory to allocate
#echo "default = $defaultMem"
#Treat an unset or "unlimited" ulimit as 0, i.e. no constraint detected.
local ulimit=$(ulimit -v)
ulimit="${ulimit:-0}"
if [ "$ulimit" = "unlimited" ]; then ulimit=0; fi
local x=$ulimit
#echo "x = ${x}" # normally ulimit -v
#local HOSTNAME=`hostname`
local sge_x=0
local slurm_x=$(( SLURM_MEM_PER_NODE * 1024 ))
#RQCMEM (kilobytes via *1024 of MB) is an explicit manual override.
if [[ $RQCMEM -gt 0 ]]; then
#echo "branch for manual memory"
x=$(( RQCMEM * 1024 ));
elif [ -e /proc/meminfo ]; then
#vfree: committable virtual memory; pfree: free+cached+buffered physical memory.
local vfree=$(cat /proc/meminfo | awk -F: 'BEGIN{total=-1;used=-1} /^CommitLimit:/ { total=$2 }; /^Committed_AS:/ { used=$2 } END{ print (total-used) }')
local pfree=$(cat /proc/meminfo | awk -F: 'BEGIN{free=-1;cached=-1;buffers=-1} /^MemFree:/ { free=$2 }; /^Cached:/ { cached=$2}; /^Buffers:/ { buffers=$2} END{ print (free+cached+buffers) }')
#echo "vfree = $vfree"
#echo "pfree = $pfree"
#echo "ulimit = $ulimit"
#x2 becomes the smaller positive value of vfree/pfree (0 if neither is positive).
local x2=0;
if [ $vfree -gt 0 ] && [ $pfree -gt 0 ]; then
if [ $vfree -gt $pfree ]; then x2=$pfree;
else x2=$vfree; fi
elif [ $vfree -gt 0 ]; then x2=$vfree;
elif [ $pfree -gt 0 ]; then x2=$pfree;
fi
#echo $sge_x
#echo $slurm_x
#echo $x
#echo $x2
# set to SGE_HGR_RAMC or SLURM_MEM_PER_NODE value
if [ $sge_x -gt 0 ]; then
if [ $x2 -gt $sge_x ] || [ $x2 -eq 0 ]; then
x=$sge_x;
x2=$x;
fi
elif [ $slurm_x -gt 0 ]; then
if [ $x2 -gt $slurm_x ] || [ $x2 -eq 0 ]; then
x=$slurm_x;
x2=$x;
fi
fi
#echo "x = ${x}"
#echo "x2 = ${x2}"
#echo $vfree
#echo $pfree
#Clamp to the meminfo-derived estimate when ulimit is absent or larger.
if [ "$x" = "unlimited" ] || (("$x" > $x2)); then x=$x2; fi
if [ $x -lt 1 ]; then x=$x2; fi
fi
if [ $x -lt 1 ] || [[ $HOSTNAME == genepool* ]]; then
#echo "branch for unknown memory"
#echo $x
#echo "ram is unlimited"
#Nothing reliable detected: fall back to the caller-supplied default.
RAM=$((defaultMem/1024))
echo "Max memory cannot be determined. Attempting to use $RAM MB." 1>&2
echo "If this fails, please add the -Xmx flag (e.g. -Xmx24g) to your command, " 1>&2
echo "or run this program qsubbed or from a qlogin session on Genepool, or set ulimit to an appropriate value." 1>&2
else
#echo "branch for known memory"
#echo "x = ${x}"
#echo "m = ${mult}"
# available (ram - 500k) * 85% / 1024kb = megs of ram to use
# not sure where this formula came from
RAM=$(( ((x-500000)*mult/100)/1024 ))
#echo $RAM
fi
#local z="-Xmx${RAM}m"
#echo $RAM
return 0
}
#freeRam "$@"
#Prints the decontaminate.sh help text to stdout.
#Fixes: \$TMPDIR was unescaped inside the double-quoted echo and expanded at
#runtime instead of printing literally; also corrected typos "is uses"/"ouput".
usage(){
echo "
Written by Brian Bushnell.
Last modified June 28, 2016
Description: Decontaminates multiplexed assemblies via normalization and mapping.
Usage: decontaminate.sh reads=<file,file> ref=<file,file> out=<directory>
or
decontaminate.sh readnamefile=<file> refnamefile=<file> out=<directory>
Input Parameters:
reads=<file,file> Input reads, one file per library.
ref=<file,file> Input assemblies, one file per library.
readnamefile=<file> List of input reads, one line per library.
refnamefile=<file> List of input assemblies, one line per library.
interleaved=auto True forces paired/interleaved input; false forces single-ended mapping.
If not specified, interleaved status will be autodetected from read names.
unpigz=t Spawn a pigz (parallel gzip) process for faster decompression. Requires pigz to be installed.
touppercase=t (tuc) Convert lowercase letters in reads to upper case (otherwise they will not match the reference).
Output Parameters:
pigz=f Spawn a pigz (parallel gzip) process for faster compression. Requires pigz to be installed.
tmpdir=. Write temp files here. By default it uses the system's \$TMPDIR or current directory.
outdir=. Write output files here.
Mapping Parameters:
kfilter=55 Set to a positive number N to require minimum N contiguous matches for a mapped read.
ambig=random Determines how coverage will be calculated for ambiguously-mapped reads.
first: Add coverage only at first genomic mapping location.
random: Add coverage at a random best-scoring location.
all: Add coverage at all best-scoring locations.
toss: Discard ambiguously-mapped reads without adding coverage.
Filtering Parameters:
minc=3.5 Min average coverage to retain scaffold.
minp=20 Min percent coverage to retain scaffold.
minr=18 Min mapped reads to retain scaffold.
minl=500 Min length to retain scaffold.
ratio=1.2 Contigs will not be removed by minc unless the coverage changed by at least this factor. 0 disables this filter.
mapraw=t Set true to map the unnormalized reads. Required to filter by 'ratio'.
basesundermin=-1 If positive, removes contigs with at least this many bases in low-coverage windows.
window=500 Sliding window size
windowcov=5 Average coverage below this will be classified as low.
Tadpole Parameters:
ecct=f Error-correct with Tadpole before normalization.
kt=42 Kmer length for Tadpole.
aggressive=f Do aggressive error correction.
conservative=f Do conservative error correction.
tadpoleprefilter=1 (tadpre) Ignore kmers under this depth to save memory.
Normalization Parameters:
mindepth=2 Min depth of reads to keep.
target=20 Target normalization depth.
hashes=4 Number of hashes in Bloom filter.
passes=1 Normalization passes.
minprob=0.5 Min probability of correctness to add a kmer.
dp=0.75 (depthpercentile) Percentile to use for depth proxy (0.5 means median).
prefilter=t Prefilter, for large datasets.
filterbits=32 (fbits) Bits per cell in primary filter.
prefilterbits=2 (pbits) Bits per cell in prefilter.
k=31 Kmer length for normalization. Longer is more precise but less sensitive.
Other parameters:
opfn=0 (onlyprocessfirstn) Set to a positive number to only process that many datasets. This is for internal testing of specificity.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx800m will specify 800 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#JNI flag is assembled but immediately cleared: native acceleration off by default.
JNI="-Djava.library.path=""$DIR""jni/"
JNI=""
#Default JVM heap flags; calcXmx below may override them.
z="-Xmx1g"
z2="-Xms1g"
set=0
#Print help and quit when run with no arguments or -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Pick the JVM heap: keep an explicit -Xmx, else use 84% of free RAM
#(15000m fallback) via freeRam from calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 15000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Build and run the DecontaminateByNormalization command, echoing it to stderr first.
decontaminate() {
local CMD="java $JNI $EA $EOOM $z $z2 -cp $CP jgi.DecontaminateByNormalization $@"
echo $CMD >&2
eval $CMD
}
decontaminate "$@"
#Prints the consensus.sh help text to stdout.
usage(){
echo "
Written by Brian Bushnell
Last modified February 10, 2020
Description: Generates the consensus sequence of a reference
using aligned sequences. This can be used for polishing assemblies,
making representative ribosomal sub units, correcting PacBio reads, etc.
If unaligned sequences are used as input, they should be in fasta or fastq
format, and they will be aligned to the first reference sequence.
Usage: consensus.sh in=mapped.sam ref=ref.fa out=consensus.fa
Recommended settings for assembly polishing via Illumina reads: mafsub=0.5
Standard parameters:
in=<file> Reads mapped to the reference; should be sam or bam.
ref=<file> Reference; may be fasta or fastq.
out=<file> Modified reference; may be fasta or fastq.
outm=<file> Optional output for binary model file.
Preferred extension is .alm.
inm=<file> Optional input model file for statistics.
hist=<file> Optional score histogram output.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
Processing parameters:
mindepth=2 Do not change to alleles present at depth below this.
mafsub=0.25 Do not incorporate substitutions below this allele fraction.
mafdel=0.50 Do not incorporate deletions below this allele fraction.
mafins=0.50 Do not incorporate insertions below this allele fraction.
mafn=0.40 Do not change Ns (noref) to calls below this allele fraction.
usemapq=f Include mapq as a positive factor in edge weight.
nonly=f Only change Ns to different bases.
noindels=f Don't allow indels.
ceiling= If set, alignments will be weighted by their inverse identity.
For example, at ceiling=105, a read with 96% identity will get
bonus weight of 105-96=9 while a read with 70% identity will
get 105-70=35. This favors low-identity reads.
name= Set the output sequence name (for a single output sequence).
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#Default JVM heap flags; calcXmx below may override them.
z="-Xmx4g"
z2="-Xms4g"
set=0
#Print help and quit when run with no arguments or -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Pick the JVM heap: keep an explicit -Xmx, else use 84% of free RAM
#(4000m fallback) via freeRam from calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Note that this needs -Xss flag to prevent serialization stack overflow
#Build and run the ConsensusMaker command, echoing it to stderr first.
consensus() {
local CMD="java $EA $EOOM $z -Xss8m -cp $CP consensus.ConsensusMaker $@"
echo $CMD >&2
eval $CMD
}
consensus "$@"
#Prints the sketchblacklist.sh help text to stdout.
usage(){
echo "
Written by Brian Bushnell
Last modified November 7, 2019
Description: Creates a blacklist sketch from common kmers,
which occur in at least X different sequences or taxa.
Please read bbmap/docs/guides/BBSketchGuide.txt for more information.
Usage: sketchblacklist.sh in=<fasta file> out=<sketch file>
Standard parameters:
in=<file> A fasta file containing one or more sequences.
out=<file> Output filename.
mintaxcount=100 Sketch kmers occuring in at least this many taxa.
k=31 Kmer length, 1-32. To maximize sensitivity and
specificity, dual kmer lengths may be used: k=31,24
mode=sequence Possible modes:
sequence: Count kmers once per sequence.
taxa: Count kmers once per taxonomic unit.
name= Set the blacklist sketch name.
delta=t Delta-compress sketches.
a48=t Encode sketches as ASCII-48 rather than hex.
amino=f Amino-acid mode.
entropy=0.66 Ignore sequence with entropy below this value.
keyfraction=0.16 Smaller values reduce blacklist size by ignoring a
a fraction of the key space. Range: 0.0001-0.5.
Taxonomy-specific flags:
tree= Specify a taxtree file. On Genepool, use 'auto'.
gi= Specify a gitable file. On Genepool, use 'auto'.
accession= Specify one or more comma-delimited NCBI accession to
taxid files. On Genepool, use 'auto'.
taxlevel=subspecies Taxa hits below this rank will be promoted and merged
with others.
prefilter=t Use a bloom filter to ignore low-count kmers.
prepasses=2 Number of prefilter passes.
prehashes=2 Number of prefilter hashes.
prebits=-1 Manually override number of prefilter cell bits.
tossjunk=t For taxa mode, discard taxonomically uninformative
sequences. This includes sequences with no taxid,
with a tax level NO_RANK, of parent taxid of LIFE.
silva=f Parse headers using Silva or semicolon-delimited syntax.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
For more detailed information, please read /bbmap/docs/guides/BBSketchGuide.txt.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#Default JVM heap flags; calcXmx below may override them.
z="-Xmx4g"
z2="-Xms4g"
set=0
#Print help and quit when run with no arguments or -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Pick the JVM heap: keep an explicit -Xmx, else use 84% of free RAM
#(4000m fallback) via freeRam from calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Build and run the BlacklistMaker command, echoing it to stderr first.
sketchblacklist() {
local CMD="java $EA $EOOM $z $z2 -cp $CP sketch.BlacklistMaker $@"
echo $CMD >&2
eval $CMD
}
sketchblacklist "$@"
#Prints the bbwrap.sh help text to stdout.
usage(){
echo "
Last modified February 13, 2020
Description: Wrapper for BBMap to allow multiple input and output files for the same reference.
To index: bbwrap.sh ref=<reference fasta>
To map: bbwrap.sh in=<file,file,...> out=<file,file,...>
To map without an index: bbwrap.sh ref=<reference fasta> in=<file,file,...> out=<file,file,...> nodisk
To map pairs and singletons and output them into the same file:
bbwrap.sh in1=read1.fq,singleton.fq in2=read2.fq,null out=mapped.sam append
BBWrap will not work with stdin and stdout, or histogram output.
Other Parameters:
in=<file,file> Input sequences to map.
inlist=<fofn> Alternately, input and output can be a file of filenames,
one line per file, using the flag inlist, outlist, outmlist,
in2list, etc.
mapper=bbmap Select mapper. May be BBMap, BBMapPacBio,
or BBMapPacBioSkimmer.
append=f Append to files rather than overwriting them.
If append is enabled, and there is exactly one output file,
all output will be written to that file.
***** All BBMap parameters can be used; see bbmap.sh for more details. *****
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#JNI flag is assembled but immediately cleared: native acceleration off by default.
JNI="-Djava.library.path=""$DIR""jni/"
JNI=""
#Default JVM heap flags; calcXmx below may override them.
z="-Xmx1g"
z2="-Xms1g"
set=0
#Print help and quit when run with no arguments or -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Pick the JVM heap: keep an explicit -Xmx, else use 84% of free RAM
#(3200m fallback) via freeRam from calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 3200m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Build and run the BBWrap command with fixed build/overwrite/fastareadlen
#defaults, echoing it to stderr first.
bbwrap() {
local CMD="java $EA $EOOM $z $z2 $JNI -cp $CP align2.BBWrap build=1 overwrite=true fastareadlen=500 $@"
echo $CMD >&2
eval $CMD
}
bbwrap "$@"
#Prints the sortbyname.sh help text to stdout.
usage(){
echo "
Written by Brian Bushnell
Last modified November 7, 2019
Description: Sorts reads by name or other keys such as length,
quality, mapping position, flowcell coordinates, or taxonomy.
Writes temp files if memory is exceeded.
Usage: sortbyname.sh in=<file> out=<file>
Input may be fasta, fastq, or sam, compressed or uncompressed.
Parameters:
in=<file> Input file.
out=<file> Output file.
delete=t Delete temporary files.
allowtemp=t Allow writing temporary files.
name=t Sort reads by name.
length=f Sort reads by length.
quality=f Sort reads by quality.
position=f Sort reads by position (for mapped reads).
taxa=f Sort reads by taxonomy (for NCBI naming convention).
sequence=f Sort reads by sequence, alphabetically.
flowcell=f Sort reads by flowcell coordinates.
shuffle=f Shuffle reads randomly (untested).
list=<file> Sort reads according to this list of names.
ascending=t Sort ascending.
memmult=.35 Write a temp file when used memory drops below this
fraction of total memory.
Taxonomy-sorting parameters:
tree= Specify a taxtree file. On Genepool, use 'auto'.
gi= Specify a gitable file. On Genepool, use 'auto'.
accession= Specify one or more comma-delimited NCBI accession to
taxid files. On Genepool, use 'auto'.
Note: name, length, and quality are mutually exclusive.
Sorting by quality actually sorts by average expected error rate,
so ascending will place the highest-quality reads first.
Java Parameters:
-Xmx This will set Java's memory usage, overriding
autodetection. -Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#Default JVM heap flags; calcXmx below may override them.
z="-Xmx2g"
z2="-Xms2g"
set=0
#Print help and quit when run with no arguments or -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Pick the JVM heap: keep an explicit -Xmx, else use 84% of free RAM
#(2000m fallback) via freeRam from calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 2000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Build and run the SortByName command, echoing it to stderr first.
sortbyname() {
local CMD="java $EA $EOOM $z $z2 -cp $CP sort.SortByName $@"
echo $CMD >&2
eval $CMD
}
sortbyname "$@"
#Prints the summarizeseal.sh help text to stdout.
usage(){
echo "
Written by Brian Bushnell
Last modified June 22, 2016
Description: Summarizes the stats output of Seal for evaluation of
cross-contamination. The intended use is to map multiple libraries or
assemblies, of different multiplexed organisms, to a concatenated reference
containing one fused scaffold per organism. This will convert all of the
resulting stats files (one per library) to a single text file, with multiple
columns, indicating how much of the input hit the primary versus nonprimary
scaffolds.
If ingoresametaxa or ignoresamebarcode are used, ref names must be
in this format:
barcode,library,tax,location
For example:
6-G,N0296,gammaproteobacteria_bacterium,deep_ocean
Usage: summarizeseal.sh in=<file,file...> out=<file>
You can alternately run 'summarizeseal.sh *.txt out=out.txt'
Parameters:
in=<file> A list of stats files, or a text file containing one stats file name per line.
out=<file> Destination for summary.
printtotal=t (pt) Print a line summarizing the total contamination rate of all assemblies.
ignoresametaxa=f Ignore secondary hits sharing taxonomy.
ignoresamebarcode=f Ignore secondary hits sharing a barcode.
ignoresamelocation=f Ignore secondary hits sharing a sampling site.
totaldenominator=f (td) Use all bases as denominator rather than mapped bases.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#Small fixed heap; this tool only aggregates text stats, so no freeRam sizing.
z="-Xmx120m"
set=0
#Print help and quit when run with no arguments or -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Only honors an explicit -Xmx; no automatic memory detection here.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
#Build and run the SummarizeSealStats command (command echo left disabled).
summarizeseal() {
local CMD="java $EA $EOOM $z -cp $CP driver.SummarizeSealStats $@"
# echo $CMD >&2
eval $CMD
}
summarizeseal "$@"
#Prints the gi2taxid.sh help text to stdout.
usage(){
echo "
Written by Brian Bushnell.
Last modified December 19, 2019
Description: Renames sequences to indicate their NCBI taxIDs.
The headers must be in NCBI or Silva format with gi numbers,
accessions, or organism names. Only supports fasta and gff files.
Usage: gi2taxid.sh in=<file> out=<file> server
Parameters:
in=<file> Input sequences; required parameter. Must be fasta.
This can alternatively be a comma-delimited list,
or just a bunch of space-delimited filenames, e.g.:
gi2taxid.sh x.fa y.fa z.fa out=tid.fa tree=auto table=auto
out=<file> Destination for renamed sequences.
invalid=<file> Destination for headers with no taxid.
keepall=t Keep sequences with no taxid in normal output.
prefix=t Append the taxid as a prefix to the old header, but keep
the old header.
title=tid Set the title of the new number (e.g. ncbi, taxid, tid).
ziplevel=2 (zl) Compression level for gzip output.
pigz=t Spawn a pigz (parallel gzip) process for faster
compression than Java. Requires pigz to be installed.
silva=f Parse headers in Silva format.
shrinknames=f Replace multiple concatenated headers with the first.
deleteinvalid=f Delete the output file if there are any invalid headers.
Taxonomy file flags:
server=f Use the taxonomy server instead of local files.
Server mode only works for accessions (like RefSeq).
tree= Specify a taxtree file. On Genepool, use 'auto'.
gi= Specify a gitable file. On Genepool, use 'auto'.
accession= Specify one or more comma-delimited NCBI accession to
taxid files. On Genepool, use 'auto'.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx800m will specify 800 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#JNI flag is assembled but immediately cleared: native acceleration off by default.
JNI="-Djava.library.path=""$DIR""jni/"
JNI=""
#Default JVM heap flags; calcXmx below may override them.
z="-Xmx7g"
z2="-Xms7g"
set=0
#Print help and quit when run with no arguments or -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Pick the JVM heap: keep an explicit -Xmx, else use 84% of free RAM
#(7000m fallback) via freeRam from calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 7000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Build and run the RenameGiToTaxid command, echoing it to stderr first.
gi2taxid() {
local CMD="java $EA $EOOM $z $z2 -cp $CP tax.RenameGiToTaxid $@"
echo $CMD >&2
eval $CMD
}
gi2taxid "$@"
#Prints the idmatrix.sh help text to stdout.
usage(){
echo "
Written by Brian Bushnell
Last modified November 25, 2014
Description: Generates an identity matrix via all-to-all alignment.
*** WARNING: This program may produce incorrect results in some cirumstances.
*** It is not advisable to use until fixed.
Usage: idmatrix.sh in=<file> out=<file>
Parameters:
in=<file> File containing reads. in=stdin.fa will pipe from stdin.
out=<file> Matrix output. out=stdout will pipe to stdout.
threads=auto (t) Set number of threads to use; default is number of
logical processors.
percent=f Output identity as percent rather than a fraction.
edits= Allow at most this much edit distance. Default is the
length of the longest input sequence. Lower is faster.
width= Alignment bandwidth, lower is faster. Default: 2*edits+1.
usejni=f (jni) Do alignments faster, in C code. Requires
compiling the C code; details are in /jni/README.txt.
Java Parameters:
-Xmx This will set Java's memory usage, overriding automatic
memory detection. -Xmx20g will specify
20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#JNI flag is assembled but immediately cleared: native acceleration off by default.
JNI="-Djava.library.path=""$DIR""jni/"
JNI=""
#Default JVM heap flags; calcXmx below may override them.
z="-Xmx2g"
z2="-Xms2g"
set=0
#Print help and quit when run with no arguments or -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Pick the JVM heap: keep an explicit -Xmx, else use 84% of free RAM
#(3200m fallback) via freeRam from calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 3200m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Build and run the IdentityMatrix command, echoing it to stderr first.
idmatrix() {
local CMD="java $EA $EOOM $z $z2 $JNI -cp $CP jgi.IdentityMatrix $@"
echo $CMD >&2
eval $CMD
}
idmatrix "$@"
#Prints the tetramerfreq.sh help text to stdout.
usage(){
echo "
Written by Shijie Yao
Last modified May 31, 2018
Description: DNA Tetramer analysis.
DNA tetramers are counted for each sub-sequence of window size in the sequence.
The window slides along the sequence by the step length.
Sub-sequence shorter than the window size is ignored. Tetramers containing N are ignored.
Usage: TetramerFreq.sh in=<input file> out=<output file> step=500 window=2000
Input may be fasta or fastq, compressed or uncompressed.
Standard parameters:
in=<file> DNA sequence input file
out=<file> Output file name
step/s=INT Step size (default 500)
window/w=INT Window size (default 2kb); <=0 turns windowing off (e.g. short reads)
short=T/F Print lines for sequences shorter than window (default F)
k=INT Kmer length (default 4)
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
#Default JVM heap flags; calcXmx below may override them.
z="-Xmx4g"
z2="-Xms4g"
set=0
#Print help and quit when run with no arguments or -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
#Pick the JVM heap: keep an explicit -Xmx, else use 84% of free RAM
#(4000m fallback) via freeRam from calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
#Build and run the TetramerFrequencies command, echoing it to stderr first.
tetramerfreq () {
local CMD="java $EA $EOOM $z -cp $CP jgi.TetramerFrequencies $@"
echo $CMD >&2
eval $CMD
}
tetramerfreq "$@"
#Prints the removehuman2.sh help text to stdout.
usage(){
echo "
Written by Brian Bushnell
Last modified September 17, 2018
This script requires at least 17GB RAM.
It is designed for NERSC and uses hard-coded paths.
Description: Removes all reads that map to the human genome with at least 88% identity after quality trimming.
This is more aggressive than removehuman.sh and uses an unmasked human genome reference.
It removes roughly 99.99% of human 2x150bp reads, but may incur false-positive removals.
NOTE! This program uses hard-coded paths and will only run on Nersc systems unless you change the path.
Usage: removehuman.sh in=<input file> outu=<clean output file>
Input may be fasta or fastq, compressed or uncompressed.
Parameters:
threads=auto (t) Set number of threads to use; default is number of logical processors.
overwrite=t (ow) Set to false to force the program to abort rather than overwrite an existing file.
interleaved=auto (int) If true, forces fastq input to be paired and interleaved.
trim=t Trim read ends to remove bases with quality below minq.
Values: t (trim both ends), f (neither end), r (right end only), l (left end only).
untrim=t Undo the trimming after mapping.
minq=4 Trim quality threshold.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression level; lower compression is faster.
outm=<file> File to output the reads that mapped to human.
path= Set the path to an indexed human genome.
***** All BBMap parameters can be used; run bbmap.sh for more details. *****
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
JNI="-Djava.library.path=""$DIR""jni/"
JNI=""
z="-Xmx16000m"
z2="-Xms16000m"
set=0
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
# Build and launch the human-read-removal pipeline: align2.BBMap with
# preset tuning flags and a hard-coded hg19 index path, forwarding every
# script argument. The assembled command line is echoed to stderr first.
removehuman() {
	local cmd
	cmd="java $EA $EOOM $z $z2 $JNI -cp $CP align2.BBMap minratio=0.75 maxindel=8 bwr=0.22 bw=26 minhits=1 path=/global/projectb/sandbox/gaag/bbtools/hg19 build=2 pigz unpigz zl=6 qtrim=r trimq=10 untrim idtag usemodulo printunmappedcount ztd=2 maxsites=1 k=14 tipsearch=0 kfilter=25 bloomfilter $@"
	echo $cmd >&2
	eval $cmd
}
removehuman "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/removehuman2.sh | removehuman2.sh |
# Print reformat.sh's help text to stdout.
# NOTE: the body is a single double-quoted string passed to echo; comments
# cannot be placed inside it without appearing in the user-visible output.
usage(){
echo "
Written by Brian Bushnell
Last modified April 3, 2020
Description: Reformats reads to change ASCII quality encoding, interleaving, file format, or compression format.
Optionally performs additional functions such as quality trimming, subsetting, and subsampling.
Supports fastq, fasta, fasta+qual, scarf, oneline, sam, bam, gzip, bz2.
Please read bbmap/docs/guides/ReformatGuide.txt for more information.
Usage: reformat.sh in=<file> in2=<file2> out=<outfile> out2=<outfile2>
in2 and out2 are for paired reads and are optional.
If input is paired and there is only one output file, it will be written interleaved.
Parameters and their defaults:
ow=f (overwrite) Overwrites files that already exist.
app=f (append) Append to files that already exist.
zl=4 (ziplevel) Set compression level, 1 (low) to 9 (max).
int=f (interleaved) Determines whether INPUT file is considered interleaved.
fastawrap=70 Length of lines in fasta output.
fastareadlen=0 Set to a non-zero number to break fasta files into reads of at most this length.
fastaminlen=1 Ignore fasta reads shorter than this.
qin=auto ASCII offset for input quality. May be 33 (Sanger), 64 (Illumina), or auto.
qout=auto ASCII offset for output quality. May be 33 (Sanger), 64 (Illumina), or auto (same as input).
qfake=30 Quality value used for fasta to fastq reformatting.
qfin=<.qual file> Read qualities from this qual file, for the reads coming from 'in=<fasta file>'
qfin2=<.qual file> Read qualities from this qual file, for the reads coming from 'in2=<fasta file>'
qfout=<.qual file> Write qualities from this qual file, for the reads going to 'out=<fasta file>'
qfout2=<.qual file> Write qualities from this qual file, for the reads coming from 'out2=<fasta file>'
outsingle=<file> (outs) If a read is longer than minlength and its mate is shorter, the longer one goes here.
deleteinput=f Delete input upon successful completion.
ref=<file> Optional reference fasta for sam processing.
Processing Parameters:
verifypaired=f (vpair) When true, checks reads to see if the names look paired. Prints an error message if not.
verifyinterleaved=f (vint) sets 'vpair' to true and 'interleaved' to true.
allowidenticalnames=f (ain) When verifying pair names, allows identical names, instead of requiring /1 and /2 or 1: and 2:
tossbrokenreads=f (tbr) Discard reads that have different numbers of bases and qualities. By default this will be detected and cause a crash.
ignorebadquality=f (ibq) Fix out-of-range quality values instead of crashing with a warning.
addslash=f Append ' /1' and ' /2' to read names, if not already present. Please include the flag 'int=t' if the reads are interleaved.
spaceslash=t Put a space before the slash in addslash mode.
addcolon=f Append ' 1:' and ' 2:' to read names, if not already present. Please include the flag 'int=t' if the reads are interleaved.
underscore=f Change whitespace in read names to underscores.
rcomp=f (rc) Reverse-complement reads.
rcompmate=f (rcm) Reverse-complement read 2 only.
comp=f (complement) Reverse-complement reads.
changequality=t (cq) N bases always get a quality of 0 and ACGT bases get a min quality of 2.
quantize=f Quantize qualities to a subset of values like NextSeq. Can also be used with comma-delimited list, like quantize=0,8,13,22,27,32,37
tuc=f (touppercase) Change lowercase letters in reads to uppercase.
uniquenames=f Make duplicate names unique by appending _<number>.
remap= A set of pairs: remap=CTGN will transform C>T and G>N.
Use remap1 and remap2 to specify read 1 or 2.
iupacToN=f (itn) Convert non-ACGTN symbols to N.
monitor=f Kill this process if it crashes. monitor=600,0.01 would kill after 600 seconds under 1% usage.
crashjunk=t Crash when encountering reads with invalid bases.
tossjunk=f Discard reads with invalid characters as bases.
fixjunk=f Convert invalid bases to N (or X for amino acids).
dotdashxton=f Specifically convert . - and X to N (or X for amino acids).
fixheaders=f Convert nonstandard header characters to standard ASCII.
recalibrate=f (recal) Recalibrate quality scores. Must first generate matrices with CalcTrueQuality.
maxcalledquality=41 Quality scores capped at this upper bound.
mincalledquality=2 Quality scores of ACGT bases will be capped at lower bound.
trimreaddescription=f (trd) Trim the names of reads after the first whitespace.
trimrname=f For sam/bam files, trim rname/rnext fields after the first space.
fixheaders=f Replace characters in headers such as space, *, and | to make them valid file names.
warnifnosequence=t For fasta, issue a warning if a sequenceless header is encountered.
warnfirsttimeonly=t Issue a warning for only the first sequenceless header.
utot=f Convert U to T (for RNA -> DNA translation).
padleft=0 Pad the left end of sequences with this many symbols.
padright=0 Pad the right end of sequences with this many symbols.
pad=0 Set padleft and padright to the same value.
padsymbol=N Symbol to use for padding.
Histogram output parameters:
bhist=<file> Base composition histogram by position.
qhist=<file> Quality histogram by position.
qchist=<file> Count of bases with each quality value.
aqhist=<file> Histogram of average read quality.
bqhist=<file> Quality histogram designed for box plots.
lhist=<file> Read length histogram.
gchist=<file> Read GC content histogram.
gcbins=100 Number gchist bins. Set to 'auto' to use read length.
gcplot=f Add a graphical representation to the gchist.
maxhistlen=6000 Set an upper bound for histogram lengths; higher uses more memory.
The default is 6000 for some histograms and 80000 for others.
Histograms for sam files only (requires sam format 1.4 or higher):
ehist=<file> Errors-per-read histogram.
qahist=<file> Quality accuracy histogram of error rates versus quality score.
indelhist=<file> Indel length histogram.
mhist=<file> Histogram of match, sub, del, and ins rates by read location.
ihist=<file> Insert size histograms. Requires paired reads in a sam file.
idhist=<file> Histogram of read count versus percent identity.
idbins=100 Number idhist bins. Set to 'auto' to use read length.
Sampling parameters:
reads=-1 Set to a positive number to only process this many INPUT reads (or pairs), then quit.
skipreads=-1 Skip (discard) this many INPUT reads before processing the rest.
samplerate=1 Randomly output only this fraction of reads; 1 means sampling is disabled.
sampleseed=-1 Set to a positive number to use that prng seed for sampling (allowing deterministic sampling).
samplereadstarget=0 (srt) Exact number of OUTPUT reads (or pairs) desired.
samplebasestarget=0 (sbt) Exact number of OUTPUT bases desired.
Important: srt/sbt flags should not be used with stdin, samplerate, qtrim, minlength, or minavgquality.
upsample=f Allow srt/sbt to upsample (duplicate reads) when the target is greater than input.
prioritizelength=f If true, calculate a length threshold to reach the target, and retain all reads of at least that length (must set srt or sbt).
Trimming and filtering parameters:
qtrim=f Trim read ends to remove bases with quality below trimq.
Values: t (trim both ends), f (neither end), r (right end only), l (left end only), w (sliding window).
trimq=6 Regions with average quality BELOW this will be trimmed. Can be a floating-point number like 7.3.
minlength=0 (ml) Reads shorter than this after trimming will be discarded. Pairs will be discarded only if both are shorter.
mlf=0 (mlf) Reads shorter than this fraction of original length after trimming will be discarded.
maxlength=0 If nonzero, reads longer than this after trimming will be discarded.
breaklength=0 If nonzero, reads longer than this will be broken into multiple reads of this length. Does not work for paired reads.
requirebothbad=t (rbb) Only discard pairs if both reads are shorter than minlen.
invertfilters=f (invert) Output failing reads instead of passing reads.
minavgquality=0 (maq) Reads with average quality (after trimming) below this will be discarded.
maqb=0 If positive, calculate maq from this many initial bases.
chastityfilter=f (cf) Reads with names containing ' 1:Y:' or ' 2:Y:' will be discarded.
barcodefilter=f Remove reads with unexpected barcodes if barcodes is set, or barcodes containing 'N' otherwise.
A barcode must be the last part of the read header.
barcodes= Comma-delimited list of barcodes or files of barcodes.
maxns=-1 If 0 or greater, reads with more Ns than this (after trimming) will be discarded.
minconsecutivebases=0 (mcb) Discard reads without at least this many consecutive called bases.
forcetrimleft=0 (ftl) If nonzero, trim left bases of the read to this position (exclusive, 0-based).
forcetrimright=0 (ftr) If nonzero, trim right bases of the read after this position (exclusive, 0-based).
forcetrimright2=0 (ftr2) If positive, trim this many bases on the right end.
forcetrimmod=5 (ftm) If positive, trim length to be equal to zero modulo this number.
mingc=0 Discard reads with GC content below this.
maxgc=1 Discard reads with GC content above this.
gcpairs=t Use average GC of paired reads.
Also affects gchist.
Sam and bam processing options:
mappedonly=f Toss unmapped reads.
unmappedonly=f Toss mapped reads.
pairedonly=f Toss reads that are not mapped as proper pairs.
unpairedonly=f Toss reads that are mapped as proper pairs.
primaryonly=f Toss secondary alignments. Set this to true for sam to fastq conversion.
minmapq=-1 If non-negative, toss reads with mapq under this.
maxmapq=-1 If non-negative, toss reads with mapq over this.
requiredbits=0 (rbits) Toss sam lines with any of these flag bits unset. Similar to samtools -f.
filterbits=0 (fbits) Toss sam lines with any of these flag bits set. Similar to samtools -F.
stoptag=f Set to true to write a tag indicating read stop location, prefixed by YS:i:
sam= Set to 'sam=1.3' to convert '=' and 'X' cigar symbols (from sam 1.4+ format) to 'M'.
Set to 'sam=1.4' to convert 'M' to '=' and 'X' (sam=1.4 requires MD tags to be present, or ref to be specified).
Sam and bam alignment filtering options:
These require = and X symbols in cigar strings, or MD tags, or a reference fasta.
-1 means disabled; to filter reads with any of a symbol type, set to 0.
subfilter=-1 Discard reads with more than this many substitutions.
minsubs=-1 Discard reads with fewer than this many substitutions.
insfilter=-1 Discard reads with more than this many insertions.
delfilter=-1 Discard reads with more than this many deletions.
indelfilter=-1 Discard reads with more than this many indels.
editfilter=-1 Discard reads with more than this many edits.
inslenfilter=-1 Discard reads with an insertion longer than this.
dellenfilter=-1 Discard reads with a deletion longer than this.
minidfilter=-1.0 Discard reads with identity below this (0-1).
maxidfilter=1.0 Discard reads with identity above this (0-1).
clipfilter=-1 Discard reads with more than this many soft-clipped bases.
Kmer counting and cardinality estimation:
k=0 If positive, count the total number of kmers.
cardinality=f (loglog) Count unique kmers using the LogLog algorithm.
loglogbuckets=1999 Use this many buckets for cardinality estimation.
Shortcuts:
The # symbol will be substituted for 1 and 2. The % symbol in out will be substituted for input name minus extensions.
For example:
reformat.sh in=read#.fq out=%.fa
...is equivalent to:
reformat.sh in1=read1.fq in2=read2.fq out1=read1.fa out2=read2.fa
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
# Resolve any chain of symlinks so DIR becomes the real script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Java classpath: the 'current/' directory next to this script.
CP="$DIR""current/"
# Default JVM heap flags.
# BUGFIX: the second line previously reassigned z (z="-Xms300m"), which
# clobbered the -Xmx flag and left $z2 -- used by reformat() below -- unset.
# It must set z2, matching every sibling script in this package.
z="-Xmx300m"
z2="-Xms300m"
# set is flipped to 1 by parseXmx (from calcmem.sh) if the user passed -Xmx.
set=0
# Show help and exit when called with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Derive JVM flags; setEnvironment/parseXmx are presumably defined in
# calcmem.sh (not visible here) -- TODO confirm.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
# Run jgi.ReformatReads with the computed JVM flags, forwarding every
# script argument; the command line is echoed to stderr before execution.
reformat() {
	local cmd
	cmd="java $EA $EOOM $z $z2 -cp $CP jgi.ReformatReads $@"
	echo $cmd >&2
	eval $cmd
}
reformat "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/reformat.sh | reformat.sh |
# Print mergesorted.sh's help text to stdout.
# NOTE: the body is one double-quoted string passed to echo; comments
# cannot be placed inside it without appearing in the user-visible output.
usage(){
echo "
Written by Brian Bushnell
Last modified September 12, 2018
Description: Sorts reads by name or other keys such as length,
quality, mapping position, flowcell coordinates, or taxonomy.
Intended to merge temp files produced by SortByName if the program
ran out of time during merging.
Usage: mergesorted.sh sort_temp* out=<file>
Input may be fasta, fastq, or sam, compressed or uncompressed.
Parameters:
in=<file,file,...> Input files. Files may be specified without in=.
out=<file> Output file.
delete=t Delete input files after merging.
name=t Sort reads by name.
length=f Sort reads by length.
quality=f Sort reads by quality.
position=f Sort reads by position (for mapped reads).
taxa=f Sort reads by taxonomy (for NCBI naming convention).
sequence=f Sort reads by sequence, alphabetically.
flowcell=f Sort reads by flowcell coordinates.
shuffle=f Shuffle reads randomly (untested).
list=<file> Sort reads according to this list of names.
ascending=t Sort ascending.
memmult=.35 Write a temp file when used memory drops below this
fraction of total memory.
Taxonomy-sorting parameters:
tree= Specify a taxtree file. On Genepool, use 'auto'.
gi= Specify a gitable file. On Genepool, use 'auto'.
accession= Specify one or more comma-delimited NCBI accession to
taxid files. On Genepool, use 'auto'.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
# Resolve any chain of symlinks so DIR becomes the real script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Java classpath: the 'current/' directory next to this script.
CP="$DIR""current/"
# Default JVM heap flags (-Xmx max / -Xms initial); calcXmx may override.
z="-Xmx2g"
z2="-Xms2g"
# set is flipped to 1 by parseXmx (from calcmem.sh) if the user passed -Xmx.
set=0
# Show help and exit when called with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Derive JVM flags; setEnvironment/parseXmx/freeRam are presumably defined
# in calcmem.sh (not visible here) -- TODO confirm.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
# Respect a user-supplied -Xmx (parseXmx sets set=1 in that case).
if [[ $set == 1 ]]; then
return
fi
# freeRam computes RAM (MB); args appear to be a default and a percent
# cap -- confirm in calcmem.sh.
freeRam 2000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Run sort.MergeSorted with the computed JVM flags, forwarding every
# script argument; the command line is echoed to stderr before execution.
function mergesorted() {
	local cmd
	cmd="java $EA $EOOM $z $z2 -cp $CP sort.MergeSorted $@"
	echo $cmd >&2
	eval $cmd
}
mergesorted "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/mergesorted.sh | mergesorted.sh |
# Print applyvariants.sh's help text to stdout.
# NOTE: the body is one double-quoted string passed to echo; comments
# cannot be placed inside it without appearing in the user-visible output.
usage(){
echo "
Written by Brian Bushnell
Last modified April 4, 2020
Description: Mutates a reference by applying a set of variants.
Usage: applyvariants.sh in=<input file> vcf=<vcf file> out=<output file>
Standard parameters:
in=<file> Reference fasta.
vcf=<file> Variants.
basecov=<file> Optional per-base coverage from BBMap or Pileup.
out=<file> Output fasta.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression
level; lower compression is faster.
Processing parameters:
mincov=0 If positive and depth is below this, change ref to N.
Requires a coverage file.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
# Resolve any chain of symlinks so DIR becomes the real script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Java classpath: the 'current/' directory next to this script.
CP="$DIR""current/"
# Default JVM heap flags (-Xmx max / -Xms initial); calcXmx may override.
z="-Xmx4g"
z2="-Xms4g"
# set is flipped to 1 by parseXmx (from calcmem.sh) if the user passed -Xmx.
set=0
# Show help and exit when called with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Derive JVM flags; setEnvironment/parseXmx/freeRam are presumably defined
# in calcmem.sh (not visible here) -- TODO confirm.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
# Respect a user-supplied -Xmx (parseXmx sets set=1 in that case).
if [[ $set == 1 ]]; then
return
fi
# freeRam computes RAM (MB); args appear to be a default and a percent
# cap -- confirm in calcmem.sh.
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Run var2.ApplyVariants with the computed JVM flags, forwarding every
# script argument; the command line is echoed to stderr before execution.
# FIX: also pass $z2 (-Xms), which calcXmx computes but the command
# previously omitted -- inconsistent with sibling scripts (bbrealign.sh,
# mutate.sh). If z2 is unset the token expands to nothing, so this is safe.
applyvariants() {
local CMD="java $EA $EOOM $z $z2 -cp $CP var2.ApplyVariants $@"
echo $CMD >&2
eval $CMD
}
applyvariants "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/applyvariants.sh | applyvariants.sh |
# Print bbrealign.sh's help text to stdout.
# FIX: corrected typos in the descriptive text ("substitutitions" ->
# "substitutions", "seqments" -> "segments"). The flag name
# 'supplimentary' is left as-is: it is the literal option spelling the
# Java tool accepts, so it must not be "corrected".
usage(){
echo "
Written by Brian Bushnell
Last modified November 8, 2018
Description: Realigns mapped reads to a reference.
Usage: bbrealign.sh in=<file> ref=<file> out=<file>
Input may be a sorted or unsorted sam or bam file.
The reference should be fasta.
I/O parameters:
in=<file> Input reads.
out=<file> Output reads.
ref=<file> Reference fasta.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
Trimming parameters:
border=0 Trim at least this many bases on both ends of reads.
qtrim=r Quality-trim reads on this end
r: right, l: left, rl: both, f: don't quality-trim.
trimq=10 Quality-trim bases below this score.
Realignment parameters:
unclip=f Convert clip symbols from exceeding the ends of the
realignment zone into matches and substitutions.
repadding=70 Pad alignment by this much on each end. Typically,
longer is more accurate for long indels, but greatly
reduces speed.
rerows=602 Use this many rows maximum for realignment. Reads longer
than this cannot be realigned.
recols=2000 Reads may not be aligned to reference segments longer
than this. Needs to be at least read length plus
max deletion length plus twice padding.
msa= Select the aligner. Options:
MultiStateAligner11ts: Default.
MultiStateAligner9PacBio: Use for PacBio reads, or for
Illumina reads mapped to PacBio/Nanopore reads.
Sam-filtering parameters:
minpos= Ignore alignments not overlapping this range.
maxpos= Ignore alignments not overlapping this range.
minreadmapq=4 Ignore alignments with lower mapq.
contigs= Comma-delimited list of contig names to include. These
should have no spaces, or underscores instead of spaces.
secondary=f Include secondary alignments.
supplimentary=f Include supplimentary alignments.
invert=f Invert sam filters.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
# Resolve any chain of symlinks so DIR becomes the real script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Java classpath: the 'current/' directory next to this script.
CP="$DIR""current/"
# Default JVM heap flags (-Xmx max / -Xms initial); calcXmx may override.
z="-Xmx4g"
z2="-Xms4g"
# set is flipped to 1 by parseXmx (from calcmem.sh) if the user passed -Xmx.
set=0
# Show help and exit when called with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Derive JVM flags; setEnvironment/parseXmx/freeRam are presumably defined
# in calcmem.sh (not visible here) -- TODO confirm.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
# Respect a user-supplied -Xmx (parseXmx sets set=1 in that case).
if [[ $set == 1 ]]; then
return
fi
# freeRam computes RAM (MB); args appear to be a default and a percent
# cap -- confirm in calcmem.sh.
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Run var2.Realign with the computed JVM flags, forwarding every
# script argument; the command line is echoed to stderr before execution.
function bbrealign() {
	local cmd
	cmd="java $EA $EOOM $z $z2 -cp $CP var2.Realign $@"
	echo $cmd >&2
	eval $cmd
}
bbrealign "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/bbrealign.sh | bbrealign.sh |
# Print mutate.sh's help text to stdout.
# NOTE: the body is one double-quoted string passed to echo; comments
# cannot be placed inside it without appearing in the user-visible output.
usage(){
echo "
Written by Brian Bushnell
Last modified August 6, 2019
Description: Creates a mutant version of a genome.
Usage: mutate.sh in=<input file> out=<output file> id=<identity>
I/O parameters:
in=<file> Input genome.
out=<file> Output mutant genome.
vcf=<file> Output VCF file showing variations added.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression
level; lower compression is faster.
Processing parameters:
subrate=0 Substitution rate, 0 to 1.
indelrate=0 Indel rate, 0 to 1.
maxindel=1 Max indel length.
indelspacing=10 Minimum distance between subsequent indels.
id=1 Target identity, 0 to 1; 1 means 100%.
If this is used it will override subrate and indelrate;
99% of the mutations will be substitutions, and 1% indels.
fraction=1 Genome fraction, 0 to 1; 1 means 100%. A lower fraction
will randomly select that fraction on a per-sequence basis,
possibly incurring one chimeric junction per sequence.
Not compatible with VCF output.
period=-1 If positive, place exactly one mutation every X bases.
prefix= Set this flag to rename the new contigs with this prefix
and a number.
amino=f Treat the input as amino acid sequence.
ploidy=1 Set the ploidy. ploidy>1 allows heterozygous mutations.
This will create one copy of each input sequence per ploidy.
hetrate=0.5 If polyploid, fraction of mutations that are heterozygous.
nohomopolymers=f If true, prevent indels in homopolymers that lead to
ambiguous variant calls. For example, inserting A between
AC or deleting T from TTTT. This is mainly for grading
purposes. It does not fully solve the problem, but greatly
improves concordance (reducing disagreements by 70%).
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
# Resolve any chain of symlinks so DIR becomes the real script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Java classpath: the 'current/' directory next to this script.
CP="$DIR""current/"
# Default JVM heap flags (-Xmx max / -Xms initial); calcXmx may override.
z="-Xmx4g"
z2="-Xms4g"
# set is flipped to 1 by parseXmx (from calcmem.sh) if the user passed -Xmx.
set=0
# Show help and exit when called with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Derive JVM flags; setEnvironment/parseXmx/freeRam are presumably defined
# in calcmem.sh (not visible here) -- TODO confirm.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
# Respect a user-supplied -Xmx (parseXmx sets set=1 in that case).
if [[ $set == 1 ]]; then
return
fi
# freeRam computes RAM (MB); args appear to be a default and a percent
# cap -- confirm in calcmem.sh.
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Run jgi.MutateGenome with the computed JVM flags, forwarding every
# script argument; the command line is echoed to stderr before execution.
function mutate() {
	local cmd
	cmd="java $EA $EOOM $z $z2 -cp $CP jgi.MutateGenome $@"
	echo $cmd >&2
	eval $cmd
}
mutate "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/mutate.sh | mutate.sh |
# Print filterbarcodes.sh's help text to stdout.
# NOTE: the body is one double-quoted string passed to echo; comments
# cannot be placed inside it without appearing in the user-visible output.
usage(){
echo "
Written by Brian Bushnell
Last modified February 17, 2015
Description: Filters barcodes by quality, and generates quality histograms.
Usage: filterbarcodes.sh in=<file> out=<file> maq=<integer>
Input parameters:
in=<file> Reads that have already been muxed with barcode qualities using mergebarcodes.sh.
int=auto (interleaved) If true, forces fastq input to be paired and interleaved.
qin=auto ASCII offset for input quality. May be 33 (Sanger), 64 (Illumina), or auto.
Output parameters:
out=<file> Write filtered reads here. 'out=stdout.fq' will pipe to standard out.
cor=<file> Correlation between read and index qualities.
bqhist=<file> Barcode quality histogram by position.
baqhist=<file> Barcode average quality histogram.
bmqhist=<file> Barcode min quality histogram.
overwrite=t (ow) Set to false to force the program to abort rather than overwrite an existing file.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression level; lower compression is faster.
fastawrap=80 Length of lines in fasta output.
qout=auto ASCII offset for output quality. May be 33 (Sanger), 64 (Illumina), or auto (same as input).
maq=0 Filter reads with barcode average quality less than this.
mmq=0 Filter reads with barcode minimum quality less than this.
Other parameters:
pigz=t Use pigz to compress. If argument is a number, that will set the number of pigz threads.
unpigz=t Use pigz to decompress.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
# Resolve any chain of symlinks so DIR becomes the real script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Java classpath: the 'current/' directory next to this script.
CP="$DIR""current/"
# Fixed small heap; this script does not auto-size memory (no freeRam call,
# no z2/-Xms flag) -- unlike most sibling scripts.
z="-Xmx200m"
# set is flipped to 1 by parseXmx (from calcmem.sh) if the user passed -Xmx.
set=0
# Show help and exit when called with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Derive JVM flags; setEnvironment/parseXmx are presumably defined in
# calcmem.sh (not visible here) -- TODO confirm.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
# Run jgi.CorrelateBarcodes with the computed JVM flags, forwarding every
# script argument; the command line is echoed to stderr before execution.
function filterbarcodes() {
	local cmd
	cmd="java $EA $EOOM $z -cp $CP jgi.CorrelateBarcodes $@"
	echo $cmd >&2
	eval $cmd
}
filterbarcodes "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/filterbarcodes.sh | filterbarcodes.sh |
# Print fungalrelease.sh's help text to stdout.
# FIX: corrected the documented flag 'sortcscaffolds=t' to the actual
# flag spelling 'sortscaffolds=t' (sort-scaffolds; the stray 'c' made the
# documented option name wrong).
usage(){
echo "
Written by Brian Bushnell
Last modified April 24, 2019
Description: Reformats a fungal assembly for release.
Also creates contig and agp files.
Usage: fungalrelease.sh in=<input file> out=<output file>
I/O parameters:
in=<file> Input scaffolds.
out=<file> Output scaffolds.
outc=<file> Output contigs.
qfin=<file> Optional quality scores input.
qfout=<file> Optional quality scores output.
qfoutc=<file> Optional contig quality scores output.
agp=<file> Output AGP file.
legend=<file> Output name legend file.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
Processing parameters:
fastawrap=60 Wrap length for fasta lines.
tuc=t Convert sequence to upper case.
baniupac=t Crash on encountering a non-ACGTN base call.
mingap=10 Expand all gaps (Ns) to be at least this long.
mingapin=1 Only expand gaps that are at least this long.
sortscaffolds=t Sort scaffolds descending by length.
sortcontigs=f Sort contigs descending by length.
renamescaffolds=t Rename scaffolds to 'scaffold_#'.
scafnum=1 Number of first scaffold.
renamecontigs=f Rename contigs to 'contig_#' instead of 'scafname_c#'.
contignum=1 Number of first contig; only used if renamecontigs=t.
minscaf=1 Only retain scaffolds at least this long.
mincontig=1 Only retain contigs at least this long.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
# Resolve any chain of symlinks so DIR becomes the real script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Java classpath: the 'current/' directory next to this script.
CP="$DIR""current/"
# Default JVM heap flags (-Xmx max / -Xms initial).
z="-Xmx4g"
z2="-Xms4g"
# set is flipped to 1 by parseXmx (from calcmem.sh) if the user passed -Xmx.
set=0
# Show help and exit when called with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Derive JVM flags; setEnvironment/parseXmx are presumably defined in
# calcmem.sh (not visible here) -- TODO confirm.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
# NOTE(review): this guard is a no-op because the function ends right
# after it; sibling scripts call freeRam/z/z2 here -- possibly dropped
# lines. Confirm against upstream fungalrelease.sh.
if [[ $set == 1 ]]; then
return
fi
}
calcXmx "$@"
# Run jgi.FungalRelease with the computed JVM flags, forwarding every
# script argument; the command line is echoed to stderr before execution.
# FIX: removed the duplicated \$EOOM token (was 'java $EOOM $EA $EOOM ...'),
# matching the command shape of every sibling script.
fungalrelease() {
local CMD="java $EA $EOOM $z $z2 -cp $CP jgi.FungalRelease $@"
echo $CMD >&2
eval $CMD
}
fungalrelease "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/fungalrelease.sh | fungalrelease.sh |
# Print filterbysequence.sh's help text to stdout.
# NOTE: the body is one double-quoted string passed to echo; comments
# cannot be placed inside it without appearing in the user-visible output.
usage(){
echo "
Written by Brian Bushnell
Last modified December 18, 2015
Description: Filters sequences by exact sequence matches.
Usage: filterbysequence.sh in=<file> out=<file> ref=<file> include=<t/f>
I/O Parameters:
in= Primary input. 'in2' will specify a second file.
out= Primary out. 'out2' will specify a second file.
ref= A reference file or comma-delimited list of files.
literal= A literal sequence or comma-delimited list of sequences.
ow=t (overwrite) Overwrites files that already exist.
zl=2 (ziplevel) Set compression level, 1 (low) to 9 (max).
Processing Parameters:
include=f Set to 'true' to include the filtered sequences rather
than excluding them.
rcomp=t Match reverse complements as well.
case=f (casesensitive) Require matching case.
storebases=t (sb) Store ref bases. Requires more memory. If false,
case-sensitive matching cannot be done, and the matching
will be probabilistic based 128-bit hashcodes.
threads=auto (t) Specify the number of worker threads.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
# Resolve any chain of symlinks so DIR becomes the real script directory.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Java classpath: the 'current/' directory next to this script.
CP="$DIR""current/"
# Default JVM max-heap flag; calcXmx may override (and also sets z2).
z="-Xmx800m"
# set is flipped to 1 by parseXmx (from calcmem.sh) if the user passed -Xmx.
set=0
# Show help and exit when called with no arguments or with -h/--help.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Derive JVM flags; setEnvironment/parseXmx/freeRam are presumably defined
# in calcmem.sh (not visible here) -- TODO confirm.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
# Respect a user-supplied -Xmx (parseXmx sets set=1 in that case).
if [[ $set == 1 ]]; then
return
fi
# freeRam computes RAM (MB); args appear to be a default and a percent
# cap -- confirm in calcmem.sh.
freeRam 800m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Run jgi.FilterBySequence with the computed JVM flags, forwarding every
# script argument; the command line is echoed to stderr before execution.
filterbysequence() {
	local cmd
	cmd="java $EA $EOOM $z -cp $CP jgi.FilterBySequence $@"
	echo $cmd >&2
	eval $cmd
}
filterbysequence "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/filterbysequence.sh | filterbysequence.sh |
# Print postfilter.sh's help text to stdout.
# NOTE: the body is one double-quoted string passed to echo; comments
# cannot be placed inside it without appearing in the user-visible output.
usage(){
echo "
Written by Brian Bushnell
Last modified July 27, 2015
Description: Maps reads, then filters an assembly by contig coverage.
Intended to reduce misassembly rate of SPAdes by removing suspicious contigs.
Usage: postfilter.sh in=<reads> ref=<contigs> out=<filtered contigs>
Standard Parameters:
in=<file> File containing input reads.
in2=<file> Optional file containing read mates.
ref=<file> File containing input assembly.
cov=covstats.txt File to write coverage stats generated by pileup.
out=filtered.fa Destination of clean output assembly.
outdirty=<file> (outd) Destination of removed contigs; optional.
ow=f (overwrite) Overwrites files that already exist.
app=f (append) Append to files that already exist.
zl=4 (ziplevel) Set compression level, 1 (low) to 9 (max).
int=f (interleaved) Determines whether input reads are considered interleaved.
Filtering Parameters:
minc=2 (mincov) Discard contigs with lower average coverage.
minp=95 (minpercent) Discard contigs with a lower percent covered bases.
minr=6 (minreads) Discard contigs with fewer mapped reads.
minl=400 (minlength) Discard shorter contigs.
trim=0 (trimends) Trim the first and last X bases of each sequence.
Mapping Parameters (unlisted params will use BBMap defaults)
minhits=2
maxindel=0
tipsearch=0
bw=20
rescue=f
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Other parameters will be passed directly to BBMap.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
z="-Xmx800m"
set=0
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 800m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
function postfilter() {
local CMD="java $EA $EOOM $z -cp $CP assemble.Postfilter $@"
echo $CMD >&2
eval $CMD
}
postfilter "$@" | ARGs-OAP | /ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/postfilter.sh | postfilter.sh |
# Print the extended help message for this wrapper and the underlying Java tool.
usage(){
echo "
Written by Brian Bushnell
Last modified May 22, 2019
Description: Splits a sequence file evenly into multiple files.
Usage: partition.sh in=<file> in2=<file2> out=<outfile> out2=<outfile2> ways=<number>
in2 and out2 are for paired reads and are optional.
If input is paired and out2 is not specified, data will be written interleaved.
Output filenames MUST contain a '%' symbol. This will be replaced by a number.
Parameters and their defaults:
in=<file> Input file.
out=<file> Output file pattern.
ways=-1 The number of output files to create; must be positive.
pacbio=f Set to true to keep PacBio subreads together.
ow=f (overwrite) Overwrites files that already exist.
app=f (append) Append to files that already exist.
zl=4 (ziplevel) Set compression level, 1 (low) to 9 (max).
int=f (interleaved) Determines whether INPUT file is considered interleaved.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath: compiled BBTools classes live in <installdir>/current/.
CP="$DIR""current/"
# Fixed default JVM heap; this tool does not autodetect RAM.
z="-Xmx1g"
# set=1 is flagged by parseXmx (in calcmem.sh) when the user supplies -Xmx.
set=0
# No arguments, or -h/--help: print the help text and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Parse JVM flags (-Xmx/-da/-eoom) from the arguments; sets z/EA/EOOM.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
# Build and run the jgi.PartitionReads Java command, forwarding all arguments.
function partition() {
local CMD="java $EA $EOOM $z -cp $CP jgi.PartitionReads $@"
# Echo the full command to stderr so the exact invocation appears in logs.
echo $CMD >&2
eval $CMD
}
partition "$@"
# Print the extended help message for this wrapper and the underlying Java tool.
usage(){
echo "
Written by Brian Bushnell
Last modified February 10, 2020
Description: Finds orfs and calls genes in unspliced prokaryotes.
This includes bacteria, archaea, viruses, and mitochondria.
Can also predict 16S, 23S, 5S, and tRNAs.
Usage: callgenes.sh in=contigs.fa out=calls.gff outa=aminos.faa out16S=16S.fa
File parameters:
in=<file> A fasta file; the only required parameter.
out=<file> Output gff file.
outa=<file> Amino acid output.
out16s=<file> 16S output.
model=<file> A pgm file or comma-delimited list.
If unspecified a default model will be used.
stats=stderr Stats output (may be stderr, stdin, a file, or null).
hist=null Gene length histogram.
compareto= Optional reference gff file to compare with the gene calls.
'auto' will name it based on the input file name.
Formatting parameters:
json=false Print stats in JSON.
binlen=20 Histogram bin length.
bins=2000 Maximum histogram bins.
pz=f (printzero) Print histogram lines with zero count.
Other parameters:
minlen=60 Don't call genes shorter than this.
trd=f (trimreaddescription) Set to true to trim read headers after
the first whitespace. Necessary for IGV.
merge=f For paired reads, merge before calling.
detranslate=f Output canonical nucleotide sequences instead of amino acids.
recode=f Re-encode nucleotide sequences over called genes, leaving
non-coding regions unchanged.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath: compiled BBTools classes live in <installdir>/current/.
CP="$DIR""current/"
# Fixed JVM heap (max and initial); this tool does not autodetect RAM.
z="-Xmx6g"
z2="-Xms6g"
# set=1 is flagged by parseXmx (in calcmem.sh) when the user supplies -Xmx.
set=0
# No arguments, or -h/--help: print the help text and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Parse JVM flags (-Xmx/-da/-eoom) from the arguments; sets z/EA/EOOM.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
# Build and run the prok.CallGenes Java command, forwarding all arguments.
function callgenes() {
local CMD="java $EA $EOOM $z $z2 -cp $CP prok.CallGenes $@"
#Too long to echo sometimes since wildcards can be expanded
#echo $CMD >&2
eval $CMD
}
callgenes "$@"
# Print the extended help message for this wrapper and the underlying Java tool.
# Fixed typos in the help text: "contamininants" -> "contaminants",
# "A reads needs" -> "A read needs" (twice). No runtime behavior changed.
usage(){
echo "
Written by Brian Bushnell
Last modified March 24, 2020
Description: Compares reads to the kmers in a reference dataset, optionally
allowing an edit distance. Splits the reads into two outputs - those that
match the reference, and those that don't. Can also trim (remove) the matching
parts of the reads rather than binning the reads.
Please read bbmap/docs/guides/BBDukGuide.txt for more information.
Usage: bbduk.sh in=<input file> out=<output file> ref=<contaminant files>
Input may be stdin or a fasta or fastq file, compressed or uncompressed.
If you pipe via stdin/stdout, please include the file type; e.g. for gzipped
fasta input, set in=stdin.fa.gz
Input parameters:
in=<file> Main input. in=stdin.fq will pipe from stdin.
in2=<file> Input for 2nd read of pairs in a different file.
ref=<file,file> Comma-delimited list of reference files.
In addition to filenames, you may also use the keywords:
adapters, artifacts, phix, lambda, pjet, mtst, kapa
literal=<seq,seq> Comma-delimited list of literal reference sequences.
touppercase=f (tuc) Change all bases upper-case.
interleaved=auto (int) t/f overrides interleaved autodetection.
qin=auto Input quality offset: 33 (Sanger), 64, or auto.
reads=-1 If positive, quit after processing X reads or pairs.
copyundefined=f (cu) Process non-AGCT IUPAC reference bases by making all
possible unambiguous copies. Intended for short motifs
or adapter barcodes, as time/memory use is exponential.
samplerate=1 Set lower to only process a fraction of input reads.
samref=<file> Optional reference fasta for processing sam files.
Output parameters:
out=<file> (outnonmatch) Write reads here that do not contain
kmers matching the database. 'out=stdout.fq' will pipe
to standard out.
out2=<file> (outnonmatch2) Use this to write 2nd read of pairs to a
different file.
outm=<file> (outmatch) Write reads here that fail filters. In default
kfilter mode, this means any read with a matching kmer.
In any mode, it also includes reads that fail filters such
as minlength, mingc, maxgc, entropy, etc. In other words,
it includes all reads that do not go to 'out'.
outm2=<file> (outmatch2) Use this to write 2nd read of pairs to a
different file.
outs=<file> (outsingle) Use this to write singleton reads whose mate
was trimmed shorter than minlen.
stats=<file> Write statistics about which contaminants were detected.
refstats=<file> Write statistics on a per-reference-file basis.
rpkm=<file> Write RPKM for each reference sequence (for RNA-seq).
dump=<file> Dump kmer tables to a file, in fasta format.
duk=<file> Write statistics in duk's format. *DEPRECATED*
nzo=t Only write statistics about ref sequences with nonzero hits.
overwrite=t (ow) Grant permission to overwrite files.
showspeed=t (ss) 'f' suppresses display of processing speed.
ziplevel=2 (zl) Compression level; 1 (min) through 9 (max).
fastawrap=70 Length of lines in fasta output.
qout=auto Output quality offset: 33 (Sanger), 64, or auto.
statscolumns=3 (cols) Number of columns for stats output, 3 or 5.
5 includes base counts.
rename=f Rename reads to indicate which sequences they matched.
refnames=f Use names of reference files rather than scaffold IDs.
trd=f Truncate read and ref names at the first whitespace.
ordered=f Set to true to output reads in same order as input.
maxbasesout=-1 If positive, quit after writing approximately this many
bases to out (outu/outnonmatch).
maxbasesoutm=-1 If positive, quit after writing approximately this many
bases to outm (outmatch).
json=f Print to screen in json format.
Histogram output parameters:
bhist=<file> Base composition histogram by position.
qhist=<file> Quality histogram by position.
qchist=<file> Count of bases with each quality value.
aqhist=<file> Histogram of average read quality.
bqhist=<file> Quality histogram designed for box plots.
lhist=<file> Read length histogram.
phist=<file> Polymer length histogram.
gchist=<file> Read GC content histogram.
enthist=<file> Read entropy histogram.
ihist=<file> Insert size histogram, for paired reads in mapped sam.
gcbins=100 Number gchist bins. Set to 'auto' to use read length.
maxhistlen=6000 Set an upper bound for histogram lengths; higher uses
more memory. The default is 6000 for some histograms
and 80000 for others.
Histograms for mapped sam/bam files only:
histbefore=t Calculate histograms from reads before processing.
ehist=<file> Errors-per-read histogram.
qahist=<file> Quality accuracy histogram of error rates versus quality
score.
indelhist=<file> Indel length histogram.
mhist=<file> Histogram of match, sub, del, and ins rates by position.
idhist=<file> Histogram of read count versus percent identity.
idbins=100 Number idhist bins. Set to 'auto' to use read length.
varfile=<file> Ignore substitution errors listed in this file when
calculating error rates. Can be generated with
CallVariants.
vcf=<file> Ignore substitution errors listed in this VCF file
when calculating error rates.
ignorevcfindels=t Also ignore indels listed in the VCF.
Processing parameters:
k=27 Kmer length used for finding contaminants. Contaminants
shorter than k will not be found. k must be at least 1.
rcomp=t Look for reverse-complements of kmers in addition to
forward kmers.
maskmiddle=t (mm) Treat the middle base of a kmer as a wildcard, to
increase sensitivity in the presence of errors.
minkmerhits=1 (mkh) Reads need at least this many matching kmers
to be considered as matching the reference.
minkmerfraction=0.0 (mkf) A read needs at least this fraction of its total
kmers to hit a ref, in order to be considered a match.
If this and minkmerhits are set, the greater is used.
mincovfraction=0.0 (mcf) A read needs at least this fraction of its total
bases to be covered by ref kmers to be considered a match.
If specified, mcf overrides mkh and mkf.
hammingdistance=0 (hdist) Maximum Hamming distance for ref kmers (subs only).
Memory use is proportional to (3*K)^hdist.
qhdist=0 Hamming distance for query kmers; impacts speed, not memory.
editdistance=0 (edist) Maximum edit distance from ref kmers (subs
and indels). Memory use is proportional to (8*K)^edist.
hammingdistance2=0 (hdist2) Sets hdist for short kmers, when using mink.
qhdist2=0 Sets qhdist for short kmers, when using mink.
editdistance2=0 (edist2) Sets edist for short kmers, when using mink.
forbidn=f (fn) Forbids matching of read kmers containing N.
By default, these will match a reference 'A' if
hdist>0 or edist>0, to increase sensitivity.
removeifeitherbad=t (rieb) Paired reads get sent to 'outmatch' if either is
match (or either is trimmed shorter than minlen).
Set to false to require both.
trimfailures=f Instead of discarding failed reads, trim them to 1bp.
This makes the statistics a bit odd.
findbestmatch=f (fbm) If multiple matches, associate read with sequence
sharing most kmers. Reduces speed.
skipr1=f Don't do kmer-based operations on read 1.
skipr2=f Don't do kmer-based operations on read 2.
ecco=f For overlapping paired reads only. Performs error-
correction with BBMerge prior to kmer operations.
recalibrate=f (recal) Recalibrate quality scores. Requires calibration
matrices generated by CalcTrueQuality.
sam=<file,file> If recalibration is desired, and matrices have not already
been generated, BBDuk will create them from the sam file.
amino=f Run in amino acid mode. Some features have not been
tested, but kmer-matching works fine. Maximum k is 12.
Speed and Memory parameters:
threads=auto (t) Set number of threads to use; default is number of
logical processors.
prealloc=f Preallocate memory in table. Allows faster table loading
and more efficient memory usage, for a large reference.
monitor=f Kill this process if it crashes. monitor=600,0.01 would
kill after 600 seconds under 1% usage.
minrskip=1 (mns) Force minimal skip interval when indexing reference
kmers. 1 means use all, 2 means use every other kmer, etc.
maxrskip=1 (mxs) Restrict maximal skip interval when indexing
reference kmers. Normally all are used for scaffolds<100kb,
but with longer scaffolds, up to maxrskip-1 are skipped.
rskip= Set both minrskip and maxrskip to the same value.
If not set, rskip will vary based on sequence length.
qskip=1 Skip query kmers to increase speed. 1 means use all.
speed=0 Ignore this fraction of kmer space (0-15 out of 16) in both
reads and reference. Increases speed and reduces memory.
Note: Do not use more than one of 'speed', 'qskip', and 'rskip'.
Trimming/Filtering/Masking parameters:
Note - if ktrim, kmask, and ksplit are unset, the default behavior is kfilter.
All kmer processing modes are mutually exclusive.
Reads only get sent to 'outm' purely based on kmer matches in kfilter mode.
ktrim=f Trim reads to remove bases matching reference kmers.
Values:
f (don't trim),
r (trim to the right),
l (trim to the left)
kmask= Replace bases matching ref kmers with another symbol.
Allows any non-whitespace character, and processes short
kmers on both ends if mink is set. 'kmask=lc' will
convert masked bases to lowercase.
maskfullycovered=f (mfc) Only mask bases that are fully covered by kmers.
ksplit=f For single-ended reads only. Reads will be split into
pairs around the kmer. If the kmer is at the end of the
read, it will be trimmed instead. Singletons will go to
out, and pairs will go to outm. Do not use ksplit with
other operations such as quality-trimming or filtering.
mink=0 Look for shorter kmers at read tips down to this length,
when k-trimming or masking. 0 means disabled. Enabling
this will disable maskmiddle.
qtrim=f Trim read ends to remove bases with quality below trimq.
Performed AFTER looking for kmers. Values:
rl (trim both ends),
f (neither end),
r (right end only),
l (left end only),
w (sliding window).
trimq=6 Regions with average quality BELOW this will be trimmed,
if qtrim is set to something other than f. Can be a
floating-point number like 7.3.
trimclip=f Trim soft-clipped bases from sam files.
minlength=10 (ml) Reads shorter than this after trimming will be
discarded. Pairs will be discarded if both are shorter.
mlf=0 (minlengthfraction) Reads shorter than this fraction of
original length after trimming will be discarded.
maxlength= Reads longer than this after trimming will be discarded.
minavgquality=0 (maq) Reads with average quality (after trimming) below
this will be discarded.
maqb=0 If positive, calculate maq from this many initial bases.
minbasequality=0 (mbq) Reads with any base below this quality (after
trimming) will be discarded.
maxns=-1 If non-negative, reads with more Ns than this
(after trimming) will be discarded.
mcb=0 (minconsecutivebases) Discard reads without at least
this many consecutive called bases.
ottm=f (outputtrimmedtomatch) Output reads trimmed to shorter
than minlength to outm rather than discarding.
tp=0 (trimpad) Trim this much extra around matching kmers.
tbo=f (trimbyoverlap) Trim adapters based on where paired
reads overlap.
strictoverlap=t Adjust sensitivity for trimbyoverlap mode.
minoverlap=14 Require this many bases of overlap for detection.
mininsert=40 Require insert size of at least this for overlap.
Should be reduced to 16 for small RNA sequencing.
tpe=f (trimpairsevenly) When kmer right-trimming, trim both
reads to the minimum length of either.
forcetrimleft=0 (ftl) If positive, trim bases to the left of this position
(exclusive, 0-based).
forcetrimright=0 (ftr) If positive, trim bases to the right of this position
(exclusive, 0-based).
forcetrimright2=0 (ftr2) If positive, trim this many bases on the right end.
forcetrimmod=0 (ftm) If positive, right-trim length to be equal to zero,
modulo this number.
restrictleft=0 If positive, only look for kmer matches in the
leftmost X bases.
restrictright=0 If positive, only look for kmer matches in the
rightmost X bases.
mingc=0 Discard reads with GC content below this.
maxgc=1 Discard reads with GC content above this.
gcpairs=t Use average GC of paired reads.
Also affects gchist.
tossjunk=f Discard reads with invalid characters as bases.
swift=f Trim Swift sequences: Trailing C/T/N R1, leading G/A/N R2.
Header-parsing parameters - these require Illumina headers:
chastityfilter=f (cf) Discard reads with id containing ' 1:Y:' or ' 2:Y:'.
barcodefilter=f Remove reads with unexpected barcodes if barcodes is set,
or barcodes containing 'N' otherwise. A barcode must be
the last part of the read header. Values:
t: Remove reads with bad barcodes.
f: Ignore barcodes.
crash: Crash upon encountering bad barcodes.
barcodes= Comma-delimited list of barcodes or files of barcodes.
xmin=-1 If positive, discard reads with a lesser X coordinate.
ymin=-1 If positive, discard reads with a lesser Y coordinate.
xmax=-1 If positive, discard reads with a greater X coordinate.
ymax=-1 If positive, discard reads with a greater Y coordinate.
Polymer trimming:
trimpolya=0 If greater than 0, trim poly-A or poly-T tails of
at least this length on either end of reads.
trimpolygleft=0 If greater than 0, trim poly-G prefixes of at least this
length on the left end of reads. Does not trim poly-C.
trimpolygright=0 If greater than 0, trim poly-G tails of at least this
length on the right end of reads. Does not trim poly-C.
trimpolyg=0 This sets both left and right at once.
filterpolyg=0 If greater than 0, remove reads with a poly-G prefix of
at least this length (on the left).
Note: there are also equivalent poly-C flags.
Polymer tracking:
pratio=base,base 'pratio=G,C' will print the ratio of G to C polymers.
plen=20 Length of homopolymers to count.
Entropy/Complexity parameters:
entropy=-1 Set between 0 and 1 to filter reads with entropy below
that value. Higher is more stringent.
entropywindow=50 Calculate entropy using a sliding window of this length.
entropyk=5 Calculate entropy using kmers of this length.
minbasefrequency=0 Discard reads with a minimum base frequency below this.
entropytrim=f Values:
f: (false) Do not entropy-trim.
r: (right) Trim low entropy on the right end only.
l: (left) Trim low entropy on the left end only.
rl: (both) Trim low entropy on both ends.
entropymask=f Values:
f: (filter) Discard low-entropy sequences.
t: (true) Mask low-entropy parts of sequences with N.
lc: Change low-entropy parts of sequences to lowercase.
entropymark=f Mark each base with its entropy value. This is on a scale
of 0-41 and is reported as quality scores, so the output
should be fastq or fasta+qual.
NOTE: If set, entropytrim overrides entropymask.
Cardinality estimation:
cardinality=f (loglog) Count unique kmers using the LogLog algorithm.
cardinalityout=f (loglogout) Count unique kmers in output reads.
loglogk=31 Use this kmer length for counting.
loglogbuckets=2048 Use this many buckets for counting.
khist=<file> Kmer frequency histogram; plots number of kmers versus
kmer depth. This is approximate.
khistout=<file> Kmer frequency histogram for output reads.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will
specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath: compiled BBTools classes live in <installdir>/current/.
CP="$DIR""current/"
# JNI acceleration flag; deliberately cleared on the next line (disabled by default).
JNI="-Djava.library.path=""$DIR""jni/"
JNI=""
# Default JVM heap; may be replaced by RAM autodetection or an explicit -Xmx flag.
z="-Xmx1g"
z2="-Xms1g"
# set=1 is flagged by parseXmx (in calcmem.sh) when the user supplies -Xmx.
set=0
# silent/json suppress echoing the java command line (set via calcmem.sh parsing).
silent=0
json=0
# No arguments, or -h/--help: print the help text and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Parse JVM flags from the arguments and autodetect available RAM.
# Sets z/z2 (heap flags) plus EA/EOOM via helpers sourced from calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
# User supplied an explicit -Xmx; keep it.
if [[ $set == 1 ]]; then
return
fi
# Otherwise size the heap from free RAM (1400 MB floor, up to 42% of free).
freeRam 1400m 42
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Build and run the jgi.BBDuk Java command, forwarding all arguments.
bbduk() {
local CMD="java $EA $EOOM $z $z2 $JNI -cp $CP jgi.BBDuk $@"
# Echo the command to stderr unless silent or json output was requested.
if [[ $silent == 0 ]] && [[ $json == 0 ]]; then
echo $CMD >&2
fi
eval $CMD
}
bbduk "$@"
# Print the extended help message for this wrapper and the underlying Java tool.
usage(){
echo "
Written by Brian Bushnell
Last modified May 23, 2014
*** DEPRECATED: This should still work but is no longer maintained. ***
Description: Annotates reads with their kmer depth.
Usage: kmercoverage in=<input> out=<read output> hist=<histogram output>
Input parameters:
in2=null Second input file for paired reads
extra=null Additional files to use for input (generating hash table) but not for output
fastareadlen=2^31 Break up FASTA reads longer than this. Can be useful when processing scaffolded genomes
tablereads=-1 Use at most this many reads when building the hashtable (-1 means all)
kmersample=1 Process every nth kmer, and skip the rest
readsample=1 Process every nth read, and skip the rest
Output parameters:
hist=null Specify a file to output the depth histogram
histlen=10000 Max depth displayed on histogram
reads=-1 Only process this number of reads, then quit (-1 means all)
sampleoutput=t Use sampling on output as well as input (not used if sample rates are 1)
printcoverage=f Only print coverage information instead of reads
useheader=f Append coverage info to the read's header
minmedian=0 Don't output reads with median coverage below this
minaverage=0 Don't output reads with average coverage below this
zerobin=f Set to true if you want kmers with a count of 0 to go in the 0 bin instead of the 1 bin in histograms.
Default is false, to prevent confusion about how there can be 0-count kmers.
The reason is that based on the 'minq' and 'minprob' settings, some kmers may be excluded from the bloom filter.
Hashing parameters:
k=31 Kmer length (values under 32 are most efficient, but arbitrarily high values are supported)
cbits=8 Bits per cell in bloom filter; must be 2, 4, 8, 16, or 32. Maximum kmer depth recorded is 2^cbits.
Large values decrease accuracy for a fixed amount of memory.
hashes=4 Number of times a kmer is hashed. Higher is slower.
Higher is MORE accurate if there is enough memory, and LESS accurate if there is not enough memory.
prefilter=f True is slower, but generally more accurate; filters out low-depth kmers from the main hashtable.
prehashes=2 Number of hashes for prefilter.
passes=1 More passes can sometimes increase accuracy by iteratively removing low-depth kmers
minq=7 Ignore kmers containing bases with quality below this
minprob=0.5 Ignore kmers with overall probability of correctness below this
threads=X Spawn exactly X hashing threads (default is number of logical processors). Total active threads may exceed X by up to 4.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath: compiled BBTools classes live in <installdir>/current/.
CP="$DIR""current/"
# Default JVM heap; may be replaced by RAM autodetection or an explicit -Xmx flag.
z="-Xmx1g"
z2="-Xms1g"
# set=1 is flagged by parseXmx (in calcmem.sh) when the user supplies -Xmx.
set=0
# No arguments, or -h/--help: print the help text and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Parse JVM flags from the arguments and autodetect available RAM.
# Sets z/z2 (heap flags) plus EA/EOOM via helpers sourced from calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
# User supplied an explicit -Xmx; keep it.
if [[ $set == 1 ]]; then
return
fi
# Otherwise size the heap from free RAM (3200 MB floor, up to 84% of free).
freeRam 3200m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Build and run the jgi.KmerCoverage Java command with tool-specific defaults,
# forwarding all user arguments after them (later args override the defaults).
kmercoverage() {
local CMD="java $EA $EOOM $z -cp $CP jgi.KmerCoverage prefilter=true bits=16 interleaved=false $@"
# Echo the full command to stderr so the exact invocation appears in logs.
echo $CMD >&2
eval $CMD
}
kmercoverage "$@"
# Print the extended help message for this wrapper and the underlying Java tool.
usage(){
echo "
Written by Brian Bushnell
Last modified February 5, 2020
Description: Aligns a query sequence to reference sequences.
Outputs the best matching position per reference sequence.
If there are multiple queries, only the best-matching query will be used.
MSA in this context stands for MultiStateAligner, not Multiple Sequence Alignment.
Usage:
msa.sh in=<file> out=<file> literal=<literal,literal,...>
or
msa.sh in=<file> out=<file> ref=<lfile>
Parameters:
in=<file> File containing reads.
out=<file> Sam output file.
literal= A sequence of bases to match, or a comma-delimited list.
ref=<file> A fasta file of bases to match. Please set either ref
or literal, not both.
rcomp=t Also look for reverse-complements of the sequences.
addr=f Add r_ prefix to reverse-complemented alignments.
replicate=f Make copies of sequences with undefined bases for every
possible combination. For example, ATN would expand to
ATA, ATC, ATG, and ATT.
cutoff=0 Ignore alignments with identity below this (range 0-1).
swap=f Swap the reference and query; e.g., report read alignments
to the reference instead of reference alignments to the reads.
Java Parameters:
-Xmx This will set Java's memory usage, overriding automatic
memory detection. -Xmx20g will specify
20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath: compiled BBTools classes live in <installdir>/current/.
CP="$DIR""current/"
# Default JVM heap; may be replaced by RAM autodetection or an explicit -Xmx flag.
z="-Xmx1g"
z2="-Xms1g"
# set=1 is flagged by parseXmx (in calcmem.sh) when the user supplies -Xmx.
set=0
# No arguments, or -h/--help: print the help text and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Parse JVM flags from the arguments and autodetect available RAM.
# Sets z/z2 (heap flags) plus EA/EOOM via helpers sourced from calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
# User supplied an explicit -Xmx; keep it.
if [[ $set == 1 ]]; then
return
fi
# Otherwise size the heap from free RAM (2000 MB floor, up to 84% of free).
freeRam 2000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Build and run the jgi.FindPrimers Java command, forwarding all arguments.
msa() {
local CMD="java $EA $EOOM $z -cp $CP jgi.FindPrimers $@"
# Echo the full command to stderr so the exact invocation appears in logs.
echo $CMD >&2
eval $CMD
}
msa "$@"
# Print the extended help message for this wrapper and the underlying Java tool.
usage(){
echo "
Written by Brian Bushnell
Last modified December 4, 2019
Description: Aligns SSUs to each other and reports identity.
This requires sequences annotated with a taxID in their header.
Usage: comparessu.sh in=<input file> out=<output file>
Input may be fasta or fastq, compressed or uncompressed.
Standard parameters:
in=<file> Input sequences.
out=<file> Output data.
t= Set the number of threads; default is logical processors.
overwrite=f (ow) Set to false to force the program to abort rather than
overwrite an existing file.
showspeed=t (ss) Set to 'f' to suppress display of processing speed.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression
level; lower compression is faster.
reads=-1 If positive, quit after this many sequences.
Processing parameters:
ata=f Do an all-to-all comparison. Otherwise, each sequence will
only be compared to one other randomly-selected sequence
per taxonomic level.
minlen=0 Ignore sequences shorter than this.
maxlen=BIG Ignore sequences longer than this.
maxns=-1 If positive, ignore sequences with more than this many Ns.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Classpath: compiled BBTools classes live in <installdir>/current/.
CP="$DIR""current/"
# Default JVM heap; may be replaced by RAM autodetection or an explicit -Xmx flag.
z="-Xmx4g"
z2="-Xms4g"
# set=1 is flagged by parseXmx (in calcmem.sh) when the user supplies -Xmx.
set=0
# No arguments, or -h/--help: print the help text and quit.
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Parse JVM flags from the arguments and autodetect available RAM.
# Sets z/z2 (heap flags) plus EA/EOOM via helpers sourced from calcmem.sh.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
# User supplied an explicit -Xmx; keep it.
if [[ $set == 1 ]]; then
return
fi
# Otherwise size the heap from free RAM (4000 MB floor, up to 84% of free).
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Build and run the sketch.CompareSSU Java command, forwarding all arguments.
comparessu() {
local CMD="java $EA $EOOM $z -cp $CP sketch.CompareSSU $@"
# Echo the full command to stderr so the exact invocation appears in logs.
echo $CMD >&2
eval $CMD
}
comparessu "$@"
usage(){
echo "
Written by Brian Bushnell
Last modified February 17, 2015
Description: Separates paired reads into files of 'good' pairs and 'good' singletons by removing 'bad' reads that are shorter than a min length.
Designed to handle situations where reads become too short to be useful after trimming. This program also optionally performs quality trimming.
Usage: bbsplitpairs.sh in=<input file> out=<pair output file> outs=<singleton output file> minlen=<minimum read length, an integer>
Input may be fasta or fastq, compressed or uncompressed.
Optional parameters (and their defaults)
in=<file> The 'in=' flag is needed if the input file is not the first parameter. 'in=stdin' will pipe from standard in.
in2=<file> Use this if 2nd read of pairs are in a different file.
out=<file> The 'out=' flag is needed if the output file is not the second parameter. 'out=stdout' will pipe to standard out.
out2=<file> Use this to write 2nd read of pairs to a different file.
outsingle=<file> (outs) Write singleton reads here.
overwrite=t (ow) Set to false to force the program to abort rather than overwrite an existing file.
showspeed=t (ss) Set to 'f' to suppress display of processing speed.
interleaved=auto (int) If true, forces fastq input to be paired and interleaved.
qtrim=f Trim read ends to remove bases with quality below trimq.
Values: rl (trim both ends), f (neither end), r (right end only), l (left end only).
trimq=6 Trim quality threshold.
minlen=20 (ml) Reads shorter than this after trimming will be discarded.
ziplevel=2 (zl) Set to 1 (lowest) through 9 (max) to change compression level; lower compression is faster.
fixinterleaving=f (fint) Fixes corrupted interleaved files by examining pair names. Only use on files with broken interleaving.
repair=f (rp) Fixes arbitrarily corrupted paired reads by examining read names. High memory.
ain=f (allowidenticalnames) When detecting pair names, allows identical names, instead of requiring /1 and /2 or 1: and 2:
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
# Save the caller's working directory so it can be restored afterwards.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
# Follow symlinks ([ -h ] tests for one) until the real script file is
# reached, so DIR resolves to the physical install directory.
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Java classpath: the 'current' directory of compiled classes under the install dir.
CP="$DIR""current/"
# Default JVM max heap; presumably overridden by parseXmx (calcmem.sh) when
# the user passes an explicit -Xmx -- TODO confirm against calcmem.sh.
z="-Xmx200m"
# Flag presumably set to 1 by parseXmx when an explicit -Xmx is given -- confirm.
set=0
# Show the help text when invoked with no arguments or an explicit help flag.
case "$1" in
	""|-h|--help)
		usage
		exit
		;;
esac
# Load the shared memory helpers and parse any -Xmx/-eoom/-da flags from the
# command line (setEnvironment and parseXmx are sourced from calcmem.sh).
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
# Build and run the Java command for jgi.SplitPairsAndSingles, echoing the
# exact command line to stderr first for logging/reproducibility.
splitpairs() {
	local CMD="java $EA $EOOM $z -cp $CP jgi.SplitPairsAndSingles $@"
	echo $CMD >&2
	eval $CMD
}

# Removed trailing archive-extraction residue ("| ARGs-OAP | ...") that had
# been appended to this line; it would have piped into nonexistent commands.
splitpairs "$@"
# Print the embedded help text (description, usage, and all parameters)
# for kmerfilterset.sh to stdout.
usage(){
echo "
Written by Brian Bushnell
Last modified October 24, 2019
Description: Generates a set of kmers such that every input sequence will
contain at least one kmer in the set. This is a greedy algorithm which
retains the top X most common kmers each pass, and removes the sequences
matching those kmers, so subsequent passes are faster.
This will not generate an optimally small set but the output will be
quite small. The file size may be further decreased with kcompress.sh.
Usage: kmerfilterset.sh in=<input file> out=<output file> k=<integer>
File parameters:
in=<file> Primary input.
out=<file> Primary output.
temp=<file> Temporary file pattern (optional). Must contain # symbol.
initial=<file> Initial kmer set (optional). This can be used to accelerate
the process.
Processing parameters:
k=31 Kmer length.
rcomp=t Consider forward and reverse-complement kmers identical.
minkpp=1 (minkmersperpass) Retain at least this many kmers per pass.
Higher is faster but results in a larger set.
maxkpp=2 (maxkmersperpass) Retain at most this many kmers per pass;
0 means unlimited.
mincount=1 Ignore kmers seen fewer than this many times in this pass.
maxpasses=3000 Don't run more than this many passes.
maxns=BIG Ignore sequences with more than this many Ns.
minlen=0 Ignore sequences shorter than this.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will
specify 200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
# Save the caller's working directory so it can be restored afterwards.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
# Follow symlinks until the real script file is reached, so DIR resolves
# to the physical install directory.
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Java classpath: the 'current' directory of compiled classes under the install dir.
CP="$DIR""current/"
# Default JVM max/initial heap. z2 (-Xms) is not referenced again in this
# script -- presumably consumed by the calcmem.sh helpers. TODO confirm.
z="-Xmx1000m"
z2="-Xms1000m"
# Flag presumably set to 1 by parseXmx when an explicit -Xmx is given -- confirm.
set=0
# Show the help text when invoked with no arguments or an explicit help flag.
case "$1" in
	""|-h|--help)
		usage
		exit
		;;
esac
# Load the shared memory helpers and parse any -Xmx/-eoom/-da flags from the
# command line (setEnvironment and parseXmx are sourced from calcmem.sh).
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
# Build and run the Java command for jgi.KmerFilterSetMaker, echoing the
# exact command line to stderr first for logging/reproducibility.
kmerlimit() {
	local CMD="java $EA $EOOM $z -cp $CP jgi.KmerFilterSetMaker $@"
	echo $CMD >&2
	eval $CMD
}

# Removed trailing archive-extraction residue ("| ARGs-OAP | ...") that had
# been appended to this line; it would have piped into nonexistent commands.
kmerlimit "$@"
# Print the embedded help text (description, usage, and all parameters)
# for taxonomy.sh to stdout.
# Fixes two typos in the user-facing text: "Not:" -> "Note:" and
# "accesions" -> "accessions".
usage(){
echo "
Written by Brian Bushnell
Last modified Jan 7, 2020
Description: Prints the full taxonomy of a string.
String may be a gi number, NCBI taxID, or Latin name.
An NCBI identifier should just be a number or ncbi|number.
A gi number should be gi|number.
Please read bbmap/docs/guides/TaxonomyGuide.txt for more information.
Note: It is more convenient to use taxonomy.jgi-psf.org.
Usage: taxonomy.sh tree=<tree file> <identifier>
Alternate usage: taxonomy.sh tree=<tree file> in=<file>
Usage examples:
taxonomy.sh tree=tree.taxtree.gz homo_sapiens canis_lupus 9606
taxonomy.sh tree=tree.taxtree.gz gi=gitable.int1.d.gz in=refseq.fasta
Processing parameters:
in=<file> A file containing named sequences, or just the names.
out=<file> Output file. If blank, use stdout.
tree=<file> Specify a TaxTree file like tree.taxtree.gz.
On Genepool, use 'auto'.
gi=<file> Specify a gitable file like gitable.int1d.gz. Only needed
if gi numbers will be used. On Genepool, use 'auto'.
accession= Specify one or more comma-delimited NCBI accession to taxid
files. Only needed if accessions will be used; requires ~45GB
of memory. On Genepool, use 'auto'.
level=null Set to a taxonomic level like phylum to just print that level.
minlevel=-1 For multi-level printing, do not print levels below this.
maxlevel=life For multi-level printing, do not print levels above this.
silva=f Parse headers using Silva or semicolon-delimited syntax.
taxpath=auto Set the path to taxonomy files; auto only works at NERSC.
Parameters without an '=' symbol will be considered organism identifiers.
* Note *
Tree and table files are in /global/projectb/sandbox/gaag/bbtools/tax
For non-Genepool users, or to make new ones, use taxtree.sh and gitable.sh
Java Parameters:
-Xmx This will set Java's memory usage,
overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify
200 megs. The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an out-of-memory
exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
# Save the caller's working directory so it can be restored afterwards.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
# Follow symlinks until the real script file is reached, so DIR resolves
# to the physical install directory.
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
# Java classpath: the 'current' directory of compiled classes under the install dir.
CP="$DIR""current/"
# Default JVM max/initial heap; both are recomputed by calcXmx below unless
# the user supplies an explicit -Xmx.
z="-Xmx8g"
z2="-Xms8g"
# Read by calcXmx below; presumably set to 1 by parseXmx when an explicit
# -Xmx is given -- confirm against calcmem.sh.
set=0
# Show the help text when invoked with no arguments or an explicit help flag.
case "$1" in
	""|-h|--help)
		usage
		exit
		;;
esac
# Load the shared memory helpers, honor a user-supplied -Xmx if present,
# and otherwise size the JVM heap from available system RAM.
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
# If an explicit -Xmx was parsed (presumably parseXmx sets set=1 -- confirm
# against calcmem.sh), keep the user's value and skip autodetection.
if [[ $set == 1 ]]; then
return
fi
# freeRam and the RAM variable come from calcmem.sh; the exact semantics of
# the 2000m / 84 arguments are defined there -- TODO confirm.
freeRam 2000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
# Build and run the Java command for tax.PrintTaxonomy, echoing the exact
# command line to stderr first for logging/reproducibility.
taxonomy() {
	local CMD="java $EA $EOOM $z -cp $CP tax.PrintTaxonomy $@"
	echo $CMD >&2
	eval $CMD
}

# Removed trailing archive-extraction residue ("| ARGs-OAP | ...") that had
# been appended to this line; it would have piped into nonexistent commands.
taxonomy "$@"
# Non-script residue (hosting-page footer text), commented out so it cannot
# be executed if this file is ever sourced:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.