Dataset columns:

| Column | Type | Range |
|---|---|---|
| repository_name | string | length 7–55 |
| func_path_in_repository | string | length 4–223 |
| func_name | string | length 1–134 |
| whole_func_string | string | length 75–104k |
| language | string | 1 distinct value |
| func_code_string | string | length 75–104k |
| func_code_tokens | sequence | length 19–28.4k |
| func_documentation_string | string | length 1–46.9k |
| func_documentation_tokens | sequence | length 1–1.97k |
| split_name | string | 1 distinct value |
| func_code_url | string | length 87–315 |
repository_name: cgarciae/phi
func_path_in_repository: phi/dsl.py
func_name: Expression.Seq
whole_func_string:

```python
def Seq(self, *sequence, **kwargs):
    """
    `Seq` is used to express function composition. The expression

        Seq(f, g)

    is equivalent to

        lambda x: g(f(x))

    As you can see, it is a little different from the mathematical definition.
    Execution order flows from left to right, which makes reading and reasoning
    about code much easier. This behaviour is based upon the `|>` (pipe) operator
    found in languages like F#, Elixir and Elm.

    You can pack as many expressions as you like and they will be applied in order
    to the data that is passed through them when compiled and executed.

    In general, the following rules apply for Seq:

    **General Sequence**

        Seq(f0, f1, ..., fn-1, fn)

    is equivalent to

        lambda x: fn(fn-1(...(f1(f0(x)))))

    **Single Function**

        Seq(f)

    is equivalent to

        f

    **Identity**

    The empty Seq

        Seq()

    is equivalent to

        lambda x: x

    ### Examples

        from phi import P, Seq

        f = Seq(
            P * 2,
            P + 1,
            P ** 2
        )

        assert f(1) == 9  # ((1 * 2) + 1) ** 2

    The previous example using `P.Pipe`

        from phi import P

        assert 9 == P.Pipe(
            1,
            P * 2,   # 1 * 2 == 2
            P + 1,   # 2 + 1 == 3
            P ** 2   # 3 ** 2 == 9
        )
    """
    fs = [_parse(elem)._f for elem in sequence]

    def g(x, state):
        return functools.reduce(lambda args, f: f(*args), fs, (x, state))

    return self.__then__(g, **kwargs)
```
language: python
split_name: train
func_code_url: https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L801-L869
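The `Seq` implementation above folds its parsed functions with `functools.reduce`, threading a `(value, state)` pair through each step. A minimal standalone sketch of the same left-to-right composition, with the state threading stripped out (plain one-argument callables; not the library's code):

```python
import functools

def seq(*fs):
    """Compose callables left to right: seq(f, g)(x) == g(f(x))."""
    return lambda x: functools.reduce(lambda acc, f: f(acc), fs, x)

double = lambda x: x * 2
inc = lambda x: x + 1
square = lambda x: x ** 2

assert seq(double, inc, square)(1) == 9  # ((1 * 2) + 1) ** 2
assert seq(double)(3) == 6               # single function: equivalent to f
assert seq()(42) == 42                   # empty seq is the identity
```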
repository_name: cgarciae/phi
func_path_in_repository: phi/dsl.py
func_name: Expression.With
whole_func_string:

```python
def With(self, context_manager, *body, **kwargs):
    """
    **With**

        def With(context_manager, *body):

    **Arguments**

    * **context_manager**: a [context manager](https://docs.python.org/2/reference/datamodel.html#context-managers) object or valid expression from the DSL that returns a context manager.
    * ***body**: any valid expression of the DSL to be evaluated inside the context. `*body` is interpreted as a tuple, so all expressions contained are composed.

    As with normal Python programs, you sometimes might want to create a context
    for a block of code. You normally give a [context manager](https://docs.python.org/2/reference/datamodel.html#context-managers)
    to the [with](https://docs.python.org/2/reference/compound_stmts.html#the-with-statement)
    statement; in Phi you use `P.With` or `phi.With`.

    **Context**

    Python's `with` statement returns a context object through the `as` keyword;
    in the DSL this object can be obtained using the `P.Context` method or the
    `phi.Context` function.

    ### Examples

        from phi import P, Obj, Context, With, Pipe

        text = Pipe(
            "text.txt",
            With(
                open, Context,
                Obj.read()
            )
        )

    The previous is equivalent to

        with open("text.txt") as f:
            text = f.read()

    """
    context_f = _parse(context_manager)._f
    body_f = E.Seq(*body)._f

    def g(x, state):
        context, state = context_f(x, state)
        with context as scope:
            with _WithContextManager(scope):
                return body_f(x, state)

    return self.__then__(g, **kwargs)
```
language: python
split_name: train
func_code_url: https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L930-L973
repository_name: cgarciae/phi
func_path_in_repository: phi/dsl.py
func_name: Expression.ReadList
whole_func_string:

```python
def ReadList(self, *branches, **kwargs):
    """
    Same as `phi.dsl.Expression.List` but any string argument `x` is translated to `Read(x)`.
    """
    branches = map(lambda x: E.Read(x) if isinstance(x, str) else x, branches)
    return self.List(*branches, **kwargs)
```
language: python
split_name: train
func_code_url: https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L1021-L1027
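The `ReadList` docstring is terse; a hedged usage sketch, assuming `phi` is installed and following the `Write`/`Read` state conventions visible in the next entry:

```python
from phi import P

result = P.Pipe(
    1,
    P + 1, P.Write("a"),   # state: a = 2
    P * 3, P.Write("b"),   # state: b = 6
    P.ReadList("a", "b"),  # strings are translated to Read("a"), Read("b")
)
assert result == [2, 6]
```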
repository_name: cgarciae/phi
func_path_in_repository: phi/dsl.py
func_name: Expression.Write
whole_func_string:

```python
def Write(self, *state_args, **state_dict):
    """See `phi.dsl.Expression.Read`"""

    if len(state_dict) + len(state_args) < 1:
        raise Exception("Please include at least 1 state variable, got {0} and {1}".format(state_args, state_dict))

    if len(state_dict) > 1:
        raise Exception("Please include at most 1 keyword argument expression, got {0}".format(state_dict))

    if len(state_dict) > 0:
        state_key = next(iter(state_dict.keys()))
        write_expr = state_dict[state_key]

        state_args += (state_key,)
        expr = self >> write_expr
    else:
        expr = self

    def g(x, state):
        update = {key: x for key in state_args}
        state = utils.merge(state, update)

        # side effect for convenience
        _StateContextManager.REFS.update(state)

        return x, state

    return expr.__then__(g)
```
language: python
split_name: train
func_code_url: https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L1030-L1061
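Since the docstring only points at `Read`, here is a hedged sketch of the `Write`/`Read` pairing (assuming `phi` is installed, and that `Read` replaces the main value with the stored one, as its pairing with `Write` suggests):

```python
from phi import P

assert 3 == P.Pipe(
    1,
    P.Write("x"),  # store the current value (1) under "x"; 1 passes through
    P + 1,         # main value becomes 2
    P.Read("x"),   # main value is replaced by the stored 1
    P + 2,         # 1 + 2 == 3
)
```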
repository_name: cgarciae/phi
func_path_in_repository: phi/dsl.py
func_name: Expression.Val
whole_func_string:

```python
def Val(self, val, **kwargs):
    """
    The expression

        Val(a)

    is equivalent to the constant function

        lambda x: a

    All expressions in this module interpret values that are not functions as
    constant functions using `Val`. For example,

        Seq(1, P + 1)

    is equivalent to

        Seq(Val(1), P + 1)

    The previous expression as a whole is a constant function since it will
    return `2` no matter what input you give it.
    """
    f = utils.lift(lambda z: val)

    return self.__then__(f, **kwargs)
```
language: python
split_name: train
func_code_url: https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L1181-L1203
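A runnable restatement of the docstring's claim (assuming `phi` is installed):

```python
from phi import P, Seq

# Plain values are lifted to constant functions via Val, so the whole
# pipeline ignores its input and always yields 2.
assert Seq(1, P + 1)(999) == 2
assert Seq(1, P + 1)("anything") == 2
```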
repository_name: cgarciae/phi
func_path_in_repository: phi/dsl.py
func_name: Expression.If
whole_func_string:

```python
def If(self, condition, *then, **kwargs):
    """
    **If**

        If(Predicate, *Then)

    Conditional expressions being a necessity in every language, Phi includes
    the `If` expression for this purpose.

    **Arguments**

    * **Predicate**: a predicate expression used to determine whether the `Then` or `Else` branch should be used.
    * ***Then**: an expression to be executed if the `Predicate` yields `True`; since this parameter is variadic, you can stack expressions and they will be interpreted as a tuple, i.e. composed via `phi.dsl.Seq`.

    This class also includes the `Elif` and `Else` methods which let you write
    branched conditionals in sequence; however, the following rules apply:

    * If no branch is entered the whole expression behaves like the identity
    * `Elif` can only be used after an `If` or another `Elif` expression
    * Many `Elif` expressions can be stacked sequentially
    * `Else` can only be used after an `If` or `Elif` expression

    ### Examples

        from phi import P, If

        assert "Between 2 and 10" == P.Pipe(
            5,
            If(P > 10,
                "Greater than 10"
            ).Elif(P < 2,
                "Less than 2"
            ).Else(
                "Between 2 and 10"
            )
        )
    """
    cond_f = _parse(condition)._f
    then_f = E.Seq(*then)._f
    else_f = utils.state_identity

    ast = (cond_f, then_f, else_f)
    g = _compile_if(ast)

    expr = self.__then__(g, **kwargs)
    expr._ast = ast
    expr._root = self
    return expr
```
language: python
split_name: train
func_code_url: https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L1206-L1253
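`_compile_if` itself is not shown in this entry. A minimal sketch of what compiling the `(cond_f, then_f, else_f)` triple could look like, assuming all three are state-threading functions of the form `f(x, state) -> (value, state)` (an inference from `Seq` and `utils.state_identity`, not the library's actual code):

```python
def compile_if(ast):
    cond_f, then_f, else_f = ast

    def g(x, state):
        # Evaluate the predicate first; it may also update the state.
        test, state = cond_f(x, state)
        # Route the original input value into whichever branch was chosen.
        return then_f(x, state) if test else else_f(x, state)

    return g
```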
repository_name: cgarciae/phi
func_path_in_repository: phi/dsl.py
func_name: Expression.Else
whole_func_string:

```python
def Else(self, *Else, **kwargs):
    """See `phi.dsl.Expression.If`"""
    root = self._root
    ast = self._ast

    next_else = E.Seq(*Else)._f
    ast = _add_else(ast, next_else)

    g = _compile_if(ast)

    return root.__then__(g, **kwargs)
```
language: python
split_name: train
func_code_url: https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L1255-L1265
repository_name: alvinlindstam/grapheme
func_path_in_repository: grapheme/api.py
func_name: length
whole_func_string:

```python
def length(string, until=None):
    """
    Returns the number of graphemes in the string.

    Note that this function needs to traverse the full string to calculate the length,
    unlike `len(string)`, and its time consumption is linear in the length of the string
    (up to the `until` value).

    Only counts up to the `until` argument, if given. This is useful when testing
    the length of a string against some limit and the excess length is not interesting.

    >>> rainbow_flag = "🏳️‍🌈"
    >>> len(rainbow_flag)
    4
    >>> grapheme.length(rainbow_flag)
    1
    >>> grapheme.length("".join(str(i) for i in range(100)), 30)
    30
    """
    if until is None:
        return sum(1 for _ in GraphemeIterator(string))

    iterator = graphemes(string)
    count = 0
    while True:
        try:
            if count >= until:
                break
            next(iterator)
        except StopIteration:
            break
        else:
            count += 1

    return count
```
language: python
split_name: train
func_code_url: https://github.com/alvinlindstam/grapheme/blob/45cd9b8326ddf96d2618406724a7ebf273cbde03/grapheme/api.py#L21-L55
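The `until` parameter exists so that limit checks do not pay for the whole string; a sketch (assuming the `grapheme` package is installed):

```python
import grapheme

very_long = "a" * 10_000_000

# Stops counting after 141 graphemes instead of traversing all ten million.
assert grapheme.length(very_long, until=141) == 141

# Typical use: "is this string longer than 140 graphemes?"
assert grapheme.length(very_long, until=141) > 140
```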
repository_name: alvinlindstam/grapheme
func_path_in_repository: grapheme/api.py
func_name: slice
whole_func_string:

```python
def slice(string, start=None, end=None):
    """
    Returns a substring of the given string, counting graphemes instead of codepoints.

    Negative indices are currently not supported.

    >>> string = "tamil நி (ni)"

    >>> string[:7]
    'tamil ந'
    >>> grapheme.slice(string, end=7)
    'tamil நி'
    >>> string[7:]
    'ி (ni)'
    >>> grapheme.slice(string, 7)
    ' (ni)'
    """
    if start is None:
        start = 0
    if end is not None and start >= end:
        return ""

    if start < 0:
        raise NotImplementedError("Negative indexing is currently not supported.")

    sum_ = 0
    start_index = None
    for grapheme_index, grapheme_length in enumerate(grapheme_lengths(string)):
        if grapheme_index == start:
            start_index = sum_
        elif grapheme_index == end:
            return string[start_index:sum_]
        sum_ += grapheme_length

    if start_index is not None:
        return string[start_index:]

    return ""
```
language: python
split_name: train
func_code_url: https://github.com/alvinlindstam/grapheme/blob/45cd9b8326ddf96d2618406724a7ebf273cbde03/grapheme/api.py#L66-L103
repository_name: alvinlindstam/grapheme
func_path_in_repository: grapheme/api.py
func_name: contains
whole_func_string:

```python
def contains(string, substring):
    """
    Returns true if the sequence of graphemes in substring is also present in string.

    This differs from the normal Python `in` operator, since the Python operator will
    return true if the sequence of codepoints is within the other string without
    considering grapheme boundaries.

    Performance notes: Very fast if `substring not in string`, since that also means
    that the same graphemes cannot be in the two strings. Otherwise this function has
    linear time complexity in relation to the string length. It will traverse the
    sequence of graphemes until a match is found, so it will generally perform better
    for grapheme sequences that match early.

    >>> "🇸🇪" in "🇪🇸🇪🇪"
    True
    >>> grapheme.contains("🇪🇸🇪🇪", "🇸🇪")
    False
    """
    if substring not in string:
        return False

    substr_graphemes = list(graphemes(substring))

    if len(substr_graphemes) == 0:
        return True
    elif len(substr_graphemes) == 1:
        return substr_graphemes[0] in graphemes(string)
    else:
        str_iter = graphemes(string)
        str_sub_part = []
        for _ in range(len(substr_graphemes)):
            try:
                str_sub_part.append(next(str_iter))
            except StopIteration:
                return False

        for g in str_iter:
            if str_sub_part == substr_graphemes:
                return True

            str_sub_part.append(g)
            str_sub_part.pop(0)
        return str_sub_part == substr_graphemes
```
language: python
split_name: train
func_code_url: https://github.com/alvinlindstam/grapheme/blob/45cd9b8326ddf96d2618406724a7ebf273cbde03/grapheme/api.py#L105-L147
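A standalone sketch of the sliding-window comparison `contains` uses: keep a window of `len(needle)` parts and slide it across the haystack (simplified: iterating a plain `str` here yields codepoints, standing in for the grapheme iterators):

```python
from collections import deque

def window_match(haystack, needle):
    """Return True if the parts of `needle` occur consecutively in `haystack`."""
    needle_parts = list(needle)
    window = deque(maxlen=len(needle_parts))
    for part in haystack:
        window.append(part)  # the deque discards the oldest part automatically
        if list(window) == needle_parts:
            return True
    return False

assert window_match("abcd", "bc")
assert not window_match("abcd", "ac")  # "a" and "c" are not adjacent
```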
repository_name: alvinlindstam/grapheme
func_path_in_repository: grapheme/api.py
func_name: startswith
whole_func_string:

```python
def startswith(string, prefix):
    """
    Like str.startswith, but also checks that the string starts with the given
    prefix's sequence of graphemes.

    str.startswith may return true for a prefix that is not visually represented
    as a prefix if a grapheme cluster is continued after the prefix ends.

    >>> grapheme.startswith("✊🏾", "✊")
    False
    >>> "✊🏾".startswith("✊")
    True
    """
    return string.startswith(prefix) and safe_split_index(string, len(prefix)) == len(prefix)
```
language: python
split_name: train
func_code_url: https://github.com/alvinlindstam/grapheme/blob/45cd9b8326ddf96d2618406724a7ebf273cbde03/grapheme/api.py#L150-L162
repository_name: alvinlindstam/grapheme
func_path_in_repository: grapheme/api.py
func_name: endswith
whole_func_string:

```python
def endswith(string, suffix):
    """
    Like str.endswith, but also checks that the string ends with the given
    suffix's sequence of graphemes.

    str.endswith may return true for a suffix that is not visually represented
    as a suffix if a grapheme cluster is initiated before the suffix starts.

    >>> grapheme.endswith("🏳️‍🌈", "🌈")
    False
    >>> "🏳️‍🌈".endswith("🌈")
    True
    """
    expected_index = len(string) - len(suffix)
    return string.endswith(suffix) and safe_split_index(string, expected_index) == expected_index
```
language: python
split_name: train
func_code_url: https://github.com/alvinlindstam/grapheme/blob/45cd9b8326ddf96d2618406724a7ebf273cbde03/grapheme/api.py#L165-L178
repository_name: alvinlindstam/grapheme
func_path_in_repository: grapheme/api.py
func_name: safe_split_index
whole_func_string:

```python
def safe_split_index(string, max_len):
    """
    Returns the highest index up to `max_len` at which the given string can be sliced
    without breaking a grapheme.

    This is useful for when you want to split or take a substring from a string, and
    don't really care about the exact grapheme length, but don't want to risk breaking
    existing graphemes.

    This function does not normally traverse the full grapheme sequence up to the given
    length, so it can be used for arbitrarily long strings and high `max_len`s. However,
    some grapheme boundaries depend on the previous state, so the worst case performance
    is O(n). In practice, it's only very long non-broken sequences of country flags
    (represented as Regional Indicators) that will perform badly.

    The return value will always be between `0` and `len(string)`.

    >>> string = "tamil நி (ni)"
    >>> i = grapheme.safe_split_index(string, 7)
    >>> i
    6
    >>> string[:i]
    'tamil '
    >>> string[i:]
    'நி (ni)'
    """
    last_index = get_last_certain_break_index(string, max_len)
    for l in grapheme_lengths(string[last_index:]):
        if last_index + l > max_len:
            break
        last_index += l
    return last_index
```
language: python
split_name: train
func_code_url: https://github.com/alvinlindstam/grapheme/blob/45cd9b8326ddf96d2618406724a7ebf273cbde03/grapheme/api.py#L181-L209
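Grounded in the doctest above, a typical use of `safe_split_index` is grapheme-safe truncation; `truncate` below is a hypothetical helper, not part of the library:

```python
import grapheme

def truncate(string, max_len):
    """Cut `string` to at most `max_len` codepoints without splitting a grapheme."""
    return string[:grapheme.safe_split_index(string, max_len)]

assert truncate("tamil நி (ni)", 7) == "tamil "  # stops before நி rather than splitting it
```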
repository_name: awacha/sastool
func_path_in_repository: sastool/io/header.py
func_name: readB1logfile
whole_func_string:

```python
def readB1logfile(filename):
    """Read B1 logfile (*.log)

    Inputs:
        filename: the file name

    Output: A dictionary.
    """
    dic = dict()
    # try to open. If this fails, an exception is raised
    with open(filename, 'rt', encoding='utf-8') as f:
        for l in f:
            l = l.strip()
            if l[0] in '#!%\'':
                continue  # treat this line as a comment
            try:
                # find the first tuple in _logfile_data where the first element of the
                # tuple is the start of the line.
                ld = [ld_ for ld_ in _logfile_data if l.split(
                    ':', 1)[0].strip() == ld_[0]][0]
            except IndexError:
                # line is not recognized. We can still try to load it: find the first
                # colon. If found, the part of the line before it is stripped of
                # whitespace and will be the key. The part after it is stripped of
                # whitespace and parsed with misc.parse_number(). If no colon is
                # found, the whole stripped line becomes a key with the value True.
                if ':' in l:
                    key, val = [x.strip() for x in l.split(':', 1)]
                    val = misc.parse_number(val)
                    dic[key] = val
                    try:
                        # fix the character encoding in files written by a
                        # previous version of this software.
                        dic[key] = dic[key].encode('latin2').decode('utf-8')
                    except (UnicodeDecodeError, UnicodeEncodeError, AttributeError):
                        pass
                else:
                    dic[l.strip()] = True
                continue
            try:
                reader = ld[3]
            except IndexError:
                reader = str
            rhs = l.split(':', 1)[1].strip()
            try:
                vals = reader(rhs)
            except ValueError:
                if rhs.lower() == 'none':
                    vals = None
                else:
                    raise
            if isinstance(ld[1], tuple):
                # more than one field name. The reader function should return a
                # tuple here, a value for each field.
                if len(vals) != len(ld[1]):
                    raise ValueError(
                        'Cannot read %d values from line %s in file!' % (len(ld[1]), l))
                dic.update(dict(list(zip(ld[1], vals))))
            else:
                dic[ld[1]] = vals
    dic['__Origin__'] = 'B1 log'
    dic['__particle__'] = 'photon'
    return dic
```
language: python
split_name: train
func_code_url: https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/header.py#L142-L203
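A hedged usage sketch; the file name is hypothetical, but the `__Origin__` and `__particle__` keys are set unconditionally by the function:

```python
from sastool.io.header import readB1logfile

hdr = readB1logfile("processing_00123.log")  # hypothetical file name
assert hdr["__Origin__"] == "B1 log"
assert hdr["__particle__"] == "photon"
```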
repository_name: awacha/sastool
func_path_in_repository: sastool/io/header.py
func_name: writeB1logfile
whole_func_string:

```python
def writeB1logfile(filename, data):
    """Write a header structure into a B1 logfile.

    Inputs:
        filename: name of the file.
        data: header dictionary

    Notes:
        exceptions pass through to the caller.
    """
    allkeys = list(data.keys())
    f = open(filename, 'wt', encoding='utf-8')
    for ld in _logfile_data:  # process each line
        linebegin = ld[0]
        fieldnames = ld[1]
        # set the default formatter if it is not given
        if len(ld) < 3:
            formatter = str
        elif ld[2] is None:
            formatter = str
        else:
            formatter = ld[2]
        # this will contain the formatted values.
        formatted = ''
        if isinstance(fieldnames, str):
            # scalar field name, just one field. Formatter should be a
            # callable.
            if fieldnames not in allkeys:
                # this field has already been processed
                continue
            try:
                formatted = formatter(data[fieldnames])
            except KeyError:
                # field not found in param structure
                continue
        elif isinstance(fieldnames, tuple):
            # more than one field name in a tuple. In this case, formatter can
            # be a tuple of callables...
            if all([(fn not in allkeys) for fn in fieldnames]):
                # if all the fields have been processed:
                continue
            if isinstance(formatter, tuple) and len(formatter) == len(fieldnames):
                formatted = ' '.join([ft(data[fn])
                                      for ft, fn in zip(formatter, fieldnames)])
            # ...or a single callable...
            elif not isinstance(formatter, tuple):
                formatted = formatter([data[fn] for fn in fieldnames])
            # ...otherwise raise an exception.
            else:
                raise SyntaxError('Programming error: formatter should be a scalar or a tuple\
of the same length as the field names in logfile_data.')
        else:
            # fieldnames is neither a string, nor a tuple.
            raise SyntaxError(
                'Invalid syntax (programming error) in logfile_data in writeparamfile().')
        # try to get the values
        linetowrite = linebegin + ':\t' + formatted + '\n'
        f.write(linetowrite)
        if isinstance(fieldnames, tuple):
            for fn in fieldnames:
                # remove the params treated.
                if fn in allkeys:
                    allkeys.remove(fn)
        else:
            if fieldnames in allkeys:
                allkeys.remove(fieldnames)
    # write untreated params
    for k in allkeys:
        linetowrite = k + ':\t' + str(data[k]) + '\n'
        f.write(linetowrite)
    f.close()
```
language: python
split_name: train
func_code_url: https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/header.py#L206-L275
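A hedged writing sketch; the keys and the output file name are illustrative. Any key not covered by `_logfile_data` falls through to the final loop and is appended verbatim as a `key:<TAB>value` line:

```python
from sastool.io.header import writeB1logfile

hdr = {"Owner": "someone", "Comment": "re-exported"}  # illustrative keys
writeB1logfile("header_copy.log", hdr)  # hypothetical output file name
```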
repository_name: awacha/sastool
func_path_in_repository: sastool/io/header.py
func_name: readB1header
whole_func_string:

```python
def readB1header(filename):
    """Read beamline B1 (HASYLAB, Hamburg) header data

    Input
    -----
    filename: string
        the file name. If it ends with ``.gz``, it is fed through a ``gunzip`` filter

    Output
    ------
    A header dictionary.

    Examples
    --------
    read header data from 'ORG000123.DAT'::

        header = readB1header('ORG00123.DAT')
    """
    # Planck's constant times speed of light: incorrect
    # constant in the old program on hasjusi1, which was
    # taken over by the measurement program, to keep
    # compatibility with that.
    hed = {}
    if libconfig.LENGTH_UNIT == 'A':
        jusifaHC = 12396.4
    elif libconfig.LENGTH_UNIT == 'nm':
        jusifaHC = 1239.64
    else:
        raise NotImplementedError(
            'Invalid length unit: ' + str(libconfig.LENGTH_UNIT))
    if filename.upper().endswith('.GZ'):
        fid = gzip.GzipFile(filename, 'r')
    else:
        fid = open(filename, 'rt')
    lines = fid.readlines()
    fid.close()
    hed['FSN'] = int(lines[0].strip())
    hed['Hour'] = int(lines[17].strip())
    hed['Minutes'] = int(lines[18].strip())
    hed['Month'] = int(lines[19].strip())
    hed['Day'] = int(lines[20].strip())
    hed['Year'] = int(lines[21].strip()) + 2000
    hed['FSNref1'] = int(lines[23].strip())
    hed['FSNdc'] = int(lines[24].strip())
    hed['FSNsensitivity'] = int(lines[25].strip())
    hed['FSNempty'] = int(lines[26].strip())
    hed['FSNref2'] = int(lines[27].strip())
    hed['Monitor'] = float(lines[31].strip())
    hed['Anode'] = float(lines[32].strip())
    hed['MeasTime'] = float(lines[33].strip())
    hed['Temperature'] = float(lines[34].strip())
    hed['BeamPosX'] = float(lines[36].strip())
    hed['BeamPosY'] = float(lines[37].strip())
    hed['Transm'] = float(lines[41].strip())
    hed['Wavelength'] = float(lines[43].strip())
    hed['Energy'] = jusifaHC / hed['Wavelength']
    hed['Dist'] = float(lines[46].strip())
    hed['XPixel'] = 1 / float(lines[49].strip())
    hed['YPixel'] = 1 / float(lines[50].strip())
    hed['Title'] = lines[53].strip().replace(' ', '_').replace('-', '_')
    hed['MonitorDORIS'] = float(lines[56].strip())  # aka. DORIS counter
    hed['Owner'] = lines[57].strip()
    hed['RotXSample'] = float(lines[59].strip())
    hed['RotYSample'] = float(lines[60].strip())
    hed['PosSample'] = float(lines[61].strip())
    hed['DetPosX'] = float(lines[62].strip())
    hed['DetPosY'] = float(lines[63].strip())
    hed['MonitorPIEZO'] = float(lines[64].strip())  # aka. PIEZO counter
    hed['BeamsizeX'] = float(lines[66].strip())
    hed['BeamsizeY'] = float(lines[67].strip())
    hed['PosRef'] = float(lines[70].strip())
    hed['Monochromator1Rot'] = float(lines[77].strip())
    hed['Monochromator2Rot'] = float(lines[78].strip())
    hed['Heidenhain1'] = float(lines[79].strip())
    hed['Heidenhain2'] = float(lines[80].strip())
    hed['Current1'] = float(lines[81].strip())
    hed['Current2'] = float(lines[82].strip())
    hed['Detector'] = 'Unknown'
    hed['PixelSize'] = (hed['XPixel'] + hed['YPixel']) / 2.0
    hed['AnodeError'] = math.sqrt(hed['Anode'])
    hed['TransmError'] = 0
    hed['MonitorError'] = math.sqrt(hed['Monitor'])
    hed['MonitorPIEZOError'] = math.sqrt(hed['MonitorPIEZO'])
    hed['MonitorDORISError'] = math.sqrt(hed['MonitorDORIS'])
    hed['Date'] = datetime.datetime(
        hed['Year'], hed['Month'], hed['Day'], hed['Hour'], hed['Minutes'])
    hed['__Origin__'] = 'B1 original'
    hed['__particle__'] = 'photon'
    return hed
```
python
def readB1header(filename): """Read beamline B1 (HASYLAB, Hamburg) header data Input ----- filename: string the file name. If ends with ``.gz``, it is fed through a ``gunzip`` filter Output ------ A header dictionary. Examples -------- read header data from 'ORG000123.DAT':: header=readB1header('ORG00123.DAT') """ # Planck's constant times speed of light: incorrect # constant in the old program on hasjusi1, which was # taken over by the measurement program, to keep # compatibility with that. hed = {} if libconfig.LENGTH_UNIT == 'A': jusifaHC = 12396.4 elif libconfig.LENGTH_UNIT == 'nm': jusifaHC = 1239.64 else: raise NotImplementedError( 'Invalid length unit: ' + str(libconfig.LENGTH_UNIT)) if filename.upper().endswith('.GZ'): fid = gzip.GzipFile(filename, 'r') else: fid = open(filename, 'rt') lines = fid.readlines() fid.close() hed['FSN'] = int(lines[0].strip()) hed['Hour'] = int(lines[17].strip()) hed['Minutes'] = int(lines[18].strip()) hed['Month'] = int(lines[19].strip()) hed['Day'] = int(lines[20].strip()) hed['Year'] = int(lines[21].strip()) + 2000 hed['FSNref1'] = int(lines[23].strip()) hed['FSNdc'] = int(lines[24].strip()) hed['FSNsensitivity'] = int(lines[25].strip()) hed['FSNempty'] = int(lines[26].strip()) hed['FSNref2'] = int(lines[27].strip()) hed['Monitor'] = float(lines[31].strip()) hed['Anode'] = float(lines[32].strip()) hed['MeasTime'] = float(lines[33].strip()) hed['Temperature'] = float(lines[34].strip()) hed['BeamPosX'] = float(lines[36].strip()) hed['BeamPosY'] = float(lines[37].strip()) hed['Transm'] = float(lines[41].strip()) hed['Wavelength'] = float(lines[43].strip()) hed['Energy'] = jusifaHC / hed['Wavelength'] hed['Dist'] = float(lines[46].strip()) hed['XPixel'] = 1 / float(lines[49].strip()) hed['YPixel'] = 1 / float(lines[50].strip()) hed['Title'] = lines[53].strip().replace(' ', '_').replace('-', '_') hed['MonitorDORIS'] = float(lines[56].strip()) # aka. DORIS counter hed['Owner'] = lines[57].strip() hed['RotXSample'] = float(lines[59].strip()) hed['RotYSample'] = float(lines[60].strip()) hed['PosSample'] = float(lines[61].strip()) hed['DetPosX'] = float(lines[62].strip()) hed['DetPosY'] = float(lines[63].strip()) hed['MonitorPIEZO'] = float(lines[64].strip()) # aka. PIEZO counter hed['BeamsizeX'] = float(lines[66].strip()) hed['BeamsizeY'] = float(lines[67].strip()) hed['PosRef'] = float(lines[70].strip()) hed['Monochromator1Rot'] = float(lines[77].strip()) hed['Monochromator2Rot'] = float(lines[78].strip()) hed['Heidenhain1'] = float(lines[79].strip()) hed['Heidenhain2'] = float(lines[80].strip()) hed['Current1'] = float(lines[81].strip()) hed['Current2'] = float(lines[82].strip()) hed['Detector'] = 'Unknown' hed['PixelSize'] = (hed['XPixel'] + hed['YPixel']) / 2.0 hed['AnodeError'] = math.sqrt(hed['Anode']) hed['TransmError'] = 0 hed['MonitorError'] = math.sqrt(hed['Monitor']) hed['MonitorPIEZOError'] = math.sqrt(hed['MonitorPIEZO']) hed['MonitorDORISError'] = math.sqrt(hed['MonitorDORIS']) hed['Date'] = datetime.datetime( hed['Year'], hed['Month'], hed['Day'], hed['Hour'], hed['Minutes']) hed['__Origin__'] = 'B1 original' hed['__particle__'] = 'photon' return hed
[ "def", "readB1header", "(", "filename", ")", ":", "# Planck's constant times speed of light: incorrect", "# constant in the old program on hasjusi1, which was", "# taken over by the measurement program, to keep", "# compatibility with that.", "hed", "=", "{", "}", "if", "libconfig", ".", "LENGTH_UNIT", "==", "'A'", ":", "jusifaHC", "=", "12396.4", "elif", "libconfig", ".", "LENGTH_UNIT", "==", "'nm'", ":", "jusifaHC", "=", "1239.64", "else", ":", "raise", "NotImplementedError", "(", "'Invalid length unit: '", "+", "str", "(", "libconfig", ".", "LENGTH_UNIT", ")", ")", "if", "filename", ".", "upper", "(", ")", ".", "endswith", "(", "'.GZ'", ")", ":", "fid", "=", "gzip", ".", "GzipFile", "(", "filename", ",", "'r'", ")", "else", ":", "fid", "=", "open", "(", "filename", ",", "'rt'", ")", "lines", "=", "fid", ".", "readlines", "(", ")", "fid", ".", "close", "(", ")", "hed", "[", "'FSN'", "]", "=", "int", "(", "lines", "[", "0", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Hour'", "]", "=", "int", "(", "lines", "[", "17", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Minutes'", "]", "=", "int", "(", "lines", "[", "18", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Month'", "]", "=", "int", "(", "lines", "[", "19", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Day'", "]", "=", "int", "(", "lines", "[", "20", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Year'", "]", "=", "int", "(", "lines", "[", "21", "]", ".", "strip", "(", ")", ")", "+", "2000", "hed", "[", "'FSNref1'", "]", "=", "int", "(", "lines", "[", "23", "]", ".", "strip", "(", ")", ")", "hed", "[", "'FSNdc'", "]", "=", "int", "(", "lines", "[", "24", "]", ".", "strip", "(", ")", ")", "hed", "[", "'FSNsensitivity'", "]", "=", "int", "(", "lines", "[", "25", "]", ".", "strip", "(", ")", ")", "hed", "[", "'FSNempty'", "]", "=", "int", "(", "lines", "[", "26", "]", ".", "strip", "(", ")", ")", "hed", "[", "'FSNref2'", "]", "=", "int", "(", "lines", "[", "27", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Monitor'", "]", "=", "float", "(", "lines", "[", "31", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Anode'", "]", "=", "float", "(", "lines", "[", "32", "]", ".", "strip", "(", ")", ")", "hed", "[", "'MeasTime'", "]", "=", "float", "(", "lines", "[", "33", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Temperature'", "]", "=", "float", "(", "lines", "[", "34", "]", ".", "strip", "(", ")", ")", "hed", "[", "'BeamPosX'", "]", "=", "float", "(", "lines", "[", "36", "]", ".", "strip", "(", ")", ")", "hed", "[", "'BeamPosY'", "]", "=", "float", "(", "lines", "[", "37", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Transm'", "]", "=", "float", "(", "lines", "[", "41", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Wavelength'", "]", "=", "float", "(", "lines", "[", "43", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Energy'", "]", "=", "jusifaHC", "/", "hed", "[", "'Wavelength'", "]", "hed", "[", "'Dist'", "]", "=", "float", "(", "lines", "[", "46", "]", ".", "strip", "(", ")", ")", "hed", "[", "'XPixel'", "]", "=", "1", "/", "float", "(", "lines", "[", "49", "]", ".", "strip", "(", ")", ")", "hed", "[", "'YPixel'", "]", "=", "1", "/", "float", "(", "lines", "[", "50", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Title'", "]", "=", "lines", "[", "53", "]", ".", "strip", "(", ")", ".", "replace", "(", "' '", ",", "'_'", ")", ".", "replace", "(", "'-'", ",", "'_'", ")", "hed", "[", "'MonitorDORIS'", "]", "=", "float", "(", "lines", "[", "56", "]", ".", "strip", "(", ")", ")", "# aka. 
DORIS counter", "hed", "[", "'Owner'", "]", "=", "lines", "[", "57", "]", ".", "strip", "(", ")", "hed", "[", "'RotXSample'", "]", "=", "float", "(", "lines", "[", "59", "]", ".", "strip", "(", ")", ")", "hed", "[", "'RotYSample'", "]", "=", "float", "(", "lines", "[", "60", "]", ".", "strip", "(", ")", ")", "hed", "[", "'PosSample'", "]", "=", "float", "(", "lines", "[", "61", "]", ".", "strip", "(", ")", ")", "hed", "[", "'DetPosX'", "]", "=", "float", "(", "lines", "[", "62", "]", ".", "strip", "(", ")", ")", "hed", "[", "'DetPosY'", "]", "=", "float", "(", "lines", "[", "63", "]", ".", "strip", "(", ")", ")", "hed", "[", "'MonitorPIEZO'", "]", "=", "float", "(", "lines", "[", "64", "]", ".", "strip", "(", ")", ")", "# aka. PIEZO counter", "hed", "[", "'BeamsizeX'", "]", "=", "float", "(", "lines", "[", "66", "]", ".", "strip", "(", ")", ")", "hed", "[", "'BeamsizeY'", "]", "=", "float", "(", "lines", "[", "67", "]", ".", "strip", "(", ")", ")", "hed", "[", "'PosRef'", "]", "=", "float", "(", "lines", "[", "70", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Monochromator1Rot'", "]", "=", "float", "(", "lines", "[", "77", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Monochromator2Rot'", "]", "=", "float", "(", "lines", "[", "78", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Heidenhain1'", "]", "=", "float", "(", "lines", "[", "79", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Heidenhain2'", "]", "=", "float", "(", "lines", "[", "80", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Current1'", "]", "=", "float", "(", "lines", "[", "81", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Current2'", "]", "=", "float", "(", "lines", "[", "82", "]", ".", "strip", "(", ")", ")", "hed", "[", "'Detector'", "]", "=", "'Unknown'", "hed", "[", "'PixelSize'", "]", "=", "(", "hed", "[", "'XPixel'", "]", "+", "hed", "[", "'YPixel'", "]", ")", "/", "2.0", "hed", "[", "'AnodeError'", "]", "=", "math", ".", "sqrt", "(", "hed", "[", "'Anode'", "]", ")", "hed", "[", "'TransmError'", "]", "=", "0", "hed", "[", "'MonitorError'", "]", "=", "math", ".", "sqrt", "(", "hed", "[", "'Monitor'", "]", ")", "hed", "[", "'MonitorPIEZOError'", "]", "=", "math", ".", "sqrt", "(", "hed", "[", "'MonitorPIEZO'", "]", ")", "hed", "[", "'MonitorDORISError'", "]", "=", "math", ".", "sqrt", "(", "hed", "[", "'MonitorDORIS'", "]", ")", "hed", "[", "'Date'", "]", "=", "datetime", ".", "datetime", "(", "hed", "[", "'Year'", "]", ",", "hed", "[", "'Month'", "]", ",", "hed", "[", "'Day'", "]", ",", "hed", "[", "'Hour'", "]", ",", "hed", "[", "'Minutes'", "]", ")", "hed", "[", "'__Origin__'", "]", "=", "'B1 original'", "hed", "[", "'__particle__'", "]", "=", "'photon'", "return", "hed" ]
Read beamline B1 (HASYLAB, Hamburg) header data Input ----- filename: string the file name. If it ends with ``.gz``, it is fed through a ``gunzip`` filter Output ------ A header dictionary. Examples -------- read header data from 'ORG00123.DAT':: header=readB1header('ORG00123.DAT')
[ "Read", "beamline", "B1", "(", "HASYLAB", "Hamburg", ")", "header", "data" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/header.py#L278-L369
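A minimal usage sketch for readB1header; the file name is hypothetical and the import path is assumed from this record's func_path_in_repository:

from sastool.io.header import readB1header

# gzipped headers ('ORG00123.DAT.gz') are decompressed transparently
hed = readB1header('ORG00123.DAT')
print(hed['FSN'], hed['Date'], hed['Energy'])  # frame number, timestamp, photon energy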
awacha/sastool
sastool/io/header.py
_readedf_extractline
def _readedf_extractline(left, right): """Helper function to interpret lines in an EDF file header. """ functions = [int, float, lambda l:float(l.split(None, 1)[0]), lambda l:int(l.split(None, 1)[0]), dateutil.parser.parse, lambda x:str(x)] for f in functions: try: right = f(right) break except ValueError: continue return right
python
def _readedf_extractline(left, right): """Helper function to interpret lines in an EDF file header. """ functions = [int, float, lambda l:float(l.split(None, 1)[0]), lambda l:int(l.split(None, 1)[0]), dateutil.parser.parse, lambda x:str(x)] for f in functions: try: right = f(right) break except ValueError: continue return right
[ "def", "_readedf_extractline", "(", "left", ",", "right", ")", ":", "functions", "=", "[", "int", ",", "float", ",", "lambda", "l", ":", "float", "(", "l", ".", "split", "(", "None", ",", "1", ")", "[", "0", "]", ")", ",", "lambda", "l", ":", "int", "(", "l", ".", "split", "(", "None", ",", "1", ")", "[", "0", "]", ")", ",", "dateutil", ".", "parser", ".", "parse", ",", "lambda", "x", ":", "str", "(", "x", ")", "]", "for", "f", "in", "functions", ":", "try", ":", "right", "=", "f", "(", "right", ")", "break", "except", "ValueError", ":", "continue", "return", "right" ]
Helper function to interpret lines in an EDF file header.
[ "Helper", "function", "to", "interpret", "lines", "in", "an", "EDF", "file", "header", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/header.py#L372-L384
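The coercion cascade tries int, float, float/int of the first whitespace-separated token, then dateutil date parsing, and finally falls back to str. A short sketch of that behaviour (importing a private helper like this is only for illustration):

from sastool.io.header import _readedf_extractline

print(_readedf_extractline('Dim_1', '1024'))                 # -> 1024 (int)
print(_readedf_extractline('PSize_1', '0.000237'))           # -> 0.000237 (float)
print(_readedf_extractline('Date', '2011-03-04 12:00:00'))   # -> datetime via dateutil
print(_readedf_extractline('Title', 'sample in capillary'))  # -> left as str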
awacha/sastool
sastool/io/header.py
readehf
def readehf(filename): """Read EDF header (ESRF data format, as of beamline ID01 and ID02) Input ----- filename: string the file name to load Output ------ the EDF header structure in a dictionary """ f = open(filename, 'r') edf = {} if not f.readline().strip().startswith('{'): raise ValueError('Invalid file format.') for l in f: l = l.strip() if not l: continue if l.endswith('}'): break # last line of header try: left, right = l.split('=', 1) except ValueError: raise ValueError('Invalid line: ' + l) left = left.strip() right = right.strip() if not right.endswith(';'): raise ValueError( 'Invalid line (does not end with a semicolon): ' + l) right = right[:-1].strip() m = re.match('^(?P<left>.*)~(?P<continuation>\d+)$', left) if m is not None: edf[m.group('left')] = edf[m.group('left')] + right else: edf[left] = _readedf_extractline(left, right) f.close() edf['FileName'] = filename edf['__Origin__'] = 'EDF ID02' edf['__particle__'] = 'photon' return edf
python
def readehf(filename): """Read EDF header (ESRF data format, as of beamline ID01 and ID02) Input ----- filename: string the file name to load Output ------ the EDF header structure in a dictionary """ f = open(filename, 'r') edf = {} if not f.readline().strip().startswith('{'): raise ValueError('Invalid file format.') for l in f: l = l.strip() if not l: continue if l.endswith('}'): break # last line of header try: left, right = l.split('=', 1) except ValueError: raise ValueError('Invalid line: ' + l) left = left.strip() right = right.strip() if not right.endswith(';'): raise ValueError( 'Invalid line (does not end with a semicolon): ' + l) right = right[:-1].strip() m = re.match('^(?P<left>.*)~(?P<continuation>\d+)$', left) if m is not None: edf[m.group('left')] = edf[m.group('left')] + right else: edf[left] = _readedf_extractline(left, right) f.close() edf['FileName'] = filename edf['__Origin__'] = 'EDF ID02' edf['__particle__'] = 'photon' return edf
[ "def", "readehf", "(", "filename", ")", ":", "f", "=", "open", "(", "filename", ",", "'r'", ")", "edf", "=", "{", "}", "if", "not", "f", ".", "readline", "(", ")", ".", "strip", "(", ")", ".", "startswith", "(", "'{'", ")", ":", "raise", "ValueError", "(", "'Invalid file format.'", ")", "for", "l", "in", "f", ":", "l", "=", "l", ".", "strip", "(", ")", "if", "not", "l", ":", "continue", "if", "l", ".", "endswith", "(", "'}'", ")", ":", "break", "# last line of header", "try", ":", "left", ",", "right", "=", "l", ".", "split", "(", "'='", ",", "1", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'Invalid line: '", "+", "l", ")", "left", "=", "left", ".", "strip", "(", ")", "right", "=", "right", ".", "strip", "(", ")", "if", "not", "right", ".", "endswith", "(", "';'", ")", ":", "raise", "ValueError", "(", "'Invalid line (does not end with a semicolon): '", "+", "l", ")", "right", "=", "right", "[", ":", "-", "1", "]", ".", "strip", "(", ")", "m", "=", "re", ".", "match", "(", "'^(?P<left>.*)~(?P<continuation>\\d+)$'", ",", "left", ")", "if", "m", "is", "not", "None", ":", "edf", "[", "m", ".", "group", "(", "'left'", ")", "]", "=", "edf", "[", "m", ".", "group", "(", "'left'", ")", "]", "+", "right", "else", ":", "edf", "[", "left", "]", "=", "_readedf_extractline", "(", "left", ",", "right", ")", "f", ".", "close", "(", ")", "edf", "[", "'FileName'", "]", "=", "filename", "edf", "[", "'__Origin__'", "]", "=", "'EDF ID02'", "edf", "[", "'__particle__'", "]", "=", "'photon'", "return", "edf" ]
Read EDF header (ESRF data format, as used at beamlines ID01 and ID02) Input ----- filename: string the file name to load Output ------ the EDF header structure in a dictionary
[ "Read", "EDF", "header", "(", "ESRF", "data", "format", "as", "of", "beamline", "ID01", "and", "ID02", ")" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/header.py#L387-L428
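A minimal usage sketch for readehf; the EDF file name is hypothetical:

from sastool.io.header import readehf

edf = readehf('id02_sample_0001.edf')  # hypothetical ESRF EDF frame
print(edf['__Origin__'])               # 'EDF ID02'
print(sorted(edf.keys())[:5])          # keys come straight from the '{ ... }' header block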
awacha/sastool
sastool/io/header.py
readbhfv1
def readbhfv1(filename, load_data=False, bdfext='.bdf', bhfext='.bhf'): """Read header data from bdf/bhf file (Bessy Data Format v1) Input: filename: the name of the file load_data: if the matrices are to be loaded Output: bdf: the BDF header structure Adapted the bdf_read.m macro from Sylvio Haas. """ # strip the bhf or bdf extension if there. if filename.endswith(bdfext): basename = filename[:-len(bdfext)] elif filename.endswith(bhfext): basename = filename[:-len(bhfext)] else: # assume a single file of header and data. basename, bhfext = os.path.splitext(filename) bdfext = bhfext headername = basename + bhfext dataname = basename + bdfext bdf = {} bdf['his'] = [] # empty list for history bdf['C'] = {} # empty list for bdf file descriptions namelists = {} valuelists = {} with open(headername, 'rb') as fid: # if fails, an exception is raised for line in fid: if not line.strip(): continue # empty line mat = line.split(None, 1) prefix = mat[0] if prefix == '#C': left, right = mat[1].split('=', 1) left = left.strip() right = right.strip() if left in ['xdim', 'ydim']: bdf[left] = int(right) elif left in ['type', 'bdf']: bdf[left] = right if left in ['Sendtime']: bdf['C'][left] = float(right) elif left in ['xdim', 'ydim']: bdf['C'][left] = int(right) else: bdf['C'][left] = misc.parse_number(right) elif prefix.startswith("#H"): bdf['his'].append(mat[1]) # elif prefix.startswith("#DATA"): # if not load_data: # break # darray = np.fromfile(fid, dtype = bdf['type'], count = int(bdf['xdim'] * bdf['ydim'])) # bdf['data'] = np.rot90((darray.reshape(bdf['xdim'], bdf['ydim'])).astype('double').T, 1).copy() # this weird transformation is needed to get the matrix in the same form as bdf_read.m gets it. # elif prefix.startswith('#ERROR'): # if not load_data: # break # darray = np.fromfile(fid, dtype = bdf['type'], count = int(bdf['xdim'] * bdf['ydim'])) # bdf['error'] = np.rot90((darray.reshape(bdf['xdim'], bdf['ydim'])).astype('double').T, 1).copy() else: for prf in ['M', 'G', 'S', 'T']: if prefix.startswith('#C%sL' % prf): if prf not in namelists: namelists[prf] = [] namelists[prf].extend(mat[1].split()) elif prefix.startswith('#C%sV' % prf): if prf not in valuelists: valuelists[prf] = [] valuelists[prf].extend([float(x) for x in mat[1].split()]) else: continue for dictname, prfname in zip(['M', 'CG', 'CS', 'CT'], ['M', 'G', 'S', 'T']): bdf[dictname] = dict( list(zip(namelists[prfname], valuelists[prfname]))) bdf['__Origin__'] = 'BDFv1' bdf['__particle__'] = 'photon' if load_data: f = open(dataname, 'r') try: s = f.read() except IOError as ioe: # an ugly bug (M$ KB899149) in W!nd0w$ causes an error if loading too # large a file from a network drive and opening it read-only. if ioe.errno == 22: f.close() try: # one work-around is to open it read-write. f = open(dataname, 'r+b') s = f.read() except IOError: # if this does not work, inform the user to either obtain # write permission for that file or copy it to a local # drive f.close() raise IOError(22, """ You were probably trying to open a read-only file from a network drive on Windows, weren\'t you? There is a bug in Windows causing this error (see http://support.microsoft.com/default.aspx?scid=kb;en-us;899149). To work around this, please either obtain write permission for that file (I won't write anything to it, I promise!!!) or copy it to a local drive. Sorry for the inconvenience.""", ioe.filename) datasets = re.findall( '#\s*(?P<name>\w+)\[(?P<xsize>\d+):(?P<ysize>\d+)\]', s) names = [d[0] for d in datasets] xsize = [int(d[1]) for d in datasets] ysize = [int(d[2]) for d in datasets] dt = np.dtype(bdf['type']) for i in range(len(datasets)): start = s.find('#%s' % names[i]) if i < len(datasets) - 1: end = s.find('#%s' % (names[i + 1])) else: end = len(s) s1 = s[start:end] datasize = xsize[i] * ysize[i] * dt.itemsize if datasize > len(s1): # assume we are dealing with a BOOL matrix bdf[names[i]] = np.fromstring( s1[-xsize[i] * ysize[i]:], dtype=np.uint8) else: bdf[names[i]] = np.fromstring( s1[-xsize[i] * ysize[i] * dt.itemsize:], dtype=dt) # conversion: Matlab saves the array in Fortran-style ordering (columns first). # Python however loads in C-style: rows first. We need to take care: # 1) reshape from linear to (ysize,xsize) and not (xsize,ysize) # 2) transpose (swaps columns and rows) # After these operations, we only have to rotate this counter-clockwise by 90 # degrees because bdf2_write rotates by +270 degrees before saving. bdf[names[i]] = np.rot90( bdf[names[i]].reshape((ysize[i], xsize[i]), order='F'), 1) return bdf
python
def readbhfv1(filename, load_data=False, bdfext='.bdf', bhfext='.bhf'): """Read header data from bdf/bhf file (Bessy Data Format v1) Input: filename: the name of the file load_data: if the matrices are to be loaded Output: bdf: the BDF header structure Adapted the bdf_read.m macro from Sylvio Haas. """ # strip the bhf or bdf extension if there. if filename.endswith(bdfext): basename = filename[:-len(bdfext)] elif filename.endswith(bhfext): basename = filename[:-len(bhfext)] else: # assume a single file of header and data. basename, bhfext = os.path.splitext(filename) bdfext = bhfext headername = basename + bhfext dataname = basename + bdfext bdf = {} bdf['his'] = [] # empty list for history bdf['C'] = {} # empty list for bdf file descriptions namelists = {} valuelists = {} with open(headername, 'rb') as fid: # if fails, an exception is raised for line in fid: if not line.strip(): continue # empty line mat = line.split(None, 1) prefix = mat[0] if prefix == '#C': left, right = mat[1].split('=', 1) left = left.strip() right = right.strip() if left in ['xdim', 'ydim']: bdf[left] = int(right) elif left in ['type', 'bdf']: bdf[left] = right if left in ['Sendtime']: bdf['C'][left] = float(right) elif left in ['xdim', 'ydim']: bdf['C'][left] = int(right) else: bdf['C'][left] = misc.parse_number(right) elif prefix.startswith("#H"): bdf['his'].append(mat[1]) # elif prefix.startswith("#DATA"): # if not load_data: # break # darray = np.fromfile(fid, dtype = bdf['type'], count = int(bdf['xdim'] * bdf['ydim'])) # bdf['data'] = np.rot90((darray.reshape(bdf['xdim'], bdf['ydim'])).astype('double').T, 1).copy() # this weird transformation is needed to get the matrix in the same form as bdf_read.m gets it. # elif prefix.startswith('#ERROR'): # if not load_data: # break # darray = np.fromfile(fid, dtype = bdf['type'], count = int(bdf['xdim'] * bdf['ydim'])) # bdf['error'] = np.rot90((darray.reshape(bdf['xdim'], bdf['ydim'])).astype('double').T, 1).copy() else: for prf in ['M', 'G', 'S', 'T']: if prefix.startswith('#C%sL' % prf): if prf not in namelists: namelists[prf] = [] namelists[prf].extend(mat[1].split()) elif prefix.startswith('#C%sV' % prf): if prf not in valuelists: valuelists[prf] = [] valuelists[prf].extend([float(x) for x in mat[1].split()]) else: continue for dictname, prfname in zip(['M', 'CG', 'CS', 'CT'], ['M', 'G', 'S', 'T']): bdf[dictname] = dict( list(zip(namelists[prfname], valuelists[prfname]))) bdf['__Origin__'] = 'BDFv1' bdf['__particle__'] = 'photon' if load_data: f = open(dataname, 'r') try: s = f.read() except IOError as ioe: # an ugly bug (M$ KB899149) in W!nd0w$ causes an error if loading too # large a file from a network drive and opening it read-only. if ioe.errno == 22: f.close() try: # one work-around is to open it read-write. f = open(dataname, 'r+b') s = f.read() except IOError: # if this does not work, inform the user to either obtain # write permission for that file or copy it to a local # drive f.close() raise IOError(22, """ You were probably trying to open a read-only file from a network drive on Windows, weren\'t you? There is a bug in Windows causing this error (see http://support.microsoft.com/default.aspx?scid=kb;en-us;899149). To work around this, please either obtain write permission for that file (I won't write anything to it, I promise!!!) or copy it to a local drive. Sorry for the inconvenience.""", ioe.filename) datasets = re.findall( '#\s*(?P<name>\w+)\[(?P<xsize>\d+):(?P<ysize>\d+)\]', s) names = [d[0] for d in datasets] xsize = [int(d[1]) for d in datasets] ysize = [int(d[2]) for d in datasets] dt = np.dtype(bdf['type']) for i in range(len(datasets)): start = s.find('#%s' % names[i]) if i < len(datasets) - 1: end = s.find('#%s' % (names[i + 1])) else: end = len(s) s1 = s[start:end] datasize = xsize[i] * ysize[i] * dt.itemsize if datasize > len(s1): # assume we are dealing with a BOOL matrix bdf[names[i]] = np.fromstring( s1[-xsize[i] * ysize[i]:], dtype=np.uint8) else: bdf[names[i]] = np.fromstring( s1[-xsize[i] * ysize[i] * dt.itemsize:], dtype=dt) # conversion: Matlab saves the array in Fortran-style ordering (columns first). # Python however loads in C-style: rows first. We need to take care: # 1) reshape from linear to (ysize,xsize) and not (xsize,ysize) # 2) transpose (swaps columns and rows) # After these operations, we only have to rotate this counter-clockwise by 90 # degrees because bdf2_write rotates by +270 degrees before saving. bdf[names[i]] = np.rot90( bdf[names[i]].reshape((ysize[i], xsize[i]), order='F'), 1) return bdf
[ "def", "readbhfv1", "(", "filename", ",", "load_data", "=", "False", ",", "bdfext", "=", "'.bdf'", ",", "bhfext", "=", "'.bhf'", ")", ":", "# strip the bhf or bdf extension if there.", "if", "filename", ".", "endswith", "(", "bdfext", ")", ":", "basename", "=", "filename", "[", ":", "-", "len", "(", "bdfext", ")", "]", "elif", "filename", ".", "endswith", "(", "bhfext", ")", ":", "basename", "=", "filename", "[", ":", "-", "len", "(", "bhfext", ")", "]", "else", ":", "# assume a single file of header and data.", "basename", ",", "bhfext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "bdfext", "=", "bhfext", "headername", "=", "basename", "+", "bhfext", "dataname", "=", "basename", "+", "bdfext", "bdf", "=", "{", "}", "bdf", "[", "'his'", "]", "=", "[", "]", "# empty list for history", "bdf", "[", "'C'", "]", "=", "{", "}", "# empty list for bdf file descriptions", "namelists", "=", "{", "}", "valuelists", "=", "{", "}", "with", "open", "(", "headername", ",", "'rb'", ")", "as", "fid", ":", "# if fails, an exception is raised", "for", "line", "in", "fid", ":", "if", "not", "line", ".", "strip", "(", ")", ":", "continue", "# empty line", "mat", "=", "line", ".", "split", "(", "None", ",", "1", ")", "prefix", "=", "mat", "[", "0", "]", "if", "prefix", "==", "'#C'", ":", "left", ",", "right", "=", "mat", "[", "1", "]", ".", "split", "(", "'='", ",", "1", ")", "left", "=", "left", ".", "strip", "(", ")", "right", "=", "right", ".", "strip", "(", ")", "if", "left", "in", "[", "'xdim'", ",", "'ydim'", "]", ":", "bdf", "[", "left", "]", "=", "int", "(", "right", ")", "elif", "left", "in", "[", "'type'", ",", "'bdf'", "]", ":", "bdf", "[", "left", "]", "=", "right", "if", "left", "in", "[", "'Sendtime'", "]", ":", "bdf", "[", "'C'", "]", "[", "left", "]", "=", "float", "(", "right", ")", "elif", "left", "in", "[", "'xdim'", ",", "'ydim'", "]", ":", "bdf", "[", "'C'", "]", "[", "left", "]", "=", "int", "(", "right", ")", "else", ":", "bdf", "[", "'C'", "]", "[", "left", "]", "=", "misc", ".", "parse_number", "(", "right", ")", "elif", "prefix", ".", "startswith", "(", "\"#H\"", ")", ":", "bdf", "[", "'his'", "]", ".", "append", "(", "mat", "[", "1", "]", ")", "# elif prefix.startswith(\"#DATA\"):", "# if not load_data:", "# break", "# darray = np.fromfile(fid, dtype = bdf['type'], count = int(bdf['xdim'] * bdf['ydim']))", "# bdf['data'] = np.rot90((darray.reshape(bdf['xdim'], bdf['ydim'])).astype('double').T, 1).copy() # this weird transformation is needed to get the matrix in the same form as bdf_read.m gets it.", "# elif prefix.startswith('#ERROR'):", "# if not load_data:", "# break", "# darray = np.fromfile(fid, dtype = bdf['type'], count = int(bdf['xdim'] * bdf['ydim']))", "# bdf['error'] = np.rot90((darray.reshape(bdf['xdim'], bdf['ydim'])).astype('double').T, 1).copy()", "else", ":", "for", "prf", "in", "[", "'M'", ",", "'G'", ",", "'S'", ",", "'T'", "]", ":", "if", "prefix", ".", "startswith", "(", "'#C%sL'", "%", "prf", ")", ":", "if", "prf", "not", "in", "namelists", ":", "namelists", "[", "prf", "]", "=", "[", "]", "namelists", "[", "prf", "]", ".", "extend", "(", "mat", "[", "1", "]", ".", "split", "(", ")", ")", "elif", "prefix", ".", "startswith", "(", "'#C%sV'", "%", "prf", ")", ":", "if", "prf", "not", "in", "valuelists", ":", "valuelists", "[", "prf", "]", "=", "[", "]", "valuelists", "[", "prf", "]", ".", "extend", "(", "[", "float", "(", "x", ")", "for", "x", "in", "mat", "[", "1", "]", ".", "split", "(", ")", "]", ")", "else", ":", "continue", "for", 
"dictname", ",", "prfname", "in", "zip", "(", "[", "'M'", ",", "'CG'", ",", "'CS'", ",", "'CT'", "]", ",", "[", "'M'", ",", "'G'", ",", "'S'", ",", "'T'", "]", ")", ":", "bdf", "[", "dictname", "]", "=", "dict", "(", "list", "(", "zip", "(", "namelists", "[", "prfname", "]", ",", "valuelists", "[", "prfname", "]", ")", ")", ")", "bdf", "[", "'__Origin__'", "]", "=", "'BDFv1'", "bdf", "[", "'__particle__'", "]", "=", "'photon'", "if", "load_data", ":", "f", "=", "open", "(", "dataname", ",", "'r'", ")", "try", ":", "s", "=", "f", ".", "read", "(", ")", "except", "IOError", "as", "ioe", ":", "# an ugly bug (M$ KB899149) in W!nd0w$ causes an error if loading too", "# large a file from a network drive and opening it read-only.", "if", "ioe", ".", "errno", "==", "22", ":", "f", ".", "close", "(", ")", "try", ":", "# one work-around is to open it read-write.", "f", "=", "open", "(", "dataname", ",", "'r+b'", ")", "s", "=", "f", ".", "read", "(", ")", "except", "IOError", ":", "# if this does not work, inform the user to either obtain", "# write permission for that file or copy it to a local", "# drive", "f", ".", "close", "(", ")", "raise", "IOError", "(", "22", ",", "\"\"\"\nYou were probably trying to open a read-only file from a network drive on\nWindows, weren\\'t you? There is a bug in Windows causing this error\n(see http://support.microsoft.com/default.aspx?scid=kb;en-us;899149).\nTo work around this, please either obtain write permission for that file\n(I won't write anything to it, I promise!!!) or copy it to a local drive.\nSorry for the inconvenience.\"\"\"", ",", "ioe", ".", "filename", ")", "datasets", "=", "re", ".", "findall", "(", "'#\\s*(?P<name>\\w+)\\[(?P<xsize>\\d+):(?P<ysize>\\d+)\\]'", ",", "s", ")", "names", "=", "[", "d", "[", "0", "]", "for", "d", "in", "datasets", "]", "xsize", "=", "[", "int", "(", "d", "[", "1", "]", ")", "for", "d", "in", "datasets", "]", "ysize", "=", "[", "int", "(", "d", "[", "2", "]", ")", "for", "d", "in", "datasets", "]", "dt", "=", "np", ".", "dtype", "(", "bdf", "[", "'type'", "]", ")", "for", "i", "in", "range", "(", "len", "(", "datasets", ")", ")", ":", "start", "=", "s", ".", "find", "(", "'#%s'", "%", "names", "[", "i", "]", ")", "if", "i", "<", "len", "(", "datasets", ")", "-", "1", ":", "end", "=", "s", ".", "find", "(", "'#%s'", "%", "(", "names", "[", "i", "+", "1", "]", ")", ")", "else", ":", "end", "=", "len", "(", "s", ")", "s1", "=", "s", "[", "start", ":", "end", "]", "datasize", "=", "xsize", "[", "i", "]", "*", "ysize", "[", "i", "]", "*", "dt", ".", "itemsize", "if", "datasize", ">", "len", "(", "s1", ")", ":", "# assume we are dealing with a BOOL matrix", "bdf", "[", "names", "[", "i", "]", "]", "=", "np", ".", "fromstring", "(", "s1", "[", "-", "xsize", "[", "i", "]", "*", "ysize", "[", "i", "]", ":", "]", ",", "dtype", "=", "np", ".", "uint8", ")", "else", ":", "bdf", "[", "names", "[", "i", "]", "]", "=", "np", ".", "fromstring", "(", "s1", "[", "-", "xsize", "[", "i", "]", "*", "ysize", "[", "i", "]", "*", "dt", ".", "itemsize", ":", "]", ",", "dtype", "=", "dt", ")", "# conversion: Matlab saves the array in Fortran-style ordering (columns first).", "# Python however loads in C-style: rows first. 
We need to take care:", "# 1) reshape from linear to (ysize,xsize) and not (xsize,ysize)", "# 2) transpose (swaps columns and rows)", "# After these operations, we only have to rotate this counter-clockwise by 90", "# degrees because bdf2_write rotates by +270 degrees before saving.", "bdf", "[", "names", "[", "i", "]", "]", "=", "np", ".", "rot90", "(", "bdf", "[", "names", "[", "i", "]", "]", ".", "reshape", "(", "(", "ysize", "[", "i", "]", ",", "xsize", "[", "i", "]", ")", ",", "order", "=", "'F'", ")", ",", "1", ")", "return", "bdf" ]
Read header data from bdf/bhf file (Bessy Data Format v1) Input: filename: the name of the file load_data: if the matrices are to be loaded Output: bdf: the BDF header structure Adapted the bdf_read.m macro from Sylvio Haas.
[ "Read", "header", "data", "from", "bdf", "/", "bhf", "file", "(", "Bessy", "Data", "Format", "v1", ")" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/header.py#L545-L678
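A minimal usage sketch for readbhfv1; the file names are hypothetical, and the xdim/ydim/type keys are assumed to be present in the header (they come from the '#C' lines):

from sastool.io.header import readbhfv1

bdf = readbhfv1('s12345.bhf')                  # header only
bdf = readbhfv1('s12345.bhf', load_data=True)  # also loads matrices from s12345.bdf
print(bdf['xdim'], bdf['ydim'], bdf['type'])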
awacha/sastool
sastool/io/header.py
readmarheader
def readmarheader(filename): """Read a header from a MarResearch .image file.""" with open(filename, 'rb') as f: intheader = np.fromstring(f.read(10 * 4), np.int32) floatheader = np.fromstring(f.read(15 * 4), '<f4') strheader = f.read(24) f.read(4) otherstrings = [f.read(16) for i in range(29)] return {'Xsize': intheader[0], 'Ysize': intheader[1], 'MeasTime': intheader[8], 'BeamPosX': floatheader[7], 'BeamPosY': floatheader[8], 'Wavelength': floatheader[9], 'Dist': floatheader[10], '__Origin__': 'MarResearch .image', 'recordlength': intheader[2], 'highintensitypixels': intheader[4], 'highintensityrecords': intheader[5], 'Date': dateutil.parser.parse(strheader), 'Detector': 'MARCCD', '__particle__': 'photon'}
python
def readmarheader(filename): """Read a header from a MarResearch .image file.""" with open(filename, 'rb') as f: intheader = np.fromstring(f.read(10 * 4), np.int32) floatheader = np.fromstring(f.read(15 * 4), '<f4') strheader = f.read(24) f.read(4) otherstrings = [f.read(16) for i in range(29)] return {'Xsize': intheader[0], 'Ysize': intheader[1], 'MeasTime': intheader[8], 'BeamPosX': floatheader[7], 'BeamPosY': floatheader[8], 'Wavelength': floatheader[9], 'Dist': floatheader[10], '__Origin__': 'MarResearch .image', 'recordlength': intheader[2], 'highintensitypixels': intheader[4], 'highintensityrecords': intheader[5], 'Date': dateutil.parser.parse(strheader), 'Detector': 'MARCCD', '__particle__': 'photon'}
[ "def", "readmarheader", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "intheader", "=", "np", ".", "fromstring", "(", "f", ".", "read", "(", "10", "*", "4", ")", ",", "np", ".", "int32", ")", "floatheader", "=", "np", ".", "fromstring", "(", "f", ".", "read", "(", "15", "*", "4", ")", ",", "'<f4'", ")", "strheader", "=", "f", ".", "read", "(", "24", ")", "f", ".", "read", "(", "4", ")", "otherstrings", "=", "[", "f", ".", "read", "(", "16", ")", "for", "i", "in", "range", "(", "29", ")", "]", "return", "{", "'Xsize'", ":", "intheader", "[", "0", "]", ",", "'Ysize'", ":", "intheader", "[", "1", "]", ",", "'MeasTime'", ":", "intheader", "[", "8", "]", ",", "'BeamPosX'", ":", "floatheader", "[", "7", "]", ",", "'BeamPosY'", ":", "floatheader", "[", "8", "]", ",", "'Wavelength'", ":", "floatheader", "[", "9", "]", ",", "'Dist'", ":", "floatheader", "[", "10", "]", ",", "'__Origin__'", ":", "'MarResearch .image'", ",", "'recordlength'", ":", "intheader", "[", "2", "]", ",", "'highintensitypixels'", ":", "intheader", "[", "4", "]", ",", "'highintensityrecords'", ":", "intheader", "[", "5", "]", ",", "'Date'", ":", "dateutil", ".", "parser", ".", "parse", "(", "strheader", ")", ",", "'Detector'", ":", "'MARCCD'", ",", "'__particle__'", ":", "'photon'", "}" ]
Read a header from a MarResearch .image file.
[ "Read", "a", "header", "from", "a", "MarResearch", ".", "image", "file", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/header.py#L745-L760
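A minimal usage sketch for readmarheader; the frame name is hypothetical:

from sastool.io.header import readmarheader

hed = readmarheader('frame_0001.image')  # hypothetical MarCCD frame
print(hed['Xsize'], hed['Ysize'], hed['Wavelength'], hed['Dist'])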
awacha/sastool
sastool/io/header.py
readBerSANS
def readBerSANS(filename): """Read a header from a SANS file (produced usually by BerSANS)""" hed = {'Comment': ''} translate = {'Lambda': 'Wavelength', 'Title': 'Owner', 'SampleName': 'Title', 'BeamcenterX': 'BeamPosY', 'BeamcenterY': 'BeamPosX', 'Time': 'MeasTime', 'TotalTime': 'MeasTime', 'Moni1': 'Monitor', 'Moni2': 'Monitor', 'Moni': 'Monitor', 'Transmission': 'Transm', } with open(filename, 'rt') as f: comment_next = False for l in f: l = l.strip() if comment_next: hed['Comment'] = hed['Comment'] + '\n' + l comment_next = False elif l.startswith('%Counts'): break elif l.startswith('%Comment'): comment_next = True elif l.startswith('%'): continue elif l.split('=', 1)[0] in translate: hed[translate[l.split('=', 1)[0]]] = misc.parse_number( l.split('=', 1)[1]) else: try: hed[l.split('=', 1)[0]] = misc.parse_number( l.split('=', 1)[1]) except IndexError: print(l.split('=', 1)) if 'FileName' in hed: m = re.match('D(\d+)\.(\d+)', hed['FileName']) if m is not None: hed['FSN'] = int(m.groups()[0]) hed['suffix'] = int(m.groups()[1]) if 'FileDate' in hed: hed['Date'] = dateutil.parser.parse(hed['FileDate']) if 'FileTime' in hed: hed['Date'] = datetime.datetime.combine( hed['Date'].date(), dateutil.parser.parse(hed['FileTime']).time()) hed['__Origin__'] = 'BerSANS' if 'SD' in hed: hed['Dist'] = hed['SD'] * 1000 if hed['Comment'].startswith('\n'): hed['Comment'] = hed['Comment'][1:] hed['__particle__'] = 'neutron' hed['Wavelength'] *= 10 # convert from nanometres to Angstroems return hed
python
def readBerSANS(filename): """Read a header from a SANS file (produced usually by BerSANS)""" hed = {'Comment': ''} translate = {'Lambda': 'Wavelength', 'Title': 'Owner', 'SampleName': 'Title', 'BeamcenterX': 'BeamPosY', 'BeamcenterY': 'BeamPosX', 'Time': 'MeasTime', 'TotalTime': 'MeasTime', 'Moni1': 'Monitor', 'Moni2': 'Monitor', 'Moni': 'Monitor', 'Transmission': 'Transm', } with open(filename, 'rt') as f: comment_next = False for l in f: l = l.strip() if comment_next: hed['Comment'] = hed['Comment'] + '\n' + l comment_next = False elif l.startswith('%Counts'): break elif l.startswith('%Comment'): comment_next = True elif l.startswith('%'): continue elif l.split('=', 1)[0] in translate: hed[translate[l.split('=', 1)[0]]] = misc.parse_number( l.split('=', 1)[1]) else: try: hed[l.split('=', 1)[0]] = misc.parse_number( l.split('=', 1)[1]) except IndexError: print(l.split('=', 1)) if 'FileName' in hed: m = re.match('D(\d+)\.(\d+)', hed['FileName']) if m is not None: hed['FSN'] = int(m.groups()[0]) hed['suffix'] = int(m.groups()[1]) if 'FileDate' in hed: hed['Date'] = dateutil.parser.parse(hed['FileDate']) if 'FileTime' in hed: hed['Date'] = datetime.datetime.combine( hed['Date'].date(), dateutil.parser.parse(hed['FileTime']).time()) hed['__Origin__'] = 'BerSANS' if 'SD' in hed: hed['Dist'] = hed['SD'] * 1000 if hed['Comment'].startswith('\n'): hed['Comment'] = hed['Comment'][1:] hed['__particle__'] = 'neutron' hed['Wavelength'] *= 10 # convert from nanometres to Angstroems return hed
[ "def", "readBerSANS", "(", "filename", ")", ":", "hed", "=", "{", "'Comment'", ":", "''", "}", "translate", "=", "{", "'Lambda'", ":", "'Wavelength'", ",", "'Title'", ":", "'Owner'", ",", "'SampleName'", ":", "'Title'", ",", "'BeamcenterX'", ":", "'BeamPosY'", ",", "'BeamcenterY'", ":", "'BeamPosX'", ",", "'Time'", ":", "'MeasTime'", ",", "'TotalTime'", ":", "'MeasTime'", ",", "'Moni1'", ":", "'Monitor'", ",", "'Moni2'", ":", "'Monitor'", ",", "'Moni'", ":", "'Monitor'", ",", "'Transmission'", ":", "'Transm'", ",", "}", "with", "open", "(", "filename", ",", "'rt'", ")", "as", "f", ":", "comment_next", "=", "False", "for", "l", "in", "f", ":", "l", "=", "l", ".", "strip", "(", ")", "if", "comment_next", ":", "hed", "[", "'Comment'", "]", "=", "hed", "[", "'Comment'", "]", "+", "'\\n'", "+", "l", "comment_next", "=", "False", "elif", "l", ".", "startswith", "(", "'%Counts'", ")", ":", "break", "elif", "l", ".", "startswith", "(", "'%Comment'", ")", ":", "comment_next", "=", "True", "elif", "l", ".", "startswith", "(", "'%'", ")", ":", "continue", "elif", "l", ".", "split", "(", "'='", ",", "1", ")", "[", "0", "]", "in", "translate", ":", "hed", "[", "translate", "[", "l", ".", "split", "(", "'='", ",", "1", ")", "[", "0", "]", "]", "]", "=", "misc", ".", "parse_number", "(", "l", ".", "split", "(", "'='", ",", "1", ")", "[", "1", "]", ")", "else", ":", "try", ":", "hed", "[", "l", ".", "split", "(", "'='", ",", "1", ")", "[", "0", "]", "]", "=", "misc", ".", "parse_number", "(", "l", ".", "split", "(", "'='", ",", "1", ")", "[", "1", "]", ")", "except", "IndexError", ":", "print", "(", "l", ".", "split", "(", "'='", ",", "1", ")", ")", "if", "'FileName'", "in", "hed", ":", "m", "=", "re", ".", "match", "(", "'D(\\d+)\\.(\\d+)'", ",", "hed", "[", "'FileName'", "]", ")", "if", "m", "is", "not", "None", ":", "hed", "[", "'FSN'", "]", "=", "int", "(", "m", ".", "groups", "(", ")", "[", "0", "]", ")", "hed", "[", "'suffix'", "]", "=", "int", "(", "m", ".", "groups", "(", ")", "[", "1", "]", ")", "if", "'FileDate'", "in", "hed", ":", "hed", "[", "'Date'", "]", "=", "dateutil", ".", "parser", ".", "parse", "(", "hed", "[", "'FileDate'", "]", ")", "if", "'FileTime'", "in", "hed", ":", "hed", "[", "'Date'", "]", "=", "datetime", ".", "datetime", ".", "combine", "(", "hed", "[", "'Date'", "]", ".", "date", "(", ")", ",", "dateutil", ".", "parser", ".", "parse", "(", "hed", "[", "'FileTime'", "]", ")", ".", "time", "(", ")", ")", "hed", "[", "'__Origin__'", "]", "=", "'BerSANS'", "if", "'SD'", "in", "hed", ":", "hed", "[", "'Dist'", "]", "=", "hed", "[", "'SD'", "]", "*", "1000", "if", "hed", "[", "'Comment'", "]", ".", "startswith", "(", "'\\n'", ")", ":", "hed", "[", "'Comment'", "]", "=", "hed", "[", "'Comment'", "]", "[", "1", ":", "]", "hed", "[", "'__particle__'", "]", "=", "'neutron'", "hed", "[", "'Wavelength'", "]", "*=", "10", "# convert from nanometres to Angstroems", "return", "hed" ]
Read a header from a SANS file (usually produced by BerSANS)
[ "Read", "a", "header", "from", "a", "SANS", "file", "(", "produced", "usually", "by", "BerSANS", ")" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/header.py#L763-L817
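A minimal usage sketch for readBerSANS; the file name is hypothetical. Note that the returned wavelength has already been converted from nanometres to Ångströms:

from sastool.io.header import readBerSANS

hed = readBerSANS('D0012345.001')  # hypothetical BerSANS file
print(hed['Wavelength'], hed['Dist'], hed['__particle__'])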
awacha/sastool
sastool/fitting/fitfunctions/basic.py
Sine
def Sine(x, a, omega, phi, y0): """Sine function Inputs: ------- ``x``: independent variable ``a``: amplitude ``omega``: circular frequency ``phi``: phase ``y0``: offset Formula: -------- ``a*sin(x*omega + phi)+y0`` """ return a * np.sin(x * omega + phi) + y0
python
def Sine(x, a, omega, phi, y0): """Sine function Inputs: ------- ``x``: independent variable ``a``: amplitude ``omega``: circular frequency ``phi``: phase ``y0``: offset Formula: -------- ``a*sin(x*omega + phi)+y0`` """ return a * np.sin(x * omega + phi) + y0
[ "def", "Sine", "(", "x", ",", "a", ",", "omega", ",", "phi", ",", "y0", ")", ":", "return", "a", "*", "np", ".", "sin", "(", "x", "*", "omega", "+", "phi", ")", "+", "y0" ]
Sine function Inputs: ------- ``x``: independent variable ``a``: amplitude ``omega``: circular frequency ``phi``: phase ``y0``: offset Formula: -------- ``a*sin(x*omega + phi)+y0``
[ "Sine", "function" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/basic.py#L23-L38
awacha/sastool
sastool/fitting/fitfunctions/basic.py
Cosine
def Cosine(x, a, omega, phi, y0): """Cosine function Inputs: ------- ``x``: independent variable ``a``: amplitude ``omega``: circular frequency ``phi``: phase ``y0``: offset Formula: -------- ``a*cos(x*omega + phi)+y0`` """ return a * np.cos(x * omega + phi) + y0
python
def Cosine(x, a, omega, phi, y0): """Cosine function Inputs: ------- ``x``: independent variable ``a``: amplitude ``omega``: circular frequency ``phi``: phase ``y0``: offset Formula: -------- ``a*cos(x*omega + phi)+y0`` """ return a * np.cos(x * omega + phi) + y0
[ "def", "Cosine", "(", "x", ",", "a", ",", "omega", ",", "phi", ",", "y0", ")", ":", "return", "a", "*", "np", ".", "cos", "(", "x", "*", "omega", "+", "phi", ")", "+", "y0" ]
Cosine function Inputs: ------- ``x``: independent variable ``a``: amplitude ``omega``: circular frequency ``phi``: phase ``y0``: offset Formula: -------- ``a*cos(x*omega + phi)+y0``
[ "Cosine", "function" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/basic.py#L41-L56
awacha/sastool
sastool/fitting/fitfunctions/basic.py
Square
def Square(x, a, b, c): """Second order polynomial Inputs: ------- ``x``: independent variable ``a``: coefficient of the second-order term ``b``: coefficient of the first-order term ``c``: additive constant Formula: -------- ``a*x^2 + b*x + c`` """ return a * x ** 2 + b * x + c
python
def Square(x, a, b, c): """Second order polynomial Inputs: ------- ``x``: independent variable ``a``: coefficient of the second-order term ``b``: coefficient of the first-order term ``c``: additive constant Formula: -------- ``a*x^2 + b*x + c`` """ return a * x ** 2 + b * x + c
[ "def", "Square", "(", "x", ",", "a", ",", "b", ",", "c", ")", ":", "return", "a", "*", "x", "**", "2", "+", "b", "*", "x", "+", "c" ]
Second order polynomial Inputs: ------- ``x``: independent variable ``a``: coefficient of the second-order term ``b``: coefficient of the first-order term ``c``: additive constant Formula: -------- ``a*x^2 + b*x + c``
[ "Second", "order", "polynomial" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/basic.py#L59-L73
awacha/sastool
sastool/fitting/fitfunctions/basic.py
Cube
def Cube(x, a, b, c, d): """Third order polynomial Inputs: ------- ``x``: independent variable ``a``: coefficient of the third-order term ``b``: coefficient of the second-order term ``c``: coefficient of the first-order term ``d``: additive constant Formula: -------- ``a*x^3 + b*x^2 + c*x + d`` """ return a * x ** 3 + b * x ** 2 + c * x + d
python
def Cube(x, a, b, c, d): """Third order polynomial Inputs: ------- ``x``: independent variable ``a``: coefficient of the third-order term ``b``: coefficient of the second-order term ``c``: coefficient of the first-order term ``d``: additive constant Formula: -------- ``a*x^3 + b*x^2 + c*x + d`` """ return a * x ** 3 + b * x ** 2 + c * x + d
[ "def", "Cube", "(", "x", ",", "a", ",", "b", ",", "c", ",", "d", ")", ":", "return", "a", "*", "x", "**", "3", "+", "b", "*", "x", "**", "2", "+", "c", "*", "x", "+", "d" ]
Third order polynomial Inputs: ------- ``x``: independent variable ``a``: coefficient of the third-order term ``b``: coefficient of the second-order term ``c``: coefficient of the first-order term ``d``: additive constant Formula: -------- ``a*x^3 + b*x^2 + c*x + d``
[ "Third", "order", "polynomial" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/basic.py#L76-L91
awacha/sastool
sastool/fitting/fitfunctions/basic.py
Exponential
def Exponential(x, a, tau, y0): """Exponential function Inputs: ------- ``x``: independent variable ``a``: scaling factor ``tau``: time constant ``y0``: additive constant Formula: -------- ``a*exp(x/tau)+y0`` """ return np.exp(x / tau) * a + y0
python
def Exponential(x, a, tau, y0): """Exponential function Inputs: ------- ``x``: independent variable ``a``: scaling factor ``tau``: time constant ``y0``: additive constant Formula: -------- ``a*exp(x/tau)+y0`` """ return np.exp(x / tau) * a + y0
[ "def", "Exponential", "(", "x", ",", "a", ",", "tau", ",", "y0", ")", ":", "return", "np", ".", "exp", "(", "x", "/", "tau", ")", "*", "a", "+", "y0" ]
Exponential function Inputs: ------- ``x``: independent variable ``a``: scaling factor ``tau``: time constant ``y0``: additive constant Formula: -------- ``a*exp(x/tau)+y0``
[ "Exponential", "function" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/basic.py#L110-L124
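With this sign convention a positive tau gives exponential growth; a decay towards y0 needs a negative tau. A small sketch (parameter values are arbitrary):

import numpy as np
from sastool.fitting.fitfunctions.basic import Exponential

x = np.linspace(0, 10, 101)
decay = Exponential(x, a=2.0, tau=-3.0, y0=0.5)  # 2*exp(-x/3) + 0.5
print(decay[0], decay[-1])                       # starts at 2.5, approaches 0.5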
awacha/sastool
sastool/fitting/fitfunctions/basic.py
Lorentzian
def Lorentzian(x, a, x0, sigma, y0): """Lorentzian peak Inputs: ------- ``x``: independent variable ``a``: scaling factor (extremal value) ``x0``: center ``sigma``: half width at half maximum ``y0``: additive constant Formula: -------- ``a/(1+((x-x0)/sigma)^2)+y0`` """ return a / (1 + ((x - x0) / sigma) ** 2) + y0
python
def Lorentzian(x, a, x0, sigma, y0): """Lorentzian peak Inputs: ------- ``x``: independent variable ``a``: scaling factor (extremal value) ``x0``: center ``sigma``: half width at half maximum ``y0``: additive constant Formula: -------- ``a/(1+((x-x0)/sigma)^2)+y0`` """ return a / (1 + ((x - x0) / sigma) ** 2) + y0
[ "def", "Lorentzian", "(", "x", ",", "a", ",", "x0", ",", "sigma", ",", "y0", ")", ":", "return", "a", "/", "(", "1", "+", "(", "(", "x", "-", "x0", ")", "/", "sigma", ")", "**", "2", ")", "+", "y0" ]
Lorentzian peak Inputs: ------- ``x``: independent variable ``a``: scaling factor (extremal value) ``x0``: center ``sigma``: half width at half maximum ``y0``: additive constant Formula: -------- ``a/(1+((x-x0)/sigma)^2)+y0``
[ "Lorentzian", "peak" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/basic.py#L127-L142
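A quick numerical check that sigma really is the half width at half maximum in this parametrization: at x0 ± sigma the curve sits halfway between y0 and the peak value y0 + a:

import numpy as np
from sastool.fitting.fitfunctions.basic import Lorentzian

a, x0, sigma, y0 = 2.0, 1.0, 0.5, 0.1
halfway = Lorentzian(np.array([x0 - sigma, x0 + sigma]), a, x0, sigma, y0)
print(np.allclose(halfway, y0 + a / 2))  # True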
awacha/sastool
sastool/fitting/fitfunctions/basic.py
Gaussian
def Gaussian(x, a, x0, sigma, y0): """Gaussian peak Inputs: ------- ``x``: independent variable ``a``: scaling factor (extremal value) ``x0``: center ``sigma``: width parameter (standard deviation; the half width at half maximum is sigma*sqrt(2*ln(2))) ``y0``: additive constant Formula: -------- ``a*exp(-(x-x0)^2/(2*sigma^2))+y0`` """ return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) + y0
python
def Gaussian(x, a, x0, sigma, y0): """Gaussian peak Inputs: ------- ``x``: independent variable ``a``: scaling factor (extremal value) ``x0``: center ``sigma``: width parameter (standard deviation; the half width at half maximum is sigma*sqrt(2*ln(2))) ``y0``: additive constant Formula: -------- ``a*exp(-(x-x0)^2/(2*sigma^2))+y0`` """ return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) + y0
[ "def", "Gaussian", "(", "x", ",", "a", ",", "x0", ",", "sigma", ",", "y0", ")", ":", "return", "a", "*", "np", ".", "exp", "(", "-", "(", "x", "-", "x0", ")", "**", "2", "/", "(", "2", "*", "sigma", "**", "2", ")", ")", "+", "y0" ]
Gaussian peak Inputs: ------- ``x``: independent variable ``a``: scaling factor (extremal value) ``x0``: center ``sigma``: width parameter (standard deviation; the half width at half maximum is sigma*sqrt(2*ln(2))) ``y0``: additive constant Formula: -------- ``a*exp(-(x-x0)^2/(2*sigma^2))+y0``
[ "Gaussian", "peak" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/basic.py#L145-L160
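These fit functions plug directly into scipy.optimize.curve_fit, since the independent variable comes first; a sketch with synthetic data (noise level and starting values are arbitrary):

import numpy as np
from scipy.optimize import curve_fit
from sastool.fitting.fitfunctions.basic import Gaussian

rng = np.random.default_rng(0)
x = np.linspace(-5, 5, 201)
y = Gaussian(x, 3.0, 0.7, 0.9, 0.2) + rng.normal(0.0, 0.05, x.size)
popt, pcov = curve_fit(Gaussian, x, y, p0=(2.0, 0.0, 1.0, 0.0))
print(popt)  # approximately [3.0, 0.7, 0.9, 0.2]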
awacha/sastool
sastool/fitting/fitfunctions/basic.py
LogNormal
def LogNormal(x, a, mu, sigma): """PDF of a log-normal distribution Inputs: ------- ``x``: independent variable ``a``: amplitude ``mu``: center parameter ``sigma``: width parameter Formula: -------- ``a/(2*pi*sigma^2*x^2)^0.5 * exp(-(log(x)-mu)^2/(2*sigma^2))`` """ return a / np.sqrt(2 * np.pi * sigma ** 2 * x ** 2) *\ np.exp(-(np.log(x) - mu) ** 2 / (2 * sigma ** 2))
python
def LogNormal(x, a, mu, sigma): """PDF of a log-normal distribution Inputs: ------- ``x``: independent variable ``a``: amplitude ``mu``: center parameter ``sigma``: width parameter Formula: -------- ``a/(2*pi*sigma^2*x^2)^0.5 * exp(-(log(x)-mu)^2/(2*sigma^2))`` """ return a / np.sqrt(2 * np.pi * sigma ** 2 * x ** 2) *\ np.exp(-(np.log(x) - mu) ** 2 / (2 * sigma ** 2))
[ "def", "LogNormal", "(", "x", ",", "a", ",", "mu", ",", "sigma", ")", ":", "return", "a", "/", "np", ".", "sqrt", "(", "2", "*", "np", ".", "pi", "*", "sigma", "**", "2", "*", "x", "**", "2", ")", "*", "np", ".", "exp", "(", "-", "(", "np", ".", "log", "(", "x", ")", "-", "mu", ")", "**", "2", "/", "(", "2", "*", "sigma", "**", "2", ")", ")" ]
PDF of a log-normal distribution Inputs: ------- ``x``: independent variable ``a``: amplitude ``mu``: center parameter ``sigma``: width parameter Formula: -------- ``a/(2*pi*sigma^2*x^2)^0.5 * exp(-(log(x)-mu)^2/(2*sigma^2))``
[ "PDF", "of", "a", "log", "-", "normal", "distribution" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/basic.py#L163-L178
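With a=1 this is the standard log-normal PDF, so it integrates to one over x > 0; a quick check (the parameter values are arbitrary):

import numpy as np
from scipy.integrate import quad
from sastool.fitting.fitfunctions.basic import LogNormal

total, _ = quad(lambda t: LogNormal(t, 1.0, 0.3, 0.6), 0, np.inf)
print(total)  # ~1.0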
awacha/sastool
sastool/utils2d/integrate.py
radintpix
def radintpix(data, dataerr, bcx, bcy, mask=None, pix=None, returnavgpix=False, phi0=0, dphi=0, returnmask=False, symmetric_sector=False, doslice=False, errorpropagation=2, autoqrange_linear=True): """Radial integration (averaging) on the detector plane Inputs: data: scattering pattern matrix (np.ndarray, dtype: np.double) dataerr: error matrix (np.ndarray, dtype: np.double; or None) bcx, bcy: beam position, counting from 1 mask: mask matrix (np.ndarray, dtype: np.uint8) pix: pixel distance values (abscissa) from origin. If None, auto-determine. returnavgpix: if the averaged pixel values should be returned phi0: starting angle (radian) for sector integration. If doslice is True, this is the angle of the slice. dphi: angular width (radian) of the sector or width (pixels) of the slice. If negative or zero, full radial average is requested. returnmask: if the effective mask matrix is to be returned symmetric_sector: the sector defined by phi0+pi is also to be used for integration. doslice: if slicing is to be done instead of sector averaging. autoqrange_linear: if the automatically determined q-range is to be linspace-d. Otherwise log10 spacing will be applied. Outputs: pix, Intensity, [Error], Area, [mask] Error is only returned if dataerr is not None mask is only returned if returnmask is True Relies heavily (completely) on radint(). """ if isinstance(data, np.ndarray): data = data.astype(np.double) if isinstance(dataerr, np.ndarray): dataerr = dataerr.astype(np.double) if isinstance(mask, np.ndarray): mask = mask.astype(np.uint8) return radint(data, dataerr, -1, -1, -1, 1.0 * bcx, 1.0 * bcy, mask, pix, returnavgpix, phi0, dphi, returnmask, symmetric_sector, doslice, False, errorpropagation, autoqrange_linear)
python
def radintpix(data, dataerr, bcx, bcy, mask=None, pix=None, returnavgpix=False, phi0=0, dphi=0, returnmask=False, symmetric_sector=False, doslice=False, errorpropagation=2, autoqrange_linear=True): """Radial integration (averaging) on the detector plane Inputs: data: scattering pattern matrix (np.ndarray, dtype: np.double) dataerr: error matrix (np.ndarray, dtype: np.double; or None) bcx, bcy: beam position, counting from 1 mask: mask matrix (np.ndarray, dtype: np.uint8) pix: pixel distance values (abscissa) from origin. If None, auto-determine. returnavgpix: if the averaged pixel values should be returned phi0: starting angle (radian) for sector integration. If doslice is True, this is the angle of the slice. dphi: angular width (radian) of the sector or width (pixels) of the slice. If negative or zero, full radial average is requested. returnmask: if the effective mask matrix is to be returned symmetric_sector: the sector defined by phi0+pi is also to be used for integration. doslice: if slicing is to be done instead of sector averaging. autoqrange_linear: if the automatically determined q-range is to be linspace-d. Otherwise log10 spacing will be applied. Outputs: pix, Intensity, [Error], Area, [mask] Error is only returned if dataerr is not None mask is only returned if returnmask is True Relies heavily (completely) on radint(). """ if isinstance(data, np.ndarray): data = data.astype(np.double) if isinstance(dataerr, np.ndarray): dataerr = dataerr.astype(np.double) if isinstance(mask, np.ndarray): mask = mask.astype(np.uint8) return radint(data, dataerr, -1, -1, -1, 1.0 * bcx, 1.0 * bcy, mask, pix, returnavgpix, phi0, dphi, returnmask, symmetric_sector, doslice, False, errorpropagation, autoqrange_linear)
[ "def", "radintpix", "(", "data", ",", "dataerr", ",", "bcx", ",", "bcy", ",", "mask", "=", "None", ",", "pix", "=", "None", ",", "returnavgpix", "=", "False", ",", "phi0", "=", "0", ",", "dphi", "=", "0", ",", "returnmask", "=", "False", ",", "symmetric_sector", "=", "False", ",", "doslice", "=", "False", ",", "errorpropagation", "=", "2", ",", "autoqrange_linear", "=", "True", ")", ":", "if", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "data", "=", "data", ".", "astype", "(", "np", ".", "double", ")", "if", "isinstance", "(", "dataerr", ",", "np", ".", "ndarray", ")", ":", "dataerr", "=", "dataerr", ".", "astype", "(", "np", ".", "double", ")", "if", "isinstance", "(", "mask", ",", "np", ".", "ndarray", ")", ":", "mask", "=", "mask", ".", "astype", "(", "np", ".", "uint8", ")", "return", "radint", "(", "data", ",", "dataerr", ",", "-", "1", ",", "-", "1", ",", "-", "1", ",", "1.0", "*", "bcx", ",", "1.0", "*", "bcy", ",", "mask", ",", "pix", ",", "returnavgpix", ",", "phi0", ",", "dphi", ",", "returnmask", ",", "symmetric_sector", ",", "doslice", ",", "False", ",", "errorpropagation", ",", "autoqrange_linear", ")" ]
Radial integration (averaging) on the detector plane Inputs: data: scattering pattern matrix (np.ndarray, dtype: np.double) dataerr: error matrix (np.ndarray, dtype: np.double; or None) bcx, bcy: beam position, counting from 1 mask: mask matrix (np.ndarray, dtype: np.uint8) pix: pixel distance values (abscissa) from origin. If None, auto-determine. returnavgpix: if the averaged pixel values should be returned phi0: starting angle (radian) for sector integration. If doslice is True, this is the angle of the slice. dphi: angular width (radian) of the sector or width (pixels) of the slice. If negative or zero, full radial average is requested. returnmask: if the effective mask matrix is to be returned symmetric_sector: the sector defined by phi0+pi is also to be used for integration. doslice: if slicing is to be done instead of sector averaging. autoqrange_linear: if the automatically determined q-range is to be linspace-d. Otherwise log10 spacing will be applied. Outputs: pix, Intensity, [Error], Area, [mask] Error is only returned if dataerr is not None mask is only returned if returnmask is True Relies heavily (completely) on radint().
[ "Radial", "integration", "(", "averaging", ")", "on", "the", "detector", "plane" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/integrate.py#L10-L48
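A minimal usage sketch of a full radial average (dphi <= 0, the default); the synthetic frame and beam position are placeholders:

import numpy as np
from sastool.utils2d.integrate import radintpix

img = np.random.poisson(100, (256, 256)).astype(np.double)  # synthetic detector frame
err = np.sqrt(img)                                          # Poisson error estimate
bcx, bcy = 128.5, 130.2                                     # beam position, counting from 1
pix, intensity, error, area = radintpix(img, err, bcx, bcy)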
awacha/sastool
sastool/utils2d/integrate.py
azimintpix
def azimintpix(data, dataerr, bcx, bcy, mask=None, Ntheta=100, pixmin=0, pixmax=np.inf, returnmask=False, errorpropagation=2): """Azimuthal integration (averaging) on the detector plane Inputs: data: scattering pattern matrix (np.ndarray, dtype: np.double) dataerr: error matrix (np.ndarray, dtype: np.double; or None) bcx, bcy: beam position, counting from 1 mask: mask matrix (np.ndarray, dtype: np.uint8) Ntheta: Number of points in the abscissa (azimuth angle) pixmin: smallest distance from the origin in pixels pixmax: largest distance from the origin in pixels returnmask: if the effective mask matrix is to be returned Outputs: theta, Intensity, [Error], Area, [mask] Error is only returned if dataerr is not None mask is only returned if returnmask is True Relies heavily (completely) on azimint(). """ if isinstance(data, np.ndarray): data = data.astype(np.double) if isinstance(dataerr, np.ndarray): dataerr = dataerr.astype(np.double) if isinstance(mask, np.ndarray): mask = mask.astype(np.uint8) return azimint(data, dataerr, -1, -1, - 1, bcx, bcy, mask, Ntheta, pixmin, pixmax, returnmask, errorpropagation)
python
def azimintpix(data, dataerr, bcx, bcy, mask=None, Ntheta=100, pixmin=0, pixmax=np.inf, returnmask=False, errorpropagation=2): """Azimuthal integration (averaging) on the detector plane Inputs: data: scattering pattern matrix (np.ndarray, dtype: np.double) dataerr: error matrix (np.ndarray, dtype: np.double; or None) bcx, bcy: beam position, counting from 1 mask: mask matrix (np.ndarray, dtype: np.uint8) Ntheta: Number of points in the abscissa (azimuth angle) pixmin: smallest distance from the origin in pixels pixmax: largest distance from the origin in pixels returnmask: if the effective mask matrix is to be returned Outputs: theta, Intensity, [Error], Area, [mask] Error is only returned if dataerr is not None mask is only returned if returnmask is True Relies heavily (completely) on azimint(). """ if isinstance(data, np.ndarray): data = data.astype(np.double) if isinstance(dataerr, np.ndarray): dataerr = dataerr.astype(np.double) if isinstance(mask, np.ndarray): mask = mask.astype(np.uint8) return azimint(data, dataerr, -1, -1, - 1, bcx, bcy, mask, Ntheta, pixmin, pixmax, returnmask, errorpropagation)
[ "def", "azimintpix", "(", "data", ",", "dataerr", ",", "bcx", ",", "bcy", ",", "mask", "=", "None", ",", "Ntheta", "=", "100", ",", "pixmin", "=", "0", ",", "pixmax", "=", "np", ".", "inf", ",", "returnmask", "=", "False", ",", "errorpropagation", "=", "2", ")", ":", "if", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "data", "=", "data", ".", "astype", "(", "np", ".", "double", ")", "if", "isinstance", "(", "dataerr", ",", "np", ".", "ndarray", ")", ":", "dataerr", "=", "dataerr", ".", "astype", "(", "np", ".", "double", ")", "if", "isinstance", "(", "mask", ",", "np", ".", "ndarray", ")", ":", "mask", "=", "mask", ".", "astype", "(", "np", ".", "uint8", ")", "return", "azimint", "(", "data", ",", "dataerr", ",", "-", "1", ",", "-", "1", ",", "-", "1", ",", "bcx", ",", "bcy", ",", "mask", ",", "Ntheta", ",", "pixmin", ",", "pixmax", ",", "returnmask", ",", "errorpropagation", ")" ]
Azimuthal integration (averaging) on the detector plane

Inputs:
    data: scattering pattern matrix (np.ndarray, dtype: np.double)
    dataerr: error matrix (np.ndarray, dtype: np.double; or None)
    bcx, bcy: beam position, counting from 1
    mask: mask matrix (np.ndarray, dtype: np.uint8)
    Ntheta: Number of points in the abscissa (azimuth angle)
    pixmin: smallest distance from the origin in pixels
    pixmax: largest distance from the origin in pixels
    returnmask: if the effective mask matrix is to be returned

Outputs: theta, Intensity, [Error], Area, [mask]
    Error is only returned if dataerr is not None
    mask is only returned if returnmask is True

Relies heavily (completely) on azimint().
[ "Azimuthal", "integration", "(", "averaging", ")", "on", "the", "detector", "plane" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/integrate.py#L51-L79
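A minimal usage sketch for azimintpix(), assuming sastool (with its compiled utils2d extension) is importable; the detector image and beam position below are synthetic.

import numpy as np
from sastool.utils2d.integrate import azimintpix

data = np.random.poisson(100, size=(256, 256)).astype(np.double)
bcx, bcy = 128.0, 128.0      # beam position, counting from 1
# dataerr=None and returnmask=False, so the result is (theta, Intensity, Area).
theta, intensity, area = azimintpix(data, None, bcx, bcy, Ntheta=180,
                                    pixmin=10, pixmax=100)
print(theta.shape, intensity.shape, area.shape)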
awacha/sastool
sastool/misc/searchpath.py
find_subdirs
def find_subdirs(startdir='.', recursion_depth=None):
    """Find all subdirectories of a directory.

    Inputs:
        startdir: directory to start with. Defaults to the current folder.
        recursion_depth: number of levels to traverse. None is infinite.

    Output: a list of absolute names of subfolders.

    Examples:
        >>> find_subdirs('dir', 0)  # returns just ['dir']

        >>> find_subdirs('dir', 1)  # returns ['dir'] plus all direct
                                    # (first-level) subdirs of 'dir'.
    """
    startdir = os.path.expanduser(startdir)
    # A recursion depth of zero means: do not descend at all (this case was
    # previously lumped together with depth 1, contradicting the docstring).
    if (recursion_depth is not None) and (recursion_depth < 1):
        return [startdir]
    direct_subdirs = [os.path.join(startdir, x) for x in os.listdir(startdir)
                      if os.path.isdir(os.path.join(startdir, x))]
    if recursion_depth is None:
        next_recursion_depth = None
    else:
        next_recursion_depth = recursion_depth - 1
    if (recursion_depth is not None) and (recursion_depth <= 1):
        return [startdir] + direct_subdirs
    else:
        subdirs = []
        for d in direct_subdirs:
            subdirs.extend(find_subdirs(d, next_recursion_depth))
        return [startdir] + subdirs
python
def find_subdirs(startdir='.', recursion_depth=None):
    """Find all subdirectories of a directory.

    Inputs:
        startdir: directory to start with. Defaults to the current folder.
        recursion_depth: number of levels to traverse. None is infinite.

    Output: a list of absolute names of subfolders.

    Examples:
        >>> find_subdirs('dir', 0)  # returns just ['dir']

        >>> find_subdirs('dir', 1)  # returns ['dir'] plus all direct
                                    # (first-level) subdirs of 'dir'.
    """
    startdir = os.path.expanduser(startdir)
    # A recursion depth of zero means: do not descend at all (this case was
    # previously lumped together with depth 1, contradicting the docstring).
    if (recursion_depth is not None) and (recursion_depth < 1):
        return [startdir]
    direct_subdirs = [os.path.join(startdir, x) for x in os.listdir(startdir)
                      if os.path.isdir(os.path.join(startdir, x))]
    if recursion_depth is None:
        next_recursion_depth = None
    else:
        next_recursion_depth = recursion_depth - 1
    if (recursion_depth is not None) and (recursion_depth <= 1):
        return [startdir] + direct_subdirs
    else:
        subdirs = []
        for d in direct_subdirs:
            subdirs.extend(find_subdirs(d, next_recursion_depth))
        return [startdir] + subdirs
[ "def", "find_subdirs", "(", "startdir", "=", "'.'", ",", "recursion_depth", "=", "None", ")", ":", "startdir", "=", "os", ".", "path", ".", "expanduser", "(", "startdir", ")", "direct_subdirs", "=", "[", "os", ".", "path", ".", "join", "(", "startdir", ",", "x", ")", "for", "x", "in", "os", ".", "listdir", "(", "startdir", ")", "if", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "startdir", ",", "x", ")", ")", "]", "if", "recursion_depth", "is", "None", ":", "next_recursion_depth", "=", "None", "else", ":", "next_recursion_depth", "=", "recursion_depth", "-", "1", "if", "(", "recursion_depth", "is", "not", "None", ")", "and", "(", "recursion_depth", "<=", "1", ")", ":", "return", "[", "startdir", "]", "+", "direct_subdirs", "else", ":", "subdirs", "=", "[", "]", "for", "d", "in", "direct_subdirs", ":", "subdirs", ".", "extend", "(", "find_subdirs", "(", "d", ",", "next_recursion_depth", ")", ")", "return", "[", "startdir", "]", "+", "subdirs" ]
Find all subdirectories of a directory.

Inputs:
    startdir: directory to start with. Defaults to the current folder.
    recursion_depth: number of levels to traverse. None is infinite.

Output: a list of absolute names of subfolders.

Examples:
    >>> find_subdirs('dir', 0)  # returns just ['dir']

    >>> find_subdirs('dir', 1)  # returns ['dir'] plus all direct
                                # (first-level) subdirs of 'dir'.
[ "Find", "all", "subdirectory", "of", "a", "directory", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/searchpath.py#L107-L135
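A short usage sketch for find_subdirs(); the start directory is hypothetical.

from sastool.misc.searchpath import find_subdirs

# The start directory itself plus up to two levels of subdirectories.
for d in find_subdirs('~/projects', recursion_depth=2):  # hypothetical path
    print(d)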
awacha/sastool
sastool/misc/basicfit.py
findpeak
def findpeak(x, y, dy=None, position=None, hwhm=None, baseline=None,
             amplitude=None, curve='Lorentz'):
    """Find a (positive) peak in the dataset.

    This function is deprecated, please consider using findpeak_single()
    instead.

    Inputs:
        x, y, dy: abscissa, ordinate and the error of the ordinate
            (can be None)
        position, hwhm, baseline, amplitude: first guesses for the named
            parameters
        curve: 'Gauss' or 'Lorentz' (default)

    Outputs:
        peak position, error of peak position, hwhm, error of hwhm,
        baseline, error of baseline, amplitude, error of amplitude.

    Notes:
        A Gauss or a Lorentz curve is fitted, depending on the value of
        'curve'.
    """
    warnings.warn('Function findpeak() is deprecated, please use '
                  'findpeak_single() instead.', DeprecationWarning)
    pos, hwhm, baseline, ampl = findpeak_single(x, y, dy, position, hwhm,
                                                baseline, amplitude, curve)
    return (pos.val, pos.err, hwhm.val, hwhm.err, baseline.val, baseline.err,
            ampl.val, ampl.err)
python
def findpeak(x, y, dy=None, position=None, hwhm=None, baseline=None,
             amplitude=None, curve='Lorentz'):
    """Find a (positive) peak in the dataset.

    This function is deprecated, please consider using findpeak_single()
    instead.

    Inputs:
        x, y, dy: abscissa, ordinate and the error of the ordinate
            (can be None)
        position, hwhm, baseline, amplitude: first guesses for the named
            parameters
        curve: 'Gauss' or 'Lorentz' (default)

    Outputs:
        peak position, error of peak position, hwhm, error of hwhm,
        baseline, error of baseline, amplitude, error of amplitude.

    Notes:
        A Gauss or a Lorentz curve is fitted, depending on the value of
        'curve'.
    """
    warnings.warn('Function findpeak() is deprecated, please use '
                  'findpeak_single() instead.', DeprecationWarning)
    pos, hwhm, baseline, ampl = findpeak_single(x, y, dy, position, hwhm,
                                                baseline, amplitude, curve)
    return (pos.val, pos.err, hwhm.val, hwhm.err, baseline.val, baseline.err,
            ampl.val, ampl.err)
[ "def", "findpeak", "(", "x", ",", "y", ",", "dy", "=", "None", ",", "position", "=", "None", ",", "hwhm", "=", "None", ",", "baseline", "=", "None", ",", "amplitude", "=", "None", ",", "curve", "=", "'Lorentz'", ")", ":", "warnings", ".", "warn", "(", "'Function findpeak() is deprecated, please use findpeak_single() instead.'", ",", "DeprecationWarning", ")", "pos", ",", "hwhm", ",", "baseline", ",", "ampl", "=", "findpeak_single", "(", "x", ",", "y", ",", "dy", ",", "position", ",", "hwhm", ",", "baseline", ",", "amplitude", ",", "curve", ")", "return", "pos", ".", "val", ",", "pos", ".", "err", ",", "hwhm", ".", "val", ",", "hwhm", ".", "err", ",", "baseline", ".", "val", ",", "baseline", ".", "err", ",", "ampl", ".", "val", ",", "ampl", ".", "err" ]
Find a (positive) peak in the dataset.

This function is deprecated, please consider using findpeak_single()
instead.

Inputs:
    x, y, dy: abscissa, ordinate and the error of the ordinate (can be None)
    position, hwhm, baseline, amplitude: first guesses for the named
        parameters
    curve: 'Gauss' or 'Lorentz' (default)

Outputs:
    peak position, error of peak position, hwhm, error of hwhm, baseline,
    error of baseline, amplitude, error of amplitude.

Notes:
    A Gauss or a Lorentz curve is fitted, depending on the value of 'curve'.
[ "Find", "a", "(", "positive", ")", "peak", "in", "the", "dataset", ".", "This", "function", "is", "deprecated", "please", "consider", "using", "findpeak_single", "()", "instead", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/basicfit.py#L18-L38
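A sketch of the deprecated findpeak() interface, which flattens the ErrorValue pairs of findpeak_single() into eight floats; new code should call findpeak_single() directly. The data below are synthetic.

import numpy as np
from sastool.misc.basicfit import findpeak

x = np.linspace(-3, 3, 151)
y = 4 / (1 + (x / 0.4) ** 2) + 0.5          # Lorentzian on a flat baseline
pos, dpos, hwhm, dhwhm, base, dbase, ampl, dampl = findpeak(x, y)
print(pos, dpos)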
awacha/sastool
sastool/misc/basicfit.py
findpeak_single
def findpeak_single(x, y, dy=None, position=None, hwhm=None, baseline=None,
                    amplitude=None, curve='Lorentz', return_stat=False,
                    signs=(-1, 1), return_x=None):
    """Find a (positive or negative) peak in the dataset.

    Inputs:
        x, y, dy: abscissa, ordinate and the error of the ordinate
            (can be None)
        position, hwhm, baseline, amplitude: first guesses for the named
            parameters
        curve: 'Gauss' or 'Lorentz' (default)
        return_stat: return fitting statistics from easylsq.nlsq_fit()
        signs: a tuple, can be (1,), (-1,), (1,-1). Will try these signs
            for the peak amplitude
        return_x: abscissa on which the fitted function form has to be
            evaluated

    Outputs: peak position, hwhm, baseline, amplitude[, stat][, peakfunction]

        where:
            peak position, hwhm, baseline, amplitude are ErrorValue
                instances.
            stat is the statistics dictionary, returned only if return_stat
                is True
            peakfunction is the fitted peak evaluated at return_x if it is
                not None.

    Notes:
        A Gauss or a Lorentz curve is fitted, depending on the value of
        'curve'. The abscissa should be sorted, ascending.
    """
    y_orig = y
    if dy is None:
        dy = np.ones_like(x)
    if curve.upper().startswith('GAUSS'):
        def fitfunc(x_, amplitude_, position_, hwhm_, baseline_):
            return amplitude_ * np.exp(-0.5 * (x_ - position_) ** 2 / hwhm_ ** 2) + baseline_
    elif curve.upper().startswith('LORENTZ'):
        def fitfunc(x_, amplitude_, position_, hwhm_, baseline_):
            return amplitude_ * hwhm_ ** 2 / (hwhm_ ** 2 + (position_ - x_) ** 2) + baseline_
    else:
        raise ValueError('Invalid curve type: {}'.format(curve))
    results = []
    # We try fitting a positive and a negative peak and return the better
    # fit (where R2 is larger).
    for sign in signs:
        init_params = {'position': position, 'hwhm': hwhm,
                       'baseline': baseline, 'amplitude': amplitude}
        y = y_orig * sign
        if init_params['position'] is None:
            init_params['position'] = x[y == y.max()][0]
        if init_params['hwhm'] is None:
            init_params['hwhm'] = 0.5 * (x.max() - x.min())
        if init_params['baseline'] is None:
            init_params['baseline'] = y.min()
        if init_params['amplitude'] is None:
            init_params['amplitude'] = y.max() - init_params['baseline']
        results.append(nlsq_fit(x, y, dy, fitfunc,
                                (init_params['amplitude'],
                                 init_params['position'],
                                 init_params['hwhm'],
                                 init_params['baseline'])) + (sign,))
    max_R2 = max([r[2]['R2'] for r in results])
    p, dp, stat, sign = [r for r in results if r[2]['R2'] == max_R2][0]
    retval = [ErrorValue(p[1], dp[1]), ErrorValue(abs(p[2]), dp[2]),
              sign * ErrorValue(p[3], dp[3]), sign * ErrorValue(p[0], dp[0])]
    if return_stat:
        stat['func_value'] = stat['func_value'] * sign
        retval.append(stat)
    if return_x is not None:
        retval.append(sign * fitfunc(return_x, p[0], p[1], p[2], p[3]))
    return tuple(retval)
python
def findpeak_single(x, y, dy=None, position=None, hwhm=None, baseline=None,
                    amplitude=None, curve='Lorentz', return_stat=False,
                    signs=(-1, 1), return_x=None):
    """Find a (positive or negative) peak in the dataset.

    Inputs:
        x, y, dy: abscissa, ordinate and the error of the ordinate
            (can be None)
        position, hwhm, baseline, amplitude: first guesses for the named
            parameters
        curve: 'Gauss' or 'Lorentz' (default)
        return_stat: return fitting statistics from easylsq.nlsq_fit()
        signs: a tuple, can be (1,), (-1,), (1,-1). Will try these signs
            for the peak amplitude
        return_x: abscissa on which the fitted function form has to be
            evaluated

    Outputs: peak position, hwhm, baseline, amplitude[, stat][, peakfunction]

        where:
            peak position, hwhm, baseline, amplitude are ErrorValue
                instances.
            stat is the statistics dictionary, returned only if return_stat
                is True
            peakfunction is the fitted peak evaluated at return_x if it is
                not None.

    Notes:
        A Gauss or a Lorentz curve is fitted, depending on the value of
        'curve'. The abscissa should be sorted, ascending.
    """
    y_orig = y
    if dy is None:
        dy = np.ones_like(x)
    if curve.upper().startswith('GAUSS'):
        def fitfunc(x_, amplitude_, position_, hwhm_, baseline_):
            return amplitude_ * np.exp(-0.5 * (x_ - position_) ** 2 / hwhm_ ** 2) + baseline_
    elif curve.upper().startswith('LORENTZ'):
        def fitfunc(x_, amplitude_, position_, hwhm_, baseline_):
            return amplitude_ * hwhm_ ** 2 / (hwhm_ ** 2 + (position_ - x_) ** 2) + baseline_
    else:
        raise ValueError('Invalid curve type: {}'.format(curve))
    results = []
    # We try fitting a positive and a negative peak and return the better
    # fit (where R2 is larger).
    for sign in signs:
        init_params = {'position': position, 'hwhm': hwhm,
                       'baseline': baseline, 'amplitude': amplitude}
        y = y_orig * sign
        if init_params['position'] is None:
            init_params['position'] = x[y == y.max()][0]
        if init_params['hwhm'] is None:
            init_params['hwhm'] = 0.5 * (x.max() - x.min())
        if init_params['baseline'] is None:
            init_params['baseline'] = y.min()
        if init_params['amplitude'] is None:
            init_params['amplitude'] = y.max() - init_params['baseline']
        results.append(nlsq_fit(x, y, dy, fitfunc,
                                (init_params['amplitude'],
                                 init_params['position'],
                                 init_params['hwhm'],
                                 init_params['baseline'])) + (sign,))
    max_R2 = max([r[2]['R2'] for r in results])
    p, dp, stat, sign = [r for r in results if r[2]['R2'] == max_R2][0]
    retval = [ErrorValue(p[1], dp[1]), ErrorValue(abs(p[2]), dp[2]),
              sign * ErrorValue(p[3], dp[3]), sign * ErrorValue(p[0], dp[0])]
    if return_stat:
        stat['func_value'] = stat['func_value'] * sign
        retval.append(stat)
    if return_x is not None:
        retval.append(sign * fitfunc(return_x, p[0], p[1], p[2], p[3]))
    return tuple(retval)
[ "def", "findpeak_single", "(", "x", ",", "y", ",", "dy", "=", "None", ",", "position", "=", "None", ",", "hwhm", "=", "None", ",", "baseline", "=", "None", ",", "amplitude", "=", "None", ",", "curve", "=", "'Lorentz'", ",", "return_stat", "=", "False", ",", "signs", "=", "(", "-", "1", ",", "1", ")", ",", "return_x", "=", "None", ")", ":", "y_orig", "=", "y", "if", "dy", "is", "None", ":", "dy", "=", "np", ".", "ones_like", "(", "x", ")", "if", "curve", ".", "upper", "(", ")", ".", "startswith", "(", "'GAUSS'", ")", ":", "def", "fitfunc", "(", "x_", ",", "amplitude_", ",", "position_", ",", "hwhm_", ",", "baseline_", ")", ":", "return", "amplitude_", "*", "np", ".", "exp", "(", "-", "0.5", "*", "(", "x_", "-", "position_", ")", "**", "2", "/", "hwhm_", "**", "2", ")", "+", "baseline_", "elif", "curve", ".", "upper", "(", ")", ".", "startswith", "(", "'LORENTZ'", ")", ":", "def", "fitfunc", "(", "x_", ",", "amplitude_", ",", "position_", ",", "hwhm_", ",", "baseline_", ")", ":", "return", "amplitude_", "*", "hwhm_", "**", "2", "/", "(", "hwhm_", "**", "2", "+", "(", "position_", "-", "x_", ")", "**", "2", ")", "+", "baseline_", "else", ":", "raise", "ValueError", "(", "'Invalid curve type: {}'", ".", "format", "(", "curve", ")", ")", "results", "=", "[", "]", "# we try fitting a positive and a negative peak and return the better fit (where R2 is larger)", "for", "sign", "in", "signs", ":", "init_params", "=", "{", "'position'", ":", "position", ",", "'hwhm'", ":", "hwhm", ",", "'baseline'", ":", "baseline", ",", "'amplitude'", ":", "amplitude", "}", "y", "=", "y_orig", "*", "sign", "if", "init_params", "[", "'position'", "]", "is", "None", ":", "init_params", "[", "'position'", "]", "=", "x", "[", "y", "==", "y", ".", "max", "(", ")", "]", "[", "0", "]", "if", "init_params", "[", "'hwhm'", "]", "is", "None", ":", "init_params", "[", "'hwhm'", "]", "=", "0.5", "*", "(", "x", ".", "max", "(", ")", "-", "x", ".", "min", "(", ")", ")", "if", "init_params", "[", "'baseline'", "]", "is", "None", ":", "init_params", "[", "'baseline'", "]", "=", "y", ".", "min", "(", ")", "if", "init_params", "[", "'amplitude'", "]", "is", "None", ":", "init_params", "[", "'amplitude'", "]", "=", "y", ".", "max", "(", ")", "-", "init_params", "[", "'baseline'", "]", "results", ".", "append", "(", "nlsq_fit", "(", "x", ",", "y", ",", "dy", ",", "fitfunc", ",", "(", "init_params", "[", "'amplitude'", "]", ",", "init_params", "[", "'position'", "]", ",", "init_params", "[", "'hwhm'", "]", ",", "init_params", "[", "'baseline'", "]", ")", ")", "+", "(", "sign", ",", ")", ")", "max_R2", "=", "max", "(", "[", "r", "[", "2", "]", "[", "'R2'", "]", "for", "r", "in", "results", "]", ")", "p", ",", "dp", ",", "stat", ",", "sign", "=", "[", "r", "for", "r", "in", "results", "if", "r", "[", "2", "]", "[", "'R2'", "]", "==", "max_R2", "]", "[", "0", "]", "retval", "=", "[", "ErrorValue", "(", "p", "[", "1", "]", ",", "dp", "[", "1", "]", ")", ",", "ErrorValue", "(", "abs", "(", "p", "[", "2", "]", ")", ",", "dp", "[", "2", "]", ")", ",", "sign", "*", "ErrorValue", "(", "p", "[", "3", "]", ",", "dp", "[", "3", "]", ")", ",", "sign", "*", "ErrorValue", "(", "p", "[", "0", "]", ",", "dp", "[", "0", "]", ")", "]", "if", "return_stat", ":", "stat", "[", "'func_value'", "]", "=", "stat", "[", "'func_value'", "]", "*", "sign", "retval", ".", "append", "(", "stat", ")", "if", "return_x", "is", "not", "None", ":", "retval", ".", "append", "(", "sign", "*", "fitfunc", "(", "return_x", ",", "p", "[", "0", "]", ",", "p", "[", "1", "]", 
",", "p", "[", "2", "]", ",", "p", "[", "3", "]", ")", ")", "return", "tuple", "(", "retval", ")" ]
Find a (positive or negative) peak in the dataset.

Inputs:
    x, y, dy: abscissa, ordinate and the error of the ordinate (can be None)
    position, hwhm, baseline, amplitude: first guesses for the named
        parameters
    curve: 'Gauss' or 'Lorentz' (default)
    return_stat: return fitting statistics from easylsq.nlsq_fit()
    signs: a tuple, can be (1,), (-1,), (1,-1). Will try these signs for
        the peak amplitude
    return_x: abscissa on which the fitted function form has to be evaluated

Outputs: peak position, hwhm, baseline, amplitude[, stat][, peakfunction]

    where:
        peak position, hwhm, baseline, amplitude are ErrorValue instances.
        stat is the statistics dictionary, returned only if return_stat
            is True
        peakfunction is the fitted peak evaluated at return_x if it is
            not None.

Notes:
    A Gauss or a Lorentz curve is fitted, depending on the value of 'curve'.
    The abscissa should be sorted, ascending.
[ "Find", "a", "(", "positive", "or", "negative", ")", "peak", "in", "the", "dataset", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/basicfit.py#L41-L97
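A usage sketch for findpeak_single() on synthetic noisy data, assuming sastool is installed; note that the results come back as ErrorValue instances even when dy is None.

import numpy as np
from sastool.misc.basicfit import findpeak_single

x = np.linspace(-5, 5, 201)
# Lorentzian: amplitude 10, centre 1.0, HWHM 0.5, baseline 2, plus noise.
y = 10 * 0.5 ** 2 / (0.5 ** 2 + (x - 1.0) ** 2) + 2
y = y + np.random.normal(0, 0.1, x.shape)
pos, hwhm, baseline, amplitude = findpeak_single(x, y)
print(pos, hwhm)     # printed as value +/- uncertainty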
awacha/sastool
sastool/misc/basicfit.py
findpeak_multi
def findpeak_multi(x, y, dy, N, Ntolerance, Nfit=None, curve='Lorentz',
                   return_xfit=False, return_stat=False):
    """Find multiple peaks in the dataset given by vectors x and y.

    Points are searched for in the dataset where the N points before and
    after have strictly lower values than them. To get rid of false
    negatives caused by fluctuations, Ntolerance is introduced. It is the
    number of outlier points to be tolerated, i.e. points on the left-hand
    side of the peak where the growing tendency breaks or on the right-hand
    side where the diminishing tendency breaks. Increasing this number,
    however, gives rise to false positives.

    Inputs:
        x, y, dy: vectors defining the data-set. dy can be None.
        N, Ntolerance: the parameters of the peak-finding routines
        Nfit: the number of points on the left and on the right of the peak
            to be used for least squares refinement of the peak positions.
        curve: the type of the curve to be fitted to the peaks. Can be
            'Lorentz' or 'Gauss'
        return_xfit: if the abscissa used for fitting is to be returned.
        return_stat: if the fitting statistics is to be returned for each
            peak.

    Outputs: position, hwhm, baseline, amplitude[, xfit][, stat]: lists

    Notes:
        Peaks are identified where the curve grows N points before and
        decreases N points after. On noisy curves Ntolerance may improve
        the results, i.e. it relaxes the 2*N criterion mentioned above.
    """
    if Nfit is None:
        Nfit = N
    # Find points where the curve grows for N points before them and
    # decreases for N points after them. To accomplish this, we create
    # an indicator array from the sign of the first derivative.
    sgndiff = np.sign(np.diff(y))
    pix = np.arange(len(x) - 1)  # pixel coordinates
    # Create an indicator array as the sum of sgndiff shifted left and
    # right: whenever an element of this is 2*N, it fulfills the criterion
    # above.
    indicator = np.zeros(len(sgndiff) - 2 * N)
    for i in range(2 * N):
        indicator += np.sign(N - i) * sgndiff[i:-2 * N + i]
    # Add the last one, since the indexing is different (would be
    # [2*N:0], which is not what we want).
    indicator += -sgndiff[2 * N:]
    # Find the positions (indices) of the peaks. The strict criterion is
    # relaxed somewhat by using the Ntolerance value. Note the use of
    # 2*Ntolerance, since each outlier point creates two outliers in
    # sgndiff (-1 instead of +1 and vice versa).
    peakpospix = pix[N:-N][indicator >= 2 * N - 2 * Ntolerance]
    ypeak = y[peakpospix]
    # Now refine the found positions by least-squares fitting. But first we
    # have to sort out other non-peaks, i.e. found points which have other
    # found points with higher values in their [-N, N] neighbourhood.
    pos = []
    ampl = []
    hwhm = []
    baseline = []
    xfit = []
    stat = []
    dy1 = None
    for i in range(len(ypeak)):
        if not [j for j in list(range(i + 1, len(ypeak))) + list(range(0, i))
                if abs(peakpospix[j] - peakpospix[i]) <= N
                and ypeak[i] < ypeak[j]]:
            # Only leave maxima.
            idx = peakpospix[i]
            if dy is not None:
                dy1 = dy[(idx - Nfit):(idx + Nfit + 1)]
            xfit_ = x[(idx - Nfit):(idx + Nfit + 1)]
            # Pass `curve` on to findpeak_single(); the original version
            # silently ignored this argument.
            pos_, hwhm_, baseline_, ampl_, stat_ = findpeak_single(
                xfit_, y[(idx - Nfit):(idx + Nfit + 1)], dy1,
                position=x[idx], curve=curve, return_stat=True)
            stat.append(stat_)
            xfit.append(xfit_)
            pos.append(pos_)
            ampl.append(ampl_)
            hwhm.append(hwhm_)
            baseline.append(baseline_)
    results = [pos, hwhm, baseline, ampl]
    if return_xfit:
        results.append(xfit)
    if return_stat:
        results.append(stat)
    return tuple(results)
python
def findpeak_multi(x, y, dy, N, Ntolerance, Nfit=None, curve='Lorentz',
                   return_xfit=False, return_stat=False):
    """Find multiple peaks in the dataset given by vectors x and y.

    Points are searched for in the dataset where the N points before and
    after have strictly lower values than them. To get rid of false
    negatives caused by fluctuations, Ntolerance is introduced. It is the
    number of outlier points to be tolerated, i.e. points on the left-hand
    side of the peak where the growing tendency breaks or on the right-hand
    side where the diminishing tendency breaks. Increasing this number,
    however, gives rise to false positives.

    Inputs:
        x, y, dy: vectors defining the data-set. dy can be None.
        N, Ntolerance: the parameters of the peak-finding routines
        Nfit: the number of points on the left and on the right of the peak
            to be used for least squares refinement of the peak positions.
        curve: the type of the curve to be fitted to the peaks. Can be
            'Lorentz' or 'Gauss'
        return_xfit: if the abscissa used for fitting is to be returned.
        return_stat: if the fitting statistics is to be returned for each
            peak.

    Outputs: position, hwhm, baseline, amplitude[, xfit][, stat]: lists

    Notes:
        Peaks are identified where the curve grows N points before and
        decreases N points after. On noisy curves Ntolerance may improve
        the results, i.e. it relaxes the 2*N criterion mentioned above.
    """
    if Nfit is None:
        Nfit = N
    # Find points where the curve grows for N points before them and
    # decreases for N points after them. To accomplish this, we create
    # an indicator array from the sign of the first derivative.
    sgndiff = np.sign(np.diff(y))
    pix = np.arange(len(x) - 1)  # pixel coordinates
    # Create an indicator array as the sum of sgndiff shifted left and
    # right: whenever an element of this is 2*N, it fulfills the criterion
    # above.
    indicator = np.zeros(len(sgndiff) - 2 * N)
    for i in range(2 * N):
        indicator += np.sign(N - i) * sgndiff[i:-2 * N + i]
    # Add the last one, since the indexing is different (would be
    # [2*N:0], which is not what we want).
    indicator += -sgndiff[2 * N:]
    # Find the positions (indices) of the peaks. The strict criterion is
    # relaxed somewhat by using the Ntolerance value. Note the use of
    # 2*Ntolerance, since each outlier point creates two outliers in
    # sgndiff (-1 instead of +1 and vice versa).
    peakpospix = pix[N:-N][indicator >= 2 * N - 2 * Ntolerance]
    ypeak = y[peakpospix]
    # Now refine the found positions by least-squares fitting. But first we
    # have to sort out other non-peaks, i.e. found points which have other
    # found points with higher values in their [-N, N] neighbourhood.
    pos = []
    ampl = []
    hwhm = []
    baseline = []
    xfit = []
    stat = []
    dy1 = None
    for i in range(len(ypeak)):
        if not [j for j in list(range(i + 1, len(ypeak))) + list(range(0, i))
                if abs(peakpospix[j] - peakpospix[i]) <= N
                and ypeak[i] < ypeak[j]]:
            # Only leave maxima.
            idx = peakpospix[i]
            if dy is not None:
                dy1 = dy[(idx - Nfit):(idx + Nfit + 1)]
            xfit_ = x[(idx - Nfit):(idx + Nfit + 1)]
            # Pass `curve` on to findpeak_single(); the original version
            # silently ignored this argument.
            pos_, hwhm_, baseline_, ampl_, stat_ = findpeak_single(
                xfit_, y[(idx - Nfit):(idx + Nfit + 1)], dy1,
                position=x[idx], curve=curve, return_stat=True)
            stat.append(stat_)
            xfit.append(xfit_)
            pos.append(pos_)
            ampl.append(ampl_)
            hwhm.append(hwhm_)
            baseline.append(baseline_)
    results = [pos, hwhm, baseline, ampl]
    if return_xfit:
        results.append(xfit)
    if return_stat:
        results.append(stat)
    return tuple(results)
[ "def", "findpeak_multi", "(", "x", ",", "y", ",", "dy", ",", "N", ",", "Ntolerance", ",", "Nfit", "=", "None", ",", "curve", "=", "'Lorentz'", ",", "return_xfit", "=", "False", ",", "return_stat", "=", "False", ")", ":", "if", "Nfit", "is", "None", ":", "Nfit", "=", "N", "# find points where the curve grows for N points before them and", "# decreases for N points after them. To accomplish this, we create", "# an indicator array of the sign of the first derivative.", "sgndiff", "=", "np", ".", "sign", "(", "np", ".", "diff", "(", "y", ")", ")", "xdiff", "=", "x", "[", ":", "-", "1", "]", "# associate difference values to the lower 'x' value.", "pix", "=", "np", ".", "arange", "(", "len", "(", "x", ")", "-", "1", ")", "# pixel coordinates create an indicator", "# array as the sum of sgndiff shifted left and right. whenever an", "# element of this is 2*N, it fulfills the criteria above.", "indicator", "=", "np", ".", "zeros", "(", "len", "(", "sgndiff", ")", "-", "2", "*", "N", ")", "for", "i", "in", "range", "(", "2", "*", "N", ")", ":", "indicator", "+=", "np", ".", "sign", "(", "N", "-", "i", ")", "*", "sgndiff", "[", "i", ":", "-", "2", "*", "N", "+", "i", "]", "# add the last one, since the indexing is different (would be", "# [2*N:0], which is not what we want)", "indicator", "+=", "-", "sgndiff", "[", "2", "*", "N", ":", "]", "# find the positions (indices) of the peak. The strict criteria is", "# relaxed somewhat by using the Ntolerance value. Note the use of", "# 2*Ntolerance, since each outlier point creates two outliers in", "# sgndiff (-1 insted of +1 and vice versa).", "peakpospix", "=", "pix", "[", "N", ":", "-", "N", "]", "[", "indicator", ">=", "2", "*", "N", "-", "2", "*", "Ntolerance", "]", "ypeak", "=", "y", "[", "peakpospix", "]", "# Now refine the found positions by least-squares fitting. But", "# first we have to sort out other non-peaks, i.e. 
found points", "# which have other found points with higher values in their [-N,N]", "# neighbourhood.", "pos", "=", "[", "]", "ampl", "=", "[", "]", "hwhm", "=", "[", "]", "baseline", "=", "[", "]", "xfit", "=", "[", "]", "stat", "=", "[", "]", "dy1", "=", "None", "for", "i", "in", "range", "(", "len", "(", "ypeak", ")", ")", ":", "if", "not", "[", "j", "for", "j", "in", "list", "(", "range", "(", "i", "+", "1", ",", "len", "(", "ypeak", ")", ")", ")", "+", "list", "(", "range", "(", "0", ",", "i", ")", ")", "if", "abs", "(", "peakpospix", "[", "j", "]", "-", "peakpospix", "[", "i", "]", ")", "<=", "N", "and", "ypeak", "[", "i", "]", "<", "ypeak", "[", "j", "]", "]", ":", "# only leave maxima.", "idx", "=", "peakpospix", "[", "i", "]", "if", "dy", "is", "not", "None", ":", "dy1", "=", "dy", "[", "(", "idx", "-", "Nfit", ")", ":", "(", "idx", "+", "Nfit", "+", "1", ")", "]", "xfit_", "=", "x", "[", "(", "idx", "-", "Nfit", ")", ":", "(", "idx", "+", "Nfit", "+", "1", ")", "]", "pos_", ",", "hwhm_", ",", "baseline_", ",", "ampl_", ",", "stat_", "=", "findpeak_single", "(", "xfit_", ",", "y", "[", "(", "idx", "-", "Nfit", ")", ":", "(", "idx", "+", "Nfit", "+", "1", ")", "]", ",", "dy1", ",", "position", "=", "x", "[", "idx", "]", ",", "return_stat", "=", "True", ")", "stat", ".", "append", "(", "stat_", ")", "xfit", ".", "append", "(", "xfit_", ")", "pos", ".", "append", "(", "pos_", ")", "ampl", ".", "append", "(", "ampl_", ")", "hwhm", ".", "append", "(", "hwhm_", ")", "baseline", ".", "append", "(", "baseline_", ")", "results", "=", "[", "pos", ",", "hwhm", ",", "baseline", ",", "ampl", "]", "if", "return_xfit", ":", "results", ".", "append", "(", "xfit", ")", "if", "return_stat", ":", "results", ".", "append", "(", "stat", ")", "return", "tuple", "(", "results", ")" ]
Find multiple peaks in the dataset given by vectors x and y.

Points are searched for in the dataset where the N points before and after
have strictly lower values than them. To get rid of false negatives caused
by fluctuations, Ntolerance is introduced. It is the number of outlier
points to be tolerated, i.e. points on the left-hand side of the peak where
the growing tendency breaks or on the right-hand side where the diminishing
tendency breaks. Increasing this number, however, gives rise to false
positives.

Inputs:
    x, y, dy: vectors defining the data-set. dy can be None.
    N, Ntolerance: the parameters of the peak-finding routines
    Nfit: the number of points on the left and on the right of the peak to
        be used for least squares refinement of the peak positions.
    curve: the type of the curve to be fitted to the peaks. Can be
        'Lorentz' or 'Gauss'
    return_xfit: if the abscissa used for fitting is to be returned.
    return_stat: if the fitting statistics is to be returned for each peak.

Outputs: position, hwhm, baseline, amplitude[, xfit][, stat]: lists

Notes:
    Peaks are identified where the curve grows N points before and
    decreases N points after. On noisy curves Ntolerance may improve the
    results, i.e. it relaxes the 2*N criterion mentioned above.
[ "Find", "multiple", "peaks", "in", "the", "dataset", "given", "by", "vectors", "x", "and", "y", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/basicfit.py#L99-L178
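A sketch for findpeak_multi() on a synthetic two-peak curve (sastool assumed installed); note that dy is a positional argument and may be None.

import numpy as np
from sastool.misc.basicfit import findpeak_multi

x = np.linspace(0, 10, 501)
y = (5 * 0.2 ** 2 / (0.2 ** 2 + (x - 3) ** 2)       # peak at x = 3
     + 3 * 0.3 ** 2 / (0.3 ** 2 + (x - 7) ** 2)     # peak at x = 7
     + np.random.normal(0, 0.02, x.shape))
positions, hwhms, baselines, amplitudes = findpeak_multi(x, y, None,
                                                         N=5, Ntolerance=1)
print([float(p) for p in positions])    # expect roughly [3.0, 7.0]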
awacha/sastool
sastool/misc/basicfit.py
findpeak_asymmetric
def findpeak_asymmetric(x, y, dy=None, curve='Lorentz', return_x=None,
                        init_parameters=None):
    """Find an asymmetric Lorentzian peak.

    Inputs:
        x: numpy array of the abscissa
        y: numpy array of the ordinate
        dy: numpy array of the errors in y (or None if not present)
        curve: string (case insensitive): if it starts with "Lorentz", a
            Lorentzian curve will be fitted. If it starts with "Gauss", a
            Gaussian will be fitted. Otherwise an error is raised.
        return_x: numpy array of the x values at which the best fitting
            peak function should be evaluated and returned
        init_parameters: either None, or a list of [center, hwhm_left,
            hwhm_right, baseline, amplitude]: initial parameters to start
            fitting from.

    Results: center, hwhm_left, hwhm_right, baseline, amplitude [, y_fitted]

        The fitted parameters are returned as floats if dy was None or
        ErrorValue instances if dy was not None. y_fitted is only returned
        if return_x was not None.

    Note:
        1) The dataset must contain only the peak.
        2) A positive peak will be fitted.
        3) The peak center must be in the given range.
    """
    # Discard non-finite points before fitting.
    idx = np.logical_and(np.isfinite(x), np.isfinite(y))
    if dy is not None:
        idx = np.logical_and(idx, np.isfinite(dy))
    x = x[idx]
    y = y[idx]
    if dy is not None:
        dy = dy[idx]
    if curve.lower().startswith('loren'):
        lorentzian = True
    elif curve.lower().startswith('gauss'):
        lorentzian = False
    else:
        raise ValueError('Unknown peak type {}'.format(curve))

    def peakfunc(pars, x, lorentzian=True):
        x0, sigma1, sigma2, C, A = pars
        result = np.empty_like(x)
        if lorentzian:
            result[x < x0] = A * sigma1 ** 2 / (sigma1 ** 2 + (x0 - x[x < x0]) ** 2) + C
            result[x >= x0] = A * sigma2 ** 2 / (sigma2 ** 2 + (x0 - x[x >= x0]) ** 2) + C
        else:
            # Use sigma2 on the right-hand side and add the baseline here,
            # too; the original version used sigma1 for both sides and
            # omitted C in the Gaussian branch.
            result[x < x0] = A * np.exp(-(x[x < x0] - x0) ** 2 / (2 * sigma1 ** 2)) + C
            result[x >= x0] = A * np.exp(-(x[x >= x0] - x0) ** 2 / (2 * sigma2 ** 2)) + C
        return result

    def fitfunc(pars, x, y, dy, lorentzian=True):
        yfit = peakfunc(pars, x, lorentzian)
        if dy is None:
            return yfit - y
        else:
            return (yfit - y) / dy

    if init_parameters is not None:
        pos, hwhmleft, hwhmright, baseline, amplitude = [
            float(p) for p in init_parameters]
    else:
        baseline = y.min()
        amplitude = y.max() - baseline
        hwhmleft = hwhmright = (x.max() - x.min()) * 0.5
        pos = x[np.argmax(y)]
    result = scipy.optimize.least_squares(
        fitfunc, [pos, hwhmleft, hwhmright, baseline, amplitude],
        args=(x, y, dy, lorentzian),
        bounds=([x.min(), 0, 0, -np.inf, 0],
                [x.max(), np.inf, np.inf, np.inf, np.inf]))
    if not result.success:
        raise RuntimeError('Error while peak fitting: {}'.format(result.message))
    if dy is None:
        ret = (result.x[0], result.x[1], result.x[2], result.x[3], result.x[4])
    else:
        # Estimate the covariance matrix from the Jacobian via SVD, in the
        # same way as scipy.optimize.curve_fit does.
        # noinspection PyTupleAssignmentBalance
        _, s, VT = svd(result.jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(result.jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s ** 2, VT)
        ret = tuple([ErrorValue(result.x[i], pcov[i, i] ** 0.5)
                     for i in range(5)])
    if return_x is not None:
        ret = ret + (peakfunc([float(v) for v in ret], return_x, lorentzian),)
    return ret
python
def findpeak_asymmetric(x, y, dy=None, curve='Lorentz', return_x=None,
                        init_parameters=None):
    """Find an asymmetric Lorentzian peak.

    Inputs:
        x: numpy array of the abscissa
        y: numpy array of the ordinate
        dy: numpy array of the errors in y (or None if not present)
        curve: string (case insensitive): if it starts with "Lorentz", a
            Lorentzian curve will be fitted. If it starts with "Gauss", a
            Gaussian will be fitted. Otherwise an error is raised.
        return_x: numpy array of the x values at which the best fitting
            peak function should be evaluated and returned
        init_parameters: either None, or a list of [center, hwhm_left,
            hwhm_right, baseline, amplitude]: initial parameters to start
            fitting from.

    Results: center, hwhm_left, hwhm_right, baseline, amplitude [, y_fitted]

        The fitted parameters are returned as floats if dy was None or
        ErrorValue instances if dy was not None. y_fitted is only returned
        if return_x was not None.

    Note:
        1) The dataset must contain only the peak.
        2) A positive peak will be fitted.
        3) The peak center must be in the given range.
    """
    # Discard non-finite points before fitting.
    idx = np.logical_and(np.isfinite(x), np.isfinite(y))
    if dy is not None:
        idx = np.logical_and(idx, np.isfinite(dy))
    x = x[idx]
    y = y[idx]
    if dy is not None:
        dy = dy[idx]
    if curve.lower().startswith('loren'):
        lorentzian = True
    elif curve.lower().startswith('gauss'):
        lorentzian = False
    else:
        raise ValueError('Unknown peak type {}'.format(curve))

    def peakfunc(pars, x, lorentzian=True):
        x0, sigma1, sigma2, C, A = pars
        result = np.empty_like(x)
        if lorentzian:
            result[x < x0] = A * sigma1 ** 2 / (sigma1 ** 2 + (x0 - x[x < x0]) ** 2) + C
            result[x >= x0] = A * sigma2 ** 2 / (sigma2 ** 2 + (x0 - x[x >= x0]) ** 2) + C
        else:
            # Use sigma2 on the right-hand side and add the baseline here,
            # too; the original version used sigma1 for both sides and
            # omitted C in the Gaussian branch.
            result[x < x0] = A * np.exp(-(x[x < x0] - x0) ** 2 / (2 * sigma1 ** 2)) + C
            result[x >= x0] = A * np.exp(-(x[x >= x0] - x0) ** 2 / (2 * sigma2 ** 2)) + C
        return result

    def fitfunc(pars, x, y, dy, lorentzian=True):
        yfit = peakfunc(pars, x, lorentzian)
        if dy is None:
            return yfit - y
        else:
            return (yfit - y) / dy

    if init_parameters is not None:
        pos, hwhmleft, hwhmright, baseline, amplitude = [
            float(p) for p in init_parameters]
    else:
        baseline = y.min()
        amplitude = y.max() - baseline
        hwhmleft = hwhmright = (x.max() - x.min()) * 0.5
        pos = x[np.argmax(y)]
    result = scipy.optimize.least_squares(
        fitfunc, [pos, hwhmleft, hwhmright, baseline, amplitude],
        args=(x, y, dy, lorentzian),
        bounds=([x.min(), 0, 0, -np.inf, 0],
                [x.max(), np.inf, np.inf, np.inf, np.inf]))
    if not result.success:
        raise RuntimeError('Error while peak fitting: {}'.format(result.message))
    if dy is None:
        ret = (result.x[0], result.x[1], result.x[2], result.x[3], result.x[4])
    else:
        # Estimate the covariance matrix from the Jacobian via SVD, in the
        # same way as scipy.optimize.curve_fit does.
        # noinspection PyTupleAssignmentBalance
        _, s, VT = svd(result.jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(result.jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s ** 2, VT)
        ret = tuple([ErrorValue(result.x[i], pcov[i, i] ** 0.5)
                     for i in range(5)])
    if return_x is not None:
        ret = ret + (peakfunc([float(v) for v in ret], return_x, lorentzian),)
    return ret
[ "def", "findpeak_asymmetric", "(", "x", ",", "y", ",", "dy", "=", "None", ",", "curve", "=", "'Lorentz'", ",", "return_x", "=", "None", ",", "init_parameters", "=", "None", ")", ":", "idx", "=", "np", ".", "logical_and", "(", "np", ".", "isfinite", "(", "x", ")", ",", "np", ".", "isfinite", "(", "y", ")", ")", "if", "dy", "is", "not", "None", ":", "idx", "=", "np", ".", "logical_and", "(", "idx", ",", "np", ".", "isfinite", "(", "dy", ")", ")", "x", "=", "x", "[", "idx", "]", "y", "=", "y", "[", "idx", "]", "if", "dy", "is", "not", "None", ":", "dy", "=", "dy", "[", "idx", "]", "if", "curve", ".", "lower", "(", ")", ".", "startswith", "(", "'loren'", ")", ":", "lorentzian", "=", "True", "elif", "curve", ".", "lower", "(", ")", ".", "startswith", "(", "'gauss'", ")", ":", "lorentzian", "=", "False", "else", ":", "raise", "ValueError", "(", "'Unknown peak type {}'", ".", "format", "(", "curve", ")", ")", "def", "peakfunc", "(", "pars", ",", "x", ",", "lorentzian", "=", "True", ")", ":", "x0", ",", "sigma1", ",", "sigma2", ",", "C", ",", "A", "=", "pars", "result", "=", "np", ".", "empty_like", "(", "x", ")", "if", "lorentzian", ":", "result", "[", "x", "<", "x0", "]", "=", "A", "*", "sigma1", "**", "2", "/", "(", "sigma1", "**", "2", "+", "(", "x0", "-", "x", "[", "x", "<", "x0", "]", ")", "**", "2", ")", "+", "C", "result", "[", "x", ">=", "x0", "]", "=", "A", "*", "sigma2", "**", "2", "/", "(", "sigma2", "**", "2", "+", "(", "x0", "-", "x", "[", "x", ">=", "x0", "]", ")", "**", "2", ")", "+", "C", "else", ":", "result", "[", "x", "<", "x0", "]", "=", "A", "*", "np", ".", "exp", "(", "-", "(", "x", "[", "x", "<", "x0", "]", "-", "x0", ")", "**", "2", "/", "(", "2", "*", "sigma1", "**", "2", ")", ")", "result", "[", "x", ">=", "x0", "]", "=", "A", "*", "np", ".", "exp", "(", "-", "(", "x", "[", "x", ">=", "x0", "]", "-", "x0", ")", "**", "2", "/", "(", "2", "*", "sigma1", "**", "2", ")", ")", "return", "result", "def", "fitfunc", "(", "pars", ",", "x", ",", "y", ",", "dy", ",", "lorentzian", "=", "True", ")", ":", "yfit", "=", "peakfunc", "(", "pars", ",", "x", ",", "lorentzian", ")", "if", "dy", "is", "None", ":", "return", "yfit", "-", "y", "else", ":", "return", "(", "yfit", "-", "y", ")", "/", "dy", "if", "init_parameters", "is", "not", "None", ":", "pos", ",", "hwhmleft", ",", "hwhmright", ",", "baseline", ",", "amplitude", "=", "[", "float", "(", "x", ")", "for", "x", "in", "init_parameters", "]", "else", ":", "baseline", "=", "y", ".", "min", "(", ")", "amplitude", "=", "y", ".", "max", "(", ")", "-", "baseline", "hwhmleft", "=", "hwhmright", "=", "(", "x", ".", "max", "(", ")", "-", "x", ".", "min", "(", ")", ")", "*", "0.5", "pos", "=", "x", "[", "np", ".", "argmax", "(", "y", ")", "]", "#print([pos,hwhm,hwhm,baseline,amplitude])", "result", "=", "scipy", ".", "optimize", ".", "least_squares", "(", "fitfunc", ",", "[", "pos", ",", "hwhmleft", ",", "hwhmright", ",", "baseline", ",", "amplitude", "]", ",", "args", "=", "(", "x", ",", "y", ",", "dy", ",", "lorentzian", ")", ",", "bounds", "=", "(", "[", "x", ".", "min", "(", ")", ",", "0", ",", "0", ",", "-", "np", ".", "inf", ",", "0", "]", ",", "[", "x", ".", "max", "(", ")", ",", "np", ".", "inf", ",", "np", ".", "inf", ",", "np", ".", "inf", ",", "np", ".", "inf", "]", ")", ")", "# print(result.x[0], result.x[1], result.x[2], result.x[3], result.x[4], result.message, result.success)", "if", "not", "result", ".", "success", ":", "raise", "RuntimeError", "(", "'Error while peak fitting: {}'", ".", "format", "(", "result", ".", 
"message", ")", ")", "if", "dy", "is", "None", ":", "ret", "=", "(", "result", ".", "x", "[", "0", "]", ",", "result", ".", "x", "[", "1", "]", ",", "result", ".", "x", "[", "2", "]", ",", "result", ".", "x", "[", "3", "]", ",", "result", ".", "x", "[", "4", "]", ")", "else", ":", "# noinspection PyTupleAssignmentBalance", "_", ",", "s", ",", "VT", "=", "svd", "(", "result", ".", "jac", ",", "full_matrices", "=", "False", ")", "threshold", "=", "np", ".", "finfo", "(", "float", ")", ".", "eps", "*", "max", "(", "result", ".", "jac", ".", "shape", ")", "*", "s", "[", "0", "]", "s", "=", "s", "[", "s", ">", "threshold", "]", "VT", "=", "VT", "[", ":", "s", ".", "size", "]", "pcov", "=", "np", ".", "dot", "(", "VT", ".", "T", "/", "s", "**", "2", ",", "VT", ")", "ret", "=", "tuple", "(", "[", "ErrorValue", "(", "result", ".", "x", "[", "i", "]", ",", "pcov", "[", "i", ",", "i", "]", "**", "0.5", ")", "for", "i", "in", "range", "(", "5", ")", "]", ")", "if", "return_x", "is", "not", "None", ":", "ret", "=", "ret", "+", "(", "peakfunc", "(", "[", "float", "(", "x", ")", "for", "x", "in", "ret", "]", ",", "return_x", ",", "lorentzian", ")", ",", ")", "return", "ret" ]
Find an asymmetric Lorentzian peak.

Inputs:
    x: numpy array of the abscissa
    y: numpy array of the ordinate
    dy: numpy array of the errors in y (or None if not present)
    curve: string (case insensitive): if it starts with "Lorentz", a
        Lorentzian curve will be fitted. If it starts with "Gauss", a
        Gaussian will be fitted. Otherwise an error is raised.
    return_x: numpy array of the x values at which the best fitting peak
        function should be evaluated and returned
    init_parameters: either None, or a list of [center, hwhm_left,
        hwhm_right, baseline, amplitude]: initial parameters to start
        fitting from. (Note: this order matches the unpacking in the code;
        the original docstring listed a different order.)

Results: center, hwhm_left, hwhm_right, baseline, amplitude [, y_fitted]

    The fitted parameters are returned as floats if dy was None or
    ErrorValue instances if dy was not None. y_fitted is only returned if
    return_x was not None.

Note:
    1) The dataset must contain only the peak.
    2) A positive peak will be fitted.
    3) The peak center must be in the given range.
[ "Find", "an", "asymmetric", "Lorentzian", "peak", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/basicfit.py#L181-L265
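A sketch for findpeak_asymmetric() on a synthetic peak with different left and right widths; with dy=None the parameters come back as plain floats. sastool is assumed to be installed.

import numpy as np
from sastool.misc.basicfit import findpeak_asymmetric

x = np.linspace(0, 10, 401)
# HWHM 0.3 on the left and 0.8 on the right of the centre at 4.0.
y = np.where(x < 4.0,
             5 * 0.3 ** 2 / (0.3 ** 2 + (4.0 - x) ** 2),
             5 * 0.8 ** 2 / (0.8 ** 2 + (4.0 - x) ** 2)) + 1.0
y = y + np.random.normal(0, 0.02, x.shape)
center, hwhm_left, hwhm_right, baseline, amplitude = findpeak_asymmetric(x, y)
print(center, hwhm_left, hwhm_right)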
awacha/sastool
sastool/io/onedim.py
readspecscan
def readspecscan(f, number=None):
    """Read the next spec scan in the file, which starts at the current
    position."""
    scan = None
    scannumber = None
    while True:
        l = f.readline()
        if l.startswith('#S'):
            scannumber = int(l[2:].split()[0])
            if not ((number is None) or (number == scannumber)):
                # Break the loop; will skip to the next empty line after
                # this loop.
                break
            if scan is None:
                scan = {}
            scan['number'] = scannumber
            scan['command'] = l[2:].split(None, 1)[1].strip()
            scan['data'] = []
        elif l.startswith('#C'):
            scan['comment'] = l[2:].strip()
        elif l.startswith('#D'):
            scan['datestring'] = l[2:].strip()
        elif l.startswith('#T'):
            scan['countingtime'] = float(l[2:].split()[0])
            scan['scantimeunits'] = l[2:].split()[1].strip()
        elif l.startswith('#M'):
            scan['countingcounts'] = float(l[2:].split()[0])
        elif l.startswith('#G'):
            if 'G' not in scan:
                scan['G'] = []
            scan['G'].extend([float(x) for x in l.split()[1:]])
        elif l.startswith('#P'):
            if 'positions' not in scan:
                scan['positions'] = []
            scan['positions'].extend([float(x) for x in l.split()[1:]])
        elif l.startswith('#Q'):
            pass
        elif l.startswith('#N'):
            n = [float(x) for x in l[2:].strip().split()]
            if len(n) == 1:
                scan['N'] = n[0]
            else:
                scan['N'] = n
        elif l.startswith('#L'):
            scan['Columns'] = [x.strip() for x in l[3:].split(' ')]
        elif not l:
            # End of file.
            if scan is None:
                raise SpecFileEOF
            else:
                break
        elif not l.strip():
            break  # empty line, end of scan in file.
        elif l.startswith('#'):
            # Ignore other lines starting with a hashmark.
            continue
        else:
            scan['data'].append(tuple(float(x) for x in l.split()))
    while l.strip():
        l = f.readline()
    if scan is not None:
        # np.float was removed from NumPy; the built-in float is equivalent.
        scan['data'] = np.array(
            scan['data'],
            dtype=list(zip(scan['Columns'], itertools.repeat(float))))
        return scan
    else:
        return scannumber
python
def readspecscan(f, number=None):
    """Read the next spec scan in the file, which starts at the current
    position."""
    scan = None
    scannumber = None
    while True:
        l = f.readline()
        if l.startswith('#S'):
            scannumber = int(l[2:].split()[0])
            if not ((number is None) or (number == scannumber)):
                # Break the loop; will skip to the next empty line after
                # this loop.
                break
            if scan is None:
                scan = {}
            scan['number'] = scannumber
            scan['command'] = l[2:].split(None, 1)[1].strip()
            scan['data'] = []
        elif l.startswith('#C'):
            scan['comment'] = l[2:].strip()
        elif l.startswith('#D'):
            scan['datestring'] = l[2:].strip()
        elif l.startswith('#T'):
            scan['countingtime'] = float(l[2:].split()[0])
            scan['scantimeunits'] = l[2:].split()[1].strip()
        elif l.startswith('#M'):
            scan['countingcounts'] = float(l[2:].split()[0])
        elif l.startswith('#G'):
            if 'G' not in scan:
                scan['G'] = []
            scan['G'].extend([float(x) for x in l.split()[1:]])
        elif l.startswith('#P'):
            if 'positions' not in scan:
                scan['positions'] = []
            scan['positions'].extend([float(x) for x in l.split()[1:]])
        elif l.startswith('#Q'):
            pass
        elif l.startswith('#N'):
            n = [float(x) for x in l[2:].strip().split()]
            if len(n) == 1:
                scan['N'] = n[0]
            else:
                scan['N'] = n
        elif l.startswith('#L'):
            scan['Columns'] = [x.strip() for x in l[3:].split(' ')]
        elif not l:
            # End of file.
            if scan is None:
                raise SpecFileEOF
            else:
                break
        elif not l.strip():
            break  # empty line, end of scan in file.
        elif l.startswith('#'):
            # Ignore other lines starting with a hashmark.
            continue
        else:
            scan['data'].append(tuple(float(x) for x in l.split()))
    while l.strip():
        l = f.readline()
    if scan is not None:
        # np.float was removed from NumPy; the built-in float is equivalent.
        scan['data'] = np.array(
            scan['data'],
            dtype=list(zip(scan['Columns'], itertools.repeat(float))))
        return scan
    else:
        return scannumber
[ "def", "readspecscan", "(", "f", ",", "number", "=", "None", ")", ":", "scan", "=", "None", "scannumber", "=", "None", "while", "True", ":", "l", "=", "f", ".", "readline", "(", ")", "if", "l", ".", "startswith", "(", "'#S'", ")", ":", "scannumber", "=", "int", "(", "l", "[", "2", ":", "]", ".", "split", "(", ")", "[", "0", "]", ")", "if", "not", "(", "(", "number", "is", "None", ")", "or", "(", "number", "==", "scannumber", ")", ")", ":", "# break the loop, will skip to the next empty line after this", "# loop", "break", "if", "scan", "is", "None", ":", "scan", "=", "{", "}", "scan", "[", "'number'", "]", "=", "scannumber", "scan", "[", "'command'", "]", "=", "l", "[", "2", ":", "]", ".", "split", "(", "None", ",", "1", ")", "[", "1", "]", ".", "strip", "(", ")", "scan", "[", "'data'", "]", "=", "[", "]", "elif", "l", ".", "startswith", "(", "'#C'", ")", ":", "scan", "[", "'comment'", "]", "=", "l", "[", "2", ":", "]", ".", "strip", "(", ")", "elif", "l", ".", "startswith", "(", "'#D'", ")", ":", "scan", "[", "'datestring'", "]", "=", "l", "[", "2", ":", "]", ".", "strip", "(", ")", "elif", "l", ".", "startswith", "(", "'#T'", ")", ":", "scan", "[", "'countingtime'", "]", "=", "float", "(", "l", "[", "2", ":", "]", ".", "split", "(", ")", "[", "0", "]", ")", "scan", "[", "'scantimeunits'", "]", "=", "l", "[", "2", ":", "]", ".", "split", "(", ")", "[", "1", "]", ".", "strip", "(", ")", "elif", "l", ".", "startswith", "(", "'#M'", ")", ":", "scan", "[", "'countingcounts'", "]", "=", "float", "(", "l", "[", "2", ":", "]", ".", "split", "(", ")", "[", "0", "]", ")", "elif", "l", ".", "startswith", "(", "'#G'", ")", ":", "if", "'G'", "not", "in", "scan", ":", "scan", "[", "'G'", "]", "=", "[", "]", "scan", "[", "'G'", "]", ".", "extend", "(", "[", "float", "(", "x", ")", "for", "x", "in", "l", ".", "split", "(", ")", "[", "1", ":", "]", "]", ")", "elif", "l", ".", "startswith", "(", "'#P'", ")", ":", "if", "'positions'", "not", "in", "scan", ":", "scan", "[", "'positions'", "]", "=", "[", "]", "scan", "[", "'positions'", "]", ".", "extend", "(", "[", "float", "(", "x", ")", "for", "x", "in", "l", ".", "split", "(", ")", "[", "1", ":", "]", "]", ")", "elif", "l", ".", "startswith", "(", "'#Q'", ")", ":", "pass", "elif", "l", ".", "startswith", "(", "'#N'", ")", ":", "n", "=", "[", "float", "(", "x", ")", "for", "x", "in", "l", "[", "2", ":", "]", ".", "strip", "(", ")", ".", "split", "(", ")", "]", "if", "len", "(", "n", ")", "==", "1", ":", "scan", "[", "'N'", "]", "=", "n", "[", "0", "]", "else", ":", "scan", "[", "'N'", "]", "=", "n", "elif", "l", ".", "startswith", "(", "'#L'", ")", ":", "scan", "[", "'Columns'", "]", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "l", "[", "3", ":", "]", ".", "split", "(", "' '", ")", "]", "elif", "not", "l", ":", "# end of file", "if", "scan", "is", "None", ":", "raise", "SpecFileEOF", "else", ":", "break", "elif", "not", "l", ".", "strip", "(", ")", ":", "break", "# empty line, end of scan in file.", "elif", "l", ".", "startswith", "(", "'#'", ")", ":", "# ignore other lines starting with a hashmark.", "continue", "else", ":", "scan", "[", "'data'", "]", ".", "append", "(", "tuple", "(", "float", "(", "x", ")", "for", "x", "in", "l", ".", "split", "(", ")", ")", ")", "while", "l", ".", "strip", "(", ")", ":", "l", "=", "f", ".", "readline", "(", ")", "if", "scan", "is", "not", "None", ":", "scan", "[", "'data'", "]", "=", "np", ".", "array", "(", "scan", "[", "'data'", "]", ",", "dtype", "=", "list", "(", "zip", "(", "scan", "[", 
"'Columns'", "]", ",", "itertools", ".", "repeat", "(", "np", ".", "float", ")", ")", ")", ")", "return", "scan", "else", ":", "return", "scannumber" ]
Read the next spec scan in the file, which starts at the current position.
[ "Read", "the", "next", "spec", "scan", "in", "the", "file", "which", "starts", "at", "the", "current", "position", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/onedim.py#L18-L82
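A sketch of driving readspecscan() by hand; readspec() below is the usual entry point. The file name is hypothetical, and the header-skipping loop mirrors what readspec() does before the first scan.

from sastool.io.onedim import readspecscan

with open('measurement.spec', 'rt') as f:   # hypothetical SPEC file
    # Skip the file header; scans start after the first empty line.
    while f.readline().strip():
        pass
    scan = readspecscan(f)                  # read the next (first) scan
print(scan['number'], scan['command'], scan['data'].shape)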
awacha/sastool
sastool/io/onedim.py
readspec
def readspec(filename, read_scan=None):
    """Open a SPEC file and read its content

    Inputs:
        filename: string
            the file to open
        read_scan: None, 'all' or integer
            the index of the scan to be read from the file. If None, no
            scan should be read. If 'all', all scans should be read. If a
            number, just the scan with that number should be read.

    Output: the data in the spec file in a dict.
    """
    with open(filename, 'rt') as f:
        sf = {'motors': [], 'maxscannumber': 0}
        sf['originalfilename'] = filename
        while True:
            l = f.readline()
            if l.startswith('#F'):
                sf['filename'] = l[2:].strip()
            elif l.startswith('#E'):
                sf['epoch'] = int(l[2:].strip())
                sf['datetime'] = datetime.datetime.fromtimestamp(sf['epoch'])
            elif l.startswith('#D'):
                sf['datestring'] = l[2:].strip()
            elif l.startswith('#C'):
                sf['comment'] = l[2:].strip()
            elif l.startswith('#O'):
                try:
                    l = l.split(None, 1)[1]
                except IndexError:
                    continue
                if 'motors' not in list(sf.keys()):
                    sf['motors'] = []
                sf['motors'].extend([x.strip() for x in l.split(' ')])
            elif not l.strip():
                # Empty line: signifies the end of the header part. The
                # next line will be a scan.
                break
        sf['scans'] = {}
        if read_scan is not None:
            if read_scan == 'all':
                nr = None
            else:
                nr = read_scan
            try:
                while True:
                    s = readspecscan(f, nr)
                    if isinstance(s, dict):
                        sf['scans'][s['number']] = s
                        if nr is not None:
                            break
                        sf['maxscannumber'] = max(
                            sf['maxscannumber'], s['number'])
                    elif s is not None:
                        sf['maxscannumber'] = max(sf['maxscannumber'], s)
            except SpecFileEOF:
                pass
        else:
            while True:
                l = f.readline()
                if not l:
                    break
                if l.startswith('#S'):
                    n = int(l[2:].split()[0])
                    sf['maxscannumber'] = max(sf['maxscannumber'], n)
        for n in sf['scans']:
            s = sf['scans'][n]
            s['motors'] = sf['motors']
            if 'comment' not in s:
                s['comment'] = sf['comment']
            if 'positions' not in s:
                s['positions'] = [None] * len(sf['motors'])
    return sf
python
def readspec(filename, read_scan=None):
    """Open a SPEC file and read its content

    Inputs:
        filename: string
            the file to open
        read_scan: None, 'all' or integer
            the index of the scan to be read from the file. If None, no
            scan should be read. If 'all', all scans should be read. If a
            number, just the scan with that number should be read.

    Output: the data in the spec file in a dict.
    """
    with open(filename, 'rt') as f:
        sf = {'motors': [], 'maxscannumber': 0}
        sf['originalfilename'] = filename
        while True:
            l = f.readline()
            if l.startswith('#F'):
                sf['filename'] = l[2:].strip()
            elif l.startswith('#E'):
                sf['epoch'] = int(l[2:].strip())
                sf['datetime'] = datetime.datetime.fromtimestamp(sf['epoch'])
            elif l.startswith('#D'):
                sf['datestring'] = l[2:].strip()
            elif l.startswith('#C'):
                sf['comment'] = l[2:].strip()
            elif l.startswith('#O'):
                try:
                    l = l.split(None, 1)[1]
                except IndexError:
                    continue
                if 'motors' not in list(sf.keys()):
                    sf['motors'] = []
                sf['motors'].extend([x.strip() for x in l.split(' ')])
            elif not l.strip():
                # Empty line: signifies the end of the header part. The
                # next line will be a scan.
                break
        sf['scans'] = {}
        if read_scan is not None:
            if read_scan == 'all':
                nr = None
            else:
                nr = read_scan
            try:
                while True:
                    s = readspecscan(f, nr)
                    if isinstance(s, dict):
                        sf['scans'][s['number']] = s
                        if nr is not None:
                            break
                        sf['maxscannumber'] = max(
                            sf['maxscannumber'], s['number'])
                    elif s is not None:
                        sf['maxscannumber'] = max(sf['maxscannumber'], s)
            except SpecFileEOF:
                pass
        else:
            while True:
                l = f.readline()
                if not l:
                    break
                if l.startswith('#S'):
                    n = int(l[2:].split()[0])
                    sf['maxscannumber'] = max(sf['maxscannumber'], n)
        for n in sf['scans']:
            s = sf['scans'][n]
            s['motors'] = sf['motors']
            if 'comment' not in s:
                s['comment'] = sf['comment']
            if 'positions' not in s:
                s['positions'] = [None] * len(sf['motors'])
    return sf
[ "def", "readspec", "(", "filename", ",", "read_scan", "=", "None", ")", ":", "with", "open", "(", "filename", ",", "'rt'", ")", "as", "f", ":", "sf", "=", "{", "'motors'", ":", "[", "]", ",", "'maxscannumber'", ":", "0", "}", "sf", "[", "'originalfilename'", "]", "=", "filename", "lastscannumber", "=", "None", "while", "True", ":", "l", "=", "f", ".", "readline", "(", ")", "if", "l", ".", "startswith", "(", "'#F'", ")", ":", "sf", "[", "'filename'", "]", "=", "l", "[", "2", ":", "]", ".", "strip", "(", ")", "elif", "l", ".", "startswith", "(", "'#E'", ")", ":", "sf", "[", "'epoch'", "]", "=", "int", "(", "l", "[", "2", ":", "]", ".", "strip", "(", ")", ")", "sf", "[", "'datetime'", "]", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "sf", "[", "'epoch'", "]", ")", "elif", "l", ".", "startswith", "(", "'#D'", ")", ":", "sf", "[", "'datestring'", "]", "=", "l", "[", "2", ":", "]", ".", "strip", "(", ")", "elif", "l", ".", "startswith", "(", "'#C'", ")", ":", "sf", "[", "'comment'", "]", "=", "l", "[", "2", ":", "]", ".", "strip", "(", ")", "elif", "l", ".", "startswith", "(", "'#O'", ")", ":", "try", ":", "l", "=", "l", ".", "split", "(", "None", ",", "1", ")", "[", "1", "]", "except", "IndexError", ":", "continue", "if", "'motors'", "not", "in", "list", "(", "sf", ".", "keys", "(", ")", ")", ":", "sf", "[", "'motors'", "]", "=", "[", "]", "sf", "[", "'motors'", "]", ".", "extend", "(", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "l", ".", "split", "(", "' '", ")", "]", ")", "elif", "not", "l", ".", "strip", "(", ")", ":", "# empty line, signifies the end of the header part. The next", "# line will be a scan.", "break", "sf", "[", "'scans'", "]", "=", "{", "}", "if", "read_scan", "is", "not", "None", ":", "if", "read_scan", "==", "'all'", ":", "nr", "=", "None", "else", ":", "nr", "=", "read_scan", "try", ":", "while", "True", ":", "s", "=", "readspecscan", "(", "f", ",", "nr", ")", "if", "isinstance", "(", "s", ",", "dict", ")", ":", "sf", "[", "'scans'", "]", "[", "s", "[", "'number'", "]", "]", "=", "s", "if", "nr", "is", "not", "None", ":", "break", "sf", "[", "'maxscannumber'", "]", "=", "max", "(", "sf", "[", "'maxscannumber'", "]", ",", "s", "[", "'number'", "]", ")", "elif", "s", "is", "not", "None", ":", "sf", "[", "'maxscannumber'", "]", "=", "max", "(", "sf", "[", "'maxscannumber'", "]", ",", "s", ")", "except", "SpecFileEOF", ":", "pass", "else", ":", "while", "True", ":", "l", "=", "f", ".", "readline", "(", ")", "if", "not", "l", ":", "break", "if", "l", ".", "startswith", "(", "'#S'", ")", ":", "n", "=", "int", "(", "l", "[", "2", ":", "]", ".", "split", "(", ")", "[", "0", "]", ")", "sf", "[", "'maxscannumber'", "]", "=", "max", "(", "sf", "[", "'maxscannumber'", "]", ",", "n", ")", "for", "n", "in", "sf", "[", "'scans'", "]", ":", "s", "=", "sf", "[", "'scans'", "]", "[", "n", "]", "s", "[", "'motors'", "]", "=", "sf", "[", "'motors'", "]", "if", "'comment'", "not", "in", "s", ":", "s", "[", "'comment'", "]", "=", "sf", "[", "'comment'", "]", "if", "'positions'", "not", "in", "s", ":", "s", "[", "'positions'", "]", "=", "[", "None", "]", "*", "len", "(", "sf", "[", "'motors'", "]", ")", "return", "sf" ]
Open a SPEC file and read its content

Inputs:
    filename: string
        the file to open
    read_scan: None, 'all' or integer
        the index of the scan to be read from the file. If None, no scan
        should be read. If 'all', all scans should be read. If a number,
        just the scan with that number should be read.

Output: the data in the spec file in a dict.
[ "Open", "a", "SPEC", "file", "and", "read", "its", "content" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/onedim.py#L85-L162
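A minimal usage sketch for readspec may help here. The file name is invented for illustration, the import path is inferred from the func_code_url field above, and the sketch assumes the SPEC header actually contains #E, #C and #O lines:

import sastool  # assumes the package layout matches the repository paths above
from sastool.io.onedim import readspec

sf = readspec('sample.spec', read_scan='all')   # parse the header and every scan
print(sf['epoch'], sf['datetime'], sf['motors'][:5])
for number in sorted(sf['scans']):
    print(number, sf['scans'][number]['comment'])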
awacha/sastool
sastool/io/onedim.py
readabt
def readabt(filename, dirs='.'): """Read abt_*.fio type files from beamline B1, HASYLAB. Input: filename: the name of the file. dirs: directories to search for files in Output: A dictionary. The fields are self-explanatory. """ # resolve filename filename = misc.findfileindirs(filename, dirs) f = open(filename, 'rt') abt = {'offsetcorrected': False, 'params': {}, 'columns': [], 'data': [], 'title': '<no_title>', 'offsets': {}, 'filename': filename} readingmode = '' for l in f: l = l.strip() if l.startswith('!') or len(l) == 0: continue elif l.startswith('%c'): readingmode = 'comments' elif l.startswith('%p'): readingmode = 'params' elif l.startswith('%d'): readingmode = 'data' elif readingmode == 'comments': m = re.match( r'(?P<scantype>\w+)-Scan started at (?P<startdate>\d+-\w+-\d+) (?P<starttime>\d+:\d+:\d+), ended (?P<endtime>\d+:\d+:\d+)', l) if m: abt.update(m.groupdict()) continue else: m = re.match(r'Name: (?P<name>\w+)', l) if m: abt.update(m.groupdict()) m1 = re.search(r'from (?P<from>\d+(?:.\d+)?)', l) if m1: abt.update(m1.groupdict()) m1 = re.search(r'to (?P<to>\d+(?:.\d+)?)', l) if m1: abt.update(m1.groupdict()) m1 = re.search(r'by (?P<by>\d+(?:.\d+)?)', l) if m1: abt.update(m1.groupdict()) m1 = re.search(r'sampling (?P<sampling>\d+(?:.\d+)?)', l) if m1: abt.update(m1.groupdict()) continue if l.find('Counter readings are offset corrected') >= 0: abt['offsetcorrected'] = True readingmode = 'offsets' continue # if we reach here in 'comments' mode, this is the title line abt['title'] = l continue elif readingmode == 'offsets': m = re.findall(r'(\w+)\s(\d+(?:.\d+)?)', l) if m: abt['offsets'].update(dict(m)) for k in abt['offsets']: abt['offsets'][k] = float(abt['offsets'][k]) elif readingmode == 'params': abt['params'][l.split('=')[0].strip()] = float( l.split('=')[1].strip()) elif readingmode == 'data': if l.startswith('Col'): abt['columns'].append(l.split()[2]) else: abt['data'].append([float(x) for x in l.split()]) f.close() # some post-processing # remove common prefix from column names maxcolnamelen = max(len(c) for c in abt['columns']) l = 1 for l in range(1, maxcolnamelen): if len(set([c[:l] for c in abt['columns']])) > 1: break abt['columns'] = [c[l - 1:] for c in abt['columns']] # represent data as a structured array dt = np.dtype(list(zip(abt['columns'], itertools.repeat(np.double)))) abt['data'] = np.array(abt['data'], dtype=np.double).view(dt) # dates and times in datetime formats monthnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] for m, i in zip(monthnames, itertools.count(1)): abt['startdate'] = abt['startdate'].replace(m, str(i)) abt['startdate'] = datetime.date( *reversed([int(x) for x in abt['startdate'].split('-')])) abt['starttime'] = datetime.time( *[int(x) for x in abt['starttime'].split(':')]) abt['endtime'] = datetime.time( *[int(x) for x in abt['endtime'].split(':')]) abt['start'] = datetime.datetime.combine( abt['startdate'], abt['starttime']) if abt['endtime'] <= abt['starttime']: abt['end'] = datetime.datetime.combine( abt['startdate'] + datetime.timedelta(1), abt['endtime']) else: abt['end'] = datetime.datetime.combine( abt['startdate'], abt['endtime']) del abt['starttime'] del abt['startdate'] del abt['endtime'] # convert some fields to float for k in ['from', 'to', 'by', 'sampling']: if k in abt: abt[k] = float(abt[k]) # change space and dash in title to underscore abt['title'] = abt['title'].replace('-', '_').replace(' ', '_') return abt
python
def readabt(filename, dirs='.'): """Read abt_*.fio type files from beamline B1, HASYLAB. Input: filename: the name of the file. dirs: directories to search for files in Output: A dictionary. The fields are self-explanatory. """ # resolve filename filename = misc.findfileindirs(filename, dirs) f = open(filename, 'rt') abt = {'offsetcorrected': False, 'params': {}, 'columns': [], 'data': [], 'title': '<no_title>', 'offsets': {}, 'filename': filename} readingmode = '' for l in f: l = l.strip() if l.startswith('!') or len(l) == 0: continue elif l.startswith('%c'): readingmode = 'comments' elif l.startswith('%p'): readingmode = 'params' elif l.startswith('%d'): readingmode = 'data' elif readingmode == 'comments': m = re.match( r'(?P<scantype>\w+)-Scan started at (?P<startdate>\d+-\w+-\d+) (?P<starttime>\d+:\d+:\d+), ended (?P<endtime>\d+:\d+:\d+)', l) if m: abt.update(m.groupdict()) continue else: m = re.match(r'Name: (?P<name>\w+)', l) if m: abt.update(m.groupdict()) m1 = re.search(r'from (?P<from>\d+(?:.\d+)?)', l) if m1: abt.update(m1.groupdict()) m1 = re.search(r'to (?P<to>\d+(?:.\d+)?)', l) if m1: abt.update(m1.groupdict()) m1 = re.search(r'by (?P<by>\d+(?:.\d+)?)', l) if m1: abt.update(m1.groupdict()) m1 = re.search(r'sampling (?P<sampling>\d+(?:.\d+)?)', l) if m1: abt.update(m1.groupdict()) continue if l.find('Counter readings are offset corrected') >= 0: abt['offsetcorrected'] = True readingmode = 'offsets' continue # if we reach here in 'comments' mode, this is the title line abt['title'] = l continue elif readingmode == 'offsets': m = re.findall(r'(\w+)\s(\d+(?:.\d+)?)', l) if m: abt['offsets'].update(dict(m)) for k in abt['offsets']: abt['offsets'][k] = float(abt['offsets'][k]) elif readingmode == 'params': abt['params'][l.split('=')[0].strip()] = float( l.split('=')[1].strip()) elif readingmode == 'data': if l.startswith('Col'): abt['columns'].append(l.split()[2]) else: abt['data'].append([float(x) for x in l.split()]) f.close() # some post-processing # remove common prefix from column names maxcolnamelen = max(len(c) for c in abt['columns']) l = 1 for l in range(1, maxcolnamelen): if len(set([c[:l] for c in abt['columns']])) > 1: break abt['columns'] = [c[l - 1:] for c in abt['columns']] # represent data as a structured array dt = np.dtype(list(zip(abt['columns'], itertools.repeat(np.double)))) abt['data'] = np.array(abt['data'], dtype=np.double).view(dt) # dates and times in datetime formats monthnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] for m, i in zip(monthnames, itertools.count(1)): abt['startdate'] = abt['startdate'].replace(m, str(i)) abt['startdate'] = datetime.date( *reversed([int(x) for x in abt['startdate'].split('-')])) abt['starttime'] = datetime.time( *[int(x) for x in abt['starttime'].split(':')]) abt['endtime'] = datetime.time( *[int(x) for x in abt['endtime'].split(':')]) abt['start'] = datetime.datetime.combine( abt['startdate'], abt['starttime']) if abt['endtime'] <= abt['starttime']: abt['end'] = datetime.datetime.combine( abt['startdate'] + datetime.timedelta(1), abt['endtime']) else: abt['end'] = datetime.datetime.combine( abt['startdate'], abt['endtime']) del abt['starttime'] del abt['startdate'] del abt['endtime'] # convert some fields to float for k in ['from', 'to', 'by', 'sampling']: if k in abt: abt[k] = float(abt[k]) # change space and dash in title to underscore abt['title'] = abt['title'].replace('-', '_').replace(' ', '_') return abt
[ "def", "readabt", "(", "filename", ",", "dirs", "=", "'.'", ")", ":", "# resolve filename", "filename", "=", "misc", ".", "findfileindirs", "(", "filename", ",", "dirs", ")", "f", "=", "open", "(", "filename", ",", "'rt'", ")", "abt", "=", "{", "'offsetcorrected'", ":", "False", ",", "'params'", ":", "{", "}", ",", "'columns'", ":", "[", "]", ",", "'data'", ":", "[", "]", ",", "'title'", ":", "'<no_title>'", ",", "'offsets'", ":", "{", "}", ",", "'filename'", ":", "filename", "}", "readingmode", "=", "''", "for", "l", "in", "f", ":", "l", "=", "l", ".", "strip", "(", ")", "if", "l", ".", "startswith", "(", "'!'", ")", "or", "len", "(", "l", ")", "==", "0", ":", "continue", "elif", "l", ".", "startswith", "(", "'%c'", ")", ":", "readingmode", "=", "'comments'", "elif", "l", ".", "startswith", "(", "'%p'", ")", ":", "readingmode", "=", "'params'", "elif", "l", ".", "startswith", "(", "'%d'", ")", ":", "readingmode", "=", "'data'", "elif", "readingmode", "==", "'comments'", ":", "m", "=", "re", ".", "match", "(", "r'(?P<scantype>\\w+)-Scan started at (?P<startdate>\\d+-\\w+-\\d+) (?P<starttime>\\d+:\\d+:\\d+), ended (?P<endtime>\\d+:\\d+:\\d+)'", ",", "l", ")", "if", "m", ":", "abt", ".", "update", "(", "m", ".", "groupdict", "(", ")", ")", "continue", "else", ":", "m", "=", "re", ".", "match", "(", "r'Name: (?P<name>\\w+)'", ",", "l", ")", "if", "m", ":", "abt", ".", "update", "(", "m", ".", "groupdict", "(", ")", ")", "m1", "=", "re", ".", "search", "(", "r'from (?P<from>\\d+(?:.\\d+)?)'", ",", "l", ")", "if", "m1", ":", "abt", ".", "update", "(", "m1", ".", "groupdict", "(", ")", ")", "m1", "=", "re", ".", "search", "(", "r'to (?P<to>\\d+(?:.\\d+)?)'", ",", "l", ")", "if", "m1", ":", "abt", ".", "update", "(", "m1", ".", "groupdict", "(", ")", ")", "m1", "=", "re", ".", "search", "(", "r'by (?P<by>\\d+(?:.\\d+)?)'", ",", "l", ")", "if", "m1", ":", "abt", ".", "update", "(", "m1", ".", "groupdict", "(", ")", ")", "m1", "=", "re", ".", "search", "(", "r'sampling (?P<sampling>\\d+(?:.\\d+)?)'", ",", "l", ")", "if", "m1", ":", "abt", ".", "update", "(", "m1", ".", "groupdict", "(", ")", ")", "continue", "if", "l", ".", "find", "(", "'Counter readings are offset corrected'", ")", ">=", "0", ":", "abt", "[", "'offsetcorrected'", "]", "=", "True", "readingmode", "=", "'offsets'", "continue", "# if we reach here in 'comments' mode, this is the title line", "abt", "[", "'title'", "]", "=", "l", "continue", "elif", "readingmode", "==", "'offsets'", ":", "m", "=", "re", ".", "findall", "(", "r'(\\w+)\\s(\\d+(?:.\\d+)?)'", ",", "l", ")", "if", "m", ":", "abt", "[", "'offsets'", "]", ".", "update", "(", "dict", "(", "m", ")", ")", "for", "k", "in", "abt", "[", "'offsets'", "]", ":", "abt", "[", "'offsets'", "]", "[", "k", "]", "=", "float", "(", "abt", "[", "'offsets'", "]", "[", "k", "]", ")", "elif", "readingmode", "==", "'params'", ":", "abt", "[", "'params'", "]", "[", "l", ".", "split", "(", "'='", ")", "[", "0", "]", ".", "strip", "(", ")", "]", "=", "float", "(", "l", ".", "split", "(", "'='", ")", "[", "1", "]", ".", "strip", "(", ")", ")", "elif", "readingmode", "==", "'data'", ":", "if", "l", ".", "startswith", "(", "'Col'", ")", ":", "abt", "[", "'columns'", "]", ".", "append", "(", "l", ".", "split", "(", ")", "[", "2", "]", ")", "else", ":", "abt", "[", "'data'", "]", ".", "append", "(", "[", "float", "(", "x", ")", "for", "x", "in", "l", ".", "split", "(", ")", "]", ")", "f", ".", "close", "(", ")", "# some post-processing", "# remove common prefix from column names", "maxcolnamelen", 
"=", "max", "(", "len", "(", "c", ")", "for", "c", "in", "abt", "[", "'columns'", "]", ")", "l", "=", "1", "for", "l", "in", "range", "(", "1", ",", "maxcolnamelen", ")", ":", "if", "len", "(", "set", "(", "[", "c", "[", ":", "l", "]", "for", "c", "in", "abt", "[", "'columns'", "]", "]", ")", ")", ">", "1", ":", "break", "abt", "[", "'columns'", "]", "=", "[", "c", "[", "l", "-", "1", ":", "]", "for", "c", "in", "abt", "[", "'columns'", "]", "]", "# represent data as a structured array", "dt", "=", "np", ".", "dtype", "(", "list", "(", "zip", "(", "abt", "[", "'columns'", "]", ",", "itertools", ".", "repeat", "(", "np", ".", "double", ")", ")", ")", ")", "abt", "[", "'data'", "]", "=", "np", ".", "array", "(", "abt", "[", "'data'", "]", ",", "dtype", "=", "np", ".", "double", ")", ".", "view", "(", "dt", ")", "# dates and times in datetime formats", "monthnames", "=", "[", "'Jan'", ",", "'Feb'", ",", "'Mar'", ",", "'Apr'", ",", "'May'", ",", "'Jun'", ",", "'Jul'", ",", "'Aug'", ",", "'Sep'", ",", "'Oct'", ",", "'Nov'", ",", "'Dec'", "]", "for", "m", ",", "i", "in", "zip", "(", "monthnames", ",", "itertools", ".", "count", "(", "1", ")", ")", ":", "abt", "[", "'startdate'", "]", "=", "abt", "[", "'startdate'", "]", ".", "replace", "(", "m", ",", "str", "(", "i", ")", ")", "abt", "[", "'startdate'", "]", "=", "datetime", ".", "date", "(", "*", "reversed", "(", "[", "int", "(", "x", ")", "for", "x", "in", "abt", "[", "'startdate'", "]", ".", "split", "(", "'-'", ")", "]", ")", ")", "abt", "[", "'starttime'", "]", "=", "datetime", ".", "time", "(", "*", "[", "int", "(", "x", ")", "for", "x", "in", "abt", "[", "'starttime'", "]", ".", "split", "(", "':'", ")", "]", ")", "abt", "[", "'endtime'", "]", "=", "datetime", ".", "time", "(", "*", "[", "int", "(", "x", ")", "for", "x", "in", "abt", "[", "'endtime'", "]", ".", "split", "(", "':'", ")", "]", ")", "abt", "[", "'start'", "]", "=", "datetime", ".", "datetime", ".", "combine", "(", "abt", "[", "'startdate'", "]", ",", "abt", "[", "'starttime'", "]", ")", "if", "abt", "[", "'endtime'", "]", "<=", "abt", "[", "'starttime'", "]", ":", "abt", "[", "'end'", "]", "=", "datetime", ".", "datetime", ".", "combine", "(", "abt", "[", "'startdate'", "]", "+", "datetime", ".", "timedelta", "(", "1", ")", ",", "abt", "[", "'endtime'", "]", ")", "else", ":", "abt", "[", "'end'", "]", "=", "datetime", ".", "datetime", ".", "combine", "(", "abt", "[", "'startdate'", "]", ",", "abt", "[", "'endtime'", "]", ")", "del", "abt", "[", "'starttime'", "]", "del", "abt", "[", "'startdate'", "]", "del", "abt", "[", "'endtime'", "]", "# convert some fields to float", "for", "k", "in", "[", "'from'", ",", "'to'", ",", "'by'", ",", "'sampling'", "]", ":", "if", "k", "in", "abt", ":", "abt", "[", "k", "]", "=", "float", "(", "abt", "[", "k", "]", ")", "# change space and dash in title to underscore", "abt", "[", "'title'", "]", "=", "abt", "[", "'title'", "]", ".", "replace", "(", "'-'", ",", "'_'", ")", ".", "replace", "(", "' '", ",", "'_'", ")", "return", "abt" ]
Read abt_*.fio type files from beamline B1, HASYLAB. Input: filename: the name of the file. dirs: directories to search for files in Output: A dictionary. The fields are self-explanatory.
[ "Read", "abt_", "*", ".", "fio", "type", "files", "from", "beamline", "B1", "HASYLAB", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/onedim.py#L165-L275
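A hedged usage sketch for readabt; the file name and search directories are invented for illustration, and column access assumes the file defines at least one Col line:

from sastool.io.onedim import readabt

abt = readabt('abt_0123.fio', dirs=['.', '/data/b1'])
print(abt['title'], abt['start'], abt['end'])   # scan title and datetime range
print(abt['columns'])                           # prefix-stripped column names
col = abt['columns'][0]
print(abt['data'][col][:5])                     # structured-array column access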
awacha/sastool
sastool/io/credo_cpth5/header.py
Header.energy
def energy(self) -> ErrorValue: """X-ray energy""" return (ErrorValue(*(scipy.constants.physical_constants['speed of light in vacuum'][0::2])) * ErrorValue(*(scipy.constants.physical_constants['Planck constant in eV s'][0::2])) / scipy.constants.nano / self.wavelength)
python
def energy(self) -> ErrorValue: """X-ray energy""" return (ErrorValue(*(scipy.constants.physical_constants['speed of light in vacuum'][0::2])) * ErrorValue(*(scipy.constants.physical_constants['Planck constant in eV s'][0::2])) / scipy.constants.nano / self.wavelength)
[ "def", "energy", "(", "self", ")", "->", "ErrorValue", ":", "return", "(", "ErrorValue", "(", "*", "(", "scipy", ".", "constants", ".", "physical_constants", "[", "'speed of light in vacuum'", "]", "[", "0", ":", ":", "2", "]", ")", ")", "*", "ErrorValue", "(", "*", "(", "scipy", ".", "constants", ".", "physical_constants", "[", "'Planck constant in eV s'", "]", "[", "0", ":", ":", "2", "]", ")", ")", "/", "scipy", ".", "constants", ".", "nano", "/", "self", ".", "wavelength", ")" ]
X-ray energy
[ "X", "-", "ray", "energy" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/credo_cpth5/header.py#L50-L55
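The property implements E = h·c/λ with λ in nanometres, yielding eV. A quick numerical check of that formula (a standalone sketch, not the library code; it sidesteps the ErrorValue wrapper and the physical_constants table keys):

import scipy.constants

wavelength_nm = 0.15418                          # Cu K-alpha, for illustration
h_eVs = scipy.constants.h / scipy.constants.e    # Planck constant in eV*s
E = scipy.constants.c * h_eVs / (scipy.constants.nano * wavelength_nm)
print(E)   # ~8041 eV, i.e. about 8.04 keV for Cu K-alpha radiation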
awacha/sastool
sastool/io/credo_cpth5/header.py
Header.maskname
def maskname(self) -> Optional[str]: """Name of the mask matrix file.""" try: maskid = self._data['maskname'] if not maskid.endswith('.mat'): maskid = maskid + '.mat' return maskid except KeyError: return None
python
def maskname(self) -> Optional[str]: """Name of the mask matrix file.""" try: maskid = self._data['maskname'] if not maskid.endswith('.mat'): maskid = maskid + '.mat' return maskid except KeyError: return None
[ "def", "maskname", "(", "self", ")", "->", "Optional", "[", "str", "]", ":", "try", ":", "maskid", "=", "self", ".", "_data", "[", "'maskname'", "]", "if", "not", "maskid", ".", "endswith", "(", "'.mat'", ")", ":", "maskid", "=", "maskid", "+", "'.mat'", "return", "maskid", "except", "KeyError", ":", "return", "None" ]
Name of the mask matrix file.
[ "Name", "of", "the", "mask", "matrix", "file", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/credo_cpth5/header.py#L191-L199
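The property only normalises the stored name; the behaviour is easy to state as a tiny standalone sketch (the helper name is invented for illustration):

from typing import Optional

def normalize_maskname(maskid: Optional[str]) -> Optional[str]:
    # append the '.mat' suffix when it is missing, mirroring Header.maskname
    if maskid is None:
        return None
    return maskid if maskid.endswith('.mat') else maskid + '.mat'

assert normalize_maskname('mask_sample') == 'mask_sample.mat'
assert normalize_maskname('mask_sample.mat') == 'mask_sample.mat'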
awacha/sastool
sastool/utils2d/centering.py
findbeam_gravity
def findbeam_gravity(data, mask): """Find beam center with the "gravity" method Inputs: data: scattering image mask: mask matrix Output: a vector of length 2 with the x (row) and y (column) coordinates of the origin, starting from 1 """ # for each row and column find the center of gravity data1 = data.copy() # take a copy, because elements will be tampered with data1[mask == 0] = 0 # set masked elements to zero # vector of x (row) coordinates x = np.arange(data1.shape[0]) # vector of y (column) coordinates y = np.arange(data1.shape[1]) # two column vectors, both containing ones. The length of onex and # oney corresponds to length of x and y, respectively. onex = np.ones_like(x) oney = np.ones_like(y) # Multiply the matrix with x. Each element of the resulting column # vector will contain the center of gravity of the corresponding row # in the matrix, multiplied by the "weight". Thus: nix_i=sum_j( A_ij # * x_j). If we divide this by spamx_i=sum_j(A_ij), then we get the # center of gravity. The length of this column vector is len(y). nix = np.dot(x, data1) spamx = np.dot(onex, data1) # indices where both nix and spamx is nonzero. goodx = ((nix != 0) & (spamx != 0)) # trim y, nix and spamx by goodx, eliminate invalid points. nix = nix[goodx] spamx = spamx[goodx] # now do the same for the column direction. niy = np.dot(data1, y) spamy = np.dot(data1, oney) goody = ((niy != 0) & (spamy != 0)) niy = niy[goody] spamy = spamy[goody] # column coordinate of the center in each row will be contained in # ycent, the row coordinate of the center in each column will be # in xcent. ycent = nix / spamx xcent = niy / spamy # return the mean values as the centers. return [xcent.mean(), ycent.mean()]
python
def findbeam_gravity(data, mask): """Find beam center with the "gravity" method Inputs: data: scattering image mask: mask matrix Output: a vector of length 2 with the x (row) and y (column) coordinates of the origin, starting from 1 """ # for each row and column find the center of gravity data1 = data.copy() # take a copy, because elements will be tampered with data1[mask == 0] = 0 # set masked elements to zero # vector of x (row) coordinates x = np.arange(data1.shape[0]) # vector of y (column) coordinates y = np.arange(data1.shape[1]) # two column vectors, both containing ones. The length of onex and # oney corresponds to length of x and y, respectively. onex = np.ones_like(x) oney = np.ones_like(y) # Multiply the matrix with x. Each element of the resulting column # vector will contain the center of gravity of the corresponding row # in the matrix, multiplied by the "weight". Thus: nix_i=sum_j( A_ij # * x_j). If we divide this by spamx_i=sum_j(A_ij), then we get the # center of gravity. The length of this column vector is len(y). nix = np.dot(x, data1) spamx = np.dot(onex, data1) # indices where both nix and spamx is nonzero. goodx = ((nix != 0) & (spamx != 0)) # trim y, nix and spamx by goodx, eliminate invalid points. nix = nix[goodx] spamx = spamx[goodx] # now do the same for the column direction. niy = np.dot(data1, y) spamy = np.dot(data1, oney) goody = ((niy != 0) & (spamy != 0)) niy = niy[goody] spamy = spamy[goody] # column coordinate of the center in each row will be contained in # ycent, the row coordinate of the center in each column will be # in xcent. ycent = nix / spamx xcent = niy / spamy # return the mean values as the centers. return [xcent.mean(), ycent.mean()]
[ "def", "findbeam_gravity", "(", "data", ",", "mask", ")", ":", "# for each row and column find the center of gravity", "data1", "=", "data", ".", "copy", "(", ")", "# take a copy, because elements will be tampered with", "data1", "[", "mask", "==", "0", "]", "=", "0", "# set masked elements to zero", "# vector of x (row) coordinates", "x", "=", "np", ".", "arange", "(", "data1", ".", "shape", "[", "0", "]", ")", "# vector of y (column) coordinates", "y", "=", "np", ".", "arange", "(", "data1", ".", "shape", "[", "1", "]", ")", "# two column vectors, both containing ones. The length of onex and", "# oney corresponds to length of x and y, respectively.", "onex", "=", "np", ".", "ones_like", "(", "x", ")", "oney", "=", "np", ".", "ones_like", "(", "y", ")", "# Multiply the matrix with x. Each element of the resulting column", "# vector will contain the center of gravity of the corresponding row", "# in the matrix, multiplied by the \"weight\". Thus: nix_i=sum_j( A_ij", "# * x_j). If we divide this by spamx_i=sum_j(A_ij), then we get the", "# center of gravity. The length of this column vector is len(y).", "nix", "=", "np", ".", "dot", "(", "x", ",", "data1", ")", "spamx", "=", "np", ".", "dot", "(", "onex", ",", "data1", ")", "# indices where both nix and spamx is nonzero.", "goodx", "=", "(", "(", "nix", "!=", "0", ")", "&", "(", "spamx", "!=", "0", ")", ")", "# trim y, nix and spamx by goodx, eliminate invalid points.", "nix", "=", "nix", "[", "goodx", "]", "spamx", "=", "spamx", "[", "goodx", "]", "# now do the same for the column direction.", "niy", "=", "np", ".", "dot", "(", "data1", ",", "y", ")", "spamy", "=", "np", ".", "dot", "(", "data1", ",", "oney", ")", "goody", "=", "(", "(", "niy", "!=", "0", ")", "&", "(", "spamy", "!=", "0", ")", ")", "niy", "=", "niy", "[", "goody", "]", "spamy", "=", "spamy", "[", "goody", "]", "# column coordinate of the center in each row will be contained in", "# ycent, the row coordinate of the center in each column will be", "# in xcent.", "ycent", "=", "nix", "/", "spamx", "xcent", "=", "niy", "/", "spamy", "# return the mean values as the centers.", "return", "[", "xcent", ".", "mean", "(", ")", ",", "ycent", ".", "mean", "(", ")", "]" ]
Find beam center with the "gravity" method Inputs: data: scattering image mask: mask matrix Output: a vector of length 2 with the x (row) and y (column) coordinates of the origin, starting from 1
[ "Find", "beam", "center", "with", "the", "gravity", "method" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/centering.py#L11-L58
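A synthetic sanity check for findbeam_gravity (import path inferred from the func_code_url field; the spot is placed on the diagonal so the check does not depend on the row/column ordering of the returned pair):

import numpy as np
from sastool.utils2d.centering import findbeam_gravity

row, col = np.mgrid[0:128, 0:128]
data = np.exp(-((row - 70.0) ** 2 + (col - 70.0) ** 2) / 50.0)  # spot at (70, 70)
mask = np.ones_like(data)              # nonzero everywhere: nothing is masked
print(findbeam_gravity(data, mask))    # both coordinates should be close to 70.0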
awacha/sastool
sastool/utils2d/centering.py
findbeam_slices
def findbeam_slices(data, orig_initial, mask=None, maxiter=0, epsfcn=0.001, dmin=0, dmax=np.inf, sector_width=np.pi / 9.0, extent=10, callback=None): """Find beam center with the "slices" method Inputs: data: scattering matrix orig_initial: estimated value for x (row) and y (column) coordinates of the beam center, starting from 1. mask: mask matrix. If None, nothing will be masked. Otherwise it should be of the same size as data. Nonzero means non-masked. maxiter: maximum number of iterations for scipy.optimize.leastsq epsfcn: input for scipy.optimize.leastsq dmin: disregard pixels nearer to the origin than this dmax: disregard pixels farther from the origin than this sector_width: width of sectors in radians extent: approximate distance of the current and the real origin in pixels. Too high a value makes the fitting procedure unstable. Too low a value does not permit to move away the current origin. callback: callback function (expects no arguments) Output: a vector of length 2 with the x (row) and y (column) coordinates of the origin. """ if mask is None: mask = np.ones(data.shape) data = data.astype(np.double) def targetfunc(orig, data, mask, orig_orig, callback): # integrate four sectors I = [None] * 4 p, Ints, A = radint_nsector(data, None, -1, -1, -1, orig[0] + orig_orig[0], orig[1] + orig_orig[1], mask=mask, phi0=np.pi / 4 - 0.5 * sector_width, dphi=sector_width, Nsector=4) minpix = max(max(p.min(0).tolist()), dmin) maxpix = min(min(p.max(0).tolist()), dmax) if (maxpix < minpix): raise ValueError('The four slices do not overlap! Please give a\ better approximation for the origin or use another centering method.') for i in range(4): I[i] = Ints[:, i][(p[:, i] >= minpix) & (p[:, i] <= maxpix)] ret = ((I[0] - I[2]) ** 2 + (I[1] - I[3]) ** 2) / (maxpix - minpix) if callback is not None: callback() return ret orig = scipy.optimize.leastsq(targetfunc, np.array([extent, extent]), args=(data, 1 - mask.astype(np.uint8), np.array(orig_initial) - extent, callback), maxfev=maxiter, epsfcn=0.01) return orig[0] + np.array(orig_initial) - extent
python
def findbeam_slices(data, orig_initial, mask=None, maxiter=0, epsfcn=0.001, dmin=0, dmax=np.inf, sector_width=np.pi / 9.0, extent=10, callback=None): """Find beam center with the "slices" method Inputs: data: scattering matrix orig_initial: estimated value for x (row) and y (column) coordinates of the beam center, starting from 1. mask: mask matrix. If None, nothing will be masked. Otherwise it should be of the same size as data. Nonzero means non-masked. maxiter: maximum number of iterations for scipy.optimize.leastsq epsfcn: input for scipy.optimize.leastsq dmin: disregard pixels nearer to the origin than this dmax: disregard pixels farther from the origin than this sector_width: width of sectors in radians extent: approximate distance of the current and the real origin in pixels. Too high a value makes the fitting procedure unstable. Too low a value does not permit to move away the current origin. callback: callback function (expects no arguments) Output: a vector of length 2 with the x (row) and y (column) coordinates of the origin. """ if mask is None: mask = np.ones(data.shape) data = data.astype(np.double) def targetfunc(orig, data, mask, orig_orig, callback): # integrate four sectors I = [None] * 4 p, Ints, A = radint_nsector(data, None, -1, -1, -1, orig[0] + orig_orig[0], orig[1] + orig_orig[1], mask=mask, phi0=np.pi / 4 - 0.5 * sector_width, dphi=sector_width, Nsector=4) minpix = max(max(p.min(0).tolist()), dmin) maxpix = min(min(p.max(0).tolist()), dmax) if (maxpix < minpix): raise ValueError('The four slices do not overlap! Please give a\ better approximation for the origin or use another centering method.') for i in range(4): I[i] = Ints[:, i][(p[:, i] >= minpix) & (p[:, i] <= maxpix)] ret = ((I[0] - I[2]) ** 2 + (I[1] - I[3]) ** 2) / (maxpix - minpix) if callback is not None: callback() return ret orig = scipy.optimize.leastsq(targetfunc, np.array([extent, extent]), args=(data, 1 - mask.astype(np.uint8), np.array(orig_initial) - extent, callback), maxfev=maxiter, epsfcn=0.01) return orig[0] + np.array(orig_initial) - extent
[ "def", "findbeam_slices", "(", "data", ",", "orig_initial", ",", "mask", "=", "None", ",", "maxiter", "=", "0", ",", "epsfcn", "=", "0.001", ",", "dmin", "=", "0", ",", "dmax", "=", "np", ".", "inf", ",", "sector_width", "=", "np", ".", "pi", "/", "9.0", ",", "extent", "=", "10", ",", "callback", "=", "None", ")", ":", "if", "mask", "is", "None", ":", "mask", "=", "np", ".", "ones", "(", "data", ".", "shape", ")", "data", "=", "data", ".", "astype", "(", "np", ".", "double", ")", "def", "targetfunc", "(", "orig", ",", "data", ",", "mask", ",", "orig_orig", ",", "callback", ")", ":", "# integrate four sectors", "I", "=", "[", "None", "]", "*", "4", "p", ",", "Ints", ",", "A", "=", "radint_nsector", "(", "data", ",", "None", ",", "-", "1", ",", "-", "1", ",", "-", "1", ",", "orig", "[", "0", "]", "+", "orig_orig", "[", "0", "]", ",", "orig", "[", "1", "]", "+", "orig_orig", "[", "1", "]", ",", "mask", "=", "mask", ",", "phi0", "=", "np", ".", "pi", "/", "4", "-", "0.5", "*", "sector_width", ",", "dphi", "=", "sector_width", ",", "Nsector", "=", "4", ")", "minpix", "=", "max", "(", "max", "(", "p", ".", "min", "(", "0", ")", ".", "tolist", "(", ")", ")", ",", "dmin", ")", "maxpix", "=", "min", "(", "min", "(", "p", ".", "max", "(", "0", ")", ".", "tolist", "(", ")", ")", ",", "dmax", ")", "if", "(", "maxpix", "<", "minpix", ")", ":", "raise", "ValueError", "(", "'The four slices do not overlap! Please give a\\\n better approximation for the origin or use another centering method.'", ")", "for", "i", "in", "range", "(", "4", ")", ":", "I", "[", "i", "]", "=", "Ints", "[", ":", ",", "i", "]", "[", "(", "p", "[", ":", ",", "i", "]", ">=", "minpix", ")", "&", "(", "p", "[", ":", ",", "i", "]", "<=", "maxpix", ")", "]", "ret", "=", "(", "(", "I", "[", "0", "]", "-", "I", "[", "2", "]", ")", "**", "2", "+", "(", "I", "[", "1", "]", "-", "I", "[", "3", "]", ")", "**", "2", ")", "/", "(", "maxpix", "-", "minpix", ")", "if", "callback", "is", "not", "None", ":", "callback", "(", ")", "return", "ret", "orig", "=", "scipy", ".", "optimize", ".", "leastsq", "(", "targetfunc", ",", "np", ".", "array", "(", "[", "extent", ",", "extent", "]", ")", ",", "args", "=", "(", "data", ",", "1", "-", "mask", ".", "astype", "(", "np", ".", "uint8", ")", ",", "np", ".", "array", "(", "orig_initial", ")", "-", "extent", ",", "callback", ")", ",", "maxfev", "=", "maxiter", ",", "epsfcn", "=", "0.01", ")", "return", "orig", "[", "0", "]", "+", "np", ".", "array", "(", "orig_initial", ")", "-", "extent" ]
Find beam center with the "slices" method Inputs: data: scattering matrix orig_initial: estimated value for x (row) and y (column) coordinates of the beam center, starting from 1. mask: mask matrix. If None, nothing will be masked. Otherwise it should be of the same size as data. Nonzero means non-masked. maxiter: maximum number of iterations for scipy.optimize.leastsq epsfcn: input for scipy.optimize.leastsq dmin: disregard pixels nearer to the origin than this dmax: disregard pixels farther from the origin than this sector_width: width of sectors in radians extent: approximate distance of the current and the real origin in pixels. Too high a value makes the fitting procedure unstable. Too low a value does not permit to move away the current origin. callback: callback function (expects no arguments) Output: a vector of length 2 with the x (row) and y (column) coordinates of the origin.
[ "Find", "beam", "center", "with", "the", "slices", "method" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/centering.py#L61-L110
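A usage sketch for findbeam_slices on a synthetic isotropic pattern (it assumes the compiled radial-integration helpers the module imports are available, so treat the printed values as indicative):

import numpy as np
from sastool.utils2d.centering import findbeam_slices

row, col = np.mgrid[0:256, 0:256].astype(float)
r = np.hypot(row - 100.0, col - 110.0)      # true centre at (100, 110)
data = np.exp(-r / 30.0)                    # isotropic, monotonically decaying
mask = np.ones_like(data, dtype=np.uint8)
orig = findbeam_slices(data, (97.0, 113.0), mask=mask, extent=5)
print(orig)   # should converge near (100.0, 110.0)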
awacha/sastool
sastool/utils2d/centering.py
findbeam_azimuthal
def findbeam_azimuthal(data, orig_initial, mask=None, maxiter=100, Ntheta=50, dmin=0, dmax=np.inf, extent=10, callback=None): """Find beam center using azimuthal integration Inputs: data: scattering matrix orig_initial: estimated value for x (row) and y (column) coordinates of the beam center, starting from 1. mask: mask matrix. If None, nothing will be masked. Otherwise it should be of the same size as data. Nonzero means non-masked. maxiter: maximum number of iterations for scipy.optimize.fmin Ntheta: the number of theta points for the azimuthal integration dmin: pixels nearer to the origin than this will be excluded from the azimuthal integration dmax: pixels farther from the origin than this will be excluded from the azimuthal integration extent: approximate distance of the current and the real origin in pixels. Too high a value makes the fitting procedure unstable. Too low a value does not permit to move away the current origin. callback: callback function (expects no arguments) Output: a vector of length 2 with the x and y coordinates of the origin, starting from 1 """ if mask is None: mask = np.ones(data.shape) data = data.astype(np.double) def targetfunc(orig, data, mask, orig_orig, callback): def sinfun(p, x, y): return (y - np.sin(x + p[1]) * p[0] - p[2]) / np.sqrt(len(x)) t, I, a = azimintpix(data, None, orig[ 0] + orig_orig[0], orig[1] + orig_orig[1], mask.astype('uint8'), Ntheta, dmin, dmax) if len(a) > (a > 0).sum(): raise ValueError('findbeam_azimuthal: non-complete azimuthal average, please consider changing dmin, dmax and/or orig_initial!') p = ((I.max() - I.min()) / 2.0, t[I == I.max()][0], I.mean()) p = scipy.optimize.leastsq(sinfun, p, (t, I))[0] # print "findbeam_azimuthal: orig=",orig,"amplitude=",abs(p[0]) if callback is not None: callback() return abs(p[0]) orig1 = scipy.optimize.fmin(targetfunc, np.array([extent, extent]), args=(data, 1 - mask, np.array(orig_initial) - extent, callback), maxiter=maxiter, disp=0) return orig1 + np.array(orig_initial) - extent
python
def findbeam_azimuthal(data, orig_initial, mask=None, maxiter=100, Ntheta=50, dmin=0, dmax=np.inf, extent=10, callback=None): """Find beam center using azimuthal integration Inputs: data: scattering matrix orig_initial: estimated value for x (row) and y (column) coordinates of the beam center, starting from 1. mask: mask matrix. If None, nothing will be masked. Otherwise it should be of the same size as data. Nonzero means non-masked. maxiter: maximum number of iterations for scipy.optimize.fmin Ntheta: the number of theta points for the azimuthal integration dmin: pixels nearer to the origin than this will be excluded from the azimuthal integration dmax: pixels farther from the origin than this will be excluded from the azimuthal integration extent: approximate distance of the current and the real origin in pixels. Too high a value makes the fitting procedure unstable. Too low a value does not permit to move away the current origin. callback: callback function (expects no arguments) Output: a vector of length 2 with the x and y coordinates of the origin, starting from 1 """ if mask is None: mask = np.ones(data.shape) data = data.astype(np.double) def targetfunc(orig, data, mask, orig_orig, callback): def sinfun(p, x, y): return (y - np.sin(x + p[1]) * p[0] - p[2]) / np.sqrt(len(x)) t, I, a = azimintpix(data, None, orig[ 0] + orig_orig[0], orig[1] + orig_orig[1], mask.astype('uint8'), Ntheta, dmin, dmax) if len(a) > (a > 0).sum(): raise ValueError('findbeam_azimuthal: non-complete azimuthal average, please consider changing dmin, dmax and/or orig_initial!') p = ((I.max() - I.min()) / 2.0, t[I == I.max()][0], I.mean()) p = scipy.optimize.leastsq(sinfun, p, (t, I))[0] # print "findbeam_azimuthal: orig=",orig,"amplitude=",abs(p[0]) if callback is not None: callback() return abs(p[0]) orig1 = scipy.optimize.fmin(targetfunc, np.array([extent, extent]), args=(data, 1 - mask, np.array(orig_initial) - extent, callback), maxiter=maxiter, disp=0) return orig1 + np.array(orig_initial) - extent
[ "def", "findbeam_azimuthal", "(", "data", ",", "orig_initial", ",", "mask", "=", "None", ",", "maxiter", "=", "100", ",", "Ntheta", "=", "50", ",", "dmin", "=", "0", ",", "dmax", "=", "np", ".", "inf", ",", "extent", "=", "10", ",", "callback", "=", "None", ")", ":", "if", "mask", "is", "None", ":", "mask", "=", "np", ".", "ones", "(", "data", ".", "shape", ")", "data", "=", "data", ".", "astype", "(", "np", ".", "double", ")", "def", "targetfunc", "(", "orig", ",", "data", ",", "mask", ",", "orig_orig", ",", "callback", ")", ":", "def", "sinfun", "(", "p", ",", "x", ",", "y", ")", ":", "return", "(", "y", "-", "np", ".", "sin", "(", "x", "+", "p", "[", "1", "]", ")", "*", "p", "[", "0", "]", "-", "p", "[", "2", "]", ")", "/", "np", ".", "sqrt", "(", "len", "(", "x", ")", ")", "t", ",", "I", ",", "a", "=", "azimintpix", "(", "data", ",", "None", ",", "orig", "[", "0", "]", "+", "orig_orig", "[", "0", "]", ",", "orig", "[", "1", "]", "+", "orig_orig", "[", "1", "]", ",", "mask", ".", "astype", "(", "'uint8'", ")", ",", "Ntheta", ",", "dmin", ",", "dmax", ")", "if", "len", "(", "a", ")", ">", "(", "a", ">", "0", ")", ".", "sum", "(", ")", ":", "raise", "ValueError", "(", "'findbeam_azimuthal: non-complete azimuthal average, please consider changing dmin, dmax and/or orig_initial!'", ")", "p", "=", "(", "(", "I", ".", "max", "(", ")", "-", "I", ".", "min", "(", ")", ")", "/", "2.0", ",", "t", "[", "I", "==", "I", ".", "max", "(", ")", "]", "[", "0", "]", ",", "I", ".", "mean", "(", ")", ")", "p", "=", "scipy", ".", "optimize", ".", "leastsq", "(", "sinfun", ",", "p", ",", "(", "t", ",", "I", ")", ")", "[", "0", "]", "# print \"findbeam_azimuthal: orig=\",orig,\"amplitude=\",abs(p[0])", "if", "callback", "is", "not", "None", ":", "callback", "(", ")", "return", "abs", "(", "p", "[", "0", "]", ")", "orig1", "=", "scipy", ".", "optimize", ".", "fmin", "(", "targetfunc", ",", "np", ".", "array", "(", "[", "extent", ",", "extent", "]", ")", ",", "args", "=", "(", "data", ",", "1", "-", "mask", ",", "np", ".", "array", "(", "orig_initial", ")", "-", "extent", ",", "callback", ")", ",", "maxiter", "=", "maxiter", ",", "disp", "=", "0", ")", "return", "orig1", "+", "np", ".", "array", "(", "orig_initial", ")", "-", "extent" ]
Find beam center using azimuthal integration Inputs: data: scattering matrix orig_initial: estimated value for x (row) and y (column) coordinates of the beam center, starting from 1. mask: mask matrix. If None, nothing will be masked. Otherwise it should be of the same size as data. Nonzero means non-masked. maxiter: maximum number of iterations for scipy.optimize.fmin Ntheta: the number of theta points for the azimuthal integration dmin: pixels nearer to the origin than this will be excluded from the azimuthal integration dmax: pixels farther from the origin than this will be excluded from the azimuthal integration extent: approximate distance of the current and the real origin in pixels. Too high a value makes the fitting procedure unstable. Too low a value does not permit to move away the current origin. callback: callback function (expects no arguments) Output: a vector of length 2 with the x and y coordinates of the origin, starting from 1
[ "Find", "beam", "center", "using", "azimuthal", "integration" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/centering.py#L113-L157
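The criterion behind findbeam_azimuthal is that an off-centre origin imprints a once-per-turn oscillation on the azimuthal average, so the fitted sine amplitude measures the miscentring. A toy demonstration of that first-harmonic signal (synthetic numbers, no detector data needed):

import numpy as np

theta = np.linspace(0, 2 * np.pi, 50, endpoint=False)
I = 100 + 9.6 * np.sin(theta + 0.3)             # toy azimuthal curve
amp = 2 * np.abs(np.fft.rfft(I)[1]) / len(I)    # first-harmonic amplitude
print(amp)                                       # ~9.6, recovering the oscillation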
awacha/sastool
sastool/utils2d/centering.py
findbeam_azimuthal_fold
def findbeam_azimuthal_fold(data, orig_initial, mask=None,
                            maxiter=100, Ntheta=50, dmin=0, dmax=np.inf,
                            extent=10, callback=None):
    """Find beam center using azimuthal integration and folding

    Inputs:
        data: scattering matrix
        orig_initial: estimated value for x (row) and y (column)
            coordinates of the beam center, starting from 1.
        mask: mask matrix. If None, nothing will be masked. Otherwise it
            should be of the same size as data. Nonzero means non-masked.
        maxiter: maximum number of iterations for scipy.optimize.fmin
        Ntheta: the number of theta points for the azimuthal integration.
            Should be even!
        dmin: pixels nearer to the origin than this will be excluded from
            the azimuthal integration
        dmax: pixels farther from the origin than this will be excluded from
            the azimuthal integration
        extent: approximate distance of the current and the real origin in pixels.
            Too high a value makes the fitting procedure unstable. Too low a value
            does not permit to move away the current origin.
        callback: callback function (expects no arguments)

    Output:
        a vector of length 2 with the x and y coordinates of the origin,
        starting from 1
    """
    if Ntheta % 2:
        raise ValueError('Ntheta should be even!')
    if mask is None:
        mask = np.ones_like(data).astype(np.uint8)
    data = data.astype(np.double)
    # the function to minimize is the sum of squared difference of two halves of
    # the azimuthal integral.

    def targetfunc(orig, data, mask, orig_orig, callback):
        I = azimintpix(data, None, orig[
                       0] + orig_orig[0], orig[1] + orig_orig[1], mask, Ntheta, dmin, dmax)[1]
        if callback is not None:
            callback()
        # integer division: the two half-turns are compared element-wise
        return np.sum((I[:Ntheta // 2] - I[Ntheta // 2:]) ** 2) / Ntheta
    orig1 = scipy.optimize.fmin(targetfunc, np.array([extent, extent]),
                                args=(data, 1 - mask, np.array(orig_initial) -
                                      extent, callback), maxiter=maxiter, disp=0)
    return orig1 + np.array(orig_initial) - extent
python
def findbeam_azimuthal_fold(data, orig_initial, mask=None,
                            maxiter=100, Ntheta=50, dmin=0, dmax=np.inf,
                            extent=10, callback=None):
    """Find beam center using azimuthal integration and folding

    Inputs:
        data: scattering matrix
        orig_initial: estimated value for x (row) and y (column)
            coordinates of the beam center, starting from 1.
        mask: mask matrix. If None, nothing will be masked. Otherwise it
            should be of the same size as data. Nonzero means non-masked.
        maxiter: maximum number of iterations for scipy.optimize.fmin
        Ntheta: the number of theta points for the azimuthal integration.
            Should be even!
        dmin: pixels nearer to the origin than this will be excluded from
            the azimuthal integration
        dmax: pixels farther from the origin than this will be excluded from
            the azimuthal integration
        extent: approximate distance of the current and the real origin in pixels.
            Too high a value makes the fitting procedure unstable. Too low a value
            does not permit to move away the current origin.
        callback: callback function (expects no arguments)

    Output:
        a vector of length 2 with the x and y coordinates of the origin,
        starting from 1
    """
    if Ntheta % 2:
        raise ValueError('Ntheta should be even!')
    if mask is None:
        mask = np.ones_like(data).astype(np.uint8)
    data = data.astype(np.double)
    # the function to minimize is the sum of squared difference of two halves of
    # the azimuthal integral.

    def targetfunc(orig, data, mask, orig_orig, callback):
        I = azimintpix(data, None, orig[
                       0] + orig_orig[0], orig[1] + orig_orig[1], mask, Ntheta, dmin, dmax)[1]
        if callback is not None:
            callback()
        # integer division: the two half-turns are compared element-wise
        return np.sum((I[:Ntheta // 2] - I[Ntheta // 2:]) ** 2) / Ntheta
    orig1 = scipy.optimize.fmin(targetfunc, np.array([extent, extent]),
                                args=(data, 1 - mask, np.array(orig_initial) -
                                      extent, callback), maxiter=maxiter, disp=0)
    return orig1 + np.array(orig_initial) - extent
[ "def", "findbeam_azimuthal_fold", "(", "data", ",", "orig_initial", ",", "mask", "=", "None", ",", "maxiter", "=", "100", ",", "Ntheta", "=", "50", ",", "dmin", "=", "0", ",", "dmax", "=", "np", ".", "inf", ",", "extent", "=", "10", ",", "callback", "=", "None", ")", ":", "if", "Ntheta", "%", "2", ":", "raise", "ValueError", "(", "'Ntheta should be even!'", ")", "if", "mask", "is", "None", ":", "mask", "=", "np", ".", "ones_like", "(", "data", ")", ".", "astype", "(", "np", ".", "uint8", ")", "data", "=", "data", ".", "astype", "(", "np", ".", "double", ")", "# the function to minimize is the sum of squared difference of two halves of", "# the azimuthal integral.", "def", "targetfunc", "(", "orig", ",", "data", ",", "mask", ",", "orig_orig", ",", "callback", ")", ":", "I", "=", "azimintpix", "(", "data", ",", "None", ",", "orig", "[", "0", "]", "+", "orig_orig", "[", "0", "]", ",", "orig", "[", "1", "]", "+", "orig_orig", "[", "1", "]", ",", "mask", ",", "Ntheta", ",", "dmin", ",", "dmax", ")", "[", "1", "]", "if", "callback", "is", "not", "None", ":", "callback", "(", ")", "return", "np", ".", "sum", "(", "(", "I", "[", ":", "Ntheta", "/", "2", "]", "-", "I", "[", "Ntheta", "/", "2", ":", "]", ")", "**", "2", ")", "/", "Ntheta", "orig1", "=", "scipy", ".", "optimize", ".", "fmin", "(", "targetfunc", ",", "np", ".", "array", "(", "[", "extent", ",", "extent", "]", ")", ",", "args", "=", "(", "data", ",", "1", "-", "mask", ",", "np", ".", "array", "(", "orig_initial", ")", "-", "extent", ",", "callback", ")", ",", "maxiter", "=", "maxiter", ",", "disp", "=", "0", ")", "return", "orig1", "+", "np", ".", "array", "(", "orig_initial", ")", "-", "extent" ]
Find beam center using azimuthal integration and folding Inputs: data: scattering matrix orig_initial: estimated value for x (row) and y (column) coordinates of the beam center, starting from 1. mask: mask matrix. If None, nothing will be masked. Otherwise it should be of the same size as data. Nonzero means non-masked. maxiter: maximum number of iterations for scipy.optimize.fmin Ntheta: the number of theta points for the azimuthal integration. Should be even! dmin: pixels nearer to the origin than this will be excluded from the azimuthal integration dmax: pixels farther from the origin than this will be excluded from the azimuthal integration extent: approximate distance of the current and the real origin in pixels. Too high a value makes the fitting procedure unstable. Too low a value does not permit to move away the current origin. callback: callback function (expects no arguments) Output: a vector of length 2 with the x and y coordinates of the origin, starting from 1
[ "Find", "beam", "center", "using", "azimuthal", "integration", "and", "folding" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/centering.py#L160-L201
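The folding metric that findbeam_azimuthal_fold minimises compares opposite half-turns of the azimuthal average; for a centred pattern the halves coincide and the metric vanishes. A toy check of the metric alone (note the integer division, matching the fix above):

import numpy as np

Ntheta = 50
theta = np.linspace(0, 2 * np.pi, Ntheta, endpoint=False)
for I in (np.full(Ntheta, 100.0),              # centred: flat azimuthal curve
          100 + 5 * np.sin(theta)):            # off-centre: one oscillation
    print(np.sum((I[:Ntheta // 2] - I[Ntheta // 2:]) ** 2) / Ntheta)
# prints 0.0 for the centred curve and 25.0 for the off-centre one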
awacha/sastool
sastool/utils2d/centering.py
findbeam_semitransparent
def findbeam_semitransparent(data, pri, threshold=0.05):
    """Find beam with 2D weighting of semitransparent beamstop area

    Inputs:
        data: scattering matrix
        pri: list of four: [xmin,xmax,ymin,ymax] for the borders of the beam
            area under the semitransparent beamstop.
            X corresponds to the column index (ie. A[Y,X] is the element of A
            from the Xth column and the Yth row). You can get these by zooming
            on the figure and retrieving the result of axis() (like in Matlab)
        threshold: do not count pixels if their intensity falls below
            max_intensity*threshold. max_intensity is the highest count rate
            in the current row or column, respectively. Set None to disable
            this feature.

    Outputs: bcx,bcy
        the x and y coordinates of the primary beam
    """
    # cast to int: the bounds are used as array indices below
    rowmin = int(np.floor(min(pri[2:])))
    rowmax = int(np.ceil(max(pri[2:])))
    colmin = int(np.floor(min(pri[:2])))
    colmax = int(np.ceil(max(pri[:2])))
    if threshold is not None:
        # beam area on the scattering image
        B = data[rowmin:rowmax, colmin:colmax]
        # row and column indices
        Ri = np.arange(rowmin, rowmax)
        Ci = np.arange(colmin, colmax)
        Ravg = B.mean(1)  # average over column index, will be a concave curve
        Cavg = B.mean(0)  # average over row index, will be a concave curve
        # find the maxima in both directions and their positions, the latter
        # in absolute row/column coordinates so they are comparable with Ri
        # and Ci below
        maxR = Ravg.max()
        maxRpos = Ri[Ravg.argmax()]
        maxC = Cavg.max()
        maxCpos = Ci[Cavg.argmax()]
        # cut off pixels which are smaller than threshold*peak_height
        Rmin = Ri[
            ((Ravg - Ravg[0]) >= ((maxR - Ravg[0]) * threshold)) & (Ri < maxRpos)][0]
        Rmax = Ri[
            ((Ravg - Ravg[-1]) >= ((maxR - Ravg[-1]) * threshold)) & (Ri > maxRpos)][-1]
        Cmin = Ci[
            ((Cavg - Cavg[0]) >= ((maxC - Cavg[0]) * threshold)) & (Ci < maxCpos)][0]
        Cmax = Ci[
            ((Cavg - Cavg[-1]) >= ((maxC - Cavg[-1]) * threshold)) & (Ci > maxCpos)][-1]
    else:
        Rmin = rowmin
        Rmax = rowmax
        Cmin = colmin
        Cmax = colmax
    d = data[Rmin:Rmax + 1, Cmin:Cmax + 1]
    x = np.arange(Rmin, Rmax + 1)
    y = np.arange(Cmin, Cmax + 1)
    bcx = (d.sum(1) * x).sum() / d.sum()
    bcy = (d.sum(0) * y).sum() / d.sum()
    return bcx, bcy
python
def findbeam_semitransparent(data, pri, threshold=0.05):
    """Find beam with 2D weighting of semitransparent beamstop area

    Inputs:
        data: scattering matrix
        pri: list of four: [xmin,xmax,ymin,ymax] for the borders of the beam
            area under the semitransparent beamstop.
            X corresponds to the column index (ie. A[Y,X] is the element of A
            from the Xth column and the Yth row). You can get these by zooming
            on the figure and retrieving the result of axis() (like in Matlab)
        threshold: do not count pixels if their intensity falls below
            max_intensity*threshold. max_intensity is the highest count rate
            in the current row or column, respectively. Set None to disable
            this feature.

    Outputs: bcx,bcy
        the x and y coordinates of the primary beam
    """
    # cast to int: the bounds are used as array indices below
    rowmin = int(np.floor(min(pri[2:])))
    rowmax = int(np.ceil(max(pri[2:])))
    colmin = int(np.floor(min(pri[:2])))
    colmax = int(np.ceil(max(pri[:2])))
    if threshold is not None:
        # beam area on the scattering image
        B = data[rowmin:rowmax, colmin:colmax]
        # row and column indices
        Ri = np.arange(rowmin, rowmax)
        Ci = np.arange(colmin, colmax)
        Ravg = B.mean(1)  # average over column index, will be a concave curve
        Cavg = B.mean(0)  # average over row index, will be a concave curve
        # find the maxima in both directions and their positions, the latter
        # in absolute row/column coordinates so they are comparable with Ri
        # and Ci below
        maxR = Ravg.max()
        maxRpos = Ri[Ravg.argmax()]
        maxC = Cavg.max()
        maxCpos = Ci[Cavg.argmax()]
        # cut off pixels which are smaller than threshold*peak_height
        Rmin = Ri[
            ((Ravg - Ravg[0]) >= ((maxR - Ravg[0]) * threshold)) & (Ri < maxRpos)][0]
        Rmax = Ri[
            ((Ravg - Ravg[-1]) >= ((maxR - Ravg[-1]) * threshold)) & (Ri > maxRpos)][-1]
        Cmin = Ci[
            ((Cavg - Cavg[0]) >= ((maxC - Cavg[0]) * threshold)) & (Ci < maxCpos)][0]
        Cmax = Ci[
            ((Cavg - Cavg[-1]) >= ((maxC - Cavg[-1]) * threshold)) & (Ci > maxCpos)][-1]
    else:
        Rmin = rowmin
        Rmax = rowmax
        Cmin = colmin
        Cmax = colmax
    d = data[Rmin:Rmax + 1, Cmin:Cmax + 1]
    x = np.arange(Rmin, Rmax + 1)
    y = np.arange(Cmin, Cmax + 1)
    bcx = (d.sum(1) * x).sum() / d.sum()
    bcy = (d.sum(0) * y).sum() / d.sum()
    return bcx, bcy
[ "def", "findbeam_semitransparent", "(", "data", ",", "pri", ",", "threshold", "=", "0.05", ")", ":", "rowmin", "=", "np", ".", "floor", "(", "min", "(", "pri", "[", "2", ":", "]", ")", ")", "rowmax", "=", "np", ".", "ceil", "(", "max", "(", "pri", "[", "2", ":", "]", ")", ")", "colmin", "=", "np", ".", "floor", "(", "min", "(", "pri", "[", ":", "2", "]", ")", ")", "colmax", "=", "np", ".", "ceil", "(", "max", "(", "pri", "[", ":", "2", "]", ")", ")", "if", "threshold", "is", "not", "None", ":", "# beam area on the scattering image", "B", "=", "data", "[", "rowmin", ":", "rowmax", ",", "colmin", ":", "colmax", "]", "# print B.shape", "# row and column indices", "Ri", "=", "np", ".", "arange", "(", "rowmin", ",", "rowmax", ")", "Ci", "=", "np", ".", "arange", "(", "colmin", ",", "colmax", ")", "# print len(Ri)", "# print len(Ci)", "Ravg", "=", "B", ".", "mean", "(", "1", ")", "# average over column index, will be a concave curve", "Cavg", "=", "B", ".", "mean", "(", "0", ")", "# average over row index, will be a concave curve", "# find the maxima im both directions and their positions", "maxR", "=", "Ravg", ".", "max", "(", ")", "maxRpos", "=", "Ravg", ".", "argmax", "(", ")", "maxC", "=", "Cavg", ".", "max", "(", ")", "maxCpos", "=", "Cavg", ".", "argmax", "(", ")", "# cut off pixels which are smaller than threshold*peak_height", "Rmin", "=", "Ri", "[", "(", "(", "Ravg", "-", "Ravg", "[", "0", "]", ")", ">=", "(", "(", "maxR", "-", "Ravg", "[", "0", "]", ")", "*", "threshold", ")", ")", "&", "(", "Ri", "<", "maxRpos", ")", "]", "[", "0", "]", "Rmax", "=", "Ri", "[", "(", "(", "Ravg", "-", "Ravg", "[", "-", "1", "]", ")", ">=", "(", "(", "maxR", "-", "Ravg", "[", "-", "1", "]", ")", "*", "threshold", ")", ")", "&", "(", "Ri", ">", "maxRpos", ")", "]", "[", "-", "1", "]", "Cmin", "=", "Ci", "[", "(", "(", "Cavg", "-", "Cavg", "[", "0", "]", ")", ">=", "(", "(", "maxC", "-", "Cavg", "[", "0", "]", ")", "*", "threshold", ")", ")", "&", "(", "Ci", "<", "maxCpos", ")", "]", "[", "0", "]", "Cmax", "=", "Ci", "[", "(", "(", "Cavg", "-", "Cavg", "[", "-", "1", "]", ")", ">=", "(", "(", "maxC", "-", "Cavg", "[", "-", "1", "]", ")", "*", "threshold", ")", ")", "&", "(", "Ci", ">", "maxCpos", ")", "]", "[", "-", "1", "]", "else", ":", "Rmin", "=", "rowmin", "Rmax", "=", "rowmax", "Cmin", "=", "colmin", "Cmax", "=", "colmax", "d", "=", "data", "[", "Rmin", ":", "Rmax", "+", "1", ",", "Cmin", ":", "Cmax", "+", "1", "]", "x", "=", "np", ".", "arange", "(", "Rmin", ",", "Rmax", "+", "1", ")", "y", "=", "np", ".", "arange", "(", "Cmin", ",", "Cmax", "+", "1", ")", "bcx", "=", "(", "d", ".", "sum", "(", "1", ")", "*", "x", ")", ".", "sum", "(", ")", "/", "d", ".", "sum", "(", ")", "bcy", "=", "(", "d", ".", "sum", "(", "0", ")", "*", "y", ")", ".", "sum", "(", ")", "/", "d", ".", "sum", "(", ")", "return", "bcx", ",", "bcy" ]
Find beam with 2D weighting of semitransparent beamstop area Inputs: data: scattering matrix pri: list of four: [xmin,xmax,ymin,ymax] for the borders of the beam area under the semitransparent beamstop. X corresponds to the column index (ie. A[Y,X] is the element of A from the Xth column and the Yth row). You can get these by zooming on the figure and retrieving the result of axis() (like in Matlab) threshold: do not count pixels if their intensity falls below max_intensity*threshold. max_intensity is the highest count rate in the current row or column, respectively. Set None to disable this feature. Outputs: bcx,bcy the x and y coordinates of the primary beam
[ "Find", "beam", "with", "2D", "weighting", "of", "semitransparent", "beamstop", "area" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/centering.py#L204-L262
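A synthetic usage sketch for findbeam_semitransparent (it relies on the integer casts from the fix above; the pri box follows the [xmin, xmax, ymin, ymax] axis() order from the docstring, with x as the column index):

import numpy as np
from sastool.utils2d.centering import findbeam_semitransparent

row, col = np.mgrid[0:256, 0:256].astype(float)
data = np.exp(-((row - 120.0) ** 2 + (col - 90.0) ** 2) / 40.0)  # spot at row 120, col 90
bcx, bcy = findbeam_semitransparent(data, [70, 110, 100, 140], threshold=None)
print(bcx, bcy)   # roughly 120.0 (row) and 90.0 (column)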
awacha/sastool
sastool/utils2d/centering.py
findbeam_radialpeak
def findbeam_radialpeak(data, orig_initial, mask, rmin, rmax, maxiter=100, drive_by='amplitude', extent=10, callback=None): """Find the beam by minimizing the width of a peak in the radial average. Inputs: data: scattering matrix orig_initial: first guess for the origin mask: mask matrix. Nonzero is non-masked. rmin,rmax: distance from the origin (in pixels) of the peak range. drive_by: 'hwhm' to minimize the hwhm of the peak or 'amplitude' to maximize the peak amplitude extent: approximate distance of the current and the real origin in pixels. Too high a value makes the fitting procedure unstable. Too low a value does not permit to move away the current origin. callback: callback function (expects no arguments) Outputs: the beam coordinates Notes: A Gaussian will be fitted. """ orig_initial = np.array(orig_initial) mask = 1 - mask.astype(np.uint8) data = data.astype(np.double) pix = np.arange(rmin * 1.0, rmax * 1.0, 1) if drive_by.lower() == 'hwhm': def targetfunc(orig, data, mask, orig_orig, callback): I = radintpix( data, None, orig[0] + orig_orig[0], orig[1] + orig_orig[1], mask, pix)[1] hwhm = float(misc.findpeak_single(pix, I)[1]) # print orig[0] + orig_orig[0], orig[1] + orig_orig[1], p if callback is not None: callback() return abs(hwhm) elif drive_by.lower() == 'amplitude': def targetfunc(orig, data, mask, orig_orig, callback): I = radintpix( data, None, orig[0] + orig_orig[0], orig[1] + orig_orig[1], mask, pix)[1] fp = misc.findpeak_single(pix, I) height = -float(fp[2] + fp[3]) # print orig[0] + orig_orig[0], orig[1] + orig_orig[1], p if callback is not None: callback() return height else: raise ValueError('Invalid argument for drive_by %s' % drive_by) orig1 = scipy.optimize.fmin(targetfunc, np.array([extent, extent]), args=( data, mask, orig_initial - extent, callback), maxiter=maxiter, disp=0) return np.array(orig_initial) - extent + orig1
python
def findbeam_radialpeak(data, orig_initial, mask, rmin, rmax, maxiter=100, drive_by='amplitude', extent=10, callback=None): """Find the beam by minimizing the width of a peak in the radial average. Inputs: data: scattering matrix orig_initial: first guess for the origin mask: mask matrix. Nonzero is non-masked. rmin,rmax: distance from the origin (in pixels) of the peak range. drive_by: 'hwhm' to minimize the hwhm of the peak or 'amplitude' to maximize the peak amplitude extent: approximate distance of the current and the real origin in pixels. Too high a value makes the fitting procedure unstable. Too low a value does not permit to move away the current origin. callback: callback function (expects no arguments) Outputs: the beam coordinates Notes: A Gaussian will be fitted. """ orig_initial = np.array(orig_initial) mask = 1 - mask.astype(np.uint8) data = data.astype(np.double) pix = np.arange(rmin * 1.0, rmax * 1.0, 1) if drive_by.lower() == 'hwhm': def targetfunc(orig, data, mask, orig_orig, callback): I = radintpix( data, None, orig[0] + orig_orig[0], orig[1] + orig_orig[1], mask, pix)[1] hwhm = float(misc.findpeak_single(pix, I)[1]) # print orig[0] + orig_orig[0], orig[1] + orig_orig[1], p if callback is not None: callback() return abs(hwhm) elif drive_by.lower() == 'amplitude': def targetfunc(orig, data, mask, orig_orig, callback): I = radintpix( data, None, orig[0] + orig_orig[0], orig[1] + orig_orig[1], mask, pix)[1] fp = misc.findpeak_single(pix, I) height = -float(fp[2] + fp[3]) # print orig[0] + orig_orig[0], orig[1] + orig_orig[1], p if callback is not None: callback() return height else: raise ValueError('Invalid argument for drive_by %s' % drive_by) orig1 = scipy.optimize.fmin(targetfunc, np.array([extent, extent]), args=( data, mask, orig_initial - extent, callback), maxiter=maxiter, disp=0) return np.array(orig_initial) - extent + orig1
[ "def", "findbeam_radialpeak", "(", "data", ",", "orig_initial", ",", "mask", ",", "rmin", ",", "rmax", ",", "maxiter", "=", "100", ",", "drive_by", "=", "'amplitude'", ",", "extent", "=", "10", ",", "callback", "=", "None", ")", ":", "orig_initial", "=", "np", ".", "array", "(", "orig_initial", ")", "mask", "=", "1", "-", "mask", ".", "astype", "(", "np", ".", "uint8", ")", "data", "=", "data", ".", "astype", "(", "np", ".", "double", ")", "pix", "=", "np", ".", "arange", "(", "rmin", "*", "1.0", ",", "rmax", "*", "1.0", ",", "1", ")", "if", "drive_by", ".", "lower", "(", ")", "==", "'hwhm'", ":", "def", "targetfunc", "(", "orig", ",", "data", ",", "mask", ",", "orig_orig", ",", "callback", ")", ":", "I", "=", "radintpix", "(", "data", ",", "None", ",", "orig", "[", "0", "]", "+", "orig_orig", "[", "0", "]", ",", "orig", "[", "1", "]", "+", "orig_orig", "[", "1", "]", ",", "mask", ",", "pix", ")", "[", "1", "]", "hwhm", "=", "float", "(", "misc", ".", "findpeak_single", "(", "pix", ",", "I", ")", "[", "1", "]", ")", "# print orig[0] + orig_orig[0], orig[1] + orig_orig[1], p", "if", "callback", "is", "not", "None", ":", "callback", "(", ")", "return", "abs", "(", "hwhm", ")", "elif", "drive_by", ".", "lower", "(", ")", "==", "'amplitude'", ":", "def", "targetfunc", "(", "orig", ",", "data", ",", "mask", ",", "orig_orig", ",", "callback", ")", ":", "I", "=", "radintpix", "(", "data", ",", "None", ",", "orig", "[", "0", "]", "+", "orig_orig", "[", "0", "]", ",", "orig", "[", "1", "]", "+", "orig_orig", "[", "1", "]", ",", "mask", ",", "pix", ")", "[", "1", "]", "fp", "=", "misc", ".", "findpeak_single", "(", "pix", ",", "I", ")", "height", "=", "-", "float", "(", "fp", "[", "2", "]", "+", "fp", "[", "3", "]", ")", "# print orig[0] + orig_orig[0], orig[1] + orig_orig[1], p", "if", "callback", "is", "not", "None", ":", "callback", "(", ")", "return", "height", "else", ":", "raise", "ValueError", "(", "'Invalid argument for drive_by %s'", "%", "drive_by", ")", "orig1", "=", "scipy", ".", "optimize", ".", "fmin", "(", "targetfunc", ",", "np", ".", "array", "(", "[", "extent", ",", "extent", "]", ")", ",", "args", "=", "(", "data", ",", "mask", ",", "orig_initial", "-", "extent", ",", "callback", ")", ",", "maxiter", "=", "maxiter", ",", "disp", "=", "0", ")", "return", "np", ".", "array", "(", "orig_initial", ")", "-", "extent", "+", "orig1" ]
Find the beam by minimizing the width of a peak in the radial average.

Inputs:
    data: scattering matrix
    orig_initial: first guess for the origin
    mask: mask matrix. Nonzero is non-masked.
    rmin,rmax: distance from the origin (in pixels) of the peak range.
    drive_by: 'hwhm' to minimize the HWHM (half width at half maximum) of the
        peak, or 'amplitude' to maximize the peak amplitude
    extent: approximate distance of the current and the real origin in pixels.
        Too high a value makes the fitting procedure unstable. Too low a value
        does not allow the origin to move far enough from the initial guess.
    callback: callback function (expects no arguments)

Outputs: the beam coordinates

Notes: A Gaussian will be fitted.
[ "Find", "the", "beam", "by", "minimizing", "the", "width", "of", "a", "peak", "in", "the", "radial", "average", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/centering.py#L265-L315
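A minimal usage sketch of findbeam_radialpeak (not from the repository; the image, mask and radial range are hypothetical stand-ins for real data):

    import numpy as np
    from sastool.utils2d.centering import findbeam_radialpeak

    img = np.random.poisson(100.0, (512, 512)).astype(np.double)  # placeholder pattern
    mask = np.ones_like(img, dtype=np.uint8)                      # nonzero = valid pixel

    # Refine a rough center guess, driving on a peak expected 50-120 px from it:
    center = findbeam_radialpeak(img, (256.0, 256.0), mask, rmin=50, rmax=120,
                                 drive_by='amplitude', extent=10)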
awacha/sastool
sastool/utils2d/centering.py
findbeam_Guinier
def findbeam_Guinier(data, orig_initial, mask, rmin, rmax, maxiter=100, extent=10, callback=None):
    """Find the beam by minimizing the width of a Gaussian centered at the
    origin (i.e. maximizing the radius of gyration in a Guinier scattering).

    Inputs:
        data: scattering matrix
        orig_initial: first guess for the origin
        mask: mask matrix. Nonzero is non-masked.
        rmin,rmax: distance from the origin (in pixels) of the Guinier range.
        extent: approximate distance of the current and the real origin in pixels.
            Too high a value makes the fitting procedure unstable. Too low a value
            does not permit to move away the current origin.
        callback: callback function (expects no arguments)

    Outputs: the beam coordinates

    Notes: A Gaussian with its center at the origin will be fitted.
    """
    orig_initial = np.array(orig_initial)
    mask = 1 - mask.astype(np.uint8)
    data = data.astype(np.double)
    pix = np.arange(rmin * 1.0, rmax * 1.0, 1)
    pix2 = pix ** 2

    def targetfunc(orig, data, mask, orig_orig, callback):
        I = radintpix(
            data, None, orig[0] + orig_orig[0], orig[1] + orig_orig[1], mask, pix)[1]
        p = np.polyfit(pix2, np.log(I), 1)[0]
        if callback is not None:
            callback()
        return p
    orig1 = scipy.optimize.fmin(targetfunc, np.array([extent, extent]), args=(
        data, mask, orig_initial - extent, callback), maxiter=maxiter, disp=0)
    return np.array(orig_initial) - extent + orig1
python
def findbeam_Guinier(data, orig_initial, mask, rmin, rmax, maxiter=100, extent=10, callback=None):
    """Find the beam by minimizing the width of a Gaussian centered at the
    origin (i.e. maximizing the radius of gyration in a Guinier scattering).

    Inputs:
        data: scattering matrix
        orig_initial: first guess for the origin
        mask: mask matrix. Nonzero is non-masked.
        rmin,rmax: distance from the origin (in pixels) of the Guinier range.
        extent: approximate distance of the current and the real origin in pixels.
            Too high a value makes the fitting procedure unstable. Too low a value
            does not permit to move away the current origin.
        callback: callback function (expects no arguments)

    Outputs: the beam coordinates

    Notes: A Gaussian with its center at the origin will be fitted.
    """
    orig_initial = np.array(orig_initial)
    mask = 1 - mask.astype(np.uint8)
    data = data.astype(np.double)
    pix = np.arange(rmin * 1.0, rmax * 1.0, 1)
    pix2 = pix ** 2

    def targetfunc(orig, data, mask, orig_orig, callback):
        I = radintpix(
            data, None, orig[0] + orig_orig[0], orig[1] + orig_orig[1], mask, pix)[1]
        p = np.polyfit(pix2, np.log(I), 1)[0]
        if callback is not None:
            callback()
        return p
    orig1 = scipy.optimize.fmin(targetfunc, np.array([extent, extent]), args=(
        data, mask, orig_initial - extent, callback), maxiter=maxiter, disp=0)
    return np.array(orig_initial) - extent + orig1
[ "def", "findbeam_Guinier", "(", "data", ",", "orig_initial", ",", "mask", ",", "rmin", ",", "rmax", ",", "maxiter", "=", "100", ",", "extent", "=", "10", ",", "callback", "=", "None", ")", ":", "orig_initial", "=", "np", ".", "array", "(", "orig_initial", ")", "mask", "=", "1", "-", "mask", ".", "astype", "(", "np", ".", "uint8", ")", "data", "=", "data", ".", "astype", "(", "np", ".", "double", ")", "pix", "=", "np", ".", "arange", "(", "rmin", "*", "1.0", ",", "rmax", "*", "1.0", ",", "1", ")", "pix2", "=", "pix", "**", "2", "def", "targetfunc", "(", "orig", ",", "data", ",", "mask", ",", "orig_orig", ",", "callback", ")", ":", "I", "=", "radintpix", "(", "data", ",", "None", ",", "orig", "[", "0", "]", "+", "orig_orig", "[", "0", "]", ",", "orig", "[", "1", "]", "+", "orig_orig", "[", "1", "]", ",", "mask", ",", "pix", ")", "[", "1", "]", "p", "=", "np", ".", "polyfit", "(", "pix2", ",", "np", ".", "log", "(", "I", ")", ",", "1", ")", "[", "0", "]", "if", "callback", "is", "not", "None", ":", "callback", "(", ")", "return", "p", "orig1", "=", "scipy", ".", "optimize", ".", "fmin", "(", "targetfunc", ",", "np", ".", "array", "(", "[", "extent", ",", "extent", "]", ")", ",", "args", "=", "(", "data", ",", "mask", ",", "orig_initial", "-", "extent", ",", "callback", ")", ",", "maxiter", "=", "maxiter", ",", "disp", "=", "0", ")", "return", "np", ".", "array", "(", "orig_initial", ")", "-", "extent", "+", "orig1" ]
Find the beam by minimizing the width of a Gaussian centered at the origin
(i.e. maximizing the radius of gyration in a Guinier scattering).

Inputs:
    data: scattering matrix
    orig_initial: first guess for the origin
    mask: mask matrix. Nonzero is non-masked.
    rmin,rmax: distance from the origin (in pixels) of the Guinier range.
    extent: approximate distance of the current and the real origin in pixels.
        Too high a value makes the fitting procedure unstable. Too low a value
        does not allow the origin to move far enough from the initial guess.
    callback: callback function (expects no arguments)

Outputs: the beam coordinates

Notes: A Gaussian with its center at the origin will be fitted.
[ "Find", "the", "beam", "by", "minimizing", "the", "width", "of", "a", "Gaussian", "centered", "at", "the", "origin", "(", "i", ".", "e", ".", "maximizing", "the", "radius", "of", "gyration", "in", "a", "Guinier", "scattering", ")", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/centering.py#L318-L355
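Why driving on the fitted slope centers the beam (a reading of the code above, not text from the repository): in the Guinier regime ln I(q) ≈ ln I0 − Rg²q²/3, so the first coefficient returned by np.polyfit(pix², ln I, 1) is −Rg²/3. An off-center origin azimuthally smears the pattern and flattens the apparent decay, so the slope is most negative (Rg is maximal) at the true center — which is exactly what scipy.optimize.fmin finds by minimizing targetfunc. A self-contained check of the slope identity:

    import numpy as np

    # ln I(q) = ln I0 - Rg^2 q^2 / 3, so the log-vs-q^2 slope recovers -Rg^2/3:
    q = np.linspace(0.01, 0.1, 50)
    I0, Rg = 100.0, 30.0
    I = I0 * np.exp(-Rg ** 2 * q ** 2 / 3)
    slope = np.polyfit(q ** 2, np.log(I), 1)[0]
    assert np.isclose(slope, -Rg ** 2 / 3)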
awacha/sastool
sastool/utils2d/centering.py
findbeam_powerlaw
def findbeam_powerlaw(data, orig_initial, mask, rmin, rmax, maxiter=100, drive_by='R2', extent=10, callback=None):
    """Find the beam by fitting a power-law to the radial average and
    optimizing the quality of the fit.

    Inputs:
        data: scattering matrix
        orig_initial: first guess for the origin
        mask: mask matrix. Nonzero is non-masked.
        rmin,rmax: distance from the origin (in pixels) of the fitting range
        drive_by: 'R2' or 'Chi2'
        extent: approximate distance of the current and the real origin in pixels.
            Too high a value makes the fitting procedure unstable. Too low a value
            does not permit to move away the current origin.
        callback: callback function (expects no arguments)

    Outputs: the beam coordinates

    Notes: A power-law will be fitted
    """
    orig_initial = np.array(orig_initial)
    mask = 1 - mask.astype(np.uint8)
    data = data.astype(np.double)
    pix = np.arange(rmin * 1.0, rmax * 1.0, 1)

    def targetfunc(orig, data, mask, orig_orig, callback):
        I, E = radintpix(
            data, None, orig[0] + orig_orig[0], orig[1] + orig_orig[1], mask, pix)[1:3]
        p, dp, stat = misc.easylsq.nlsq_fit(
            pix, I, E, lambda q, A, alpha: A * q ** alpha, [1.0, -3.0])
        if callback is not None:
            callback()
        # print(orig, orig_orig, orig + orig_orig, stat[drive_by])
        if drive_by == 'R2':
            return 1 - stat['R2']
        elif drive_by.startswith('Chi2'):
            return stat[drive_by]
    orig1 = scipy.optimize.fmin(targetfunc, np.array([extent, extent]), args=(
        data, mask, orig_initial - extent, callback), maxiter=maxiter, disp=False)
    return np.array(orig_initial) - extent + orig1
python
def findbeam_powerlaw(data, orig_initial, mask, rmin, rmax, maxiter=100, drive_by='R2', extent=10, callback=None):
    """Find the beam by fitting a power-law to the radial average and
    optimizing the quality of the fit.

    Inputs:
        data: scattering matrix
        orig_initial: first guess for the origin
        mask: mask matrix. Nonzero is non-masked.
        rmin,rmax: distance from the origin (in pixels) of the fitting range
        drive_by: 'R2' or 'Chi2'
        extent: approximate distance of the current and the real origin in pixels.
            Too high a value makes the fitting procedure unstable. Too low a value
            does not permit to move away the current origin.
        callback: callback function (expects no arguments)

    Outputs: the beam coordinates

    Notes: A power-law will be fitted
    """
    orig_initial = np.array(orig_initial)
    mask = 1 - mask.astype(np.uint8)
    data = data.astype(np.double)
    pix = np.arange(rmin * 1.0, rmax * 1.0, 1)

    def targetfunc(orig, data, mask, orig_orig, callback):
        I, E = radintpix(
            data, None, orig[0] + orig_orig[0], orig[1] + orig_orig[1], mask, pix)[1:3]
        p, dp, stat = misc.easylsq.nlsq_fit(
            pix, I, E, lambda q, A, alpha: A * q ** alpha, [1.0, -3.0])
        if callback is not None:
            callback()
        # print(orig, orig_orig, orig + orig_orig, stat[drive_by])
        if drive_by == 'R2':
            return 1 - stat['R2']
        elif drive_by.startswith('Chi2'):
            return stat[drive_by]
    orig1 = scipy.optimize.fmin(targetfunc, np.array([extent, extent]), args=(
        data, mask, orig_initial - extent, callback), maxiter=maxiter, disp=False)
    return np.array(orig_initial) - extent + orig1
[ "def", "findbeam_powerlaw", "(", "data", ",", "orig_initial", ",", "mask", ",", "rmin", ",", "rmax", ",", "maxiter", "=", "100", ",", "drive_by", "=", "'R2'", ",", "extent", "=", "10", ",", "callback", "=", "None", ")", ":", "orig_initial", "=", "np", ".", "array", "(", "orig_initial", ")", "mask", "=", "1", "-", "mask", ".", "astype", "(", "np", ".", "uint8", ")", "data", "=", "data", ".", "astype", "(", "np", ".", "double", ")", "pix", "=", "np", ".", "arange", "(", "rmin", "*", "1.0", ",", "rmax", "*", "1.0", ",", "1", ")", "def", "targetfunc", "(", "orig", ",", "data", ",", "mask", ",", "orig_orig", ",", "callback", ")", ":", "I", ",", "E", "=", "radintpix", "(", "data", ",", "None", ",", "orig", "[", "0", "]", "+", "orig_orig", "[", "0", "]", ",", "orig", "[", "1", "]", "+", "orig_orig", "[", "1", "]", ",", "mask", ",", "pix", ")", "[", "1", ":", "3", "]", "p", ",", "dp", ",", "stat", "=", "misc", ".", "easylsq", ".", "nlsq_fit", "(", "pix", ",", "I", ",", "E", ",", "lambda", "q", ",", "A", ",", "alpha", ":", "A", "*", "q", "**", "alpha", ",", "[", "1.0", ",", "-", "3.0", "]", ")", "if", "callback", "is", "not", "None", ":", "callback", "(", ")", "# print(orig, orig_orig, orig + orig_orig, stat[drive_by])", "if", "drive_by", "==", "'R2'", ":", "return", "1", "-", "stat", "[", "'R2'", "]", "elif", "drive_by", ".", "startswith", "(", "'Chi2'", ")", ":", "return", "stat", "[", "drive_by", "]", "orig1", "=", "scipy", ".", "optimize", ".", "fmin", "(", "targetfunc", ",", "np", ".", "array", "(", "[", "extent", ",", "extent", "]", ")", ",", "args", "=", "(", "data", ",", "mask", ",", "orig_initial", "-", "extent", ",", "callback", ")", ",", "maxiter", "=", "maxiter", ",", "disp", "=", "False", ")", "return", "np", ".", "array", "(", "orig_initial", ")", "-", "extent", "+", "orig1" ]
Find the beam by fitting a power-law to the radial average and optimizing
the quality of the fit.

Inputs:
    data: scattering matrix
    orig_initial: first guess for the origin
    mask: mask matrix. Nonzero is non-masked.
    rmin,rmax: distance from the origin (in pixels) of the fitting range
    drive_by: 'R2' or 'Chi2'
    extent: approximate distance of the current and the real origin in pixels.
        Too high a value makes the fitting procedure unstable. Too low a value
        does not allow the origin to move far enough from the initial guess.
    callback: callback function (expects no arguments)

Outputs: the beam coordinates

Notes: A power-law will be fitted
[ "Find", "the", "beam", "by", "minimizing", "the", "width", "of", "a", "Gaussian", "centered", "at", "the", "origin", "(", "i", ".", "e", ".", "maximizing", "the", "radius", "of", "gyration", "in", "a", "Guinier", "scattering", ")", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/centering.py#L358-L400
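A hedged usage sketch of findbeam_powerlaw (placeholders as before). Note how drive_by is interpreted inside targetfunc: 'R2' minimizes 1 − R² (i.e. maximizes the coefficient of determination of the power-law fit), while any statistics key starting with 'Chi2' is minimized directly:

    import numpy as np
    from sastool.utils2d.centering import findbeam_powerlaw

    img = np.random.poisson(100.0, (512, 512)).astype(np.double)   # placeholder
    mask = np.ones_like(img, dtype=np.uint8)

    # Optimize the center so that I(q) ~ A*q**alpha fits best between 80 and 200 px:
    center = findbeam_powerlaw(img, (250.0, 260.0), mask, rmin=80, rmax=200,
                               drive_by='R2', extent=10)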
awacha/sastool
sastool/io/credo_cct/header.py
Header.beamcenterx
def beamcenterx(self) -> ErrorValue: """X (column) coordinate of the beam center, pixel units, 0-based.""" try: return ErrorValue(self._data['geometry']['beamposy'], self._data['geometry']['beamposy.err']) except KeyError: return ErrorValue(self._data['geometry']['beamposy'], 0.0)
python
def beamcenterx(self) -> ErrorValue: """X (column) coordinate of the beam center, pixel units, 0-based.""" try: return ErrorValue(self._data['geometry']['beamposy'], self._data['geometry']['beamposy.err']) except KeyError: return ErrorValue(self._data['geometry']['beamposy'], 0.0)
[ "def", "beamcenterx", "(", "self", ")", "->", "ErrorValue", ":", "try", ":", "return", "ErrorValue", "(", "self", ".", "_data", "[", "'geometry'", "]", "[", "'beamposy'", "]", ",", "self", ".", "_data", "[", "'geometry'", "]", "[", "'beamposy.err'", "]", ")", "except", "KeyError", ":", "return", "ErrorValue", "(", "self", ".", "_data", "[", "'geometry'", "]", "[", "'beamposy'", "]", ",", "0.0", ")" ]
X (column) coordinate of the beam center, pixel units, 0-based.
[ "X", "(", "column", ")", "coordinate", "of", "the", "beam", "center", "pixel", "units", "0", "-", "based", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/credo_cct/header.py#L106-L113
awacha/sastool
sastool/io/credo_cct/header.py
Header.beamcentery
def beamcentery(self) -> ErrorValue: """Y (row) coordinate of the beam center, pixel units, 0-based.""" try: return ErrorValue(self._data['geometry']['beamposx'], self._data['geometry']['beamposx.err']) except KeyError: return ErrorValue(self._data['geometry']['beamposx'], 0.0)
python
def beamcentery(self) -> ErrorValue: """Y (row) coordinate of the beam center, pixel units, 0-based.""" try: return ErrorValue(self._data['geometry']['beamposx'], self._data['geometry']['beamposx.err']) except KeyError: return ErrorValue(self._data['geometry']['beamposx'], 0.0)
[ "def", "beamcentery", "(", "self", ")", "->", "ErrorValue", ":", "try", ":", "return", "ErrorValue", "(", "self", ".", "_data", "[", "'geometry'", "]", "[", "'beamposx'", "]", ",", "self", ".", "_data", "[", "'geometry'", "]", "[", "'beamposx.err'", "]", ")", "except", "KeyError", ":", "return", "ErrorValue", "(", "self", ".", "_data", "[", "'geometry'", "]", "[", "'beamposx'", "]", ",", "0.0", ")" ]
Y (row) coordinate of the beam center, pixel units, 0-based.
[ "Y", "(", "row", ")", "coordinate", "of", "the", "beam", "center", "pixel", "units", "0", "-", "based", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/credo_cct/header.py#L124-L131
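The two properties above deliberately cross the axes: the stored 'beamposx'/'beamposy' values are in (row, column) order, while beamcenterx/beamcentery expose the (column, row) view, and the try/except falls back to a zero uncertainty when no '.err' companion entry exists. A toy illustration of that fallback pattern (the header dict and helper are hypothetical, not sastool API):

    from typing import Dict, Tuple

    def value_with_error(geometry: Dict[str, float], key: str) -> Tuple[float, float]:
        # Same shape as the properties: use the '.err' companion entry if present.
        try:
            return geometry[key], geometry[key + '.err']
        except KeyError:
            return geometry[key], 0.0

    geo = {'beamposx': 123.4, 'beamposy': 345.6}   # (row, column); no '.err' keys
    print(value_with_error(geo, 'beamposy'))       # -> (345.6, 0.0), the X (column) center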
awacha/sastool
sastool/io/credo_cct/header.py
Header.maskname
def maskname(self) -> Optional[str]:
    """Name of the mask matrix file."""
    mask = self._data['geometry']['mask']
    if os.path.isabs(mask):
        # keep only the file name when an absolute path was stored
        # (isabs(), since abspath() returns a non-empty, always truthy string)
        mask = os.path.split(mask)[-1]
    return mask
python
def maskname(self) -> Optional[str]:
    """Name of the mask matrix file."""
    mask = self._data['geometry']['mask']
    if os.path.isabs(mask):
        # keep only the file name when an absolute path was stored
        # (isabs(), since abspath() returns a non-empty, always truthy string)
        mask = os.path.split(mask)[-1]
    return mask
[ "def", "maskname", "(", "self", ")", "->", "Optional", "[", "str", "]", ":", "mask", "=", "self", ".", "_data", "[", "'geometry'", "]", "[", "'mask'", "]", "if", "os", ".", "path", ".", "abspath", "(", "mask", ")", ":", "mask", "=", "os", ".", "path", ".", "split", "(", "mask", ")", "[", "-", "1", "]", "return", "mask" ]
Name of the mask matrix file.
[ "Name", "of", "the", "mask", "matrix", "file", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/credo_cct/header.py#L197-L202
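Why os.path.isabs() is the right truthiness test in maskname above — os.path.abspath() returns a non-empty (hence always truthy) string for any input, so it cannot distinguish the two cases:

    import os.path

    assert bool(os.path.abspath('mask.mat'))        # truthy even for a bare file name
    assert not os.path.isabs('mask.mat')            # a bare name is not absolute
    assert os.path.isabs(os.path.sep + 'processing' + os.path.sep + 'mask.mat')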
awacha/sastool
sastool/classes2/curve.py
errtrapz
def errtrapz(x, yerr): """Error of the trapezoid formula Inputs: x: the abscissa yerr: the error of the dependent variable Outputs: the error of the integral """ x = np.array(x) assert isinstance(x, np.ndarray) yerr = np.array(yerr) return 0.5 * np.sqrt((x[1] - x[0]) ** 2 * yerr[0] ** 2 + np.sum((x[2:] - x[:-2]) ** 2 * yerr[1:-1] ** 2) + (x[-1] - x[-2]) ** 2 * yerr[-1] ** 2)
python
def errtrapz(x, yerr): """Error of the trapezoid formula Inputs: x: the abscissa yerr: the error of the dependent variable Outputs: the error of the integral """ x = np.array(x) assert isinstance(x, np.ndarray) yerr = np.array(yerr) return 0.5 * np.sqrt((x[1] - x[0]) ** 2 * yerr[0] ** 2 + np.sum((x[2:] - x[:-2]) ** 2 * yerr[1:-1] ** 2) + (x[-1] - x[-2]) ** 2 * yerr[-1] ** 2)
[ "def", "errtrapz", "(", "x", ",", "yerr", ")", ":", "x", "=", "np", ".", "array", "(", "x", ")", "assert", "isinstance", "(", "x", ",", "np", ".", "ndarray", ")", "yerr", "=", "np", ".", "array", "(", "yerr", ")", "return", "0.5", "*", "np", ".", "sqrt", "(", "(", "x", "[", "1", "]", "-", "x", "[", "0", "]", ")", "**", "2", "*", "yerr", "[", "0", "]", "**", "2", "+", "np", ".", "sum", "(", "(", "x", "[", "2", ":", "]", "-", "x", "[", ":", "-", "2", "]", ")", "**", "2", "*", "yerr", "[", "1", ":", "-", "1", "]", "**", "2", ")", "+", "(", "x", "[", "-", "1", "]", "-", "x", "[", "-", "2", "]", ")", "**", "2", "*", "yerr", "[", "-", "1", "]", "**", "2", ")" ]
Error of the trapezoid formula Inputs: x: the abscissa yerr: the error of the dependent variable Outputs: the error of the integral
[ "Error", "of", "the", "trapezoid", "formula", "Inputs", ":", "x", ":", "the", "abscissa", "yerr", ":", "the", "error", "of", "the", "dependent", "variable" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/classes2/curve.py#L15-L29
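Where the errtrapz formula comes from (a derivation sketch, not text from the source): the trapezoid integral is T = Σᵢ (x_{i+1} − x_i)(y_i + y_{i+1})/2, so ∂T/∂y_0 = (x_1 − x_0)/2, ∂T/∂y_k = (x_{k+1} − x_{k−1})/2 for interior points, and ∂T/∂y_n = (x_n − x_{n−1})/2; Gaussian propagation σ_T² = Σ_k (∂T/∂y_k)² σ_k² then yields exactly the returned expression. A Monte-Carlo cross-check:

    import numpy as np

    rng = np.random.default_rng(0)
    x = np.linspace(0.0, 1.0, 11)
    y = x ** 2
    yerr = np.full_like(y, 0.05)

    # the closed form from errtrapz above
    analytic = 0.5 * np.sqrt((x[1] - x[0]) ** 2 * yerr[0] ** 2
                             + np.sum((x[2:] - x[:-2]) ** 2 * yerr[1:-1] ** 2)
                             + (x[-1] - x[-2]) ** 2 * yerr[-1] ** 2)
    # empirical spread of the trapezoid integral under Gaussian noise
    samples = [np.trapz(y + rng.normal(0.0, yerr), x) for _ in range(20000)]
    assert abs(np.std(samples) - analytic) / analytic < 0.05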
awacha/sastool
sastool/classes2/curve.py
Curve.fit
def fit(self, fitfunction, parinit, unfittableparameters=(), *args, **kwargs): """Perform a nonlinear least-squares fit, using sastool.misc.fitter.Fitter() Other arguments and keyword arguments will be passed through to the __init__ method of Fitter. For example, these are: - lbounds - ubounds - ytransform - loss - method Returns: the final parameters as ErrorValue instances, the stats dictionary and the fitted curve instance of the same type as this) """ kwargs['otherparameters'] = unfittableparameters fitter = Fitter(fitfunction, parinit, self.q, self.Intensity, self.qError, self.Error, *args, **kwargs) fixedvalues = [[None, p][isinstance(p, FixedParameter)] for p in parinit] fitter.fixparameters(fixedvalues) fitter.fit() pars = fitter.parameters() uncs = fitter.uncertainties() stats = fitter.stats() results = [ErrorValue(p, u) for p, u in zip(pars, uncs)] + [stats, type(self)(self.q, stats['func_value'])] return results
python
def fit(self, fitfunction, parinit, unfittableparameters=(), *args, **kwargs): """Perform a nonlinear least-squares fit, using sastool.misc.fitter.Fitter() Other arguments and keyword arguments will be passed through to the __init__ method of Fitter. For example, these are: - lbounds - ubounds - ytransform - loss - method Returns: the final parameters as ErrorValue instances, the stats dictionary and the fitted curve instance of the same type as this) """ kwargs['otherparameters'] = unfittableparameters fitter = Fitter(fitfunction, parinit, self.q, self.Intensity, self.qError, self.Error, *args, **kwargs) fixedvalues = [[None, p][isinstance(p, FixedParameter)] for p in parinit] fitter.fixparameters(fixedvalues) fitter.fit() pars = fitter.parameters() uncs = fitter.uncertainties() stats = fitter.stats() results = [ErrorValue(p, u) for p, u in zip(pars, uncs)] + [stats, type(self)(self.q, stats['func_value'])] return results
[ "def", "fit", "(", "self", ",", "fitfunction", ",", "parinit", ",", "unfittableparameters", "=", "(", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'otherparameters'", "]", "=", "unfittableparameters", "fitter", "=", "Fitter", "(", "fitfunction", ",", "parinit", ",", "self", ".", "q", ",", "self", ".", "Intensity", ",", "self", ".", "qError", ",", "self", ".", "Error", ",", "*", "args", ",", "*", "*", "kwargs", ")", "fixedvalues", "=", "[", "[", "None", ",", "p", "]", "[", "isinstance", "(", "p", ",", "FixedParameter", ")", "]", "for", "p", "in", "parinit", "]", "fitter", ".", "fixparameters", "(", "fixedvalues", ")", "fitter", ".", "fit", "(", ")", "pars", "=", "fitter", ".", "parameters", "(", ")", "uncs", "=", "fitter", ".", "uncertainties", "(", ")", "stats", "=", "fitter", ".", "stats", "(", ")", "results", "=", "[", "ErrorValue", "(", "p", ",", "u", ")", "for", "p", ",", "u", "in", "zip", "(", "pars", ",", "uncs", ")", "]", "+", "[", "stats", ",", "type", "(", "self", ")", "(", "self", ".", "q", ",", "stats", "[", "'func_value'", "]", ")", "]", "return", "results" ]
Perform a nonlinear least-squares fit, using sastool.misc.fitter.Fitter()

Other arguments and keyword arguments will be passed through to the
__init__ method of Fitter. For example, these are:
    - lbounds
    - ubounds
    - ytransform
    - loss
    - method

Returns: the final parameters as ErrorValue instances, the stats
    dictionary and the fitted curve (an instance of the same type as
    this one)
[ "Perform", "a", "nonlinear", "least", "-", "squares", "fit", "using", "sastool", ".", "misc", ".", "fitter", ".", "Fitter", "()" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/classes2/curve.py#L63-L87
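A hedged usage sketch of Curve.fit — the Curve constructor argument order and the FixedParameter import location are assumptions, not taken from this excerpt:

    import numpy as np
    from sastool.classes2 import Curve
    from sastool.misc.fitter import FixedParameter   # assumed to live next to Fitter

    def powerlaw(q, A, alpha):
        return A * q ** alpha

    q = np.linspace(0.05, 0.5, 100)
    curve = Curve(q, 3.0 * q ** -4, 0.15 * q ** -4)  # assumed (q, Intensity, Error) order

    # Fix alpha at -4 and fit only the amplitude; the last two results are the
    # statistics dict and the fitted curve evaluated on the same q grid.
    A, alpha, stats, fitted = curve.fit(powerlaw, [1.0, FixedParameter(-4.0)])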
awacha/sastool
sastool/classes2/curve.py
Curve.momentum
def momentum(self, exponent=1, errorrequested=True): """Calculate momenta (integral of y times x^exponent) The integration is done by the trapezoid formula (np.trapz). Inputs: exponent: the exponent of q in the integration. errorrequested: True if error should be returned (true Gaussian error-propagation of the trapezoid formula) """ y = self.Intensity * self.q ** exponent m = np.trapz(y, self.q) if errorrequested: err = self.Error * self.q ** exponent dm = errtrapz(self.q, err) return ErrorValue(m, dm) else: return m
python
def momentum(self, exponent=1, errorrequested=True): """Calculate momenta (integral of y times x^exponent) The integration is done by the trapezoid formula (np.trapz). Inputs: exponent: the exponent of q in the integration. errorrequested: True if error should be returned (true Gaussian error-propagation of the trapezoid formula) """ y = self.Intensity * self.q ** exponent m = np.trapz(y, self.q) if errorrequested: err = self.Error * self.q ** exponent dm = errtrapz(self.q, err) return ErrorValue(m, dm) else: return m
[ "def", "momentum", "(", "self", ",", "exponent", "=", "1", ",", "errorrequested", "=", "True", ")", ":", "y", "=", "self", ".", "Intensity", "*", "self", ".", "q", "**", "exponent", "m", "=", "np", ".", "trapz", "(", "y", ",", "self", ".", "q", ")", "if", "errorrequested", ":", "err", "=", "self", ".", "Error", "*", "self", ".", "q", "**", "exponent", "dm", "=", "errtrapz", "(", "self", ".", "q", ",", "err", ")", "return", "ErrorValue", "(", "m", ",", "dm", ")", "else", ":", "return", "m" ]
Calculate momenta (integral of y times x^exponent) The integration is done by the trapezoid formula (np.trapz). Inputs: exponent: the exponent of q in the integration. errorrequested: True if error should be returned (true Gaussian error-propagation of the trapezoid formula)
[ "Calculate", "momenta", "(", "integral", "of", "y", "times", "x^exponent", ")", "The", "integration", "is", "done", "by", "the", "trapezoid", "formula", "(", "np", ".", "trapz", ")", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/classes2/curve.py#L238-L254
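What momentum() computes, spelled out with plain numpy — e.g. exponent=2 gives the integrand of the scattering (Porod) invariant Q = ∫ q²I(q) dq (the curve below is a placeholder):

    import numpy as np

    q = np.linspace(0.01, 0.5, 200)
    I = np.exp(-100.0 * q ** 2)        # placeholder intensity curve

    # momentum(exponent=2, errorrequested=False) reduces to:
    Q = np.trapz(I * q ** 2, q)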
awacha/sastool
sastool/classes2/curve.py
Curve.scalefactor
def scalefactor(self, other, qmin=None, qmax=None, Npoints=None):
    """Calculate a scaling factor, by which this curve is to be multiplied to best fit
    the other one.

    Inputs:
        other: the other curve (an instance of GeneralCurve or of a subclass of it)
        qmin: lower cut-off (None to determine the common range automatically)
        qmax: upper cut-off (None to determine the common range automatically)
        Npoints: number of points to use in the common x-range (None defaults
            to the lowest value among the two datasets)

    Outputs: The scaling factor determined by interpolating both datasets
        to the same abscissa and calculating the ratio of their integrals,
        calculated by the trapezoid formula. Error propagation is
        taken into account.
    """
    if qmin is None:
        qmin = max(self.q.min(), other.q.min())
    if qmax is None:
        # automatic upper cut-off: the upper end of the overlapping q-range
        qmax = min(self.q.max(), other.q.max())
    data1 = self.trim(qmin, qmax)
    data2 = other.trim(qmin, qmax)
    if Npoints is None:
        Npoints = min(len(data1), len(data2))
    commonx = np.linspace(
        max(data1.q.min(), data2.q.min()), min(data2.q.max(), data1.q.max()), Npoints)
    data1 = data1.interpolate(commonx)
    data2 = data2.interpolate(commonx)
    return nonlinear_odr(data1.Intensity, data2.Intensity, data1.Error, data2.Error,
                         lambda x, a: a * x, [1])[0]
python
def scalefactor(self, other, qmin=None, qmax=None, Npoints=None):
    """Calculate a scaling factor, by which this curve is to be multiplied to best fit
    the other one.

    Inputs:
        other: the other curve (an instance of GeneralCurve or of a subclass of it)
        qmin: lower cut-off (None to determine the common range automatically)
        qmax: upper cut-off (None to determine the common range automatically)
        Npoints: number of points to use in the common x-range (None defaults
            to the lowest value among the two datasets)

    Outputs: The scaling factor determined by interpolating both datasets
        to the same abscissa and calculating the ratio of their integrals,
        calculated by the trapezoid formula. Error propagation is
        taken into account.
    """
    if qmin is None:
        qmin = max(self.q.min(), other.q.min())
    if qmax is None:
        # automatic upper cut-off: the upper end of the overlapping q-range
        qmax = min(self.q.max(), other.q.max())
    data1 = self.trim(qmin, qmax)
    data2 = other.trim(qmin, qmax)
    if Npoints is None:
        Npoints = min(len(data1), len(data2))
    commonx = np.linspace(
        max(data1.q.min(), data2.q.min()), min(data2.q.max(), data1.q.max()), Npoints)
    data1 = data1.interpolate(commonx)
    data2 = data2.interpolate(commonx)
    return nonlinear_odr(data1.Intensity, data2.Intensity, data1.Error, data2.Error,
                         lambda x, a: a * x, [1])[0]
[ "def", "scalefactor", "(", "self", ",", "other", ",", "qmin", "=", "None", ",", "qmax", "=", "None", ",", "Npoints", "=", "None", ")", ":", "if", "qmin", "is", "None", ":", "qmin", "=", "max", "(", "self", ".", "q", ".", "min", "(", ")", ",", "other", ".", "q", ".", "min", "(", ")", ")", "if", "qmax", "is", "None", ":", "xmax", "=", "min", "(", "self", ".", "q", ".", "max", "(", ")", ",", "other", ".", "q", ".", "max", "(", ")", ")", "data1", "=", "self", ".", "trim", "(", "qmin", ",", "qmax", ")", "data2", "=", "other", ".", "trim", "(", "qmin", ",", "qmax", ")", "if", "Npoints", "is", "None", ":", "Npoints", "=", "min", "(", "len", "(", "data1", ")", ",", "len", "(", "data2", ")", ")", "commonx", "=", "np", ".", "linspace", "(", "max", "(", "data1", ".", "q", ".", "min", "(", ")", ",", "data2", ".", "q", ".", "min", "(", ")", ")", ",", "min", "(", "data2", ".", "q", ".", "max", "(", ")", ",", "data1", ".", "q", ".", "max", "(", ")", ")", ",", "Npoints", ")", "data1", "=", "data1", ".", "interpolate", "(", "commonx", ")", "data2", "=", "data2", ".", "interpolate", "(", "commonx", ")", "return", "nonlinear_odr", "(", "data1", ".", "Intensity", ",", "data2", ".", "Intensity", ",", "data1", ".", "Error", ",", "data2", ".", "Error", ",", "lambda", "x", ",", "a", ":", "a", "*", "x", ",", "[", "1", "]", ")", "[", "0", "]" ]
Calculate a scaling factor, by which this curve is to be multiplied to best fit the other one. Inputs: other: the other curve (an instance of GeneralCurve or of a subclass of it) qmin: lower cut-off (None to determine the common range automatically) qmax: upper cut-off (None to determine the common range automatically) Npoints: number of points to use in the common x-range (None defaults to the lowest value among the two datasets) Outputs: The scaling factor determined by interpolating both datasets to the same abscissa and calculating the ratio of their integrals, calculated by the trapezoid formula. Error propagation is taken into account.
[ "Calculate", "a", "scaling", "factor", "by", "which", "this", "curve", "is", "to", "be", "multiplied", "to", "best", "fit", "the", "other", "one", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/classes2/curve.py#L279-L306
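A hedged usage sketch: because nonlinear_odr fits other ≈ a · self on the common grid, multiplying this curve by the returned factor overlays it on the other one (curve_a and curve_b are hypothetical Curve instances; scalar multiplication of a Curve is an assumption):

    factor = curve_a.scalefactor(curve_b, qmin=0.1, qmax=0.3)  # restrict to the overlap
    curve_a_scaled = curve_a * factor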
awacha/sastool
sastool/misc/fitter.py
Fitter._substitute_fixed_parameters_covar
def _substitute_fixed_parameters_covar(self, covar):
    """Insert fixed parameters in a covariance matrix"""
    covar_resolved = np.empty((len(self._fixed_parameters), len(self._fixed_parameters)))
    indices_of_fixed_parameters = [i for i in range(len(self.parameters())) if
                                   self._fixed_parameters[i] is not None]
    indices_of_free_parameters = [i for i in range(len(self.parameters())) if
                                  self._fixed_parameters[i] is None]
    for i in range(covar_resolved.shape[0]):
        if i in indices_of_fixed_parameters:
            # the i-th parameter was fixed: the row and column corresponding
            # to it carry no uncertainty and are zeroed out
            covar_resolved[i, :] = 0
            continue
        for j in range(covar_resolved.shape[1]):
            if j in indices_of_fixed_parameters:
                covar_resolved[:, j] = 0
                continue
            covar_resolved[i, j] = covar[indices_of_free_parameters.index(i),
                                         indices_of_free_parameters.index(j)]
    return covar_resolved
python
def _substitute_fixed_parameters_covar(self, covar):
    """Insert fixed parameters in a covariance matrix"""
    covar_resolved = np.empty((len(self._fixed_parameters), len(self._fixed_parameters)))
    indices_of_fixed_parameters = [i for i in range(len(self.parameters())) if
                                   self._fixed_parameters[i] is not None]
    indices_of_free_parameters = [i for i in range(len(self.parameters())) if
                                  self._fixed_parameters[i] is None]
    for i in range(covar_resolved.shape[0]):
        if i in indices_of_fixed_parameters:
            # the i-th parameter was fixed: the row and column corresponding
            # to it carry no uncertainty and are zeroed out
            covar_resolved[i, :] = 0
            continue
        for j in range(covar_resolved.shape[1]):
            if j in indices_of_fixed_parameters:
                covar_resolved[:, j] = 0
                continue
            covar_resolved[i, j] = covar[indices_of_free_parameters.index(i),
                                         indices_of_free_parameters.index(j)]
    return covar_resolved
[ "def", "_substitute_fixed_parameters_covar", "(", "self", ",", "covar", ")", ":", "covar_resolved", "=", "np", ".", "empty", "(", "(", "len", "(", "self", ".", "_fixed_parameters", ")", ",", "len", "(", "self", ".", "_fixed_parameters", ")", ")", ")", "indices_of_fixed_parameters", "=", "[", "i", "for", "i", "in", "range", "(", "len", "(", "self", ".", "parameters", "(", ")", ")", ")", "if", "self", ".", "_fixed_parameters", "[", "i", "]", "is", "not", "None", "]", "indices_of_free_parameters", "=", "[", "i", "for", "i", "in", "range", "(", "len", "(", "self", ".", "parameters", "(", ")", ")", ")", "if", "self", ".", "_fixed_parameters", "[", "i", "]", "is", "None", "]", "for", "i", "in", "range", "(", "covar_resolved", ".", "shape", "[", "0", "]", ")", ":", "if", "i", "in", "indices_of_fixed_parameters", ":", "# the i-eth argument was fixed. This means that the row and column corresponding to this argument", "# must be None", "covar_resolved", "[", "i", ",", ":", "]", "=", "0", "continue", "for", "j", "in", "range", "(", "covar_resolved", ".", "shape", "[", "1", "]", ")", ":", "if", "j", "in", "indices_of_fixed_parameters", ":", "covar_resolved", "[", ":", ",", "j", "]", "=", "0", "continue", "covar_resolved", "[", "i", ",", "j", "]", "=", "covar", "[", "indices_of_free_parameters", ".", "index", "(", "i", ")", ",", "indices_of_free_parameters", ".", "index", "(", "j", ")", "]", "return", "covar_resolved" ]
Insert fixed parameters in a covariance matrix
[ "Insert", "fixed", "parameters", "in", "a", "covariance", "matrix" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/fitter.py#L209-L226
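A compact worked example of what the double loop above builds — three parameters with the middle one fixed, zeros (not uncertainties) in its row and column:

    import numpy as np

    covar_free = np.array([[0.04, 0.01],
                           [0.01, 0.09]])    # covariance of the two free parameters
    free_idx = [0, 2]                        # parameter 1 was fixed

    covar_full = np.zeros((3, 3))
    covar_full[np.ix_(free_idx, free_idx)] = covar_free
    # row/column 1 stays all-zero: a fixed parameter carries no uncertainty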
awacha/sastool
sastool/io/credo_cct/loader.py
Loader.loadmask
def loadmask(self, filename: str) -> np.ndarray:
    """Load a mask file."""
    mask = scipy.io.loadmat(self.find_file(filename, what='mask'))
    maskkey = [k for k in mask.keys() if not (k.startswith('_') or k.endswith('_'))][0]
    return mask[maskkey].astype(bool)  # plain bool: the np.bool alias was removed from numpy
python
def loadmask(self, filename: str) -> np.ndarray:
    """Load a mask file."""
    mask = scipy.io.loadmat(self.find_file(filename, what='mask'))
    maskkey = [k for k in mask.keys() if not (k.startswith('_') or k.endswith('_'))][0]
    return mask[maskkey].astype(bool)  # plain bool: the np.bool alias was removed from numpy
[ "def", "loadmask", "(", "self", ",", "filename", ":", "str", ")", "->", "np", ".", "ndarray", ":", "mask", "=", "scipy", ".", "io", ".", "loadmat", "(", "self", ".", "find_file", "(", "filename", ",", "what", "=", "'mask'", ")", ")", "maskkey", "=", "[", "k", "for", "k", "in", "mask", ".", "keys", "(", ")", "if", "not", "(", "k", ".", "startswith", "(", "'_'", ")", "or", "k", ".", "endswith", "(", "'_'", ")", ")", "]", "[", "0", "]", "return", "mask", "[", "maskkey", "]", ".", "astype", "(", "np", ".", "bool", ")" ]
Load a mask file.
[ "Load", "a", "mask", "file", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/credo_cct/loader.py#L57-L61
awacha/sastool
sastool/io/credo_cct/loader.py
Loader.loadcurve
def loadcurve(self, fsn: int) -> classes2.Curve: """Load a radial scattering curve""" return classes2.Curve.new_from_file(self.find_file(self._exposureclass + '_%05d.txt' % fsn))
python
def loadcurve(self, fsn: int) -> classes2.Curve: """Load a radial scattering curve""" return classes2.Curve.new_from_file(self.find_file(self._exposureclass + '_%05d.txt' % fsn))
[ "def", "loadcurve", "(", "self", ",", "fsn", ":", "int", ")", "->", "classes2", ".", "Curve", ":", "return", "classes2", ".", "Curve", ".", "new_from_file", "(", "self", ".", "find_file", "(", "self", ".", "_exposureclass", "+", "'_%05d.txt'", "%", "fsn", ")", ")" ]
Load a radial scattering curve
[ "Load", "a", "radial", "scattering", "curve" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/credo_cct/loader.py#L63-L65
awacha/sastool
sastool/io/twodim.py
readcbf
def readcbf(name, load_header=False, load_data=True, for_nexus=False): """Read a cbf (crystallographic binary format) file from a Dectris PILATUS detector. Inputs ------ name: string the file name load_header: bool if the header data is to be loaded. load_data: bool if the binary data is to be loaded. for_nexus: bool if the array should be opened with NeXus ordering. Output ------ a numpy array of the scattering data Notes ----- currently only Little endian, "signed 32-bit integer" type and byte-offset compressed data are accepted. """ with open(name, 'rb') as f: cbfbin = f.read() datastart = cbfbin.find(b'\x0c\x1a\x04\xd5') + 4 hed = [x.strip() for x in cbfbin[:datastart].split(b'\n')] header = {} readingmode = None for i in range(len(hed)): if not hed[i]: # skip empty header lines continue elif hed[i] == b';': continue elif hed[i].startswith(b'_array_data.header_convention'): header['CBF_header_convention'] = str(hed[i][ len(b'_array_data.header_convention'):].strip().replace(b'"', b''), encoding='utf-8') elif hed[i].startswith(b'_array_data.header_contents'): readingmode = 'PilatusHeader' elif hed[i].startswith(b'_array_data.data'): readingmode = 'CIFHeader' elif readingmode == 'PilatusHeader': if not hed[i].startswith(b'#'): continue line = hed[i].strip()[1:].strip() try: # try to interpret the line as the date. header['CBF_Date'] = dateutil.parser.parse(line) header['Date'] = header['CBF_Date'] continue except (ValueError, TypeError): # eat exception: if we cannot parse this line as a date, try # another format. pass treated = False for sep in (b':', b'='): if treated: continue if line.count(sep) == 1: name, value = tuple(x.strip() for x in line.split(sep, 1)) try: m = re.match( b'^(?P<number>-?(\d+(.\d+)?(e-?\d+)?))\s+(?P<unit>m|s|counts|eV)$', value).groupdict() value = float(m['number']) m['unit'] = str(m['unit'], encoding='utf-8') except AttributeError: # the regex did not match the string, thus re.match() # returned None. pass header[str(name, 'utf-8')] = value treated = True if treated: continue if line.startswith(b'Pixel_size'): header['XPixel'], header['YPixel'] = tuple( [float(a.strip().split(b' ')[0]) * 1000 for a in line[len(b'Pixel_size'):].split(b'x')]) else: try: m = re.match( b'^(?P<label>[a-zA-Z0-9,_\.\-!\?\ ]*?)\s+(?P<number>-?(\d+(.\d+)?(e-?\d+)?))\s+(?P<unit>m|s|counts|eV)$', line).groupdict() except AttributeError: pass else: m['label'] = str(m['label'], 'utf-8') m['unit'] = str(m['unit'], encoding='utf-8') if m['unit'] == b'counts': header[m['label']] = int(m['number']) else: header[m['label']] = float(m['number']) if 'sensor' in m['label'] and 'thickness' in m['label']: header[m['label']] *= 1e6 elif readingmode == 'CIFHeader': line = hed[i] for sep in (b':', b'='): if line.count(sep) == 1: label, content = tuple(x.strip() for x in line.split(sep, 1)) if b'"' in content: content = content.replace(b'"', b'') try: content = int(content) except ValueError: content = str(content, encoding='utf-8') header['CBF_' + str(label, encoding='utf-8')] = content else: pass ret = [] if load_data: if header['CBF_X-Binary-Element-Type'] != 'signed 32-bit integer': raise NotImplementedError( 'element type is not "signed 32-bit integer" in CBF, but %s.' 
% header['CBF_X-Binary-Element-Type']) if header['CBF_conversions'] != 'x-CBF_BYTE_OFFSET': raise NotImplementedError( 'compression is not "x-CBF_BYTE_OFFSET" in CBF!') dim1 = header['CBF_X-Binary-Size-Fastest-Dimension'] dim2 = header['CBF_X-Binary-Size-Second-Dimension'] nbytes = header['CBF_X-Binary-Size'] cbfdata = cbfdecompress( bytearray(cbfbin[datastart:datastart + nbytes]), dim1, dim2, for_nexus) ret.append(cbfdata) if load_header: ret.append(header) return tuple(ret)
python
def readcbf(name, load_header=False, load_data=True, for_nexus=False): """Read a cbf (crystallographic binary format) file from a Dectris PILATUS detector. Inputs ------ name: string the file name load_header: bool if the header data is to be loaded. load_data: bool if the binary data is to be loaded. for_nexus: bool if the array should be opened with NeXus ordering. Output ------ a numpy array of the scattering data Notes ----- currently only Little endian, "signed 32-bit integer" type and byte-offset compressed data are accepted. """ with open(name, 'rb') as f: cbfbin = f.read() datastart = cbfbin.find(b'\x0c\x1a\x04\xd5') + 4 hed = [x.strip() for x in cbfbin[:datastart].split(b'\n')] header = {} readingmode = None for i in range(len(hed)): if not hed[i]: # skip empty header lines continue elif hed[i] == b';': continue elif hed[i].startswith(b'_array_data.header_convention'): header['CBF_header_convention'] = str(hed[i][ len(b'_array_data.header_convention'):].strip().replace(b'"', b''), encoding='utf-8') elif hed[i].startswith(b'_array_data.header_contents'): readingmode = 'PilatusHeader' elif hed[i].startswith(b'_array_data.data'): readingmode = 'CIFHeader' elif readingmode == 'PilatusHeader': if not hed[i].startswith(b'#'): continue line = hed[i].strip()[1:].strip() try: # try to interpret the line as the date. header['CBF_Date'] = dateutil.parser.parse(line) header['Date'] = header['CBF_Date'] continue except (ValueError, TypeError): # eat exception: if we cannot parse this line as a date, try # another format. pass treated = False for sep in (b':', b'='): if treated: continue if line.count(sep) == 1: name, value = tuple(x.strip() for x in line.split(sep, 1)) try: m = re.match( b'^(?P<number>-?(\d+(.\d+)?(e-?\d+)?))\s+(?P<unit>m|s|counts|eV)$', value).groupdict() value = float(m['number']) m['unit'] = str(m['unit'], encoding='utf-8') except AttributeError: # the regex did not match the string, thus re.match() # returned None. pass header[str(name, 'utf-8')] = value treated = True if treated: continue if line.startswith(b'Pixel_size'): header['XPixel'], header['YPixel'] = tuple( [float(a.strip().split(b' ')[0]) * 1000 for a in line[len(b'Pixel_size'):].split(b'x')]) else: try: m = re.match( b'^(?P<label>[a-zA-Z0-9,_\.\-!\?\ ]*?)\s+(?P<number>-?(\d+(.\d+)?(e-?\d+)?))\s+(?P<unit>m|s|counts|eV)$', line).groupdict() except AttributeError: pass else: m['label'] = str(m['label'], 'utf-8') m['unit'] = str(m['unit'], encoding='utf-8') if m['unit'] == b'counts': header[m['label']] = int(m['number']) else: header[m['label']] = float(m['number']) if 'sensor' in m['label'] and 'thickness' in m['label']: header[m['label']] *= 1e6 elif readingmode == 'CIFHeader': line = hed[i] for sep in (b':', b'='): if line.count(sep) == 1: label, content = tuple(x.strip() for x in line.split(sep, 1)) if b'"' in content: content = content.replace(b'"', b'') try: content = int(content) except ValueError: content = str(content, encoding='utf-8') header['CBF_' + str(label, encoding='utf-8')] = content else: pass ret = [] if load_data: if header['CBF_X-Binary-Element-Type'] != 'signed 32-bit integer': raise NotImplementedError( 'element type is not "signed 32-bit integer" in CBF, but %s.' 
% header['CBF_X-Binary-Element-Type']) if header['CBF_conversions'] != 'x-CBF_BYTE_OFFSET': raise NotImplementedError( 'compression is not "x-CBF_BYTE_OFFSET" in CBF!') dim1 = header['CBF_X-Binary-Size-Fastest-Dimension'] dim2 = header['CBF_X-Binary-Size-Second-Dimension'] nbytes = header['CBF_X-Binary-Size'] cbfdata = cbfdecompress( bytearray(cbfbin[datastart:datastart + nbytes]), dim1, dim2, for_nexus) ret.append(cbfdata) if load_header: ret.append(header) return tuple(ret)
[ "def", "readcbf", "(", "name", ",", "load_header", "=", "False", ",", "load_data", "=", "True", ",", "for_nexus", "=", "False", ")", ":", "with", "open", "(", "name", ",", "'rb'", ")", "as", "f", ":", "cbfbin", "=", "f", ".", "read", "(", ")", "datastart", "=", "cbfbin", ".", "find", "(", "b'\\x0c\\x1a\\x04\\xd5'", ")", "+", "4", "hed", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "cbfbin", "[", ":", "datastart", "]", ".", "split", "(", "b'\\n'", ")", "]", "header", "=", "{", "}", "readingmode", "=", "None", "for", "i", "in", "range", "(", "len", "(", "hed", ")", ")", ":", "if", "not", "hed", "[", "i", "]", ":", "# skip empty header lines", "continue", "elif", "hed", "[", "i", "]", "==", "b';'", ":", "continue", "elif", "hed", "[", "i", "]", ".", "startswith", "(", "b'_array_data.header_convention'", ")", ":", "header", "[", "'CBF_header_convention'", "]", "=", "str", "(", "hed", "[", "i", "]", "[", "len", "(", "b'_array_data.header_convention'", ")", ":", "]", ".", "strip", "(", ")", ".", "replace", "(", "b'\"'", ",", "b''", ")", ",", "encoding", "=", "'utf-8'", ")", "elif", "hed", "[", "i", "]", ".", "startswith", "(", "b'_array_data.header_contents'", ")", ":", "readingmode", "=", "'PilatusHeader'", "elif", "hed", "[", "i", "]", ".", "startswith", "(", "b'_array_data.data'", ")", ":", "readingmode", "=", "'CIFHeader'", "elif", "readingmode", "==", "'PilatusHeader'", ":", "if", "not", "hed", "[", "i", "]", ".", "startswith", "(", "b'#'", ")", ":", "continue", "line", "=", "hed", "[", "i", "]", ".", "strip", "(", ")", "[", "1", ":", "]", ".", "strip", "(", ")", "try", ":", "# try to interpret the line as the date.", "header", "[", "'CBF_Date'", "]", "=", "dateutil", ".", "parser", ".", "parse", "(", "line", ")", "header", "[", "'Date'", "]", "=", "header", "[", "'CBF_Date'", "]", "continue", "except", "(", "ValueError", ",", "TypeError", ")", ":", "# eat exception: if we cannot parse this line as a date, try", "# another format.", "pass", "treated", "=", "False", "for", "sep", "in", "(", "b':'", ",", "b'='", ")", ":", "if", "treated", ":", "continue", "if", "line", ".", "count", "(", "sep", ")", "==", "1", ":", "name", ",", "value", "=", "tuple", "(", "x", ".", "strip", "(", ")", "for", "x", "in", "line", ".", "split", "(", "sep", ",", "1", ")", ")", "try", ":", "m", "=", "re", ".", "match", "(", "b'^(?P<number>-?(\\d+(.\\d+)?(e-?\\d+)?))\\s+(?P<unit>m|s|counts|eV)$'", ",", "value", ")", ".", "groupdict", "(", ")", "value", "=", "float", "(", "m", "[", "'number'", "]", ")", "m", "[", "'unit'", "]", "=", "str", "(", "m", "[", "'unit'", "]", ",", "encoding", "=", "'utf-8'", ")", "except", "AttributeError", ":", "# the regex did not match the string, thus re.match()", "# returned None.", "pass", "header", "[", "str", "(", "name", ",", "'utf-8'", ")", "]", "=", "value", "treated", "=", "True", "if", "treated", ":", "continue", "if", "line", ".", "startswith", "(", "b'Pixel_size'", ")", ":", "header", "[", "'XPixel'", "]", ",", "header", "[", "'YPixel'", "]", "=", "tuple", "(", "[", "float", "(", "a", ".", "strip", "(", ")", ".", "split", "(", "b' '", ")", "[", "0", "]", ")", "*", "1000", "for", "a", "in", "line", "[", "len", "(", "b'Pixel_size'", ")", ":", "]", ".", "split", "(", "b'x'", ")", "]", ")", "else", ":", "try", ":", "m", "=", "re", ".", "match", "(", "b'^(?P<label>[a-zA-Z0-9,_\\.\\-!\\?\\ ]*?)\\s+(?P<number>-?(\\d+(.\\d+)?(e-?\\d+)?))\\s+(?P<unit>m|s|counts|eV)$'", ",", "line", ")", ".", "groupdict", "(", ")", "except", "AttributeError", ":", "pass", "else", ":", "m", 
"[", "'label'", "]", "=", "str", "(", "m", "[", "'label'", "]", ",", "'utf-8'", ")", "m", "[", "'unit'", "]", "=", "str", "(", "m", "[", "'unit'", "]", ",", "encoding", "=", "'utf-8'", ")", "if", "m", "[", "'unit'", "]", "==", "b'counts'", ":", "header", "[", "m", "[", "'label'", "]", "]", "=", "int", "(", "m", "[", "'number'", "]", ")", "else", ":", "header", "[", "m", "[", "'label'", "]", "]", "=", "float", "(", "m", "[", "'number'", "]", ")", "if", "'sensor'", "in", "m", "[", "'label'", "]", "and", "'thickness'", "in", "m", "[", "'label'", "]", ":", "header", "[", "m", "[", "'label'", "]", "]", "*=", "1e6", "elif", "readingmode", "==", "'CIFHeader'", ":", "line", "=", "hed", "[", "i", "]", "for", "sep", "in", "(", "b':'", ",", "b'='", ")", ":", "if", "line", ".", "count", "(", "sep", ")", "==", "1", ":", "label", ",", "content", "=", "tuple", "(", "x", ".", "strip", "(", ")", "for", "x", "in", "line", ".", "split", "(", "sep", ",", "1", ")", ")", "if", "b'\"'", "in", "content", ":", "content", "=", "content", ".", "replace", "(", "b'\"'", ",", "b''", ")", "try", ":", "content", "=", "int", "(", "content", ")", "except", "ValueError", ":", "content", "=", "str", "(", "content", ",", "encoding", "=", "'utf-8'", ")", "header", "[", "'CBF_'", "+", "str", "(", "label", ",", "encoding", "=", "'utf-8'", ")", "]", "=", "content", "else", ":", "pass", "ret", "=", "[", "]", "if", "load_data", ":", "if", "header", "[", "'CBF_X-Binary-Element-Type'", "]", "!=", "'signed 32-bit integer'", ":", "raise", "NotImplementedError", "(", "'element type is not \"signed 32-bit integer\" in CBF, but %s.'", "%", "header", "[", "'CBF_X-Binary-Element-Type'", "]", ")", "if", "header", "[", "'CBF_conversions'", "]", "!=", "'x-CBF_BYTE_OFFSET'", ":", "raise", "NotImplementedError", "(", "'compression is not \"x-CBF_BYTE_OFFSET\" in CBF!'", ")", "dim1", "=", "header", "[", "'CBF_X-Binary-Size-Fastest-Dimension'", "]", "dim2", "=", "header", "[", "'CBF_X-Binary-Size-Second-Dimension'", "]", "nbytes", "=", "header", "[", "'CBF_X-Binary-Size'", "]", "cbfdata", "=", "cbfdecompress", "(", "bytearray", "(", "cbfbin", "[", "datastart", ":", "datastart", "+", "nbytes", "]", ")", ",", "dim1", ",", "dim2", ",", "for_nexus", ")", "ret", ".", "append", "(", "cbfdata", ")", "if", "load_header", ":", "ret", ".", "append", "(", "header", ")", "return", "tuple", "(", "ret", ")" ]
Read a cbf (crystallographic binary format) file from a Dectris PILATUS
detector.

Inputs
------
name: string
    the file name
load_header: bool
    if the header data is to be loaded.
load_data: bool
    if the binary data is to be loaded.
for_nexus: bool
    if the array should be opened with NeXus ordering.

Output
------
a numpy array of the scattering data

Notes
-----
currently only little-endian, "signed 32-bit integer" type and byte-offset
compressed data are accepted.
[ "Read", "a", "cbf", "(", "crystallographic", "binary", "format", ")", "file", "from", "a", "Dectris", "PILATUS", "detector", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/twodim.py#L87-L212
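A hedged usage sketch of readcbf; the path is a placeholder, and the exact header field names depend on what the instrument wrote (Pilatus lines such as "# Exposure_time 1.0 s" are parsed into unit-stripped numbers):

    from sastool.io.twodim import readcbf

    data, hdr = readcbf('image.cbf', load_header=True, load_data=True)
    print(data.shape, data.dtype)          # decompressed detector image
    print(hdr.get('Exposure_time'))        # None if the field is absent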
awacha/sastool
sastool/io/twodim.py
readbdfv1
def readbdfv1(filename, bdfext='.bdf', bhfext='.bhf'): """Read bdf file (Bessy Data Format v1) Input ----- filename: string the name of the file Output ------ the BDF structure in a dict Notes ----- This is an adaptation of the bdf_read.m macro of Sylvio Haas. """ return header.readbhfv1(filename, True, bdfext, bhfext)
python
def readbdfv1(filename, bdfext='.bdf', bhfext='.bhf'): """Read bdf file (Bessy Data Format v1) Input ----- filename: string the name of the file Output ------ the BDF structure in a dict Notes ----- This is an adaptation of the bdf_read.m macro of Sylvio Haas. """ return header.readbhfv1(filename, True, bdfext, bhfext)
[ "def", "readbdfv1", "(", "filename", ",", "bdfext", "=", "'.bdf'", ",", "bhfext", "=", "'.bhf'", ")", ":", "return", "header", ".", "readbhfv1", "(", "filename", ",", "True", ",", "bdfext", ",", "bhfext", ")" ]
Read bdf file (Bessy Data Format v1) Input ----- filename: string the name of the file Output ------ the BDF structure in a dict Notes ----- This is an adaptation of the bdf_read.m macro of Sylvio Haas.
[ "Read", "bdf", "file", "(", "Bessy", "Data", "Format", "v1", ")" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/twodim.py#L215-L231
awacha/sastool
sastool/io/twodim.py
readint2dnorm
def readint2dnorm(filename): """Read corrected intensity and error matrices (Matlab mat or numpy npz format for Beamline B1 (HASYLAB/DORISIII)) Input ----- filename: string the name of the file Outputs ------- two ``np.ndarray``-s, the Intensity and the Error matrices File formats supported: ----------------------- ``.mat`` Matlab MAT file, with (at least) two fields: Intensity and Error ``.npz`` Numpy zip file, with (at least) two fields: Intensity and Error other the file is opened with ``np.loadtxt``. The error matrix is tried to be loaded from the file ``<name>_error<ext>`` where the intensity was loaded from file ``<name><ext>``. I.e. if ``somedir/matrix.dat`` is given, the existence of ``somedir/matrix_error.dat`` is checked. If not found, None is returned for the error matrix. Notes ----- The non-existence of the Intensity matrix results in an exception. If the Error matrix does not exist, None is returned for it. """ # the core of read2dintfile if filename.upper().endswith('.MAT'): # Matlab m = scipy.io.loadmat(filename) elif filename.upper().endswith('.NPZ'): # Numpy m = np.load(filename) else: # loadtxt m = {'Intensity': np.loadtxt(filename)} name, ext = os.path.splitext(filename) errorfilename = name + '_error' + ext if os.path.exists(errorfilename): m['Error'] = np.loadtxt(errorfilename) Intensity = m['Intensity'] try: Error = m['Error'] return Intensity, Error except: return Intensity, None
python
def readint2dnorm(filename): """Read corrected intensity and error matrices (Matlab mat or numpy npz format for Beamline B1 (HASYLAB/DORISIII)) Input ----- filename: string the name of the file Outputs ------- two ``np.ndarray``-s, the Intensity and the Error matrices File formats supported: ----------------------- ``.mat`` Matlab MAT file, with (at least) two fields: Intensity and Error ``.npz`` Numpy zip file, with (at least) two fields: Intensity and Error other the file is opened with ``np.loadtxt``. The error matrix is tried to be loaded from the file ``<name>_error<ext>`` where the intensity was loaded from file ``<name><ext>``. I.e. if ``somedir/matrix.dat`` is given, the existence of ``somedir/matrix_error.dat`` is checked. If not found, None is returned for the error matrix. Notes ----- The non-existence of the Intensity matrix results in an exception. If the Error matrix does not exist, None is returned for it. """ # the core of read2dintfile if filename.upper().endswith('.MAT'): # Matlab m = scipy.io.loadmat(filename) elif filename.upper().endswith('.NPZ'): # Numpy m = np.load(filename) else: # loadtxt m = {'Intensity': np.loadtxt(filename)} name, ext = os.path.splitext(filename) errorfilename = name + '_error' + ext if os.path.exists(errorfilename): m['Error'] = np.loadtxt(errorfilename) Intensity = m['Intensity'] try: Error = m['Error'] return Intensity, Error except: return Intensity, None
[ "def", "readint2dnorm", "(", "filename", ")", ":", "# the core of read2dintfile", "if", "filename", ".", "upper", "(", ")", ".", "endswith", "(", "'.MAT'", ")", ":", "# Matlab", "m", "=", "scipy", ".", "io", ".", "loadmat", "(", "filename", ")", "elif", "filename", ".", "upper", "(", ")", ".", "endswith", "(", "'.NPZ'", ")", ":", "# Numpy", "m", "=", "np", ".", "load", "(", "filename", ")", "else", ":", "# loadtxt", "m", "=", "{", "'Intensity'", ":", "np", ".", "loadtxt", "(", "filename", ")", "}", "name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "errorfilename", "=", "name", "+", "'_error'", "+", "ext", "if", "os", ".", "path", ".", "exists", "(", "errorfilename", ")", ":", "m", "[", "'Error'", "]", "=", "np", ".", "loadtxt", "(", "errorfilename", ")", "Intensity", "=", "m", "[", "'Intensity'", "]", "try", ":", "Error", "=", "m", "[", "'Error'", "]", "return", "Intensity", ",", "Error", "except", ":", "return", "Intensity", ",", "None" ]
Read corrected intensity and error matrices (Matlab mat or numpy npz
format for Beamline B1 (HASYLAB/DORISIII))

Input
-----
filename: string
    the name of the file

Outputs
-------
two ``np.ndarray``-s, the Intensity and the Error matrices

File formats supported:
-----------------------

``.mat``
    Matlab MAT file, with (at least) two fields: Intensity and Error

``.npz``
    Numpy zip file, with (at least) two fields: Intensity and Error

other
    the file is opened with ``np.loadtxt``. An attempt is made to load the
    error matrix from the file ``<name>_error<ext>``, where the intensity
    was loaded from file ``<name><ext>``. I.e. if ``somedir/matrix.dat``
    is given, the existence of ``somedir/matrix_error.dat`` is checked.
    If not found, None is returned for the error matrix.

Notes
-----
The non-existence of the Intensity matrix results in an exception. If the
Error matrix does not exist, None is returned for it.
[ "Read", "corrected", "intensity", "and", "error", "matrices", "(", "Matlab", "mat", "or", "numpy", "npz", "format", "for", "Beamline", "B1", "(", "HASYLAB", "/", "DORISIII", "))" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/twodim.py#L253-L303
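A hedged sketch of the text-file convention described above (paths are placeholders):

    from sastool.io.twodim import readint2dnorm

    Intensity, Error = readint2dnorm('somedir/matrix.dat')
    if Error is None:
        # no somedir/matrix_error.dat was found next to the intensity file
        print('proceeding without an error matrix')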
awacha/sastool
sastool/io/twodim.py
writeint2dnorm
def writeint2dnorm(filename, Intensity, Error=None): """Save the intensity and error matrices to a file Inputs ------ filename: string the name of the file Intensity: np.ndarray the intensity matrix Error: np.ndarray, optional the error matrix (can be ``None``, if no error matrix is to be saved) Output ------ None """ whattosave = {'Intensity': Intensity} if Error is not None: whattosave['Error'] = Error if filename.upper().endswith('.NPZ'): np.savez(filename, **whattosave) elif filename.upper().endswith('.MAT'): scipy.io.savemat(filename, whattosave) else: # text file np.savetxt(filename, Intensity) if Error is not None: name, ext = os.path.splitext(filename) np.savetxt(name + '_error' + ext, Error)
python
def writeint2dnorm(filename, Intensity, Error=None): """Save the intensity and error matrices to a file Inputs ------ filename: string the name of the file Intensity: np.ndarray the intensity matrix Error: np.ndarray, optional the error matrix (can be ``None``, if no error matrix is to be saved) Output ------ None """ whattosave = {'Intensity': Intensity} if Error is not None: whattosave['Error'] = Error if filename.upper().endswith('.NPZ'): np.savez(filename, **whattosave) elif filename.upper().endswith('.MAT'): scipy.io.savemat(filename, whattosave) else: # text file np.savetxt(filename, Intensity) if Error is not None: name, ext = os.path.splitext(filename) np.savetxt(name + '_error' + ext, Error)
[ "def", "writeint2dnorm", "(", "filename", ",", "Intensity", ",", "Error", "=", "None", ")", ":", "whattosave", "=", "{", "'Intensity'", ":", "Intensity", "}", "if", "Error", "is", "not", "None", ":", "whattosave", "[", "'Error'", "]", "=", "Error", "if", "filename", ".", "upper", "(", ")", ".", "endswith", "(", "'.NPZ'", ")", ":", "np", ".", "savez", "(", "filename", ",", "*", "*", "whattosave", ")", "elif", "filename", ".", "upper", "(", ")", ".", "endswith", "(", "'.MAT'", ")", ":", "scipy", ".", "io", ".", "savemat", "(", "filename", ",", "whattosave", ")", "else", ":", "# text file", "np", ".", "savetxt", "(", "filename", ",", "Intensity", ")", "if", "Error", "is", "not", "None", ":", "name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "np", ".", "savetxt", "(", "name", "+", "'_error'", "+", "ext", ",", "Error", ")" ]
Save the intensity and error matrices to a file Inputs ------ filename: string the name of the file Intensity: np.ndarray the intensity matrix Error: np.ndarray, optional the error matrix (can be ``None``, if no error matrix is to be saved) Output ------ None
[ "Save", "the", "intensity", "and", "error", "matrices", "to", "a", "file" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/twodim.py#L306-L333
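A short round-trip sketch for writeint2dnorm through its ``.npz`` branch (assuming sastool is importable); np.load verifies that both matrices were stored:

import numpy as np
from sastool.io.twodim import writeint2dnorm

intensity = np.random.rand(8, 8)
error = 0.05 * intensity
writeint2dnorm('corrected.npz', intensity, error)
with np.load('corrected.npz') as f:  # both fields end up in the zip archive
    assert np.allclose(f['Intensity'], intensity)
    assert np.allclose(f['Error'], error)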
awacha/sastool
sastool/io/twodim.py
readmask
def readmask(filename, fieldname=None): """Try to load a maskfile from a matlab(R) matrix file Inputs ------ filename: string the input file name fieldname: string, optional field in the mat file. None to autodetect. Outputs ------- the mask in a numpy array of type np.uint8 """ f = scipy.io.loadmat(filename) if fieldname is not None: return f[fieldname].astype(np.uint8) else: validkeys = [ k for k in list(f.keys()) if not (k.startswith('_') and k.endswith('_'))] if len(validkeys) < 1: raise ValueError('mask file contains no masks!') if len(validkeys) > 1: raise ValueError('mask file contains multiple masks!') return f[validkeys[0]].astype(np.uint8)
python
def readmask(filename, fieldname=None): """Try to load a maskfile from a matlab(R) matrix file Inputs ------ filename: string the input file name fieldname: string, optional field in the mat file. None to autodetect. Outputs ------- the mask in a numpy array of type np.uint8 """ f = scipy.io.loadmat(filename) if fieldname is not None: return f[fieldname].astype(np.uint8) else: validkeys = [ k for k in list(f.keys()) if not (k.startswith('_') and k.endswith('_'))] if len(validkeys) < 1: raise ValueError('mask file contains no masks!') if len(validkeys) > 1: raise ValueError('mask file contains multiple masks!') return f[validkeys[0]].astype(np.uint8)
[ "def", "readmask", "(", "filename", ",", "fieldname", "=", "None", ")", ":", "f", "=", "scipy", ".", "io", ".", "loadmat", "(", "filename", ")", "if", "fieldname", "is", "not", "None", ":", "return", "f", "[", "fieldname", "]", ".", "astype", "(", "np", ".", "uint8", ")", "else", ":", "validkeys", "=", "[", "k", "for", "k", "in", "list", "(", "f", ".", "keys", "(", ")", ")", "if", "not", "(", "k", ".", "startswith", "(", "'_'", ")", "and", "k", ".", "endswith", "(", "'_'", ")", ")", "]", "if", "len", "(", "validkeys", ")", "<", "1", ":", "raise", "ValueError", "(", "'mask file contains no masks!'", ")", "if", "len", "(", "validkeys", ")", ">", "1", ":", "raise", "ValueError", "(", "'mask file contains multiple masks!'", ")", "return", "f", "[", "validkeys", "[", "0", "]", "]", ".", "astype", "(", "np", ".", "uint8", ")" ]
Try to load a maskfile from a matlab(R) matrix file Inputs ------ filename: string the input file name fieldname: string, optional field in the mat file. None to autodetect. Outputs ------- the mask in a numpy array of type np.uint8
[ "Try", "to", "load", "a", "maskfile", "from", "a", "matlab", "(", "R", ")", "matrix", "file" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/twodim.py#L336-L360
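A sketch of the autodetection path of readmask (assuming sastool and scipy are importable): scipy.io.loadmat adds private ``__...__`` keys, which the filter above discards, leaving the single real mask field:

import numpy as np
import scipy.io
from sastool.io.twodim import readmask

scipy.io.savemat('mask.mat', {'beamstop': np.ones((16, 16))})
mask = readmask('mask.mat')  # exactly one non-private field: autodetected
assert mask.dtype == np.uint8 and mask.shape == (16, 16)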
awacha/sastool
sastool/io/twodim.py
readedf
def readedf(filename): """Read an ESRF data file (measured at beamlines ID01 or ID02) Inputs ------ filename: string the input file name Output ------ the imported EDF structure in a dict. The scattering pattern is under key 'data'. Notes ----- Only datatype ``FloatValue`` is supported right now. """ edf = header.readehf(filename) with open(filename, 'rb') as f: f.read(edf['EDF_HeaderSize']) # skip header. if edf['DataType'] == 'FloatValue': dtype = np.float32 else: raise NotImplementedError( 'Not supported data type: %s' % edf['DataType']) edf['data'] = np.frombuffer(f.read(edf['EDF_BinarySize']), dtype).reshape( edf['Dim_1'], edf['Dim_2']).copy() # frombuffer gives a read-only view; copy keeps the array writable return edf
python
def readedf(filename): """Read an ESRF data file (measured at beamlines ID01 or ID02) Inputs ------ filename: string the input file name Output ------ the imported EDF structure in a dict. The scattering pattern is under key 'data'. Notes ----- Only datatype ``FloatValue`` is supported right now. """ edf = header.readehf(filename) with open(filename, 'rb') as f: f.read(edf['EDF_HeaderSize']) # skip header. if edf['DataType'] == 'FloatValue': dtype = np.float32 else: raise NotImplementedError( 'Not supported data type: %s' % edf['DataType']) edf['data'] = np.frombuffer(f.read(edf['EDF_BinarySize']), dtype).reshape( edf['Dim_1'], edf['Dim_2']).copy() # frombuffer gives a read-only view; copy keeps the array writable return edf
[ "def", "readedf", "(", "filename", ")", ":", "edf", "=", "header", ".", "readehf", "(", "filename", ")", "f", "=", "open", "(", "filename", ",", "'rb'", ")", "f", ".", "read", "(", "edf", "[", "'EDF_HeaderSize'", "]", ")", "# skip header.", "if", "edf", "[", "'DataType'", "]", "==", "'FloatValue'", ":", "dtype", "=", "np", ".", "float32", "else", ":", "raise", "NotImplementedError", "(", "'Not supported data type: %s'", "%", "edf", "[", "'DataType'", "]", ")", "edf", "[", "'data'", "]", "=", "np", ".", "fromstring", "(", "f", ".", "read", "(", "edf", "[", "'EDF_BinarySize'", "]", ")", ",", "dtype", ")", ".", "reshape", "(", "edf", "[", "'Dim_1'", "]", ",", "edf", "[", "'Dim_2'", "]", ")", "return", "edf" ]
Read an ESRF data file (measured at beamlines ID01 or ID02) Inputs ------ filename: string the input file name Output ------ the imported EDF structure in a dict. The scattering pattern is under key 'data'. Notes ----- Only datatype ``FloatValue`` is supported right now.
[ "Read", "an", "ESRF", "data", "file", "(", "measured", "at", "beamlines", "ID01", "or", "ID02", ")" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/twodim.py#L363-L390
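The core decoding step of readedf, isolated on a synthetic buffer (a sketch only; in a real EDF file the dimensions and data type come from the ASCII header parsed by header.readehf):

import numpy as np

dim1, dim2 = 3, 2  # hypothetical Dim_1 / Dim_2 header values
raw = np.arange(dim1 * dim2, dtype=np.float32).tobytes()
data = np.frombuffer(raw, np.float32).reshape(dim1, dim2)
assert data.shape == (3, 2)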
awacha/sastool
sastool/io/twodim.py
readbdfv2
def readbdfv2(filename, bdfext='.bdf', bhfext='.bhf'): """Read a version 2 Bessy Data File Inputs ------ filename: string the name of the input file. One can give the complete header or datafile name or just the base name without the extensions. bdfext: string, optional the extension of the data file bhfext: string, optional the extension of the header file Output ------ the data structure in a dict. Header is loaded implicitly. Notes ----- BDFv2 header and scattering data are stored separately in the header and the data files. Given the file name both are loaded. """ datas = header.readbhfv2(filename, True, bdfext, bhfext) return datas
python
def readbdfv2(filename, bdfext='.bdf', bhfext='.bhf'): """Read a version 2 Bessy Data File Inputs ------ filename: string the name of the input file. One can give the complete header or datafile name or just the base name without the extensions. bdfext: string, optional the extension of the data file bhfext: string, optional the extension of the header file Output ------ the data structure in a dict. Header is loaded implicitly. Notes ----- BDFv2 header and scattering data are stored separately in the header and the data files. Given the file name both are loaded. """ datas = header.readbhfv2(filename, True, bdfext, bhfext) return datas
[ "def", "readbdfv2", "(", "filename", ",", "bdfext", "=", "'.bdf'", ",", "bhfext", "=", "'.bhf'", ")", ":", "datas", "=", "header", ".", "readbhfv2", "(", "filename", ",", "True", ",", "bdfext", ",", "bhfext", ")", "return", "datas" ]
Read a version 2 Bessy Data File Inputs ------ filename: string the name of the input file. One can give the complete header or datafile name or just the base name without the extensions. bdfext: string, optional the extension of the data file bhfext: string, optional the extension of the header file Output ------ the data structure in a dict. Header is loaded implicitly. Notes ----- BDFv2 header and scattering data are stored separately in the header and the data files. Given the file name both are loaded.
[ "Read", "a", "version", "2", "Bessy", "Data", "File" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/twodim.py#L393-L416
awacha/sastool
sastool/io/twodim.py
readmar
def readmar(filename): """Read a two-dimensional scattering pattern from a MarResearch .image file. """ hed = header.readmarheader(filename) with open(filename, 'rb') as f: f.read(hed['recordlength']) # skip the header record data = np.frombuffer( f.read(2 * hed['Xsize'] * hed['Ysize']), '<u2').astype(np.float64) if hed['highintensitypixels'] > 0: raise NotImplementedError( 'Intensities over 65535 are not yet supported!') data = data.reshape(hed['Xsize'], hed['Ysize']) return data, hed
python
def readmar(filename): """Read a two-dimensional scattering pattern from a MarResearch .image file. """ hed = header.readmarheader(filename) with open(filename, 'rb') as f: f.read(hed['recordlength']) # skip the header record data = np.frombuffer( f.read(2 * hed['Xsize'] * hed['Ysize']), '<u2').astype(np.float64) if hed['highintensitypixels'] > 0: raise NotImplementedError( 'Intensities over 65535 are not yet supported!') data = data.reshape(hed['Xsize'], hed['Ysize']) return data, hed
[ "def", "readmar", "(", "filename", ")", ":", "hed", "=", "header", ".", "readmarheader", "(", "filename", ")", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "h", "=", "f", ".", "read", "(", "hed", "[", "'recordlength'", "]", ")", "data", "=", "np", ".", "fromstring", "(", "f", ".", "read", "(", "2", "*", "hed", "[", "'Xsize'", "]", "*", "hed", "[", "'Ysize'", "]", ")", ",", "'<u2'", ")", ".", "astype", "(", "np", ".", "float64", ")", "if", "hed", "[", "'highintensitypixels'", "]", ">", "0", ":", "raise", "NotImplementedError", "(", "'Intensities over 65535 are not yet supported!'", ")", "data", "=", "data", ".", "reshape", "(", "hed", "[", "'Xsize'", "]", ",", "hed", "[", "'Ysize'", "]", ")", "return", "data", ",", "hed" ]
Read a two-dimensional scattering pattern from a MarResearch .image file.
[ "Read", "a", "two", "-", "dimensional", "scattering", "pattern", "from", "a", "MarResearch", ".", "image", "file", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/twodim.py#L423-L435
awacha/sastool
sastool/io/twodim.py
writebdfv2
def writebdfv2(filename, bdf, bdfext='.bdf', bhfext='.bhf'): """Write a version 2 Bessy Data File Inputs ------ filename: string the name of the output file. One can give the complete header or datafile name or just the base name without the extensions. bdf: dict the BDF structure (in the same format as loaded by ``readbdfv2()``) bdfext: string, optional the extension of the data file bhfext: string, optional the extension of the header file Output ------ None Notes ----- BDFv2 header and scattering data are stored separately in the header and the data files. Given the file name both are saved. """ if filename.endswith(bdfext): basename = filename[:-len(bdfext)] elif filename.endswith(bhfext): basename = filename[:-len(bhfext)] else: basename = filename header.writebhfv2(basename + '.bhf', bdf) f = open(basename + '.bdf', 'wb') keys = ['RAWDATA', 'RAWERROR', 'CORRDATA', 'CORRERROR', 'NANDATA'] keys.extend( [x for x in list(bdf.keys()) if isinstance(bdf[x], np.ndarray) and x not in keys]) for k in keys: if k not in list(bdf.keys()): continue f.write(('#%s[%d:%d]\n' % (k, bdf['xdim'], bdf['ydim'])).encode('ascii')) # the file is binary: the ASCII block header must be encoded f.write(np.rot90(bdf[k], 3).astype('float32').tobytes(order='F')) f.close()
python
def writebdfv2(filename, bdf, bdfext='.bdf', bhfext='.bhf'): """Write a version 2 Bessy Data File Inputs ------ filename: string the name of the output file. One can give the complete header or datafile name or just the base name without the extensions. bdf: dict the BDF structure (in the same format as loaded by ``readbdfv2()``) bdfext: string, optional the extension of the data file bhfext: string, optional the extension of the header file Output ------ None Notes ----- BDFv2 header and scattering data are stored separately in the header and the data files. Given the file name both are saved. """ if filename.endswith(bdfext): basename = filename[:-len(bdfext)] elif filename.endswith(bhfext): basename = filename[:-len(bhfext)] else: basename = filename header.writebhfv2(basename + '.bhf', bdf) f = open(basename + '.bdf', 'wb') keys = ['RAWDATA', 'RAWERROR', 'CORRDATA', 'CORRERROR', 'NANDATA'] keys.extend( [x for x in list(bdf.keys()) if isinstance(bdf[x], np.ndarray) and x not in keys]) for k in keys: if k not in list(bdf.keys()): continue f.write(('#%s[%d:%d]\n' % (k, bdf['xdim'], bdf['ydim'])).encode('ascii')) # the file is binary: the ASCII block header must be encoded f.write(np.rot90(bdf[k], 3).astype('float32').tobytes(order='F')) f.close()
[ "def", "writebdfv2", "(", "filename", ",", "bdf", ",", "bdfext", "=", "'.bdf'", ",", "bhfext", "=", "'.bhf'", ")", ":", "if", "filename", ".", "endswith", "(", "bdfext", ")", ":", "basename", "=", "filename", "[", ":", "-", "len", "(", "bdfext", ")", "]", "elif", "filename", ".", "endswith", "(", "bhfext", ")", ":", "basename", "=", "filename", "[", ":", "-", "len", "(", "bhfext", ")", "]", "else", ":", "basename", "=", "filename", "header", ".", "writebhfv2", "(", "basename", "+", "'.bhf'", ",", "bdf", ")", "f", "=", "open", "(", "basename", "+", "'.bdf'", ",", "'wb'", ")", "keys", "=", "[", "'RAWDATA'", ",", "'RAWERROR'", ",", "'CORRDATA'", ",", "'CORRERROR'", ",", "'NANDATA'", "]", "keys", ".", "extend", "(", "[", "x", "for", "x", "in", "list", "(", "bdf", ".", "keys", "(", ")", ")", "if", "isinstance", "(", "bdf", "[", "x", "]", ",", "np", ".", "ndarray", ")", "and", "x", "not", "in", "keys", "]", ")", "for", "k", "in", "keys", ":", "if", "k", "not", "in", "list", "(", "bdf", ".", "keys", "(", ")", ")", ":", "continue", "f", ".", "write", "(", "'#%s[%d:%d]\\n'", "%", "(", "k", ",", "bdf", "[", "'xdim'", "]", ",", "bdf", "[", "'ydim'", "]", ")", ")", "f", ".", "write", "(", "np", ".", "rot90", "(", "bdf", "[", "k", "]", ",", "3", ")", ".", "astype", "(", "'float32'", ")", ".", "tostring", "(", "order", "=", "'F'", ")", ")", "f", ".", "close", "(", ")" ]
Write a version 2 Bessy Data File Inputs ------ filename: string the name of the output file. One can give the complete header or datafile name or just the base name without the extensions. bdf: dict the BDF structure (in the same format as loaded by ``readbdfv2()``) bdfext: string, optional the extension of the data file bhfext: string, optional the extension of the header file Output ------ None Notes ----- BDFv2 header and scattering data are stored separately in the header and the data files. Given the file name both are saved.
[ "Write", "a", "version", "2", "Bessy", "Data", "File" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/twodim.py#L438-L478
awacha/sastool
sastool/io/twodim.py
rebinmask
def rebinmask(mask, binx, biny, enlarge=False): """Re-bin (shrink or enlarge) a mask matrix. Inputs ------ mask: np.ndarray mask matrix. binx: integer binning along the 0th axis biny: integer binning along the 1st axis enlarge: bool, optional direction of binning. If True, the matrix will be enlarged, otherwise shrunk (this is the default) Output ------ the binned mask matrix, of shape ``M/binx`` times ``N/biny`` or ``M*binx`` times ``N*biny``, depending on the value of ``enlarge`` (if ``mask`` is ``M`` times ``N`` pixels). Notes ----- one is nonmasked, zero is masked. """ if not enlarge and ((mask.shape[0] % binx) or (mask.shape[1] % biny)): raise ValueError( 'The number of pixels of the mask matrix should be divisible by the binning in each direction!') if enlarge: return mask.repeat(binx, axis=0).repeat(biny, axis=1) else: return mask[::binx, ::biny]
python
def rebinmask(mask, binx, biny, enlarge=False): """Re-bin (shrink or enlarge) a mask matrix. Inputs ------ mask: np.ndarray mask matrix. binx: integer binning along the 0th axis biny: integer binning along the 1st axis enlarge: bool, optional direction of binning. If True, the matrix will be enlarged, otherwise shrunk (this is the default) Output ------ the binned mask matrix, of shape ``M/binx`` times ``N/biny`` or ``M*binx`` times ``N*biny``, depending on the value of ``enlarge`` (if ``mask`` is ``M`` times ``N`` pixels). Notes ----- one is nonmasked, zero is masked. """ if not enlarge and ((mask.shape[0] % binx) or (mask.shape[1] % biny)): raise ValueError( 'The number of pixels of the mask matrix should be divisible by the binning in each direction!') if enlarge: return mask.repeat(binx, axis=0).repeat(biny, axis=1) else: return mask[::binx, ::biny]
[ "def", "rebinmask", "(", "mask", ",", "binx", ",", "biny", ",", "enlarge", "=", "False", ")", ":", "if", "not", "enlarge", "and", "(", "(", "mask", ".", "shape", "[", "0", "]", "%", "binx", ")", "or", "(", "mask", ".", "shape", "[", "1", "]", "%", "biny", ")", ")", ":", "raise", "ValueError", "(", "'The number of pixels of the mask matrix should be divisible by the binning in each direction!'", ")", "if", "enlarge", ":", "return", "mask", ".", "repeat", "(", "binx", ",", "axis", "=", "0", ")", ".", "repeat", "(", "biny", ",", "axis", "=", "1", ")", "else", ":", "return", "mask", "[", ":", ":", "binx", ",", ":", ":", "biny", "]" ]
Re-bin (shrink or enlarge) a mask matrix. Inputs ------ mask: np.ndarray mask matrix. binx: integer binning along the 0th axis biny: integer binning along the 1st axis enlarge: bool, optional direction of binning. If True, the matrix will be enlarged, otherwise shrunk (this is the default) Output ------ the binned mask matrix, of shape ``M/binx`` times ``N/biny`` or ``M*binx`` times ``N*biny``, depending on the value of ``enlarge`` (if ``mask`` is ``M`` times ``N`` pixels). Notes ----- one is nonmasked, zero is masked.
[ "Re", "-", "bin", "(", "shrink", "or", "enlarge", ")", "a", "mask", "matrix", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/twodim.py#L481-L512
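A small sketch of both directions of rebinmask (assuming sastool is importable): shrinking keeps every binx-th row and biny-th column, enlarging repeats pixels:

import numpy as np
from sastool.io.twodim import rebinmask

mask = np.ones((4, 6), dtype=np.uint8)
small = rebinmask(mask, 2, 3)               # every 2nd row, every 3rd column
assert small.shape == (2, 2)
big = rebinmask(small, 2, 3, enlarge=True)  # pixels repeated back to (4, 6)
assert big.shape == (4, 6)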
ClearcodeHQ/querystringsafe_base64
src/querystringsafe_base64/__init__.py
fill_padding
def fill_padding(padded_string): # type: (bytes) -> bytes """ Fill up missing padding in a string. This function makes sure that the string's length is a multiple of 4, and if not, fills the missing places with dots. :param bytes padded_string: string to be decoded that might miss padding dots. :return: properly padded string :rtype: bytes """ length = len(padded_string) remainder = length % 4 if remainder: return padded_string.ljust(length + 4 - remainder, b'.') return padded_string
python
def fill_padding(padded_string): # type: (bytes) -> bytes """ Fill up missing padding in a string. This function makes sure that the string's length is a multiple of 4, and if not, fills the missing places with dots. :param bytes padded_string: string to be decoded that might miss padding dots. :return: properly padded string :rtype: bytes """ length = len(padded_string) remainder = length % 4 if remainder: return padded_string.ljust(length + 4 - remainder, b'.') return padded_string
[ "def", "fill_padding", "(", "padded_string", ")", ":", "# type: (bytes) -> bytes", "length", "=", "len", "(", "padded_string", ")", "reminder", "=", "len", "(", "padded_string", ")", "%", "4", "if", "reminder", ":", "return", "padded_string", ".", "ljust", "(", "length", "+", "4", "-", "reminder", ",", "b'.'", ")", "return", "padded_string" ]
Fill up missing padding in a string. This function makes sure that the string's length is a multiple of 4, and if not, fills the missing places with dots. :param bytes padded_string: string to be decoded that might miss padding dots. :return: properly padded string :rtype: bytes
[ "Fill", "up", "missing", "padding", "in", "a", "string", "." ]
train
https://github.com/ClearcodeHQ/querystringsafe_base64/blob/5353c4a8e275435d9d38356a0db64e5c43198ccd/src/querystringsafe_base64/__init__.py#L27-L43
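Two illustrative cases for fill_padding, assuming the package is importable under the name shown in its path above:

from querystringsafe_base64 import fill_padding

assert fill_padding(b'YQ') == b'YQ..'    # length 2: two dots appended
assert fill_padding(b'Zm9v') == b'Zm9v'  # length already a multiple of 4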
ClearcodeHQ/querystringsafe_base64
src/querystringsafe_base64/__init__.py
decode
def decode(encoded): # type: (bytes) -> bytes """ Decode the result of querystringsafe_base64_encode or a regular base64. .. note :: As a regular base64 string does not contain dots, replacing dots with equal signs does basically nothing to it. Also, base64.urlsafe_b64decode can decode both safe and unsafe base64. Therefore this function may also be used to decode the regular base64. :param (str, unicode) encoded: querystringsafe_base64 string or unicode :rtype: str, bytes :return: decoded string """ padded_string = fill_padding(encoded) return urlsafe_b64decode(padded_string.replace(b'.', b'='))
python
def decode(encoded): # type: (bytes) -> bytes """ Decode the result of querystringsafe_base64_encode or a regular base64. .. note :: As a regular base64 string does not contain dots, replacing dots with equal signs does basically nothing to it. Also, base64.urlsafe_b64decode can decode both safe and unsafe base64. Therefore this function may also be used to decode the regular base64. :param (str, unicode) encoded: querystringsafe_base64 string or unicode :rtype: str, bytes :return: decoded string """ padded_string = fill_padding(encoded) return urlsafe_b64decode(padded_string.replace(b'.', b'='))
[ "def", "decode", "(", "encoded", ")", ":", "# type: (bytes) -> bytes", "padded_string", "=", "fill_padding", "(", "encoded", ")", "return", "urlsafe_b64decode", "(", "padded_string", ".", "replace", "(", "b'.'", ",", "b'='", ")", ")" ]
Decode the result of querystringsafe_base64_encode or a regular base64. .. note :: As a regular base64 string does not contain dots, replacing dots with equal signs does basically nothing to it. Also, base64.urlsafe_b64decode can decode both safe and unsafe base64. Therefore this function may also be used to decode the regular base64. :param (str, unicode) encoded: querystringsafe_base64 string or unicode :rtype: str, bytes :return: decoded string
[ "Decode", "the", "result", "of", "querystringsafe_base64_encode", "or", "a", "regular", "base64", "." ]
train
https://github.com/ClearcodeHQ/querystringsafe_base64/blob/5353c4a8e275435d9d38356a0db64e5c43198ccd/src/querystringsafe_base64/__init__.py#L63-L79
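A sketch of decode on both the dot-padded form and plain urlsafe base64 (assuming the package is importable); urlsafe_b64encode(b'a') is b'YQ==', whose query-string-safe form is b'YQ..':

from base64 import urlsafe_b64encode
from querystringsafe_base64 import decode

assert decode(b'YQ..') == b'a'                      # dots restored to '='
assert decode(urlsafe_b64encode(b'foo')) == b'foo'  # regular base64 also decodes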
awacha/sastool
sastool/misc/utils.py
normalize_listargument
def normalize_listargument(arg): """Check if arg is an iterable (list, tuple, set, dict, np.ndarray, except string!). If not, make a list of it. Numpy arrays are flattened (and returned as flat arrays, not lists).""" if isinstance(arg, np.ndarray): return arg.flatten() if isinstance(arg, str): return [arg] if isinstance(arg, (list, tuple, dict, set)): return list(arg) return [arg]
python
def normalize_listargument(arg): """Check if arg is an iterable (list, tuple, set, dict, np.ndarray, except string!). If not, make a list of it. Numpy arrays are flattened (and returned as flat arrays, not lists).""" if isinstance(arg, np.ndarray): return arg.flatten() if isinstance(arg, str): return [arg] if isinstance(arg, (list, tuple, dict, set)): return list(arg) return [arg]
[ "def", "normalize_listargument", "(", "arg", ")", ":", "if", "isinstance", "(", "arg", ",", "np", ".", "ndarray", ")", ":", "return", "arg", ".", "flatten", "(", ")", "if", "isinstance", "(", "arg", ",", "str", ")", ":", "return", "[", "arg", "]", "if", "isinstance", "(", "arg", ",", "list", ")", "or", "isinstance", "(", "arg", ",", "tuple", ")", "or", "isinstance", "(", "arg", ",", "dict", ")", "or", "isinstance", "(", "arg", ",", "set", ")", ":", "return", "list", "(", "arg", ")", "return", "[", "arg", "]" ]
Check if arg is an iterable (list, tuple, set, dict, np.ndarray, except string!). If not, make a list of it. Numpy arrays are flattened (and returned as flat arrays, not lists).
[ "Check", "if", "arg", "is", "an", "iterable", "(", "list", "tuple", "set", "dict", "np", ".", "ndarray", "except", "string!", ")", ".", "If", "not", "make", "a", "list", "of", "it", ".", "Numpy", "arrays", "are", "flattened", "and", "converted", "to", "lists", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/utils.py#L27-L37
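A few representative inputs for normalize_listargument (assuming sastool is importable); strings are wrapped rather than iterated, and numpy arrays come back flattened:

import numpy as np
from sastool.misc.utils import normalize_listargument

assert normalize_listargument('single') == ['single']  # strings are not split up
assert normalize_listargument((1, 2)) == [1, 2]
assert normalize_listargument(5) == [5]
assert normalize_listargument(np.arange(4).reshape(2, 2)).shape == (4,)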
awacha/sastool
sastool/misc/utils.py
parse_number
def parse_number(val, use_dateutilparser=False): """Try to auto-detect the numeric type of the value. First a conversion to int is tried. If this fails, float is tried, and if that fails too, str() is executed. If this also fails, a ValueError is raised. """ if use_dateutilparser: funcs = [int, float, parse_list_from_string, dateutil.parser.parse, str] else: funcs = [int, float, parse_list_from_string, str] if (val.strip().startswith("'") and val.strip().endswith("'")) or (val.strip().startswith('"') and val.strip().endswith('"')): return val[1:-1] for f in funcs: try: return f(val) # eat exception except (ValueError, UnicodeEncodeError, UnicodeDecodeError): pass raise ValueError('Cannot parse number:', val)
python
def parse_number(val, use_dateutilparser=False): """Try to auto-detect the numeric type of the value. First a conversion to int is tried. If this fails, float is tried, and if that fails too, str() is executed. If this also fails, a ValueError is raised. """ if use_dateutilparser: funcs = [int, float, parse_list_from_string, dateutil.parser.parse, str] else: funcs = [int, float, parse_list_from_string, str] if (val.strip().startswith("'") and val.strip().endswith("'")) or (val.strip().startswith('"') and val.strip().endswith('"')): return val[1:-1] for f in funcs: try: return f(val) # eat exception except (ValueError, UnicodeEncodeError, UnicodeDecodeError): pass raise ValueError('Cannot parse number:', val)
[ "def", "parse_number", "(", "val", ",", "use_dateutilparser", "=", "False", ")", ":", "if", "use_dateutilparser", ":", "funcs", "=", "[", "int", ",", "float", ",", "parse_list_from_string", ",", "dateutil", ".", "parser", ".", "parse", ",", "str", "]", "else", ":", "funcs", "=", "[", "int", ",", "float", ",", "parse_list_from_string", ",", "str", "]", "if", "(", "val", ".", "strip", "(", ")", ".", "startswith", "(", "\"'\"", ")", "and", "val", ".", "strip", "(", ")", ".", "endswith", "(", "\"'\"", ")", ")", "or", "(", "val", ".", "strip", "(", ")", ".", "startswith", "(", "'\"'", ")", "and", "val", ".", "strip", "(", ")", ".", "endswith", "(", "'\"'", ")", ")", ":", "return", "val", "[", "1", ":", "-", "1", "]", "for", "f", "in", "funcs", ":", "try", ":", "return", "f", "(", "val", ")", "# eat exception", "except", "(", "ValueError", ",", "UnicodeEncodeError", ",", "UnicodeDecodeError", ")", "as", "ve", ":", "pass", "raise", "ValueError", "(", "'Cannot parse number:'", ",", "val", ")" ]
Try to auto-detect the numeric type of the value. First a conversion to int is tried. If this fails, float is tried, and if that fails too, str() is executed. If this also fails, a ValueError is raised.
[ "Try", "to", "auto", "-", "detect", "the", "numeric", "type", "of", "the", "value", ".", "First", "a", "conversion", "to", "int", "is", "tried", ".", "If", "this", "fails", "float", "is", "tried", "and", "if", "that", "fails", "too", "unicode", "()", "is", "executed", ".", "If", "this", "also", "fails", "a", "ValueError", "is", "raised", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/utils.py#L40-L58
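Typical conversions performed by parse_number; a sketch assuming sastool is importable and that parse_list_from_string (defined elsewhere in the same module) raises ValueError on non-list input:

from sastool.misc.utils import parse_number

assert parse_number('42') == 42              # int is tried first
assert parse_number('3.5') == 3.5            # then float
assert parse_number('"quoted"') == 'quoted'  # surrounding quotes are stripped
assert parse_number('hello') == 'hello'      # str() is the final fallback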
awacha/sastool
sastool/misc/utils.py
flatten_hierarchical_dict
def flatten_hierarchical_dict(original_dict, separator='.', max_recursion_depth=None): """Flatten a dict. Inputs ------ original_dict: dict the dictionary to flatten separator: string, optional the separator item in the keys of the flattened dictionary max_recursion_depth: positive integer, optional the number of recursions to be done. None is infinite. Output ------ the flattened dictionary Notes ----- Each element of `original_dict` which is not an instance of `dict` (or of a subclass of it) is kept as is. The others are treated as follows. If ``original_dict['key_dict']`` is an instance of `dict` (or of a subclass of `dict`), a corresponding key of the form ``key_dict<separator><key_in_key_dict>`` will be created in ``original_dict`` with the value of ``original_dict['key_dict']['key_in_key_dict']``. If that value is a subclass of `dict` as well, the same procedure is repeated until the maximum recursion depth is reached. Only string keys are supported. """ if max_recursion_depth is not None and max_recursion_depth <= 0: # we reached the maximum recursion depth, refuse to go further return original_dict if max_recursion_depth is None: next_recursion_depth = None else: next_recursion_depth = max_recursion_depth - 1 dict1 = {} for k in original_dict: if not isinstance(original_dict[k], dict): dict1[k] = original_dict[k] else: dict_recursed = flatten_hierarchical_dict( original_dict[k], separator, next_recursion_depth) dict1.update( dict([(k + separator + x, dict_recursed[x]) for x in dict_recursed])) return dict1
python
def flatten_hierarchical_dict(original_dict, separator='.', max_recursion_depth=None): """Flatten a dict. Inputs ------ original_dict: dict the dictionary to flatten separator: string, optional the separator item in the keys of the flattened dictionary max_recursion_depth: positive integer, optional the number of recursions to be done. None is infinite. Output ------ the flattened dictionary Notes ----- Each element of `original_dict` which is not an instance of `dict` (or of a subclass of it) is kept as is. The others are treated as follows. If ``original_dict['key_dict']`` is an instance of `dict` (or of a subclass of `dict`), a corresponding key of the form ``key_dict<separator><key_in_key_dict>`` will be created in ``original_dict`` with the value of ``original_dict['key_dict']['key_in_key_dict']``. If that value is a subclass of `dict` as well, the same procedure is repeated until the maximum recursion depth is reached. Only string keys are supported. """ if max_recursion_depth is not None and max_recursion_depth <= 0: # we reached the maximum recursion depth, refuse to go further return original_dict if max_recursion_depth is None: next_recursion_depth = None else: next_recursion_depth = max_recursion_depth - 1 dict1 = {} for k in original_dict: if not isinstance(original_dict[k], dict): dict1[k] = original_dict[k] else: dict_recursed = flatten_hierarchical_dict( original_dict[k], separator, next_recursion_depth) dict1.update( dict([(k + separator + x, dict_recursed[x]) for x in dict_recursed])) return dict1
[ "def", "flatten_hierarchical_dict", "(", "original_dict", ",", "separator", "=", "'.'", ",", "max_recursion_depth", "=", "None", ")", ":", "if", "max_recursion_depth", "is", "not", "None", "and", "max_recursion_depth", "<=", "0", ":", "# we reached the maximum recursion depth, refuse to go further", "return", "original_dict", "if", "max_recursion_depth", "is", "None", ":", "next_recursion_depth", "=", "None", "else", ":", "next_recursion_depth", "=", "max_recursion_depth", "-", "1", "dict1", "=", "{", "}", "for", "k", "in", "original_dict", ":", "if", "not", "isinstance", "(", "original_dict", "[", "k", "]", ",", "dict", ")", ":", "dict1", "[", "k", "]", "=", "original_dict", "[", "k", "]", "else", ":", "dict_recursed", "=", "flatten_hierarchical_dict", "(", "original_dict", "[", "k", "]", ",", "separator", ",", "next_recursion_depth", ")", "dict1", ".", "update", "(", "dict", "(", "[", "(", "k", "+", "separator", "+", "x", ",", "dict_recursed", "[", "x", "]", ")", "for", "x", "in", "dict_recursed", "]", ")", ")", "return", "dict1" ]
Flatten a dict. Inputs ------ original_dict: dict the dictionary to flatten separator: string, optional the separator item in the keys of the flattened dictionary max_recursion_depth: positive integer, optional the number of recursions to be done. None is infinite. Output ------ the flattened dictionary Notes ----- Each element of `original_dict` which is not an instance of `dict` (or of a subclass of it) is kept as is. The others are treated as follows. If ``original_dict['key_dict']`` is an instance of `dict` (or of a subclass of `dict`), a corresponding key of the form ``key_dict<separator><key_in_key_dict>`` will be created in ``original_dict`` with the value of ``original_dict['key_dict']['key_in_key_dict']``. If that value is a subclass of `dict` as well, the same procedure is repeated until the maximum recursion depth is reached. Only string keys are supported.
[ "Flatten", "a", "dict", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/utils.py#L61-L107
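A small worked example for flatten_hierarchical_dict (assuming sastool is importable), flattening two levels of nesting with the default separator:

from sastool.misc.utils import flatten_hierarchical_dict

nested = {'geometry': {'dist': 1.2, 'beam': {'x': 64, 'y': 61}}, 'title': 'AgBeh'}
flat = flatten_hierarchical_dict(nested)
assert flat == {'geometry.dist': 1.2, 'geometry.beam.x': 64,
                'geometry.beam.y': 61, 'title': 'AgBeh'}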
awacha/sastool
sastool/misc/utils.py
random_str
def random_str(Nchars=6, randstrbase='0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'): """Return a random string of <Nchars> characters. Characters are sampled uniformly from <randstrbase>. """ return ''.join([randstrbase[random.randint(0, len(randstrbase) - 1)] for i in range(Nchars)])
python
def random_str(Nchars=6, randstrbase='0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'): """Return a random string of <Nchars> characters. Characters are sampled uniformly from <randstrbase>. """ return ''.join([randstrbase[random.randint(0, len(randstrbase) - 1)] for i in range(Nchars)])
[ "def", "random_str", "(", "Nchars", "=", "6", ",", "randstrbase", "=", "'0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'", ")", ":", "return", "''", ".", "join", "(", "[", "randstrbase", "[", "random", ".", "randint", "(", "0", ",", "len", "(", "randstrbase", ")", "-", "1", ")", "]", "for", "i", "in", "range", "(", "Nchars", ")", "]", ")" ]
Return a random string of <Nchars> characters. Characters are sampled uniformly from <randstrbase>.
[ "Return", "a", "random", "string", "of", "<Nchars", ">", "characters", ".", "Characters", "are", "sampled", "uniformly", "from", "<randstrbase", ">", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/utils.py#L115-L119
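random_str in action (assuming sastool is importable); the default alphabet is alphanumeric, so the result passes isalnum():

from sastool.misc.utils import random_str

s = random_str(8)
assert len(s) == 8 and s.isalnum()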
awacha/sastool
sastool/io/statistics.py
listB1
def listB1(fsns, xlsname, dirs, whattolist = None, headerformat = 'org_%05d.header'): """ getsamplenames revisited, XLS output. Inputs: fsns: FSN sequence xlsname: XLS file name to output listing dirs: either a single directory (string) or a list of directories, a la readheader() whattolist: format specifier for listing. Should be a list of tuples. Each tuple corresponds to a column in the worksheet, in sequence. The first element of each tuple is the column title, e.g. 'Distance' or 'Calibrated energy (eV)'. The second element is either the corresponding field in the header dictionary ('Dist' or 'EnergyCalibrated'), or a tuple of them, e.g. ('FSN', 'Title', 'Energy'). If the column-descriptor tuple does not have a third element, the string representation of each field (str(param[i][fieldname])) will be written in the corresponding cell. If a third element is present, it is treated as a format string, and the values of the fields are substituted. headerformat: C-style format string of header file names (e.g. org_%05d.header) Outputs: an XLS workbook is saved. Notes: if whattolist is not specified exactly (i.e. is None), then the output is similar to getsamplenames(). module xlwt is needed in order for this function to work. If it cannot be imported, the other functions may work, only this function will raise a NotImplementedError. """ if whattolist is None: whattolist = [('FSN', 'FSN'), ('Time', 'MeasTime'), ('Energy', 'Energy'), ('Distance', 'Dist'), ('Position', 'PosSample'), ('Transmission', 'Transm'), ('Temperature', 'Temperature'), ('Title', 'Title'), ('Date', ('Day', 'Month', 'Year', 'Hour', 'Minutes'), '%02d.%02d.%04d %02d:%02d')] wb = xlwt.Workbook(encoding = 'utf8') ws = wb.add_sheet('Measurements') for i in range(len(whattolist)): ws.write(0, i, whattolist[i][0]) i = 1 for fsn in fsns: try: hed = readB1header(findfileindirs(headerformat % fsn, dirs)) except IOError: continue # for each param structure create a line in the table for j in range(len(whattolist)): # for each parameter to be listed, create a column if np.isscalar(whattolist[j][1]): # if the parameter is a scalar, make it a list fields = tuple([whattolist[j][1]]) else: fields = whattolist[j][1] if len(whattolist[j]) == 2: if len(fields) >= 2: strtowrite = ''.join([str(hed[f]) for f in fields]) else: strtowrite = hed[fields[0]] elif len(whattolist[j]) >= 3: strtowrite = whattolist[j][2] % tuple([hed[f] for f in fields]) else: assert False ws.write(i, j, strtowrite) i += 1 wb.save(xlsname)
python
def listB1(fsns, xlsname, dirs, whattolist = None, headerformat = 'org_%05d.header'): """ getsamplenames revisited, XLS output. Inputs: fsns: FSN sequence xlsname: XLS file name to output listing dirs: either a single directory (string) or a list of directories, a la readheader() whattolist: format specifier for listing. Should be a list of tuples. Each tuple corresponds to a column in the worksheet, in sequence. The first element of each tuple is the column title, e.g. 'Distance' or 'Calibrated energy (eV)'. The second element is either the corresponding field in the header dictionary ('Dist' or 'EnergyCalibrated'), or a tuple of them, e.g. ('FSN', 'Title', 'Energy'). If the column-descriptor tuple does not have a third element, the string representation of each field (str(param[i][fieldname])) will be written in the corresponding cell. If a third element is present, it is treated as a format string, and the values of the fields are substituted. headerformat: C-style format string of header file names (e.g. org_%05d.header) Outputs: an XLS workbook is saved. Notes: if whattolist is not specified exactly (i.e. is None), then the output is similar to getsamplenames(). module xlwt is needed in order for this function to work. If it cannot be imported, the other functions may work, only this function will raise a NotImplementedError. """ if whattolist is None: whattolist = [('FSN', 'FSN'), ('Time', 'MeasTime'), ('Energy', 'Energy'), ('Distance', 'Dist'), ('Position', 'PosSample'), ('Transmission', 'Transm'), ('Temperature', 'Temperature'), ('Title', 'Title'), ('Date', ('Day', 'Month', 'Year', 'Hour', 'Minutes'), '%02d.%02d.%04d %02d:%02d')] wb = xlwt.Workbook(encoding = 'utf8') ws = wb.add_sheet('Measurements') for i in range(len(whattolist)): ws.write(0, i, whattolist[i][0]) i = 1 for fsn in fsns: try: hed = readB1header(findfileindirs(headerformat % fsn, dirs)) except IOError: continue # for each param structure create a line in the table for j in range(len(whattolist)): # for each parameter to be listed, create a column if np.isscalar(whattolist[j][1]): # if the parameter is a scalar, make it a list fields = tuple([whattolist[j][1]]) else: fields = whattolist[j][1] if len(whattolist[j]) == 2: if len(fields) >= 2: strtowrite = ''.join([str(hed[f]) for f in fields]) else: strtowrite = hed[fields[0]] elif len(whattolist[j]) >= 3: strtowrite = whattolist[j][2] % tuple([hed[f] for f in fields]) else: assert False ws.write(i, j, strtowrite) i += 1 wb.save(xlsname)
[ "def", "listB1", "(", "fsns", ",", "xlsname", ",", "dirs", ",", "whattolist", "=", "None", ",", "headerformat", "=", "'org_%05d.header'", ")", ":", "if", "whattolist", "is", "None", ":", "whattolist", "=", "[", "(", "'FSN'", ",", "'FSN'", ")", ",", "(", "'Time'", ",", "'MeasTime'", ")", ",", "(", "'Energy'", ",", "'Energy'", ")", ",", "(", "'Distance'", ",", "'Dist'", ")", ",", "(", "'Position'", ",", "'PosSample'", ")", ",", "(", "'Transmission'", ",", "'Transm'", ")", ",", "(", "'Temperature'", ",", "'Temperature'", ")", ",", "(", "'Title'", ",", "'Title'", ")", ",", "(", "'Date'", ",", "(", "'Day'", ",", "'Month'", ",", "'Year'", ",", "'Hour'", ",", "'Minutes'", ")", ",", "'%02d.%02d.%04d %02d:%02d'", ")", "]", "wb", "=", "xlwt", ".", "Workbook", "(", "encoding", "=", "'utf8'", ")", "ws", "=", "wb", ".", "add_sheet", "(", "'Measurements'", ")", "for", "i", "in", "range", "(", "len", "(", "whattolist", ")", ")", ":", "ws", ".", "write", "(", "0", ",", "i", ",", "whattolist", "[", "i", "]", "[", "0", "]", ")", "i", "=", "1", "for", "fsn", "in", "fsns", ":", "try", ":", "hed", "=", "readB1header", "(", "findfileindirs", "(", "headerformat", "%", "fsn", ",", "dirs", ")", ")", "except", "IOError", ":", "continue", "# for each param structure create a line in the table", "for", "j", "in", "range", "(", "len", "(", "whattolist", ")", ")", ":", "# for each parameter to be listed, create a column", "if", "np", ".", "isscalar", "(", "whattolist", "[", "j", "]", "[", "1", "]", ")", ":", "# if the parameter is a scalar, make it a list", "fields", "=", "tuple", "(", "[", "whattolist", "[", "j", "]", "[", "1", "]", "]", ")", "else", ":", "fields", "=", "whattolist", "[", "j", "]", "[", "1", "]", "if", "len", "(", "whattolist", "[", "j", "]", ")", "==", "2", ":", "if", "len", "(", "fields", ")", ">=", "2", ":", "strtowrite", "=", "''", ".", "join", "(", "[", "str", "(", "hed", "[", "f", "]", ")", "for", "f", "in", "fields", "]", ")", "else", ":", "strtowrite", "=", "hed", "[", "fields", "[", "0", "]", "]", "elif", "len", "(", "whattolist", "[", "j", "]", ")", ">=", "3", ":", "strtowrite", "=", "whattolist", "[", "j", "]", "[", "2", "]", "%", "tuple", "(", "[", "hed", "[", "f", "]", "for", "f", "in", "fields", "]", ")", "else", ":", "assert", "False", "ws", ".", "write", "(", "i", ",", "j", ",", "strtowrite", ")", "i", "+=", "1", "wb", ".", "save", "(", "xlsname", ")" ]
getsamplenames revisited, XLS output. Inputs: fsns: FSN sequence xlsname: XLS file name to output listing dirs: either a single directory (string) or a list of directories, a la readheader() whattolist: format specifier for listing. Should be a list of tuples. Each tuple corresponds to a column in the worksheet, in sequence. The first element of each tuple is the column title, e.g. 'Distance' or 'Calibrated energy (eV)'. The second element is either the corresponding field in the header dictionary ('Dist' or 'EnergyCalibrated'), or a tuple of them, e.g. ('FSN', 'Title', 'Energy'). If the column-descriptor tuple does not have a third element, the string representation of each field (str(param[i][fieldname])) will be written in the corresponding cell. If a third element is present, it is treated as a format string, and the values of the fields are substituted. headerformat: C-style format string of header file names (e.g. org_%05d.header) Outputs: an XLS workbook is saved. Notes: if whattolist is not specified exactly (i.e. is None), then the output is similar to getsamplenames(). module xlwt is needed in order for this function to work. If it cannot be imported, the other functions may work, only this function will raise a NotImplementedError.
[ "getsamplenames", "revisited", "XLS", "output", ".", "Inputs", ":", "fsns", ":", "FSN", "sequence", "xlsname", ":", "XLS", "file", "name", "to", "output", "listing", "dirs", ":", "either", "a", "single", "directory", "(", "string", ")", "or", "a", "list", "of", "directories", "a", "la", "readheader", "()", "whattolist", ":", "format", "specifier", "for", "listing", ".", "Should", "be", "a", "list", "of", "tuples", ".", "Each", "tuple", "corresponds", "to", "a", "column", "in", "the", "worksheet", "in", "sequence", ".", "The", "first", "element", "of", "each", "tuple", "is", "the", "column", "title", "eg", ".", "Distance", "or", "Calibrated", "energy", "(", "eV", ")", ".", "The", "second", "element", "is", "either", "the", "corresponding", "field", "in", "the", "header", "dictionary", "(", "Dist", "or", "EnergyCalibrated", ")", "or", "a", "tuple", "of", "them", "eg", ".", "(", "FSN", "Title", "Energy", ")", ".", "If", "the", "column", "-", "descriptor", "tuple", "does", "not", "have", "a", "third", "element", "the", "string", "representation", "of", "each", "field", "(", "str", "(", "param", "[", "i", "]", "[", "fieldname", "]", "))", "will", "be", "written", "in", "the", "corresponding", "cell", ".", "If", "a", "third", "element", "is", "present", "it", "is", "treated", "as", "a", "format", "string", "and", "the", "values", "of", "the", "fields", "are", "substituted", ".", "headerformat", ":", "C", "-", "style", "format", "string", "of", "header", "file", "names", "(", "e", ".", "g", ".", "org_%05d", ".", "header", ")", "Outputs", ":", "an", "XLS", "workbook", "is", "saved", ".", "Notes", ":", "if", "whattolist", "is", "not", "specified", "exactly", "(", "ie", ".", "is", "None", ")", "then", "the", "output", "is", "similar", "to", "getsamplenames", "()", ".", "module", "xlwt", "is", "needed", "in", "order", "for", "this", "function", "to", "work", ".", "If", "it", "cannot", "be", "imported", "the", "other", "functions", "may", "work", "only", "this", "function", "will", "raise", "a", "NotImplementedError", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/statistics.py#L105-L167
awacha/sastool
sastool/fitting/standalone.py
fit_shullroess
def fit_shullroess(q, Intensity, Error, R0=None, r=None): """Do a Shull-Roess fitting on the scattering data. Inputs: q: np.ndarray[ndim=1] vector of the q values (4*pi*sin(theta)/lambda) Intensity: np.ndarray[ndim=1] Intensity vector Error: np.ndarray[ndim=1] Error of the intensity (absolute uncertainty, 1sigma) R0: scalar first guess for the mean radius (None to autodetermine, default) r: np.ndarray[ndim=1] vector of the abscissa of the resulting size distribution (None to autodetermine, default) Output: A: ErrorValue the fitted value of the intensity scaling factor r0: the r0 parameter of the maxwellian size distribution n: the n parameter of the maxwellian size distribution r: the abscissa of the fitted size distribution maxw: the size distribution stat: the statistics dictionary, returned by nlsq_fit() Note: This first searches for r0, which best linearizes the log(Intensity) vs. log(q**2+3/r0**2) relation. After this is found, the parameters of the fitted line give the parameters of a Maxwellian-like particle size distribution function. Afterwards, a proper least-squares fit is carried out, using the obtained values as initial parameters. """ q = np.array(q) Intensity = np.array(Intensity) Error = np.array(Error) if R0 is None: r0s = np.linspace(1, 2 * np.pi / q.min(), 1000) def naive_fit_chi2(q, Intensity, r0): p = np.polyfit(np.log(q ** 2 + 3 / r0 ** 2), np.log(Intensity), 1) return ((np.polyval(p, q) - Intensity) ** 2).sum() / (len(q) - 3) chi2 = np.array([naive_fit_chi2(q, Intensity, r0) for r0 in r0s.tolist()]) R0 = r0s[chi2 == chi2.min()][0] def naive_fit(q, Intensity, r0): p = np.polyfit(np.log(q ** 2 + 3 / r0 ** 2), np.log(Intensity), 1) return np.exp(p[1]), -2 * p[0] - 4 K, n = naive_fit(q, Intensity, R0) def SR_function(q, A, r0, n): return A * (q ** 2 + 3 / r0 ** 2) ** (-(n + 4.) * 0.5) p, dp, statdict = easylsq.nlsq_fit(q, Intensity, Error, SR_function, (K, R0, n)) n = ErrorValue(p[2], dp[2]) r0 = ErrorValue(p[1], dp[1]) A = ErrorValue(p[0], dp[0]) if r is None: r = np.linspace(np.pi / q.max(), np.pi / q.min(), 1000) return A, r0, n, r, maxwellian(r, r0, n), statdict
python
def fit_shullroess(q, Intensity, Error, R0=None, r=None): """Do a Shull-Roess fitting on the scattering data. Inputs: q: np.ndarray[ndim=1] vector of the q values (4*pi*sin(theta)/lambda) Intensity: np.ndarray[ndim=1] Intensity vector Error: np.ndarray[ndim=1] Error of the intensity (absolute uncertainty, 1sigma) R0: scalar first guess for the mean radius (None to autodetermine, default) r: np.ndarray[ndim=1] vector of the abscissa of the resulting size distribution (None to autodetermine, default) Output: A: ErrorValue the fitted value of the intensity scaling factor r0: the r0 parameter of the maxwellian size distribution n: the n parameter of the maxwellian size distribution r: the abscissa of the fitted size distribution maxw: the size distribution stat: the statistics dictionary, returned by nlsq_fit() Note: This first searches for r0, which best linearizes the log(Intensity) vs. log(q**2+3/r0**2) relation. After this is found, the parameters of the fitted line give the parameters of a Maxwellian-like particle size distribution function. Afterwards, a proper least-squares fit is carried out, using the obtained values as initial parameters. """ q = np.array(q) Intensity = np.array(Intensity) Error = np.array(Error) if R0 is None: r0s = np.linspace(1, 2 * np.pi / q.min(), 1000) def naive_fit_chi2(q, Intensity, r0): p = np.polyfit(np.log(q ** 2 + 3 / r0 ** 2), np.log(Intensity), 1) return ((np.polyval(p, q) - Intensity) ** 2).sum() / (len(q) - 3) chi2 = np.array([naive_fit_chi2(q, Intensity, r0) for r0 in r0s.tolist()]) R0 = r0s[chi2 == chi2.min()][0] def naive_fit(q, Intensity, r0): p = np.polyfit(np.log(q ** 2 + 3 / r0 ** 2), np.log(Intensity), 1) return np.exp(p[1]), -2 * p[0] - 4 K, n = naive_fit(q, Intensity, R0) def SR_function(q, A, r0, n): return A * (q ** 2 + 3 / r0 ** 2) ** (-(n + 4.) * 0.5) p, dp, statdict = easylsq.nlsq_fit(q, Intensity, Error, SR_function, (K, R0, n)) n = ErrorValue(p[2], dp[2]) r0 = ErrorValue(p[1], dp[1]) A = ErrorValue(p[0], dp[0]) if r is None: r = np.linspace(np.pi / q.max(), np.pi / q.min(), 1000) return A, r0, n, r, maxwellian(r, r0, n), statdict
[ "def", "fit_shullroess", "(", "q", ",", "Intensity", ",", "Error", ",", "R0", "=", "None", ",", "r", "=", "None", ")", ":", "q", "=", "np", ".", "array", "(", "q", ")", "Intensity", "=", "np", ".", "array", "(", "Intensity", ")", "Error", "=", "np", ".", "array", "(", "Error", ")", "if", "R0", "is", "None", ":", "r0s", "=", "np", ".", "linspace", "(", "1", ",", "2", "*", "np", ".", "pi", "/", "q", ".", "min", "(", ")", ",", "1000", ")", "def", "naive_fit_chi2", "(", "q", ",", "Intensity", ",", "r0", ")", ":", "p", "=", "np", ".", "polyfit", "(", "np", ".", "log", "(", "q", "**", "2", "+", "3", "/", "r0", "**", "2", ")", ",", "np", ".", "log", "(", "Intensity", ")", ",", "1", ")", "return", "(", "(", "np", ".", "polyval", "(", "p", ",", "q", ")", "-", "Intensity", ")", "**", "2", ")", ".", "sum", "(", ")", "/", "(", "len", "(", "q", ")", "-", "3", ")", "chi2", "=", "np", ".", "array", "(", "[", "naive_fit_chi2", "(", "q", ",", "Intensity", ",", "r0", ")", "for", "r0", "in", "r0s", ".", "tolist", "(", ")", "]", ")", "R0", "=", "r0s", "[", "chi2", "==", "chi2", ".", "min", "(", ")", "]", "[", "0", "]", "def", "naive_fit", "(", "q", ",", "Intensity", ",", "r0", ")", ":", "p", "=", "np", ".", "polyfit", "(", "np", ".", "log", "(", "q", "**", "2", "+", "3", "/", "r0", "**", "2", ")", ",", "np", ".", "log", "(", "Intensity", ")", ",", "1", ")", "return", "np", ".", "exp", "(", "p", "[", "1", "]", ")", ",", "-", "2", "*", "p", "[", "0", "]", "-", "4", "K", ",", "n", "=", "naive_fit", "(", "q", ",", "Intensity", ",", "R0", ")", "def", "SR_function", "(", "q", ",", "A", ",", "r0", ",", "n", ")", ":", "return", "A", "*", "(", "q", "**", "2", "+", "3", "/", "r0", "**", "2", ")", "**", "(", "-", "(", "n", "+", "4.", ")", "*", "0.5", ")", "p", ",", "dp", ",", "statdict", "=", "easylsq", ".", "nlsq_fit", "(", "q", ",", "Intensity", ",", "Error", ",", "SR_function", ",", "(", "K", ",", "R0", ",", "n", ")", ")", "n", "=", "ErrorValue", "(", "p", "[", "2", "]", ",", "dp", "[", "2", "]", ")", "r0", "=", "ErrorValue", "(", "p", "[", "1", "]", ",", "dp", "[", "1", "]", ")", "A", "=", "ErrorValue", "(", "p", "[", "0", "]", ",", "dp", "[", "0", "]", ")", "if", "r", "is", "None", ":", "r", "=", "np", ".", "linspace", "(", "np", ".", "pi", "/", "q", ".", "max", "(", ")", ",", "np", ".", "pi", "/", "q", ".", "min", "(", ")", ",", "1000", ")", "return", "A", ",", "r0", ",", "n", ",", "r", ",", "maxwellian", "(", "r", ",", "r0", ",", "n", ")", ",", "statdict" ]
Do a Shull-Roess fitting on the scattering data. Inputs: q: np.ndarray[ndim=1] vector of the q values (4*pi*sin(theta)/lambda) Intensity: np.ndarray[ndim=1] Intensity vector Error: np.ndarray[ndim=1] Error of the intensity (absolute uncertainty, 1sigma) R0: scalar first guess for the mean radius (None to autodetermine, default) r: np.ndarray[ndim=1] vector of the abscissa of the resulting size distribution (None to autodetermine, default) Output: A: ErrorValue the fitted value of the intensity scaling factor r0: the r0 parameter of the maxwellian size distribution n: the n parameter of the maxwellian size distribution r: the abscissa of the fitted size distribution maxw: the size distribution stat: the statistics dictionary, returned by nlsq_fit() Note: This first searches for r0, which best linearizes the log(Intensity) vs. log(q**2+3/r0**2) relation. After this is found, the parameters of the fitted line give the parameters of a Maxwellian-like particle size distribution function. Afterwards, a proper least-squares fit is carried out, using the obtained values as initial parameters.
[ "Do", "a", "Shull", "-", "Roess", "fitting", "on", "the", "scattering", "data", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/standalone.py#L11-L65
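A self-consistency sketch for fit_shullroess (assuming sastool and its fitting dependencies are importable): the data are generated from the Shull-Roess model itself with A=10, r0=20, n=5, so the fit should recover these values within the reported uncertainties:

import numpy as np
from sastool.fitting.standalone import fit_shullroess

q = np.linspace(0.01, 0.5, 200)
Intensity = 10.0 * (q ** 2 + 3 / 20.0 ** 2) ** (-(5 + 4.0) * 0.5)
Error = 0.01 * Intensity
A, r0, n, r, maxw, stat = fit_shullroess(q, Intensity, Error)
print(A, r0, n)  # expected: roughly 10, 20 and 5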
awacha/sastool
sastool/fitting/standalone.py
maxwellian
def maxwellian(r, r0, n): """Maxwellian-like distribution of spherical particles Inputs: ------- r: np.ndarray or scalar radii r0: positive scalar or ErrorValue mean radius n: positive scalar or ErrorValue "n" parameter Output: ------- the distribution function and its uncertainty as an ErrorValue containing arrays. The uncertainty of 'r0' and 'n' is taken into account. Notes: ------ M(r)=2*r^n/r0^(n+1)*exp(-r^2/r0^2) / gamma((n+1)/2) """ r0 = ErrorValue(r0) n = ErrorValue(n) expterm = np.exp(-r ** 2 / r0.val ** 2) dmaxdr0 = -2 * r ** n.val * r0.val ** (-n.val - 4) * ((n.val + 1) * r0.val ** 2 - 2 * r ** 2) * expterm / gamma((n.val + 1) * 0.5) dmaxdn = -r ** n.val * r0.val ** (-n.val - 1) * expterm * (2 * np.log(r0.val) - 2 * np.log(r) + psi((n.val + 1) * 0.5)) / gamma((n.val + 1) * 0.5) maxwellian = 2 * r ** n.val * r0.val ** (-n.val - 1) * expterm / gamma((n.val + 1) * 0.5) dmaxwellian = (dmaxdn ** 2 * n.err ** 2 + dmaxdr0 ** 2 * r0.err ** 2) ** 0.5 return ErrorValue(maxwellian, dmaxwellian)
python
def maxwellian(r, r0, n): """Maxwellian-like distribution of spherical particles Inputs: ------- r: np.ndarray or scalar radii r0: positive scalar or ErrorValue mean radius n: positive scalar or ErrorValue "n" parameter Output: ------- the distribution function and its uncertainty as an ErrorValue containing arrays. The uncertainty of 'r0' and 'n' is taken into account. Notes: ------ M(r)=2*r^n/r0^(n+1)*exp(-r^2/r0^2) / gamma((n+1)/2) """ r0 = ErrorValue(r0) n = ErrorValue(n) expterm = np.exp(-r ** 2 / r0.val ** 2) dmaxdr0 = -2 * r ** n.val * r0.val ** (-n.val - 4) * ((n.val + 1) * r0.val ** 2 - 2 * r ** 2) * expterm / gamma((n.val + 1) * 0.5) dmaxdn = -r ** n.val * r0.val ** (-n.val - 1) * expterm * (2 * np.log(r0.val) - 2 * np.log(r) + psi((n.val + 1) * 0.5)) / gamma((n.val + 1) * 0.5) maxwellian = 2 * r ** n.val * r0.val ** (-n.val - 1) * expterm / gamma((n.val + 1) * 0.5) dmaxwellian = (dmaxdn ** 2 * n.err ** 2 + dmaxdr0 ** 2 * r0.err ** 2) ** 0.5 return ErrorValue(maxwellian, dmaxwellian)
[ "def", "maxwellian", "(", "r", ",", "r0", ",", "n", ")", ":", "r0", "=", "ErrorValue", "(", "r0", ")", "n", "=", "ErrorValue", "(", "n", ")", "expterm", "=", "np", ".", "exp", "(", "-", "r", "**", "2", "/", "r0", ".", "val", "**", "2", ")", "dmaxdr0", "=", "-", "2", "*", "r", "**", "n", ".", "val", "*", "r0", ".", "val", "**", "(", "-", "n", ".", "val", "-", "4", ")", "*", "(", "(", "n", ".", "val", "+", "1", ")", "*", "r0", ".", "val", "**", "2", "-", "2", "*", "r", "**", "2", ")", "*", "expterm", "/", "gamma", "(", "(", "n", ".", "val", "+", "1", ")", "*", "0.5", ")", "dmaxdn", "=", "-", "r", "**", "n", ".", "val", "*", "r0", ".", "val", "**", "(", "-", "n", ".", "val", "-", "1", ")", "*", "expterm", "*", "(", "2", "*", "np", ".", "log", "(", "r0", ".", "val", ")", "-", "2", "*", "np", ".", "log", "(", "r", ")", "+", "psi", "(", "(", "n", ".", "val", "+", "1", ")", "*", "0.5", ")", ")", "/", "gamma", "(", "(", "n", ".", "val", "+", "1", ")", "*", "0.5", ")", "maxwellian", "=", "2", "*", "r", "**", "n", ".", "val", "*", "r0", ".", "val", "**", "(", "-", "n", ".", "val", "-", "1", ")", "*", "expterm", "/", "gamma", "(", "(", "n", ".", "val", "+", "1", ")", "*", "0.5", ")", "dmaxwellian", "=", "(", "dmaxdn", "**", "2", "*", "n", ".", "err", "**", "2", "+", "dmaxdr0", "**", "2", "*", "r0", ".", "err", "**", "2", ")", "**", "0.5", "return", "ErrorValue", "(", "maxwellian", ",", "dmaxwellian", ")" ]
Maxwellian-like distribution of spherical particles Inputs: ------- r: np.ndarray or scalar radii r0: positive scalar or ErrorValue mean radius n: positive scalar or ErrorValue "n" parameter Output: ------- the distribution function and its uncertainty as an ErrorValue containing arrays. The uncertainty of 'r0' and 'n' is taken into account. Notes: ------ M(r)=2*r^n/r0^(n+1)*exp(-r^2/r0^2) / gamma((n+1)/2)
[ "Maxwellian", "-", "like", "distribution", "of", "spherical", "particles", "Inputs", ":", "-------", "r", ":", "np", ".", "ndarray", "or", "scalar", "radii", "r0", ":", "positive", "scalar", "or", "ErrorValue", "mean", "radius", "n", ":", "positive", "scalar", "or", "ErrorValue", "n", "parameter", "Output", ":", "-------", "the", "distribution", "function", "and", "its", "uncertainty", "as", "an", "ErrorValue", "containing", "arrays", ".", "The", "uncertainty", "of", "r0", "and", "n", "is", "taken", "into", "account", ".", "Notes", ":", "------", "M", "(", "r", ")", "=", "2", "*", "r^n", "/", "r0^", "(", "n", "+", "1", ")", "*", "exp", "(", "-", "r^2", "/", "r0^2", ")", "/", "gamma", "((", "n", "+", "1", ")", "/", "2", ")" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/standalone.py#L67-L97
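Usage sketch for the record above. It assumes `maxwellian` and `ErrorValue` are importable as in the module; the radii grid and parameter values are purely illustrative:

import numpy as np
# illustrative radii grid (strictly positive, since the derivative w.r.t. n uses log(r))
r = np.linspace(0.1, 30.0, 500)
dist = maxwellian(r, r0=ErrorValue(10.0, 0.5), n=ErrorValue(3.0, 0.1))
print(dist.val.shape, dist.err.shape)  # distribution values and propagated uncertainties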
awacha/sastool
sastool/misc/pathutils.py
findfileindirs
def findfileindirs(filename, dirs=None, use_pythonpath=True, use_searchpath=True, notfound_is_fatal=True, notfound_val=None): """Find file in multiple directories. Inputs: filename: the file name to be searched for. dirs: list of folders or None use_pythonpath: use the Python module search path use_searchpath: use the sastool search path. notfound_is_fatal: whether an exception is to be raised if the file cannot be found. notfound_val: the value which should be returned if the file is not found (only relevant if notfound_is_fatal is False) Outputs: the full path of the file. Notes: if filename is an absolute path by itself, folders in 'dirs' won't be checked, only the existence of the file will be verified. """ if os.path.isabs(filename): if os.path.exists(filename): return filename elif notfound_is_fatal: raise IOError('File ' + filename + ' not found.') else: return notfound_val if dirs is None: dirs = [] dirs = normalize_listargument(dirs) if not dirs: # dirs is empty dirs = ['.'] if use_pythonpath: dirs.extend(sys.path) if use_searchpath: dirs.extend(sastool_search_path) # expand ~ and ~user constructs dirs = [os.path.expanduser(d) for d in dirs] logger.debug('Searching for file %s in several folders: %s' % (filename, ', '.join(dirs))) for d in dirs: if os.path.exists(os.path.join(d, filename)): logger.debug('Found file %s in folder %s.' % (filename, d)) return os.path.join(d, filename) logger.debug('Not found file %s in any folders.' % filename) if notfound_is_fatal: raise IOError('File %s not found in any of the directories.' % filename) else: return notfound_val
python
def findfileindirs(filename, dirs=None, use_pythonpath=True, use_searchpath=True, notfound_is_fatal=True, notfound_val=None): """Find file in multiple directories. Inputs: filename: the file name to be searched for. dirs: list of folders or None use_pythonpath: use the Python module search path use_searchpath: use the sastool search path. notfound_is_fatal: whether an exception is to be raised if the file cannot be found. notfound_val: the value which should be returned if the file is not found (only relevant if notfound_is_fatal is False) Outputs: the full path of the file. Notes: if filename is an absolute path by itself, folders in 'dirs' won't be checked, only the existence of the file will be verified. """ if os.path.isabs(filename): if os.path.exists(filename): return filename elif notfound_is_fatal: raise IOError('File ' + filename + ' not found.') else: return notfound_val if dirs is None: dirs = [] dirs = normalize_listargument(dirs) if not dirs: # dirs is empty dirs = ['.'] if use_pythonpath: dirs.extend(sys.path) if use_searchpath: dirs.extend(sastool_search_path) # expand ~ and ~user constructs dirs = [os.path.expanduser(d) for d in dirs] logger.debug('Searching for file %s in several folders: %s' % (filename, ', '.join(dirs))) for d in dirs: if os.path.exists(os.path.join(d, filename)): logger.debug('Found file %s in folder %s.' % (filename, d)) return os.path.join(d, filename) logger.debug('Not found file %s in any folders.' % filename) if notfound_is_fatal: raise IOError('File %s not found in any of the directories.' % filename) else: return notfound_val
[ "def", "findfileindirs", "(", "filename", ",", "dirs", "=", "None", ",", "use_pythonpath", "=", "True", ",", "use_searchpath", "=", "True", ",", "notfound_is_fatal", "=", "True", ",", "notfound_val", "=", "None", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "filename", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "return", "filename", "elif", "notfound_is_fatal", ":", "raise", "IOError", "(", "'File '", "+", "filename", "+", "' not found.'", ")", "else", ":", "return", "notfound_val", "if", "dirs", "is", "None", ":", "dirs", "=", "[", "]", "dirs", "=", "normalize_listargument", "(", "dirs", ")", "if", "not", "dirs", ":", "# dirs is empty", "dirs", "=", "[", "'.'", "]", "if", "use_pythonpath", ":", "dirs", ".", "extend", "(", "sys", ".", "path", ")", "if", "use_searchpath", ":", "dirs", ".", "extend", "(", "sastool_search_path", ")", "# expand ~ and ~user constructs", "dirs", "=", "[", "os", ".", "path", ".", "expanduser", "(", "d", ")", "for", "d", "in", "dirs", "]", "logger", ".", "debug", "(", "'Searching for file %s in several folders: %s'", "%", "(", "filename", ",", "', '", ".", "join", "(", "dirs", ")", ")", ")", "for", "d", "in", "dirs", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "d", ",", "filename", ")", ")", ":", "logger", ".", "debug", "(", "'Found file %s in folder %s.'", "%", "(", "filename", ",", "d", ")", ")", "return", "os", ".", "path", ".", "join", "(", "d", ",", "filename", ")", "logger", ".", "debug", "(", "'Not found file %s in any folders.'", "%", "filename", ")", "if", "notfound_is_fatal", ":", "raise", "IOError", "(", "'File %s not found in any of the directories.'", "%", "filename", ")", "else", ":", "return", "notfound_val" ]
Find file in multiple directories. Inputs: filename: the file name to be searched for. dirs: list of folders or None use_pythonpath: use the Python module search path use_searchpath: use the sastool search path. notfound_is_fatal: whether an exception is to be raised if the file cannot be found. notfound_val: the value which should be returned if the file is not found (only relevant if notfound_is_fatal is False) Outputs: the full path of the file. Notes: if filename is an absolute path by itself, folders in 'dirs' won't be checked, only the existence of the file will be verified.
[ "Find", "file", "in", "multiple", "directories", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/pathutils.py#L17-L63
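A hedged usage sketch for findfileindirs; the file name and directory list are hypothetical:

# look in two explicit folders, then sys.path and the sastool search path;
# return None instead of raising IOError when the file is missing
path = findfileindirs('beamstop.mask', dirs=['~/measurements', '/data/calib'],
                      notfound_is_fatal=False)
if path is None:
    print('mask file not found')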
awacha/sastool
sastool/utils2d/corrections.py
twotheta
def twotheta(matrix, bcx, bcy, pixsizeperdist): """Calculate the two-theta matrix for a scattering matrix Inputs: matrix: only the shape of it is needed bcx, bcy: beam position (counting from 0; x is row, y is column index) pixsizeperdist: the pixel size divided by the sample-to-detector distance Outputs: the two-theta matrix, same shape as 'matrix'. """ col, row = np.meshgrid(list(range(matrix.shape[1])), list(range(matrix.shape[0]))) return np.arctan(np.sqrt((row - bcx) ** 2 + (col - bcy) ** 2) * pixsizeperdist)
python
def twotheta(matrix, bcx, bcy, pixsizeperdist): """Calculate the two-theta matrix for a scattering matrix Inputs: matrix: only the shape of it is needed bcx, bcy: beam position (counting from 0; x is row, y is column index) pixsizeperdist: the pixel size divided by the sample-to-detector distance Outputs: the two-theta matrix, same shape as 'matrix'. """ col, row = np.meshgrid(list(range(matrix.shape[1])), list(range(matrix.shape[0]))) return np.arctan(np.sqrt((row - bcx) ** 2 + (col - bcy) ** 2) * pixsizeperdist)
[ "def", "twotheta", "(", "matrix", ",", "bcx", ",", "bcy", ",", "pixsizeperdist", ")", ":", "col", ",", "row", "=", "np", ".", "meshgrid", "(", "list", "(", "range", "(", "matrix", ".", "shape", "[", "1", "]", ")", ")", ",", "list", "(", "range", "(", "matrix", ".", "shape", "[", "0", "]", ")", ")", ")", "return", "np", ".", "arctan", "(", "np", ".", "sqrt", "(", "(", "row", "-", "bcx", ")", "**", "2", "+", "(", "col", "-", "bcy", ")", "**", "2", ")", "*", "pixsizeperdist", ")" ]
Calculate the two-theta matrix for a scattering matrix Inputs: matrix: only the shape of it is needed bcx, bcy: beam position (counting from 0; x is row, y is column index) pixsizeperdist: the pixel size divided by the sample-to-detector distance Outputs: the two-theta matrix, same shape as 'matrix'.
[ "Calculate", "the", "two", "-", "theta", "matrix", "for", "a", "scattering", "matrix" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/corrections.py#L10-L23
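Illustrative call of twotheta; the detector shape, beam position, and geometry numbers are made up:

import numpy as np
img = np.zeros((256, 256))  # only the shape is used
tth = twotheta(img, bcx=120.3, bcy=131.8,
               pixsizeperdist=0.172 / 1500.0)  # pixel size and distance in the same unit (mm here)
print(tth.shape)  # two-theta in radians, same shape as img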
awacha/sastool
sastool/utils2d/corrections.py
solidangle
def solidangle(twotheta, sampletodetectordistance, pixelsize=None): """Solid-angle correction for two-dimensional SAS images Inputs: twotheta: matrix of two-theta values sampletodetectordistance: sample-to-detector distance pixelsize: the pixel size in mm (optional; if None, it defaults to 1) The output matrix is of the same shape as twotheta. The scattering intensity matrix should be multiplied by it. """ if pixelsize is None: pixelsize = 1 return sampletodetectordistance ** 2 / np.cos(twotheta) ** 3 / pixelsize ** 2
python
def solidangle(twotheta, sampletodetectordistance, pixelsize=None): """Solid-angle correction for two-dimensional SAS images Inputs: twotheta: matrix of two-theta values sampletodetectordistance: sample-to-detector distance pixelsize: the pixel size in mm (optional; if None, it defaults to 1) The output matrix is of the same shape as twotheta. The scattering intensity matrix should be multiplied by it. """ if pixelsize is None: pixelsize = 1 return sampletodetectordistance ** 2 / np.cos(twotheta) ** 3 / pixelsize ** 2
[ "def", "solidangle", "(", "twotheta", ",", "sampletodetectordistance", ",", "pixelsize", "=", "None", ")", ":", "if", "pixelsize", "is", "None", ":", "pixelsize", "=", "1", "return", "sampletodetectordistance", "**", "2", "/", "np", ".", "cos", "(", "twotheta", ")", "**", "3", "/", "pixelsize", "**", "2" ]
Solid-angle correction for two-dimensional SAS images Inputs: twotheta: matrix of two-theta values sampletodetectordistance: sample-to-detector distance pixelsize: the pixel size in mm (optional; if None, it defaults to 1) The output matrix is of the same shape as twotheta. The scattering intensity matrix should be multiplied by it.
[ "Solid", "-", "angle", "correction", "for", "two", "-", "dimensional", "SAS", "images" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/corrections.py#L26-L39
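A short sketch chaining solidangle() with the two-theta matrix from the previous example; the numbers stay illustrative, and `intensity` stands for an assumed detector image shaped like tth:

sac = solidangle(tth, sampletodetectordistance=1500.0, pixelsize=0.172)
corrected = intensity * sac  # multiply the scattering intensity matrix by the correction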
awacha/sastool
sastool/utils2d/corrections.py
solidangle_errorprop
def solidangle_errorprop(twotheta, dtwotheta, sampletodetectordistance, dsampletodetectordistance, pixelsize=None): """Solid-angle correction for two-dimensional SAS images with error propagation Inputs: twotheta: matrix of two-theta values dtwotheta: matrix of absolute error of two-theta values sampletodetectordistance: sample-to-detector distance dsampletodetectordistance: absolute error of sample-to-detector distance pixelsize: the pixel size in mm (optional; if None, it defaults to 1) Outputs two matrices of the same shape as twotheta. The scattering intensity matrix should be multiplied by the first one. The second one is the propagated error of the first one. """ SAC = solidangle(twotheta, sampletodetectordistance, pixelsize) if pixelsize is None: pixelsize = 1 return (SAC, (sampletodetectordistance * (4 * dsampletodetectordistance ** 2 * np.cos(twotheta) ** 2 + 9 * dtwotheta ** 2 * sampletodetectordistance ** 2 * np.sin(twotheta) ** 2) ** 0.5 / np.cos(twotheta) ** 4) / pixelsize ** 2)
python
def solidangle_errorprop(twotheta, dtwotheta, sampletodetectordistance, dsampletodetectordistance, pixelsize=None): """Solid-angle correction for two-dimensional SAS images with error propagation Inputs: twotheta: matrix of two-theta values dtwotheta: matrix of absolute error of two-theta values sampletodetectordistance: sample-to-detector distance dsampletodetectordistance: absolute error of sample-to-detector distance pixelsize: the pixel size in mm (optional; if None, it defaults to 1) Outputs two matrices of the same shape as twotheta. The scattering intensity matrix should be multiplied by the first one. The second one is the propagated error of the first one. """ SAC = solidangle(twotheta, sampletodetectordistance, pixelsize) if pixelsize is None: pixelsize = 1 return (SAC, (sampletodetectordistance * (4 * dsampletodetectordistance ** 2 * np.cos(twotheta) ** 2 + 9 * dtwotheta ** 2 * sampletodetectordistance ** 2 * np.sin(twotheta) ** 2) ** 0.5 / np.cos(twotheta) ** 4) / pixelsize ** 2)
[ "def", "solidangle_errorprop", "(", "twotheta", ",", "dtwotheta", ",", "sampletodetectordistance", ",", "dsampletodetectordistance", ",", "pixelsize", "=", "None", ")", ":", "SAC", "=", "solidangle", "(", "twotheta", ",", "sampletodetectordistance", ",", "pixelsize", ")", "if", "pixelsize", "is", "None", ":", "pixelsize", "=", "1", "return", "(", "SAC", ",", "(", "sampletodetectordistance", "*", "(", "4", "*", "dsampletodetectordistance", "**", "2", "*", "np", ".", "cos", "(", "twotheta", ")", "**", "2", "+", "9", "*", "dtwotheta", "**", "2", "*", "sampletodetectordistance", "**", "2", "*", "np", ".", "sin", "(", "twotheta", ")", "**", "2", ")", "**", "0.5", "/", "np", ".", "cos", "(", "twotheta", ")", "**", "4", ")", "/", "pixelsize", "**", "2", ")" ]
Solid-angle correction for two-dimensional SAS images with error propagation Inputs: twotheta: matrix of two-theta values dtwotheta: matrix of absolute error of two-theta values sampletodetectordistance: sample-to-detector distance dsampletodetectordistance: absolute error of sample-to-detector distance pixelsize: the pixel size in mm (optional; if None, it defaults to 1) Outputs two matrices of the same shape as twotheta. The scattering intensity matrix should be multiplied by the first one. The second one is the propagated error of the first one.
[ "Solid", "-", "angle", "correction", "for", "two", "-", "dimensional", "SAS", "images", "with", "error", "propagation" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/corrections.py#L42-L61
awacha/sastool
sastool/utils2d/corrections.py
angledependentabsorption
def angledependentabsorption(twotheta, transmission): """Correction for angle-dependent absorption of the sample Inputs: twotheta: matrix of two-theta values transmission: the transmission of the sample (I_after/I_before, or exp(-mu*d)) The output matrix is of the same shape as twotheta. The scattering intensity matrix should be multiplied by it. Note that this does not correct for sample transmission by itself, as the 2*theta -> 0 limit of this matrix is unity. Twotheta==0 and transmission==1 cases are handled correctly (the limit is 1 in both cases). """ cor = np.ones(twotheta.shape) if transmission == 1: return cor mud = -np.log(transmission) cor[twotheta > 0] = transmission * mud * (1 - 1 / np.cos(twotheta[twotheta > 0])) / (np.exp(-mud / np.cos(twotheta[twotheta > 0])) - np.exp(-mud)) return cor
python
def angledependentabsorption(twotheta, transmission): """Correction for angle-dependent absorption of the sample Inputs: twotheta: matrix of two-theta values transmission: the transmission of the sample (I_after/I_before, or exp(-mu*d)) The output matrix is of the same shape as twotheta. The scattering intensity matrix should be multiplied by it. Note that this does not correct for sample transmission by itself, as the 2*theta -> 0 limit of this matrix is unity. Twotheta==0 and transmission==1 cases are handled correctly (the limit is 1 in both cases). """ cor = np.ones(twotheta.shape) if transmission == 1: return cor mud = -np.log(transmission) cor[twotheta > 0] = transmission * mud * (1 - 1 / np.cos(twotheta[twotheta > 0])) / (np.exp(-mud / np.cos(twotheta[twotheta > 0])) - np.exp(-mud)) return cor
[ "def", "angledependentabsorption", "(", "twotheta", ",", "transmission", ")", ":", "cor", "=", "np", ".", "ones", "(", "twotheta", ".", "shape", ")", "if", "transmission", "==", "1", ":", "return", "cor", "mud", "=", "-", "np", ".", "log", "(", "transmission", ")", "cor", "[", "twotheta", ">", "0", "]", "=", "transmission", "*", "mud", "*", "(", "1", "-", "1", "/", "np", ".", "cos", "(", "twotheta", "[", "twotheta", ">", "0", "]", ")", ")", "/", "(", "np", ".", "exp", "(", "-", "mud", "/", "np", ".", "cos", "(", "twotheta", "[", "twotheta", ">", "0", "]", ")", ")", "-", "np", ".", "exp", "(", "-", "mud", ")", ")", "return", "cor" ]
Correction for angle-dependent absorption of the sample Inputs: twotheta: matrix of two-theta values transmission: the transmission of the sample (I_after/I_before, or exp(-mu*d)) The output matrix is of the same shape as twotheta. The scattering intensity matrix should be multiplied by it. Note that this does not correct for sample transmission by itself, as the 2*theta -> 0 limit of this matrix is unity. Twotheta==0 and transmission==1 cases are handled correctly (the limit is 1 in both cases).
[ "Correction", "for", "angle", "-", "dependent", "absorption", "of", "the", "sample" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/corrections.py#L63-L83
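Usage sketch; the transmission value is illustrative, and `tth` and `intensity` come from the earlier sketches:

corr = angledependentabsorption(tth, transmission=0.82)
corrected = intensity * corr  # the factor tends to 1 as two-theta -> 0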
awacha/sastool
sastool/utils2d/corrections.py
angledependentabsorption_errorprop
def angledependentabsorption_errorprop(twotheta, dtwotheta, transmission, dtransmission): """Correction for angle-dependent absorption of the sample with error propagation Inputs: twotheta: matrix of two-theta values dtwotheta: matrix of absolute error of two-theta values transmission: the transmission of the sample (I_after/I_before, or exp(-mu*d)) dtransmission: the absolute error of the transmission of the sample Two matrices are returned: the first one is the correction (intensity matrix should be multiplied by it), the second is its absolute error. """ # error propagation formula calculated using sympy return (angledependentabsorption(twotheta, transmission), _calc_angledependentabsorption_error(twotheta, dtwotheta, transmission, dtransmission))
python
def angledependentabsorption_errorprop(twotheta, dtwotheta, transmission, dtransmission): """Correction for angle-dependent absorption of the sample with error propagation Inputs: twotheta: matrix of two-theta values dtwotheta: matrix of absolute error of two-theta values transmission: the transmission of the sample (I_after/I_before, or exp(-mu*d)) dtransmission: the absolute error of the transmission of the sample Two matrices are returned: the first one is the correction (intensity matrix should be multiplied by it), the second is its absolute error. """ # error propagation formula calculated using sympy return (angledependentabsorption(twotheta, transmission), _calc_angledependentabsorption_error(twotheta, dtwotheta, transmission, dtransmission))
[ "def", "angledependentabsorption_errorprop", "(", "twotheta", ",", "dtwotheta", ",", "transmission", ",", "dtransmission", ")", ":", "# error propagation formula calculated using sympy", "return", "(", "angledependentabsorption", "(", "twotheta", ",", "transmission", ")", ",", "_calc_angledependentabsorption_error", "(", "twotheta", ",", "dtwotheta", ",", "transmission", ",", "dtransmission", ")", ")" ]
Correction for angle-dependent absorption of the sample with error propagation Inputs: twotheta: matrix of two-theta values dtwotheta: matrix of absolute error of two-theta values transmission: the transmission of the sample (I_after/I_before, or exp(-mu*d)) dtransmission: the absolute error of the transmission of the sample Two matrices are returned: the first one is the correction (intensity matrix should be multiplied by it), the second is its absolute error.
[ "Correction", "for", "angle", "-", "dependent", "absorption", "of", "the", "sample", "with", "error", "propagation" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/corrections.py#L108-L123
awacha/sastool
sastool/utils2d/corrections.py
angledependentairtransmission
def angledependentairtransmission(twotheta, mu_air, sampletodetectordistance): """Correction for the angle-dependent absorption of air in the scattered beam path. Inputs: twotheta: matrix of two-theta values mu_air: the linear absorption coefficient of air sampletodetectordistance: sample-to-detector distance 1/mu_air and sampletodetectordistance should have the same dimension The scattering intensity matrix should be multiplied by the resulting correction matrix.""" return np.exp(mu_air * sampletodetectordistance / np.cos(twotheta))
python
def angledependentairtransmission(twotheta, mu_air, sampletodetectordistance): """Correction for the angle-dependent absorption of air in the scattered beam path. Inputs: twotheta: matrix of two-theta values mu_air: the linear absorption coefficient of air sampletodetectordistance: sample-to-detector distance 1/mu_air and sampletodetectordistance should have the same dimension The scattering intensity matrix should be multiplied by the resulting correction matrix.""" return np.exp(mu_air * sampletodetectordistance / np.cos(twotheta))
[ "def", "angledependentairtransmission", "(", "twotheta", ",", "mu_air", ",", "sampletodetectordistance", ")", ":", "return", "np", ".", "exp", "(", "mu_air", "*", "sampletodetectordistance", "/", "np", ".", "cos", "(", "twotheta", ")", ")" ]
Correction for the angle-dependent absorption of air in the scattered beam path. Inputs: twotheta: matrix of two-theta values mu_air: the linear absorption coefficient of air sampletodetectordistance: sample-to-detector distance 1/mu_air and sampletodetectordistance should have the same dimension The scattering intensity matrix should be multiplied by the resulting correction matrix.
[ "Correction", "for", "the", "angle", "dependent", "absorption", "of", "air", "in", "the", "scattered", "beam", "path", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/corrections.py#L125-L138
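Illustrative call; mu_air is given in 1/mm here so that 1/mu_air matches the mm unit of the distance, as the docstring requires (the numeric values are made up):

aircorr = angledependentairtransmission(tth, mu_air=1.3e-4,
                                        sampletodetectordistance=1500.0)
corrected = intensity * aircorr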
awacha/sastool
sastool/utils2d/corrections.py
angledependentairtransmission_errorprop
def angledependentairtransmission_errorprop(twotheta, dtwotheta, mu_air, dmu_air, sampletodetectordistance, dsampletodetectordistance): """Correction for the angle-dependent absorption of air in the scattered beam path, with error propagation Inputs: twotheta: matrix of two-theta values dtwotheta: absolute error matrix of two-theta mu_air: the linear absorption coefficient of air dmu_air: error of the linear absorption coefficient of air sampletodetectordistance: sample-to-detector distance dsampletodetectordistance: error of the sample-to-detector distance 1/mu_air and sampletodetectordistance should have the same dimension The scattering intensity matrix should be multiplied by the resulting correction matrix.""" return (np.exp(mu_air * sampletodetectordistance / np.cos(twotheta)), np.sqrt(dmu_air ** 2 * sampletodetectordistance ** 2 * np.exp(2 * mu_air * sampletodetectordistance / np.cos(twotheta)) / np.cos(twotheta) ** 2 + dsampletodetectordistance ** 2 * mu_air ** 2 * np.exp(2 * mu_air * sampletodetectordistance / np.cos(twotheta)) / np.cos(twotheta) ** 2 + dtwotheta ** 2 * mu_air ** 2 * sampletodetectordistance ** 2 * np.exp(2 * mu_air * sampletodetectordistance / np.cos(twotheta)) * np.sin(twotheta) ** 2 / np.cos(twotheta) ** 4) )
python
def angledependentairtransmission_errorprop(twotheta, dtwotheta, mu_air, dmu_air, sampletodetectordistance, dsampletodetectordistance): """Correction for the angle-dependent absorption of air in the scattered beam path, with error propagation Inputs: twotheta: matrix of two-theta values dtwotheta: absolute error matrix of two-theta mu_air: the linear absorption coefficient of air dmu_air: error of the linear absorption coefficient of air sampletodetectordistance: sample-to-detector distance dsampletodetectordistance: error of the sample-to-detector distance 1/mu_air and sampletodetectordistance should have the same dimension The scattering intensity matrix should be multiplied by the resulting correction matrix.""" return (np.exp(mu_air * sampletodetectordistance / np.cos(twotheta)), np.sqrt(dmu_air ** 2 * sampletodetectordistance ** 2 * np.exp(2 * mu_air * sampletodetectordistance / np.cos(twotheta)) / np.cos(twotheta) ** 2 + dsampletodetectordistance ** 2 * mu_air ** 2 * np.exp(2 * mu_air * sampletodetectordistance / np.cos(twotheta)) / np.cos(twotheta) ** 2 + dtwotheta ** 2 * mu_air ** 2 * sampletodetectordistance ** 2 * np.exp(2 * mu_air * sampletodetectordistance / np.cos(twotheta)) * np.sin(twotheta) ** 2 / np.cos(twotheta) ** 4) )
[ "def", "angledependentairtransmission_errorprop", "(", "twotheta", ",", "dtwotheta", ",", "mu_air", ",", "dmu_air", ",", "sampletodetectordistance", ",", "dsampletodetectordistance", ")", ":", "return", "(", "np", ".", "exp", "(", "mu_air", "*", "sampletodetectordistance", "/", "np", ".", "cos", "(", "twotheta", ")", ")", ",", "np", ".", "sqrt", "(", "dmu_air", "**", "2", "*", "sampletodetectordistance", "**", "2", "*", "np", ".", "exp", "(", "2", "*", "mu_air", "*", "sampletodetectordistance", "/", "np", ".", "cos", "(", "twotheta", ")", ")", "/", "np", ".", "cos", "(", "twotheta", ")", "**", "2", "+", "dsampletodetectordistance", "**", "2", "*", "mu_air", "**", "2", "*", "np", ".", "exp", "(", "2", "*", "mu_air", "*", "sampletodetectordistance", "/", "np", ".", "cos", "(", "twotheta", ")", ")", "/", "np", ".", "cos", "(", "twotheta", ")", "**", "2", "+", "dtwotheta", "**", "2", "*", "mu_air", "**", "2", "*", "sampletodetectordistance", "**", "2", "*", "np", ".", "exp", "(", "2", "*", "mu_air", "*", "sampletodetectordistance", "/", "np", ".", "cos", "(", "twotheta", ")", ")", "*", "np", ".", "sin", "(", "twotheta", ")", "**", "2", "/", "np", ".", "cos", "(", "twotheta", ")", "**", "4", ")", ")" ]
Correction for the angle-dependent absorption of air in the scattered beam path, with error propagation Inputs: twotheta: matrix of two-theta values dtwotheta: absolute error matrix of two-theta mu_air: the linear absorption coefficient of air dmu_air: error of the linear absorption coefficient of air sampletodetectordistance: sample-to-detector distance dsampletodetectordistance: error of the sample-to-detector distance 1/mu_air and sampletodetectordistance should have the same dimension The scattering intensity matrix should be multiplied by the resulting correction matrix.
[ "Correction", "for", "the", "angle", "dependent", "absorption", "of", "air", "in", "the", "scattered", "beam", "path", "with", "error", "propagation" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/corrections.py#L140-L168
awacha/sastool
sastool/classes2/loader.py
Loader.find_file
def find_file(self, filename: str, strip_path: bool = True, what='exposure') -> str: """Find file in the path""" if what == 'exposure': path = self._path elif what == 'header': path = self._headerpath elif what == 'mask': path = self._maskpath else: path = self._path tried = [] if strip_path: filename = os.path.split(filename)[-1] for d in path: # record every candidate before testing it, so the error message below is informative tried.append(os.path.join(d, filename)) if os.path.exists(os.path.join(d, filename)): return os.path.join(d, filename) raise FileNotFoundError('Not found: {}. Tried: {}'.format(filename, ', '.join(tried)))
python
def find_file(self, filename: str, strip_path: bool = True, what='exposure') -> str: """Find file in the path""" if what == 'exposure': path = self._path elif what == 'header': path = self._headerpath elif what == 'mask': path = self._maskpath else: path = self._path tried = [] if strip_path: filename = os.path.split(filename)[-1] for d in path: # record every candidate before testing it, so the error message below is informative tried.append(os.path.join(d, filename)) if os.path.exists(os.path.join(d, filename)): return os.path.join(d, filename) raise FileNotFoundError('Not found: {}. Tried: {}'.format(filename, ', '.join(tried)))
[ "def", "find_file", "(", "self", ",", "filename", ":", "str", ",", "strip_path", ":", "bool", "=", "True", ",", "what", "=", "'exposure'", ")", "->", "str", ":", "if", "what", "==", "'exposure'", ":", "path", "=", "self", ".", "_path", "elif", "what", "==", "'header'", ":", "path", "=", "self", ".", "_headerpath", "elif", "what", "==", "'mask'", ":", "path", "=", "self", ".", "_maskpath", "else", ":", "path", "=", "self", ".", "_path", "tried", "=", "[", "]", "if", "strip_path", ":", "filename", "=", "os", ".", "path", ".", "split", "(", "filename", ")", "[", "-", "1", "]", "for", "d", "in", "path", ":", "tried", ".", "append", "(", "os", ".", "path", ".", "join", "(", "d", ",", "filename", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "d", ",", "filename", ")", ")", ":", "return", "os", ".", "path", ".", "join", "(", "d", ",", "filename", ")", "raise", "FileNotFoundError", "(", "'Not found: {}. Tried: {}'", ".", "format", "(", "filename", ",", "', '", ".", "join", "(", "tried", ")", ")", ")" ]
Find file in the path
[ "Find", "file", "in", "the", "path" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/classes2/loader.py#L69-L86
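A hedged sketch; `loader` stands for an instance of some concrete Loader subclass with its search paths already configured, and the file names are hypothetical:

maskpath = loader.find_file('mask.mat', what='mask')
try:
    loader.find_file('nonexistent.cbf')
except FileNotFoundError as exc:
    print(exc)  # the message lists the candidate paths that were checked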
awacha/sastool
sastool/classes2/loader.py
Loader.get_subpath
def get_subpath(self, subpath: str): """Search a file or directory relative to the base path""" for d in self._path: if os.path.exists(os.path.join(d, subpath)): return os.path.join(d, subpath) raise FileNotFoundError(subpath)
python
def get_subpath(self, subpath: str): """Search a file or directory relative to the base path""" for d in self._path: if os.path.exists(os.path.join(d, subpath)): return os.path.join(d, subpath) raise FileNotFoundError(subpath)
[ "def", "get_subpath", "(", "self", ",", "subpath", ":", "str", ")", ":", "for", "d", "in", "self", ".", "_path", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "d", ",", "subpath", ")", ")", ":", "return", "os", ".", "path", ".", "join", "(", "d", ",", "subpath", ")", "raise", "FileNotFoundError", "(", "subpath", ")" ]
Search a file or directory relative to the base path
[ "Search", "a", "file", "or", "directory", "relative", "to", "the", "base", "path" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/classes2/loader.py#L96-L101
awacha/sastool
sastool/classes2/exposure.py
Exposure.new_from_file
def new_from_file(self, filename: str, header_data: Optional[Header] = None, mask_data: Optional[np.ndarray] = None): """Load an exposure from a file."""
python
def new_from_file(self, filename: str, header_data: Optional[Header] = None, mask_data: Optional[np.ndarray] = None): """Load an exposure from a file."""
[ "def", "new_from_file", "(", "self", ",", "filename", ":", "str", ",", "header_data", ":", "Optional", "[", "Header", "]", "=", "None", ",", "mask_data", ":", "Optional", "[", "np", ".", "ndarray", "]", "=", "None", ")", ":" ]
Load an exposure from a file.
[ "Load", "an", "exposure", "from", "a", "file", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/classes2/exposure.py#L47-L49
awacha/sastool
sastool/classes2/exposure.py
Exposure.sum
def sum(self, only_valid=True) -> ErrorValue: """Calculate the sum of pixels, not counting the masked ones if only_valid is True.""" if not only_valid: mask = 1 else: mask = self.mask return ErrorValue((self.intensity * mask).sum(), ((self.error * mask) ** 2).sum() ** 0.5)
python
def sum(self, only_valid=True) -> ErrorValue: """Calculate the sum of pixels, not counting the masked ones if only_valid is True.""" if not only_valid: mask = 1 else: mask = self.mask return ErrorValue((self.intensity * mask).sum(), ((self.error * mask) ** 2).sum() ** 0.5)
[ "def", "sum", "(", "self", ",", "only_valid", "=", "True", ")", "->", "ErrorValue", ":", "if", "not", "only_valid", ":", "mask", "=", "1", "else", ":", "mask", "=", "self", ".", "mask", "return", "ErrorValue", "(", "(", "self", ".", "intensity", "*", "mask", ")", ".", "sum", "(", ")", ",", "(", "(", "self", ".", "error", "*", "mask", ")", "**", "2", ")", ".", "sum", "(", ")", "**", "0.5", ")" ]
Calculate the sum of pixels, not counting the masked ones if only_valid is True.
[ "Calculate", "the", "sum", "of", "pixels", "not", "counting", "the", "masked", "ones", "if", "only_valid", "is", "True", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/classes2/exposure.py#L51-L58
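A sketch assuming `ex` is a loaded Exposure instance:

total = ex.sum()                      # masked pixels excluded
total_all = ex.sum(only_valid=False)  # every pixel counted
print(total.val, total.err)           # ErrorValue: value and propagated error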
awacha/sastool
sastool/classes2/exposure.py
Exposure.mean
def mean(self, only_valid=True) -> ErrorValue: """Calculate the mean of the pixels, not counting the masked ones if only_valid is True.""" if not only_valid: intensity = self.intensity error = self.error else: intensity = self.intensity[self.mask] error = self.error[self.mask] return ErrorValue(intensity.mean(), (error ** 2).mean() ** 0.5)
python
def mean(self, only_valid=True) -> ErrorValue: """Calculate the mean of the pixels, not counting the masked ones if only_valid is True.""" if not only_valid: intensity = self.intensity error = self.error else: intensity = self.intensity[self.mask] error = self.error[self.mask] return ErrorValue(intensity.mean(), (error ** 2).mean() ** 0.5)
[ "def", "mean", "(", "self", ",", "only_valid", "=", "True", ")", "->", "ErrorValue", ":", "if", "not", "only_valid", ":", "intensity", "=", "self", ".", "intensity", "error", "=", "self", ".", "error", "else", ":", "intensity", "=", "self", ".", "intensity", "[", "self", ".", "mask", "]", "error", "=", "self", ".", "error", "[", "self", ".", "mask", "]", "return", "ErrorValue", "(", "intensity", ".", "mean", "(", ")", ",", "(", "error", "**", "2", ")", ".", "mean", "(", ")", "**", "0.5", ")" ]
Calculate the mean of the pixels, not counting the masked ones if only_valid is True.
[ "Calculate", "the", "mean", "of", "the", "pixels", "not", "counting", "the", "masked", "ones", "if", "only_valid", "is", "True", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/classes2/exposure.py#L60-L69
awacha/sastool
sastool/classes2/exposure.py
Exposure.twotheta
def twotheta(self) -> ErrorValue: """Calculate the two-theta array""" row, column = np.ogrid[0:self.shape[0], 0:self.shape[1]] rho = (((self.header.beamcentery - row) * self.header.pixelsizey) ** 2 + ((self.header.beamcenterx - column) * self.header.pixelsizex) ** 2) ** 0.5 assert isinstance(self.header.pixelsizex, ErrorValue) assert isinstance(self.header.pixelsizey, ErrorValue) assert isinstance(rho, ErrorValue) return (rho / self.header.distance).arctan()
python
def twotheta(self) -> ErrorValue: """Calculate the two-theta array""" row, column = np.ogrid[0:self.shape[0], 0:self.shape[1]] rho = (((self.header.beamcentery - row) * self.header.pixelsizey) ** 2 + ((self.header.beamcenterx - column) * self.header.pixelsizex) ** 2) ** 0.5 assert isinstance(self.header.pixelsizex, ErrorValue) assert isinstance(self.header.pixelsizey, ErrorValue) assert isinstance(rho, ErrorValue) return (rho / self.header.distance).arctan()
[ "def", "twotheta", "(", "self", ")", "->", "ErrorValue", ":", "row", ",", "column", "=", "np", ".", "ogrid", "[", "0", ":", "self", ".", "shape", "[", "0", "]", ",", "0", ":", "self", ".", "shape", "[", "1", "]", "]", "rho", "=", "(", "(", "(", "self", ".", "header", ".", "beamcentery", "-", "row", ")", "*", "self", ".", "header", ".", "pixelsizey", ")", "**", "2", "+", "(", "(", "self", ".", "header", ".", "beamcenterx", "-", "column", ")", "*", "self", ".", "header", ".", "pixelsizex", ")", "**", "2", ")", "**", "0.5", "assert", "isinstance", "(", "self", ".", "header", ".", "pixelsizex", ",", "ErrorValue", ")", "assert", "isinstance", "(", "self", ".", "header", ".", "pixelsizey", ",", "ErrorValue", ")", "assert", "isinstance", "(", "rho", ",", "ErrorValue", ")", "return", "(", "rho", "/", "self", ".", "header", ".", "distance", ")", ".", "arctan", "(", ")" ]
Calculate the two-theta array
[ "Calculate", "the", "two", "-", "theta", "array" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/classes2/exposure.py#L72-L80
awacha/sastool
sastool/classes2/exposure.py
Exposure.pixel_to_q
def pixel_to_q(self, row: float, column: float): """Return the q coordinates of a given pixel. Inputs: row: float the row (vertical) coordinate of the pixel column: float the column (horizontal) coordinate of the pixel Coordinates are 0-based and calculated from the top left corner. """ qrow = 4 * np.pi * np.sin( 0.5 * np.arctan( (row - float(self.header.beamcentery)) * float(self.header.pixelsizey) / float(self.header.distance))) / float(self.header.wavelength) qcol = 4 * np.pi * np.sin(0.5 * np.arctan( (column - float(self.header.beamcenterx)) * float(self.header.pixelsizex) / float(self.header.distance))) / float(self.header.wavelength) return qrow, qcol
python
def pixel_to_q(self, row: float, column: float): """Return the q coordinates of a given pixel. Inputs: row: float the row (vertical) coordinate of the pixel column: float the column (horizontal) coordinate of the pixel Coordinates are 0-based and calculated from the top left corner. """ qrow = 4 * np.pi * np.sin( 0.5 * np.arctan( (row - float(self.header.beamcentery)) * float(self.header.pixelsizey) / float(self.header.distance))) / float(self.header.wavelength) qcol = 4 * np.pi * np.sin(0.5 * np.arctan( (column - float(self.header.beamcenterx)) * float(self.header.pixelsizex) / float(self.header.distance))) / float(self.header.wavelength) return qrow, qcol
[ "def", "pixel_to_q", "(", "self", ",", "row", ":", "float", ",", "column", ":", "float", ")", ":", "qrow", "=", "4", "*", "np", ".", "pi", "*", "np", ".", "sin", "(", "0.5", "*", "np", ".", "arctan", "(", "(", "row", "-", "float", "(", "self", ".", "header", ".", "beamcentery", ")", ")", "*", "float", "(", "self", ".", "header", ".", "pixelsizey", ")", "/", "float", "(", "self", ".", "header", ".", "distance", ")", ")", ")", "/", "float", "(", "self", ".", "header", ".", "wavelength", ")", "qcol", "=", "4", "*", "np", ".", "pi", "*", "np", ".", "sin", "(", "0.5", "*", "np", ".", "arctan", "(", "(", "column", "-", "float", "(", "self", ".", "header", ".", "beamcenterx", ")", ")", "*", "float", "(", "self", ".", "header", ".", "pixelsizex", ")", "/", "float", "(", "self", ".", "header", ".", "distance", ")", ")", ")", "/", "float", "(", "self", ".", "header", ".", "wavelength", ")", "return", "qrow", ",", "qcol" ]
Return the q coordinates of a given pixel. Inputs: row: float the row (vertical) coordinate of the pixel column: float the column (horizontal) coordinate of the pixel Coordinates are 0-based and calculated from the top left corner.
[ "Return", "the", "q", "coordinates", "of", "a", "given", "pixel", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/classes2/exposure.py#L87-L107
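A sketch assuming `ex` is an Exposure with calibrated geometry in its header; the pixel coordinates are arbitrary:

# per axis: q = 4*pi*sin(0.5*arctan(offset*pixelsize/distance)) / wavelength
qrow, qcol = ex.pixel_to_q(100.0, 200.0)  # 0-based row/column from the top left
print(qrow, qcol)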
awacha/sastool
sastool/classes2/exposure.py
Exposure.imshow
def imshow(self, *args, show_crosshair=True, show_mask=True, show_qscale=True, axes=None, invalid_color='black', mask_opacity=0.8, show_colorbar=True, **kwargs): """Plot the matrix (imshow) Keyword arguments [and their default values]: show_crosshair [True]: if a cross-hair marking the beam position is to be plotted. show_mask [True]: if the mask is to be plotted. show_qscale [True]: if the horizontal and vertical axes are to be scaled into q axes [None]: the axes into which the image should be plotted. If None, defaults to the currently active axes (returned by plt.gca()) invalid_color ['black']: the color for invalid (NaN or infinite) pixels mask_opacity [0.8]: the opacity of the overlaid mask (1 is fully opaque, 0 is fully transparent) show_colorbar [True]: if a colorbar is to be added. Can be a boolean value (True or False) or an instance of matplotlib.axes.Axes, into which the color bar should be drawn. All other keywords are forwarded to plt.imshow() or matplotlib.Axes.imshow() Returns: the image instance returned by imshow() """ if 'aspect' not in kwargs: kwargs['aspect'] = 'equal' if 'interpolation' not in kwargs: kwargs['interpolation'] = 'nearest' if 'origin' not in kwargs: kwargs['origin'] = 'upper' if show_qscale: ymin, xmin = self.pixel_to_q(0, 0) ymax, xmax = self.pixel_to_q(*self.shape) if kwargs['origin'].upper() == 'UPPER': kwargs['extent'] = [xmin, xmax, -ymax, -ymin] else: kwargs['extent'] = [xmin, xmax, ymin, ymax] bcx = 0 bcy = 0 else: bcx = self.header.beamcenterx bcy = self.header.beamcentery xmin = 0 xmax = self.shape[1] ymin = 0 ymax = self.shape[0] if kwargs['origin'].upper() == 'UPPER': kwargs['extent'] = [0, self.shape[1], self.shape[0], 0] else: kwargs['extent'] = [0, self.shape[1], 0, self.shape[0]] if axes is None: axes = plt.gca() ret = axes.imshow(self.intensity, **kwargs) if show_mask: # workaround: because of the colour-scaling we do here, full one and # full zero masks look the SAME, i.e. all the image is shaded. # Thus if we have a fully unmasked matrix, skip this section. # This also conserves memory. if (self.mask == 0).sum(): # there are some masked pixels # we construct another representation of the mask, where the masked pixels are 1.0, and the # unmasked ones will be np.nan. They will thus be not rendered. mf = np.ones(self.mask.shape, np.float) mf[self.mask != 0] = np.nan kwargs['cmap'] = matplotlib.cm.gray_r kwargs['alpha'] = mask_opacity kwargs['norm'] = matplotlib.colors.Normalize() axes.imshow(mf, **kwargs) if show_crosshair: ax = axes.axis() # save zoom state axes.plot([xmin, xmax], [bcy] * 2, 'w-') axes.plot([bcx] * 2, [ymin, ymax], 'w-') axes.axis(ax) # restore zoom state axes.set_facecolor(invalid_color) if show_colorbar: if isinstance(show_colorbar, matplotlib.axes.Axes): axes.figure.colorbar( ret, cax=show_colorbar) else: # try to find a suitable colorbar axes: check if the plot target axes already # contains some images, then check if their colorbars exist as # axes. cax = [i.colorbar[1] for i in axes.images if i.colorbar is not None] cax = [c for c in cax if c in c.figure.axes] if cax: cax = cax[0] else: cax = None axes.figure.colorbar(ret, cax=cax, ax=axes) axes.figure.canvas.draw() return ret
python
def imshow(self, *args, show_crosshair=True, show_mask=True, show_qscale=True, axes=None, invalid_color='black', mask_opacity=0.8, show_colorbar=True, **kwargs): """Plot the matrix (imshow) Keyword arguments [and their default values]: show_crosshair [True]: if a cross-hair marking the beam position is to be plotted. show_mask [True]: if the mask is to be plotted. show_qscale [True]: if the horizontal and vertical axes are to be scaled into q axes [None]: the axes into which the image should be plotted. If None, defaults to the currently active axes (returned by plt.gca()) invalid_color ['black']: the color for invalid (NaN or infinite) pixels mask_opacity [0.8]: the opacity of the overlaid mask (1 is fully opaque, 0 is fully transparent) show_colorbar [True]: if a colorbar is to be added. Can be a boolean value (True or False) or an instance of matplotlib.axes.Axes, into which the color bar should be drawn. All other keywords are forwarded to plt.imshow() or matplotlib.Axes.imshow() Returns: the image instance returned by imshow() """ if 'aspect' not in kwargs: kwargs['aspect'] = 'equal' if 'interpolation' not in kwargs: kwargs['interpolation'] = 'nearest' if 'origin' not in kwargs: kwargs['origin'] = 'upper' if show_qscale: ymin, xmin = self.pixel_to_q(0, 0) ymax, xmax = self.pixel_to_q(*self.shape) if kwargs['origin'].upper() == 'UPPER': kwargs['extent'] = [xmin, xmax, -ymax, -ymin] else: kwargs['extent'] = [xmin, xmax, ymin, ymax] bcx = 0 bcy = 0 else: bcx = self.header.beamcenterx bcy = self.header.beamcentery xmin = 0 xmax = self.shape[1] ymin = 0 ymax = self.shape[0] if kwargs['origin'].upper() == 'UPPER': kwargs['extent'] = [0, self.shape[1], self.shape[0], 0] else: kwargs['extent'] = [0, self.shape[1], 0, self.shape[0]] if axes is None: axes = plt.gca() ret = axes.imshow(self.intensity, **kwargs) if show_mask: # workaround: because of the colour-scaling we do here, full one and # full zero masks look the SAME, i.e. all the image is shaded. # Thus if we have a fully unmasked matrix, skip this section. # This also conserves memory. if (self.mask == 0).sum(): # there are some masked pixels # we construct another representation of the mask, where the masked pixels are 1.0, and the # unmasked ones will be np.nan. They will thus be not rendered. mf = np.ones(self.mask.shape, np.float) mf[self.mask != 0] = np.nan kwargs['cmap'] = matplotlib.cm.gray_r kwargs['alpha'] = mask_opacity kwargs['norm'] = matplotlib.colors.Normalize() axes.imshow(mf, **kwargs) if show_crosshair: ax = axes.axis() # save zoom state axes.plot([xmin, xmax], [bcy] * 2, 'w-') axes.plot([bcx] * 2, [ymin, ymax], 'w-') axes.axis(ax) # restore zoom state axes.set_facecolor(invalid_color) if show_colorbar: if isinstance(show_colorbar, matplotlib.axes.Axes): axes.figure.colorbar( ret, cax=show_colorbar) else: # try to find a suitable colorbar axes: check if the plot target axes already # contains some images, then check if their colorbars exist as # axes. cax = [i.colorbar[1] for i in axes.images if i.colorbar is not None] cax = [c for c in cax if c in c.figure.axes] if cax: cax = cax[0] else: cax = None axes.figure.colorbar(ret, cax=cax, ax=axes) axes.figure.canvas.draw() return ret
[ "def", "imshow", "(", "self", ",", "*", "args", ",", "show_crosshair", "=", "True", ",", "show_mask", "=", "True", ",", "show_qscale", "=", "True", ",", "axes", "=", "None", ",", "invalid_color", "=", "'black'", ",", "mask_opacity", "=", "0.8", ",", "show_colorbar", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "'aspect'", "not", "in", "kwargs", ":", "kwargs", "[", "'aspect'", "]", "=", "'equal'", "if", "'interpolation'", "not", "in", "kwargs", ":", "kwargs", "[", "'interpolation'", "]", "=", "'nearest'", "if", "'origin'", "not", "in", "kwargs", ":", "kwargs", "[", "'origin'", "]", "=", "'upper'", "if", "show_qscale", ":", "ymin", ",", "xmin", "=", "self", ".", "pixel_to_q", "(", "0", ",", "0", ")", "ymax", ",", "xmax", "=", "self", ".", "pixel_to_q", "(", "*", "self", ".", "shape", ")", "if", "kwargs", "[", "'origin'", "]", ".", "upper", "(", ")", "==", "'UPPER'", ":", "kwargs", "[", "'extent'", "]", "=", "[", "xmin", ",", "xmax", ",", "-", "ymax", ",", "-", "ymin", "]", "else", ":", "kwargs", "[", "'extent'", "]", "=", "[", "xmin", ",", "xmax", ",", "ymin", ",", "ymax", "]", "bcx", "=", "0", "bcy", "=", "0", "else", ":", "bcx", "=", "self", ".", "header", ".", "beamcenterx", "bcy", "=", "self", ".", "header", ".", "beamcentery", "xmin", "=", "0", "xmax", "=", "self", ".", "shape", "[", "1", "]", "ymin", "=", "0", "ymax", "=", "self", ".", "shape", "[", "0", "]", "if", "kwargs", "[", "'origin'", "]", ".", "upper", "(", ")", "==", "'UPPER'", ":", "kwargs", "[", "'extent'", "]", "=", "[", "0", ",", "self", ".", "shape", "[", "1", "]", ",", "self", ".", "shape", "[", "0", "]", ",", "0", "]", "else", ":", "kwargs", "[", "'extent'", "]", "=", "[", "0", ",", "self", ".", "shape", "[", "1", "]", ",", "0", ",", "self", ".", "shape", "[", "0", "]", "]", "if", "axes", "is", "None", ":", "axes", "=", "plt", ".", "gca", "(", ")", "ret", "=", "axes", ".", "imshow", "(", "self", ".", "intensity", ",", "*", "*", "kwargs", ")", "if", "show_mask", ":", "# workaround: because of the colour-scaling we do here, full one and", "# full zero masks look the SAME, i.e. all the image is shaded.", "# Thus if we have a fully unmasked matrix, skip this section.", "# This also conserves memory.", "if", "(", "self", ".", "mask", "==", "0", ")", ".", "sum", "(", ")", ":", "# there are some masked pixels", "# we construct another representation of the mask, where the masked pixels are 1.0, and the", "# unmasked ones will be np.nan. They will thus be not rendered.", "mf", "=", "np", ".", "ones", "(", "self", ".", "mask", ".", "shape", ",", "np", ".", "float", ")", "mf", "[", "self", ".", "mask", "!=", "0", "]", "=", "np", ".", "nan", "kwargs", "[", "'cmap'", "]", "=", "matplotlib", ".", "cm", ".", "gray_r", "kwargs", "[", "'alpha'", "]", "=", "mask_opacity", "kwargs", "[", "'norm'", "]", "=", "matplotlib", ".", "colors", ".", "Normalize", "(", ")", "axes", ".", "imshow", "(", "mf", ",", "*", "*", "kwargs", ")", "if", "show_crosshair", ":", "ax", "=", "axes", ".", "axis", "(", ")", "# save zoom state", "axes", ".", "plot", "(", "[", "xmin", ",", "xmax", "]", ",", "[", "bcy", "]", "*", "2", ",", "'w-'", ")", "axes", ".", "plot", "(", "[", "bcx", "]", "*", "2", ",", "[", "ymin", ",", "ymax", "]", ",", "'w-'", ")", "axes", ".", "axis", "(", "ax", ")", "# restore zoom state", "axes", ".", "set_facecolor", "(", "invalid_color", ")", "if", "show_colorbar", ":", "if", "isinstance", "(", "show_colorbar", ",", "matplotlib", ".", "axes", ".", "Axes", ")", ":", "axes", ".", "figure", ".", "colorbar", "(", "ret", ",", "cax", "=", "show_colorbar", ")", "else", ":", "# try to find a suitable colorbar axes: check if the plot target axes already", "# contains some images, then check if their colorbars exist as", "# axes.", "cax", "=", "[", "i", ".", "colorbar", "[", "1", "]", "for", "i", "in", "axes", ".", "images", "if", "i", ".", "colorbar", "is", "not", "None", "]", "cax", "=", "[", "c", "for", "c", "in", "cax", "if", "c", "in", "c", ".", "figure", ".", "axes", "]", "if", "cax", ":", "cax", "=", "cax", "[", "0", "]", "else", ":", "cax", "=", "None", "axes", ".", "figure", ".", "colorbar", "(", "ret", ",", "cax", "=", "cax", ",", "ax", "=", "axes", ")", "axes", ".", "figure", ".", "canvas", ".", "draw", "(", ")", "return", "ret" ]
Plot the matrix (imshow) Keyword arguments [and their default values]: show_crosshair [True]: if a cross-hair marking the beam position is to be plotted. show_mask [True]: if the mask is to be plotted. show_qscale [True]: if the horizontal and vertical axes are to be scaled into q axes [None]: the axes into which the image should be plotted. If None, defaults to the currently active axes (returned by plt.gca()) invalid_color ['black']: the color for invalid (NaN or infinite) pixels mask_opacity [0.8]: the opacity of the overlaid mask (1 is fully opaque, 0 is fully transparent) show_colorbar [True]: if a colorbar is to be added. Can be a boolean value (True or False) or an instance of matplotlib.axes.Axes, into which the color bar should be drawn. All other keywords are forwarded to plt.imshow() or matplotlib.Axes.imshow() Returns: the image instance returned by imshow()
[ "Plot", "the", "matrix", "(", "imshow", ")" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/classes2/exposure.py#L109-L201
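Plotting sketch, assuming a loaded Exposure `ex` and matplotlib; the logarithmic norm is just one reasonable choice for SAS data:

import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
fig, ax = plt.subplots()
ex.imshow(axes=ax, show_qscale=True, norm=LogNorm())  # extra kwargs are forwarded to Axes.imshow
plt.show()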
awacha/sastool
sastool/classes2/exposure.py
Exposure.radial_average
def radial_average(self, qrange=None, pixel=False, returnmask=False, errorpropagation=3, abscissa_errorpropagation=3, raw_result=False) -> Curve: """Do a radial averaging Inputs: qrange: the q-range. If None, auto-determine. If 'linear', auto-determine with linear spacing (same as None). If 'log', auto-determine with log10 spacing. pixel: do a pixel-integration (instead of q) returnmask: if the effective mask matrix is to be returned. errorpropagation: the type of error propagation (3: highest of squared or std-dev, 2: squared, 1: linear, 0: independent measurements of the same quantity) abscissa_errorpropagation: the type of the error propagation in the abscissa (3: highest of squared or std-dev, 2: squared, 1: linear, 0: independent measurements of the same quantity) raw_result: if True, do not pack the result in a SASCurve, return the individual np.ndarrays. Outputs: the one-dimensional curve as an instance of SASCurve (if pixel is False) or SASPixelCurve (if pixel is True), if raw_result was False; otherwise the q (or pixel), dq (or dpixel), I, dI, area vectors the mask matrix (if returnmask was True) """ retmask = None if isinstance(qrange, str): if qrange == 'linear': qrange = None autoqrange_linear = True elif qrange == 'log': qrange = None autoqrange_linear = False else: raise ValueError( 'Value given for qrange (''%s'') not understood.' % qrange) else: autoqrange_linear = True # whatever if pixel: abscissa_kind = 3 else: abscissa_kind = 0 res = radint_fullq_errorprop(self.intensity, self.error, self.header.wavelength.val, self.header.wavelength.err, self.header.distance.val, self.header.distance.err, self.header.pixelsizey.val, self.header.pixelsizex.val, self.header.beamcentery.val, self.header.beamcentery.err, self.header.beamcenterx.val, self.header.beamcenterx.err, (self.mask == 0).astype(np.uint8), qrange, returnmask=returnmask, errorpropagation=errorpropagation, autoqrange_linear=autoqrange_linear, abscissa_kind=abscissa_kind, abscissa_errorpropagation=abscissa_errorpropagation) q, dq, I, E, area = res[:5] if not raw_result: c = Curve(q, I, E, dq) if returnmask: return c, res[5] else: return c else: if returnmask: return q, dq, I, E, area, res[5] else: return q, dq, I, E, area
python
def radial_average(self, qrange=None, pixel=False, returnmask=False, errorpropagation=3, abscissa_errorpropagation=3, raw_result=False) -> Curve: """Do a radial averaging Inputs: qrange: the q-range. If None, auto-determine. If 'linear', auto-determine with linear spacing (same as None). If 'log', auto-determine with log10 spacing. pixel: do a pixel-integration (instead of q) returnmask: if the effective mask matrix is to be returned. errorpropagation: the type of error propagation (3: highest of squared or std-dev, 2: squared, 1: linear, 0: independent measurements of the same quantity) abscissa_errorpropagation: the type of the error propagation in the abscissa (3: highest of squared or std-dev, 2: squared, 1: linear, 0: independent measurements of the same quantity) raw_result: if True, do not pack the result in a SASCurve, return the individual np.ndarrays. Outputs: the one-dimensional curve as an instance of SASCurve (if pixel is False) or SASPixelCurve (if pixel is True), if raw_result was False; otherwise the q (or pixel), dq (or dpixel), I, dI, area vectors the mask matrix (if returnmask was True) """ retmask = None if isinstance(qrange, str): if qrange == 'linear': qrange = None autoqrange_linear = True elif qrange == 'log': qrange = None autoqrange_linear = False else: raise ValueError( 'Value given for qrange (''%s'') not understood.' % qrange) else: autoqrange_linear = True # whatever if pixel: abscissa_kind = 3 else: abscissa_kind = 0 res = radint_fullq_errorprop(self.intensity, self.error, self.header.wavelength.val, self.header.wavelength.err, self.header.distance.val, self.header.distance.err, self.header.pixelsizey.val, self.header.pixelsizex.val, self.header.beamcentery.val, self.header.beamcentery.err, self.header.beamcenterx.val, self.header.beamcenterx.err, (self.mask == 0).astype(np.uint8), qrange, returnmask=returnmask, errorpropagation=errorpropagation, autoqrange_linear=autoqrange_linear, abscissa_kind=abscissa_kind, abscissa_errorpropagation=abscissa_errorpropagation) q, dq, I, E, area = res[:5] if not raw_result: c = Curve(q, I, E, dq) if returnmask: return c, res[5] else: return c else: if returnmask: return q, dq, I, E, area, res[5] else: return q, dq, I, E, area
[ "def", "radial_average", "(", "self", ",", "qrange", "=", "None", ",", "pixel", "=", "False", ",", "returnmask", "=", "False", ",", "errorpropagation", "=", "3", ",", "abscissa_errorpropagation", "=", "3", ",", "raw_result", "=", "False", ")", "->", "Curve", ":", "retmask", "=", "None", "if", "isinstance", "(", "qrange", ",", "str", ")", ":", "if", "qrange", "==", "'linear'", ":", "qrange", "=", "None", "autoqrange_linear", "=", "True", "elif", "qrange", "==", "'log'", ":", "qrange", "=", "None", "autoqrange_linear", "=", "False", "else", ":", "raise", "ValueError", "(", "'Value given for qrange ('", "'%s'", "') not understood.'", "%", "qrange", ")", "else", ":", "autoqrange_linear", "=", "True", "# whatever", "if", "pixel", ":", "abscissa_kind", "=", "3", "else", ":", "abscissa_kind", "=", "0", "res", "=", "radint_fullq_errorprop", "(", "self", ".", "intensity", ",", "self", ".", "error", ",", "self", ".", "header", ".", "wavelength", ".", "val", ",", "self", ".", "header", ".", "wavelength", ".", "err", ",", "self", ".", "header", ".", "distance", ".", "val", ",", "self", ".", "header", ".", "distance", ".", "err", ",", "self", ".", "header", ".", "pixelsizey", ".", "val", ",", "self", ".", "header", ".", "pixelsizex", ".", "val", ",", "self", ".", "header", ".", "beamcentery", ".", "val", ",", "self", ".", "header", ".", "beamcentery", ".", "err", ",", "self", ".", "header", ".", "beamcenterx", ".", "val", ",", "self", ".", "header", ".", "beamcenterx", ".", "err", ",", "(", "self", ".", "mask", "==", "0", ")", ".", "astype", "(", "np", ".", "uint8", ")", ",", "qrange", ",", "returnmask", "=", "returnmask", ",", "errorpropagation", "=", "errorpropagation", ",", "autoqrange_linear", "=", "autoqrange_linear", ",", "abscissa_kind", "=", "abscissa_kind", ",", "abscissa_errorpropagation", "=", "abscissa_errorpropagation", ")", "q", ",", "dq", ",", "I", ",", "E", ",", "area", "=", "res", "[", ":", "5", "]", "if", "not", "raw_result", ":", "c", "=", "Curve", "(", "q", ",", "I", ",", "E", ",", "dq", ")", "if", "returnmask", ":", "return", "c", ",", "res", "[", "5", "]", "else", ":", "return", "c", "else", ":", "if", "returnmask", ":", "return", "q", ",", "dq", ",", "I", ",", "E", ",", "area", ",", "res", "[", "5", "]", "else", ":", "return", "q", ",", "dq", ",", "I", ",", "E", ",", "area" ]
Do a radial averaging Inputs: qrange: the q-range. If None, auto-determine. If 'linear', auto-determine with linear spacing (same as None). If 'log', auto-determine with log10 spacing. pixel: do a pixel-integration (instead of q) returnmask: if the effective mask matrix is to be returned. errorpropagation: the type of error propagation (3: highest of squared or std-dev, 2: squared, 1: linear, 0: independent measurements of the same quantity) abscissa_errorpropagation: the type of the error propagation in the abscissa (3: highest of squared or std-dev, 2: squared, 1: linear, 0: independent measurements of the same quantity) raw_result: if True, do not pack the result in a SASCurve, return the individual np.ndarrays. Outputs: the one-dimensional curve as an instance of SASCurve (if pixel is False) or SASPixelCurve (if pixel is True), if raw_result was False; otherwise the q (or pixel), dq (or dpixel), I, dI, area vectors the mask matrix (if returnmask was True)
[ "Do", "a", "radial", "averaging" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/classes2/exposure.py#L257-L320
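A minimal usage sketch for the function above, assuming an already-loaded Exposure instance named `ex` (how it is loaded is outside this record's scope):

    # Default call: auto-determined linear q-range, packed into a Curve.
    curve = ex.radial_average()

    # Logarithmic auto-binning, returning the raw vectors instead of a Curve.
    q, dq, I, dI, area = ex.radial_average(qrange='log', raw_result=True)

    # returnmask=True appends the effective mask matrix to the return value.
    curve, effmask = ex.radial_average(returnmask=True)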
awacha/sastool
sastool/classes2/exposure.py
Exposure.mask_negative
def mask_negative(self):
    """Extend the mask with the image elements where the intensity is negative."""
    self.mask = np.logical_and(self.mask, ~(self.intensity < 0))
python
def mask_negative(self):
    """Extend the mask with the image elements where the intensity is negative."""
    self.mask = np.logical_and(self.mask, ~(self.intensity < 0))
[ "def", "mask_negative", "(", "self", ")", ":", "self", ".", "mask", "=", "np", ".", "logical_and", "(", "self", ".", "mask", ",", "~", "(", "self", ".", "intensity", "<", "0", ")", ")" ]
Extend the mask with the image elements where the intensity is negative.
[ "Extend", "the", "mask", "with", "the", "image", "elements", "where", "the", "intensity", "is", "negative", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/classes2/exposure.py#L331-L333
awacha/sastool
sastool/classes2/exposure.py
Exposure.mask_nan
def mask_nan(self):
    """Extend the mask with the image elements where the intensity is NaN."""
    self.mask = np.logical_and(self.mask, ~(np.isnan(self.intensity)))
python
def mask_nan(self):
    """Extend the mask with the image elements where the intensity is NaN."""
    self.mask = np.logical_and(self.mask, ~(np.isnan(self.intensity)))
[ "def", "mask_nan", "(", "self", ")", ":", "self", ".", "mask", "=", "np", ".", "logical_and", "(", "self", ".", "mask", ",", "~", "(", "np", ".", "isnan", "(", "self", ".", "intensity", ")", ")", ")" ]
Extend the mask with the image elements where the intensity is NaN.
[ "Extend", "the", "mask", "with", "the", "image", "elements", "where", "the", "intensity", "is", "NaN", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/classes2/exposure.py#L335-L337
awacha/sastool
sastool/classes2/exposure.py
Exposure.mask_nonfinite
def mask_nonfinite(self):
    """Extend the mask with the image elements where the intensity is not finite."""
    self.mask = np.logical_and(self.mask, (np.isfinite(self.intensity)))
python
def mask_nonfinite(self):
    """Extend the mask with the image elements where the intensity is not finite."""
    self.mask = np.logical_and(self.mask, (np.isfinite(self.intensity)))
[ "def", "mask_nonfinite", "(", "self", ")", ":", "self", ".", "mask", "=", "np", ".", "logical_and", "(", "self", ".", "mask", ",", "(", "np", ".", "isfinite", "(", "self", ".", "intensity", ")", ")", ")" ]
Extend the mask with the image elements where the intensity is not finite.
[ "Extend", "the", "mask", "with", "the", "image", "elements", "where", "the", "intensity", "is", "NaN", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/classes2/exposure.py#L339-L341
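The three mask_* records above share one idiom: the boolean mask (True marks valid pixels) is ANDed with a keep-this-pixel predicate, so each call can only shrink the valid region. A self-contained numpy sketch of that idiom, outside any class:

    import numpy as np

    intensity = np.array([1.0, -2.0, np.nan, np.inf, 3.0])
    mask = np.ones_like(intensity, dtype=bool)

    mask = np.logical_and(mask, ~(intensity < 0))        # mask_negative
    mask = np.logical_and(mask, ~np.isnan(intensity))    # mask_nan
    mask = np.logical_and(mask, np.isfinite(intensity))  # mask_nonfinite

    print(mask)  # [ True False False False  True]

Note that NaN < 0 evaluates to False, so NaN pixels survive mask_negative and are only removed by mask_nan or mask_nonfinite.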
awacha/sastool
sastool/io/credo_saxsctrl/header.py
Header.distance
def distance(self) -> ErrorValue:
    """Sample-to-detector distance"""
    if 'DistCalibrated' in self._data:
        dist = self._data['DistCalibrated']
    else:
        dist = self._data["Dist"]
    if 'DistCalibratedError' in self._data:
        disterr = self._data['DistCalibratedError']
    elif 'DistError' in self._data:
        disterr = self._data['DistError']
    else:
        disterr = 0.0
    return ErrorValue(dist, disterr)
python
def distance(self) -> ErrorValue:
    """Sample-to-detector distance"""
    if 'DistCalibrated' in self._data:
        dist = self._data['DistCalibrated']
    else:
        dist = self._data["Dist"]
    if 'DistCalibratedError' in self._data:
        disterr = self._data['DistCalibratedError']
    elif 'DistError' in self._data:
        disterr = self._data['DistError']
    else:
        disterr = 0.0
    return ErrorValue(dist, disterr)
[ "def", "distance", "(", "self", ")", "->", "ErrorValue", ":", "if", "'DistCalibrated'", "in", "self", ".", "_data", ":", "dist", "=", "self", ".", "_data", "[", "'DistCalibrated'", "]", "else", ":", "dist", "=", "self", ".", "_data", "[", "\"Dist\"", "]", "if", "'DistCalibratedError'", "in", "self", ".", "_data", ":", "disterr", "=", "self", ".", "_data", "[", "'DistCalibratedError'", "]", "elif", "'DistError'", "in", "self", ".", "_data", ":", "disterr", "=", "self", ".", "_data", "[", "'DistError'", "]", "else", ":", "disterr", "=", "0.0", "return", "ErrorValue", "(", "dist", ",", "disterr", ")" ]
Sample-to-detector distance
[ "Sample", "-", "to", "-", "detector", "distance" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/credo_saxsctrl/header.py#L114-L126
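To make the fallback order in Header.distance explicit, here is a standalone re-statement of the same logic; the function name `distance_from` and the direct ErrorValue import path are illustrative assumptions, not taken from the record:

    from sastool.misc.errorvalue import ErrorValue  # import path is an assumption

    def distance_from(data: dict) -> ErrorValue:
        # Prefer the calibrated distance; fall back to the raw one.
        dist = data['DistCalibrated'] if 'DistCalibrated' in data else data['Dist']
        # Error term: calibrated error, then raw error, then zero.
        if 'DistCalibratedError' in data:
            disterr = data['DistCalibratedError']
        elif 'DistError' in data:
            disterr = data['DistError']
        else:
            disterr = 0.0
        return ErrorValue(dist, disterr)

    distance_from({'Dist': 1020.0})  # value 1020.0 with error 0.0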
awacha/sastool
sastool/io/credo_saxsctrl/header.py
Header.temperature
def temperature(self) -> Optional[ErrorValue]:
    """Sample temperature"""
    try:
        return ErrorValue(self._data['Temperature'],
                          self._data.setdefault('TemperatureError', 0.0))
    except KeyError:
        return None
python
def temperature(self) -> Optional[ErrorValue]:
    """Sample temperature"""
    try:
        return ErrorValue(self._data['Temperature'],
                          self._data.setdefault('TemperatureError', 0.0))
    except KeyError:
        return None
[ "def", "temperature", "(", "self", ")", "->", "Optional", "[", "ErrorValue", "]", ":", "try", ":", "return", "ErrorValue", "(", "self", ".", "_data", "[", "'Temperature'", "]", ",", "self", ".", "_data", ".", "setdefault", "(", "'TemperatureError'", ",", "0.0", ")", ")", "except", "KeyError", ":", "return", "None" ]
Sample temperature
[ "Sample", "temperature" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/credo_saxsctrl/header.py#L136-L141
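One behavioral detail of Header.temperature worth noting: dict.setdefault both reads and writes, so the first access inserts a 'TemperatureError' key of 0.0 into the header data. A plain-dict demonstration of that side effect:

    data = {'Temperature': 25.0}
    err = data.setdefault('TemperatureError', 0.0)
    print(err)   # 0.0
    print(data)  # {'Temperature': 25.0, 'TemperatureError': 0.0}

If 'Temperature' itself is missing, the KeyError is caught and the property returns None instead of raising.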