Dataset schema (one record per function):
  code       string, 26-870k chars
  docstring  string, 1-65.6k chars
  func_name  string, 1-194 chars
  language   string, 1 class
  repo       string, 8-68 chars
  path       string, 5-182 chars
  url        string, 46-251 chars
  license    string, 4 classes
def nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, Lambda):
    length = nn_params.shape[0]  # total length of the unrolled theta vector
    # Recover Theta1 and Theta2
    Theta1 = nn_params[0:hidden_layer_size * (input_layer_size + 1)].reshape(hidden_layer_size, input_layer_size + 1)
    Theta2 = nn_params[hidden_layer_size * (input_layer_size + 1):length].reshape(num_labels, hidden_layer_size + 1)
    # np.savetxt("Theta1.csv", Theta1, delimiter=',')
    m = X.shape[0]
    class_y = np.zeros((m, num_labels))  # y holds labels 0-9; map them to 0/1 indicator vectors
    # Map y
    for i in range(num_labels):
        class_y[:, i] = np.int32(y == i).reshape(1, -1)  # note: reshape(1,-1) is required for the assignment

    '''Drop the first column of Theta1 and Theta2, since regularization starts from index 1'''
    Theta1_colCount = Theta1.shape[1]
    Theta1_x = Theta1[:, 1:Theta1_colCount]
    Theta2_colCount = Theta2.shape[1]
    Theta2_x = Theta2[:, 1:Theta2_colCount]
    # Regularization term: sum of theta^2
    term = np.dot(
        np.transpose(np.vstack((Theta1_x.reshape(-1, 1), Theta2_x.reshape(-1, 1)))),
        np.vstack((Theta1_x.reshape(-1, 1), Theta2_x.reshape(-1, 1))),
    )

    '''Forward propagation; prepend a bias column of ones at each layer'''
    a1 = np.hstack((np.ones((m, 1)), X))
    z2 = np.dot(a1, np.transpose(Theta1))
    a2 = sigmoid(z2)
    a2 = np.hstack((np.ones((m, 1)), a2))
    z3 = np.dot(a2, np.transpose(Theta2))
    h = sigmoid(z3)

    '''Cost'''
    J = -(
        np.dot(np.transpose(class_y.reshape(-1, 1)), np.log(h.reshape(-1, 1)))
        + np.dot(np.transpose(1 - class_y.reshape(-1, 1)), np.log(1 - h.reshape(-1, 1)))
        - Lambda * term / 2
    ) / m
    # temp1 = (h.reshape(-1,1)-class_y.reshape(-1,1))
    # temp2 = (temp1**2).sum()
    # J = 1/(2*m)*temp2
    return np.ravel(J)
Drop the first column of theta1 and theta2, since regularization starts from index 1
nnCostFunction
python
lawlite19/MachineLearning_Python
NeuralNetwok/NeuralNetwork.py
https://github.com/lawlite19/MachineLearning_Python/blob/master/NeuralNetwok/NeuralNetwork.py
MIT
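The unroll/reshape of the theta matrices and the one-hot mapping of y are the fiddly parts of nnCostFunction above; a minimal standalone sketch of the same round-trip (layer sizes are hypothetical, chosen only for illustration):

import numpy as np

input_layer_size, hidden_layer_size, num_labels = 4, 3, 2
Theta1 = np.random.randn(hidden_layer_size, input_layer_size + 1)
Theta2 = np.random.randn(num_labels, hidden_layer_size + 1)

# Unroll both matrices into a single column vector...
nn_params = np.vstack((Theta1.reshape(-1, 1), Theta2.reshape(-1, 1)))

# ...and recover them exactly as nnCostFunction does.
split = hidden_layer_size * (input_layer_size + 1)
T1 = nn_params[:split].reshape(hidden_layer_size, input_layer_size + 1)
T2 = nn_params[split:].reshape(num_labels, hidden_layer_size + 1)
assert np.array_equal(T1, Theta1) and np.array_equal(T2, Theta2)

# One-hot encoding of labels 0..num_labels-1, matching class_y above.
y = np.array([0, 1, 1, 0]).reshape(-1, 1)
class_y = (y == np.arange(num_labels)).astype(np.int32)  # shape (4, 2)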
def checkGradient(Lambda=0):
    '''Builds a small neural network for validation: computing gradients numerically is slow, and once backprop is verified the check is no longer needed'''
    input_layer_size = 3
    hidden_layer_size = 5
    num_labels = 3
    m = 5
    initial_Theta1 = debugInitializeWeights(input_layer_size, hidden_layer_size)
    initial_Theta2 = debugInitializeWeights(hidden_layer_size, num_labels)
    X = debugInitializeWeights(input_layer_size - 1, m)
    y = np.transpose(np.mod(np.arange(1, m + 1), num_labels))  # initialize y
    y = y.reshape(-1, 1)
    nn_params = np.vstack((initial_Theta1.reshape(-1, 1), initial_Theta2.reshape(-1, 1)))  # unroll theta

    '''Gradient via backpropagation'''
    grad = nnGradient(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, Lambda)

    '''Gradient via the numerical (central-difference) method'''
    num_grad = np.zeros((nn_params.shape[0]))
    step = np.zeros((nn_params.shape[0]))
    e = 1e-4
    for i in range(nn_params.shape[0]):
        step[i] = e
        loss1 = nnCostFunction(nn_params - step.reshape(-1, 1), input_layer_size, hidden_layer_size, num_labels, X, y, Lambda)
        loss2 = nnCostFunction(nn_params + step.reshape(-1, 1), input_layer_size, hidden_layer_size, num_labels, X, y, Lambda)
        num_grad[i] = (loss2 - loss1) / (2 * e)
        step[i] = 0

    # Show the two columns side by side for comparison
    res = np.hstack((num_grad.reshape(-1, 1), grad.reshape(-1, 1)))
    print("Gradient check: column 1 is the numerical gradient, column 2 is from backprop:")
    print(res)
Builds a small neural network to validate the gradient, since computing gradients numerically is slow; once backprop is verified, the check is no longer needed
checkGradient
python
lawlite19/MachineLearning_Python
NeuralNetwok/NeuralNetwork.py
https://github.com/lawlite19/MachineLearning_Python/blob/master/NeuralNetwok/NeuralNetwork.py
MIT
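checkGradient above is plain central differencing applied coordinate by coordinate; the same idea on a scalar function, as a self-contained sanity check (the function f is made up for illustration):

import numpy as np

def f(x):
    return x ** 3  # analytic gradient is 3 * x**2

x, e = 2.0, 1e-4
num_grad = (f(x + e) - f(x - e)) / (2 * e)  # central difference
assert abs(num_grad - 3 * x ** 2) < 1e-6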
def predict(Theta1, Theta2, X):
    m = X.shape[0]
    num_labels = Theta2.shape[0]
    # p = np.zeros((m,1))

    '''Forward propagation to produce predictions'''
    X = np.hstack((np.ones((m, 1)), X))
    h1 = sigmoid(np.dot(X, np.transpose(Theta1)))
    h1 = np.hstack((np.ones((m, 1)), h1))
    h2 = sigmoid(np.dot(h1, np.transpose(Theta2)))

    '''
    Return the column index of each row's maximum in h:
    - np.max(h, axis=1) gives each row's maximum (the highest class probability)
    - np.where then finds the column holding that maximum (the column index is the predicted digit)
    '''
    # np.savetxt("h2.csv", h2, delimiter=',')
    p = np.array(np.where(h2[0, :] == np.max(h2, axis=1)[0]))
    for i in np.arange(1, m):
        t = np.array(np.where(h2[i, :] == np.max(h2, axis=1)[i]))
        p = np.vstack((p, t))
    return p
Forward propagation to produce predictions
predict
python
lawlite19/MachineLearning_Python
NeuralNetwok/NeuralNetwork.py
https://github.com/lawlite19/MachineLearning_Python/blob/master/NeuralNetwok/NeuralNetwork.py
MIT
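The where/vstack loop at the end of predict is equivalent to a single argmax whenever each row's maximum is unique; a sketch (the h2 values are made up):

import numpy as np

h2 = np.array([[0.1, 0.7, 0.2],
               [0.6, 0.3, 0.1]])
p = np.argmax(h2, axis=1).reshape(-1, 1)  # column index of each row's maximum
print(p)  # [[1], [0]]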
def compiler_archs(compiler: str):
    """Discovers what platforms the given compiler supports; intended for MacOS use"""
    import tempfile
    import subprocess
    print(f"Compiler: {compiler}")
    arch_flags = []
    # see also the architectures tested for in .github/workflows/build-and-upload.yml
    for arch in ['x86_64', 'arm64', 'arm64e']:
        with tempfile.TemporaryDirectory() as tmpdir:
            cpp = Path(tmpdir) / 'test.cxx'
            cpp.write_text('int main() {return 0;}\n')
            out = Path(tmpdir) / 'a.out'
            p = subprocess.run([compiler, "-arch", arch, str(cpp), "-o", str(out)], capture_output=True)
            if p.returncode == 0:
                arch_flags += ['-arch', arch]
    print(f"Discovered {compiler} arch flags: {arch_flags}")
    return arch_flags
Discovers what platforms the given compiler supports; intended for MacOS use
compiler_archs
python
plasma-umass/scalene
setup.py
https://github.com/plasma-umass/scalene/blob/master/setup.py
Apache-2.0
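A hypothetical invocation of compiler_archs (assuming the function above is in scope and clang++ is installed; the flags returned depend on the local toolchain):

flags = compiler_archs("clang++")  # e.g. ['-arch', 'x86_64', '-arch', 'arm64']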
def extra_compile_args():
    """Returns extra compiler args for platform."""
    if sys.platform == 'win32':
        return ['/std:c++14']  # for Visual Studio C++
    return ['-std=c++14']
Returns extra compiler args for platform.
extra_compile_args
python
plasma-umass/scalene
setup.py
https://github.com/plasma-umass/scalene/blob/master/setup.py
Apache-2.0
def dll_suffix():
    """Returns the file suffix ("extension") of a DLL"""
    if sys.platform == 'win32':
        return '.dll'
    if sys.platform == 'darwin':
        return '.dylib'
    return '.so'
Returns the file suffix ("extension") of a DLL
dll_suffix
python
plasma-umass/scalene
setup.py
https://github.com/plasma-umass/scalene/blob/master/setup.py
Apache-2.0
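dll_suffix composes with a base name to build a platform-appropriate library filename; a sketch with a hypothetical base name:

libname = "libexample" + dll_suffix()  # 'libexample.so' on Linux, '.dylib' on macOS, '.dll' on Windows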
def read_file(name):
    """Returns a file's contents"""
    with open(path.join(path.dirname(__file__), name), encoding="utf-8") as f:
        return f.read()
Returns a file's contents
read_file
python
plasma-umass/scalene
setup.py
https://github.com/plasma-umass/scalene/blob/master/setup.py
Apache-2.0
def output_profile_line(
    self,
    json: ScaleneJSON,
    fname: Filename,
    line_no: LineNumber,
    line: SyntaxLine,
    console: Console,
    tbl: Table,
    stats: ScaleneStatistics,
    profile_this_code: Callable[[Filename, LineNumber], bool],
    force_print: bool = False,
    suppress_lineno_print: bool = False,
    is_function_summary: bool = False,
    profile_memory: bool = False,
    reduced_profile: bool = False,
) -> bool:
    """Print at most one line of the profile (true == printed one)."""
    obj = json.output_profile_line(
        fname=fname,
        fname_print=fname,
        line_no=line_no,
        line=str(line),
        stats=stats,
        profile_this_code=profile_this_code,
        force_print=force_print,
    )
    if not obj:
        return False
    if -1 < obj["n_peak_mb"] < 1:
        # Don't print out "-0" or anything below 1.
        obj["n_peak_mb"] = 0
    # Finally, print results.
    n_cpu_percent_c_str: str = (
        "" if obj["n_cpu_percent_c"] < 1 else f"{obj['n_cpu_percent_c']:5.0f}%"
    )
    n_gpu_percent_str: str = (
        "" if obj["n_gpu_percent"] < 1 else f"{obj['n_gpu_percent']:3.0f}%"
    )
    n_cpu_percent_python_str: str = (
        ""
        if obj["n_cpu_percent_python"] < 1
        else f"{obj['n_cpu_percent_python']:5.0f}%"
    )
    n_growth_mem_str = ""
    if obj["n_peak_mb"] < 1024:
        n_growth_mem_str = (
            ""
            if (not obj["n_peak_mb"] and not obj["n_usage_fraction"])
            else f"{obj['n_peak_mb']:5.0f}M"
        )
    else:
        n_growth_mem_str = (
            ""
            if (not obj["n_peak_mb"] and not obj["n_usage_fraction"])
            else f"{(obj['n_peak_mb'] / 1024):5.2f}G"
        )
    # Only report utilization where there is more than 1% CPU total usage.
    sys_str: str = (
        "" if obj["n_sys_percent"] < 1 else f"{obj['n_sys_percent']:4.0f}%"
    )
    if not is_function_summary:
        print_line_no = "" if suppress_lineno_print else str(line_no)
    else:
        print_line_no = (
            ""
            if fname not in stats.firstline_map
            else str(stats.firstline_map[fname])
        )
    if profile_memory:
        spark_str: str = ""
        # Scale the sparkline by the usage fraction.
        samples = obj["memory_samples"]
        # Randomly downsample to ScaleneOutput.max_sparkline_len_line.
        if len(samples) > ScaleneOutput.max_sparkline_len_line:
            random_samples = sorted(
                random.sample(samples, ScaleneOutput.max_sparkline_len_line)
            )
        else:
            random_samples = samples
        sparkline_samples = [
            random_samples[i][1] * obj["n_usage_fraction"]
            for i in range(len(random_samples))
        ]
        if random_samples:
            _, _, spark_str = sparkline.generate(
                sparkline_samples, 0, stats.max_footprint
            )
        # Red highlight
        ncpps: Any = ""
        ncpcs: Any = ""
        nufs: Any = ""
        ngpus: Any = ""
        n_usage_fraction_str: str = (
            ""
            if obj["n_usage_fraction"] < 0.01
            else f"{(100 * obj['n_usage_fraction']):4.0f}%"
        )
        if (
            obj["n_usage_fraction"] >= self.highlight_percentage
            or (
                obj["n_cpu_percent_c"]
                + obj["n_cpu_percent_python"]
                + obj["n_gpu_percent"]
            )
            >= self.highlight_percentage
        ):
            ncpps = Text.assemble(
                (n_cpu_percent_python_str, self.highlight_color)
            )
            ncpcs = Text.assemble(
                (n_cpu_percent_c_str, self.highlight_color)
            )
            nufs = Text.assemble(
                (spark_str + n_usage_fraction_str, self.highlight_color)
            )
            ngpus = Text.assemble(
                (n_gpu_percent_str, self.highlight_color)
            )
        else:
            ncpps = n_cpu_percent_python_str
            ncpcs = n_cpu_percent_c_str
            ngpus = n_gpu_percent_str
            nufs = spark_str + n_usage_fraction_str
        if reduced_profile and not ncpps + ncpcs + nufs + ngpus:
            return False
        n_python_fraction_str: str = (
            ""
            if obj["n_python_fraction"] < 0.01
            else f"{(obj['n_python_fraction'] * 100):4.0f}%"
        )
        n_copy_mb_s_str: str = (
            "" if obj["n_copy_mb_s"] < 0.5 else f"{obj['n_copy_mb_s']:6.0f}"
        )
        if self.gpu:
            tbl.add_row(
                print_line_no,
                ncpps,  # n_cpu_percent_python_str,
                ncpcs,  # n_cpu_percent_c_str,
                sys_str,
                ngpus,
                n_python_fraction_str,
                n_growth_mem_str,
                nufs,  # spark_str + n_usage_fraction_str,
                n_copy_mb_s_str,
                line,
            )
        else:
            tbl.add_row(
                print_line_no,
                ncpps,  # n_cpu_percent_python_str,
                ncpcs,  # n_cpu_percent_c_str,
                sys_str,
                n_python_fraction_str,
                n_growth_mem_str,
                nufs,  # spark_str + n_usage_fraction_str,
                n_copy_mb_s_str,
                line,
            )
    else:
        # Red highlight
        if (
            obj["n_cpu_percent_c"]
            + obj["n_cpu_percent_python"]
            + obj["n_gpu_percent"]
        ) >= self.highlight_percentage:
            ncpps = Text.assemble(
                (n_cpu_percent_python_str, self.highlight_color)
            )
            ncpcs = Text.assemble(
                (n_cpu_percent_c_str, self.highlight_color)
            )
            ngpus = Text.assemble(
                (n_gpu_percent_str, self.highlight_color)
            )
        else:
            ncpps = n_cpu_percent_python_str
            ncpcs = n_cpu_percent_c_str
            ngpus = n_gpu_percent_str
        if reduced_profile and not ncpps + ncpcs + ngpus:
            return False
        if self.gpu:
            tbl.add_row(
                print_line_no,
                ncpps,  # n_cpu_percent_python_str,
                ncpcs,  # n_cpu_percent_c_str,
                sys_str,
                ngpus,  # n_gpu_percent_str
                line,
            )
        else:
            tbl.add_row(
                print_line_no,
                ncpps,  # n_cpu_percent_python_str,
                ncpcs,  # n_cpu_percent_c_str,
                sys_str,
                line,
            )
    return True
Print at most one line of the profile (true == printed one).
output_profile_line
python
plasma-umass/scalene
scalene/scalene_output.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_output.py
Apache-2.0
def output_profiles(
    self,
    column_width: int,
    stats: ScaleneStatistics,
    pid: int,
    profile_this_code: Callable[[Filename, LineNumber], bool],
    python_alias_dir: Path,
    program_path: Filename,
    program_args: Optional[List[str]],
    profile_memory: bool = True,
    reduced_profile: bool = False,
) -> bool:
    """Write the profile out."""
    # Get the children's stats, if any.
    json = ScaleneJSON()
    json.gpu = self.gpu
    if not pid:
        stats.merge_stats(python_alias_dir)
    # If we've collected any samples, dump them.
    if (
        not stats.total_cpu_samples
        and not stats.total_memory_malloc_samples
        and not stats.total_memory_free_samples
    ):
        # Nothing to output.
        return False
    # Collect all instrumented filenames.
    all_instrumented_files: List[Filename] = list(
        set(
            list(stats.cpu_samples_python.keys())
            + list(stats.cpu_samples_c.keys())
            + list(stats.memory_free_samples.keys())
            + list(stats.memory_malloc_samples.keys())
        )
    )
    if not all_instrumented_files:
        # We didn't collect samples in source files.
        return False
    mem_usage_line: Union[Text, str] = ""
    growth_rate = 0.0
    if profile_memory:
        samples = stats.memory_footprint_samples
        if len(samples) > 0:
            # Randomly downsample samples
            if len(samples) > ScaleneOutput.max_sparkline_len_file:
                random_samples = sorted(
                    random.sample(
                        samples, ScaleneOutput.max_sparkline_len_file
                    )
                )
            else:
                random_samples = samples
            sparkline_samples = [item[1] for item in random_samples]
            # Output a sparkline as a summary of memory usage over time.
            _, _, spark_str = sparkline.generate(
                sparkline_samples[: ScaleneOutput.max_sparkline_len_file],
                0,
                stats.max_footprint,
            )
            # Compute growth rate (slope), between 0 and 1.
            if stats.allocation_velocity[1] > 0:
                growth_rate = (
                    100.0
                    * stats.allocation_velocity[0]
                    / stats.allocation_velocity[1]
                )
            mem_usage_line = Text.assemble(
                "Memory usage: ",
                ((spark_str, self.memory_color)),
                (
                    f" (max: {ScaleneJSON.memory_consumed_str(stats.max_footprint)}, growth rate: {growth_rate:3.0f}%)\n"
                ),
            )
    null = tempfile.TemporaryFile(mode="w+")
    console = Console(
        width=column_width,
        record=True,
        force_terminal=True,
        file=null,
        force_jupyter=False,
    )
    # Build a list of files we will actually report on.
    report_files: List[Filename] = []
    # Sort in descending order of CPU cycles, and then ascending order by filename
    for fname in sorted(
        all_instrumented_files,
        key=lambda f: (-(stats.cpu_samples[f]), f),
    ):
        fname = Filename(fname)
        try:
            percent_cpu_time = (
                100 * stats.cpu_samples[fname] / stats.total_cpu_samples
            )
        except ZeroDivisionError:
            percent_cpu_time = 0
        # Ignore files responsible for less than some percent of execution time and fewer than a threshold
        # of mallocs.
        if (
            stats.malloc_samples[fname] < ScaleneJSON.malloc_threshold
            and percent_cpu_time < ScaleneJSON.cpu_percent_threshold
        ):
            continue
        report_files.append(fname)
    # Don't actually output the profile if we are a child process.
    # Instead, write info to disk for the main process to collect.
    if pid:
        stats.output_stats(pid, python_alias_dir)
        return True
    if not report_files:
        return False
    for fname in report_files:
        # If the file was actually a Jupyter (IPython) cell,
        # restore its name, as in "[12]".
        fname_print = fname
        import re

        if result := re.match("_ipython-input-([0-9]+)-.*", fname_print):
            fname_print = Filename(f"[{result.group(1)}]")
        # Print header.
        percent_cpu_time = (
            (100 * stats.cpu_samples[fname] / stats.total_cpu_samples)
            if stats.total_cpu_samples
            else 0
        )
        new_title = mem_usage_line + (
            f"{fname_print}: % of time = {percent_cpu_time:6.2f}% ({ScaleneJSON.time_consumed_str(percent_cpu_time / 100.0 * stats.elapsed_time * 1e3)}) out of {ScaleneJSON.time_consumed_str(stats.elapsed_time * 1e3)}."
        )
        # Only display total memory usage once.
        mem_usage_line = ""
        tbl = Table(
            box=box.MINIMAL_HEAVY_HEAD,
            title=new_title,
            collapse_padding=True,
            width=column_width - 1,
        )
        tbl.add_column(
            Markdown("Line", style="dim"),
            style="dim",
            justify="right",
            no_wrap=True,
            width=4,
        )
        tbl.add_column(
            Markdown("Time " + "\n" + "_Python_", style="blue"),
            style="blue",
            no_wrap=True,
            width=6,
        )
        tbl.add_column(
            Markdown("–––––– \n_native_", style="blue"),
            style="blue",
            no_wrap=True,
            width=6,
        )
        tbl.add_column(
            Markdown("–––––– \n_system_", style="blue"),
            style="blue",
            no_wrap=True,
            width=6,
        )
        if self.gpu:
            tbl.add_column(
                Markdown("–––––– \n_GPU_", style=self.gpu_color),
                style=self.gpu_color,
                no_wrap=True,
                width=6,
            )
        other_columns_width = 0  # Size taken up by all columns BUT code
        if profile_memory:
            tbl.add_column(
                Markdown("Memory \n_Python_", style=self.memory_color),
                style=self.memory_color,
                no_wrap=True,
                width=7,
            )
            tbl.add_column(
                Markdown("–––––– \n_peak_", style=self.memory_color),
                style=self.memory_color,
                no_wrap=True,
                width=6,
            )
            tbl.add_column(
                Markdown(
                    "––––––––––– \n_timeline_/%", style=self.memory_color
                ),
                style=self.memory_color,
                no_wrap=True,
                width=15,
            )
            tbl.add_column(
                Markdown("Copy \n_(MB/s)_", style=self.copy_volume_color),
                style=self.copy_volume_color,
                no_wrap=True,
                width=6,
            )
            other_columns_width = 75 + (6 if self.gpu else 0)
        else:
            other_columns_width = 37 + (5 if self.gpu else 0)
        tbl.add_column(
            "\n" + fname_print,
            width=column_width - other_columns_width,
            no_wrap=True,
        )
        # Print out the profile for the source, line by line.
        if fname == "<BOGUS>":
            continue
        if not fname:
            continue
        # Print out the profile for the source, line by line.
        full_fname = os.path.normpath(os.path.join(program_path, fname))
        try:
            with open(full_fname, "r") as source_file:
                code_lines = source_file.read()
        except (FileNotFoundError, OSError):
            continue
        # We track whether we should put in ellipsis (for reduced profiles)
        # or not.
        did_print = True  # did we print a profile line last time?
        # Generate syntax highlighted version for the whole file,
        # which we will consume a line at a time.
        # See https://github.com/willmcgugan/rich/discussions/965#discussioncomment-314233
        syntax_highlighted = Syntax(
            code_lines,
            "python",
            theme="default" if self.html else "vim",
            line_numbers=False,
            code_width=None,
        )
        capture_console = Console(
            width=column_width - other_columns_width,
            force_terminal=True,
        )
        formatted_lines = [
            SyntaxLine(segments)
            for segments in capture_console.render_lines(syntax_highlighted)
        ]
        for line_no, line in enumerate(formatted_lines, start=1):
            old_did_print = did_print
            did_print = self.output_profile_line(
                json=json,
                fname=fname,
                line_no=LineNumber(line_no),
                line=line,
                console=console,
                tbl=tbl,
                stats=stats,
                profile_this_code=profile_this_code,
                profile_memory=profile_memory,
                force_print=False,
                suppress_lineno_print=False,
                is_function_summary=False,
                reduced_profile=reduced_profile,
            )
            if old_did_print and not did_print:
                # We are skipping lines, so add an ellipsis.
                tbl.add_row("...")
            old_did_print = did_print
        # Potentially print a function summary.
        fn_stats = stats.build_function_stats(fname)
        print_fn_summary = False
        # Check CPU samples and memory samples.
        all_samples = set()
        all_samples |= set(fn_stats.cpu_samples_python.keys())
        all_samples |= set(fn_stats.cpu_samples_c.keys())
        all_samples |= set(fn_stats.memory_malloc_samples.keys())
        all_samples |= set(fn_stats.memory_free_samples.keys())
        for fn_name in all_samples:
            if fn_name == fname:
                continue
            print_fn_summary = True
            break
        if print_fn_summary:
            try:
                tbl.add_row(None, end_section=True)
            except TypeError:  # rich < 9.4.0 compatibility
                tbl.add_row(None)
            txt = Text.assemble(
                f"function summary for {fname_print}", style="bold italic"
            )
            if profile_memory:
                if self.gpu:
                    tbl.add_row("", "", "", "", "", "", "", "", "", txt)
                else:
                    tbl.add_row("", "", "", "", "", "", "", "", txt)
            elif self.gpu:
                tbl.add_row("", "", "", "", "", txt)
            else:
                tbl.add_row("", "", "", "", txt)
            for fn_name in sorted(
                fn_stats.cpu_samples_python,
                key=lambda k: stats.firstline_map[k],
            ):
                if fn_name == fname:
                    continue
                syntax_highlighted = Syntax(
                    fn_name,
                    "python",
                    theme="default" if self.html else "vim",
                    line_numbers=False,
                    code_width=None,
                )
                # force print, suppress line numbers
                self.output_profile_line(
                    json=json,
                    fname=fn_name,
                    line_no=LineNumber(1),
                    line=syntax_highlighted,  # type: ignore
                    console=console,
                    tbl=tbl,
                    stats=fn_stats,
                    profile_this_code=profile_this_code,
                    profile_memory=profile_memory,
                    force_print=True,
                    suppress_lineno_print=True,
                    is_function_summary=True,
                    reduced_profile=reduced_profile,
                )
        console.print(tbl)
        # Compute AVERAGE memory consumption.
        avg_mallocs: Dict[LineNumber, float] = defaultdict(float)
        for line_no in stats.bytei_map[fname]:
            n_malloc_mb = stats.memory_aggregate_footprint[fname][line_no]
            if count := stats.memory_malloc_count[fname][line_no]:
                avg_mallocs[line_no] = n_malloc_mb / count
            else:
                # Setting to n_malloc_mb addresses the edge case where this allocation is the last line executed.
                avg_mallocs[line_no] = n_malloc_mb
        avg_mallocs = OrderedDict(
            sorted(avg_mallocs.items(), key=itemgetter(1), reverse=True)
        )
        # Compute (really, aggregate) PEAK memory consumption.
        peak_mallocs: Dict[LineNumber, float] = defaultdict(float)
        for line_no in stats.bytei_map[fname]:
            peak_mallocs[line_no] = stats.memory_max_footprint[fname][
                line_no
            ]
        peak_mallocs = OrderedDict(
            sorted(peak_mallocs.items(), key=itemgetter(1), reverse=True)
        )
        # Print the top N lines by AVERAGE memory consumption, as long
        # as they are above some threshold MB in size.
        self.output_top_memory(
            "Top AVERAGE memory consumption, by line:",
            console,
            avg_mallocs,
        )
        # Print the top N lines by PEAK memory consumption, as long
        # as they are above some threshold MB in size.
        self.output_top_memory(
            "Top PEAK memory consumption, by line:", console, peak_mallocs
        )
        # Only report potential leaks if the allocation velocity (growth rate) is above some threshold.
        leaks = ScaleneLeakAnalysis.compute_leaks(
            growth_rate, stats, avg_mallocs, fname
        )
        if len(leaks) > 0:
            # Report in descending order by least likelihood
            for leak in sorted(leaks, key=itemgetter(1), reverse=True):
                output_str = f"Possible memory leak identified at line {str(leak[0])} (estimated likelihood: {(leak[1] * 100):3.0f}%, velocity: {(leak[2] / stats.elapsed_time):3.0f} MB/s)"
                console.print(output_str)
    if self.html:
        # Write HTML file.
        md = Markdown(
            "generated by the [scalene](https://github.com/plasma-umass/scalene) profiler"
        )
        console.print(md)
        if not self.output_file:
            self.output_file = "/dev/stdout"
        console.save_html(self.output_file, clear=False)
    elif self.output_file:
        # Don't output styles to text file.
        console.save_text(self.output_file, styles=False, clear=False)
    else:
        # No output file specified: write to stdout.
        sys.stdout.write(console.export_text(styles=True))
    return True
Write the profile out.
output_profiles
python
plasma-umass/scalene
scalene/scalene_output.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_output.py
Apache-2.0
def replacement_process_join(self, timeout: float = -1) -> None:  # type: ignore
    """
    A drop-in replacement for multiprocessing.Process.join
    that periodically yields to handle signals
    """
    # print(multiprocessing.process.active_children())
    if minor_version >= 7:
        self._check_closed()
    assert self._parent_pid == os.getpid(), "can only join a child process"
    assert self._popen is not None, "can only join a started process"
    tident = threading.get_ident()
    if timeout < 0:
        interval = sys.getswitchinterval()
    else:
        interval = min(timeout, sys.getswitchinterval())
    start_time = time.perf_counter()
    while True:
        scalene.set_thread_sleeping(tident)
        res = self._popen.wait(interval)
        if res is not None:
            from multiprocessing.process import _children  # type: ignore

            scalene.remove_child_pid(self.pid)
            _children.discard(self)
            return
        scalene.reset_thread_sleeping(tident)
        # I think that this should be timeout--
        # interval is the sleep time per tick,
        # but timeout determines whether it returns
        if timeout != -1:
            end_time = time.perf_counter()
            if end_time - start_time >= timeout:
                from multiprocessing.process import (  # type: ignore
                    _children,
                )

                _children.discard(self)
                return
A drop-in replacement for multiprocessing.Process.join that periodically yields to handle signals
replacement_pjoin.replacement_process_join
python
plasma-umass/scalene
scalene/replacement_pjoin.py
https://github.com/plasma-umass/scalene/blob/master/scalene/replacement_pjoin.py
Apache-2.0
def is_native(package_name: str) -> bool:
    """
    Returns whether a package is native or not.
    """
    result = False
    try:
        package = importlib.import_module(package_name)
        if package.__file__:
            package_dir = os.path.dirname(package.__file__)
            for root, dirs, files in os.walk(package_dir):
                for filename in files:
                    if filename.endswith(".so") or filename.endswith(".pyd"):
                        return True
            result = False
    except ImportError:
        # This module is not installed or something else went wrong; fail gracefully.
        result = False
    except AttributeError:
        # No __file__, meaning it's built-in. Let's call it native.
        result = True
    except TypeError:
        # __file__ is there, but empty (os.path.dirname() returns TypeError). Let's call it native.
        result = True
    return result
Returns whether a package is native or not.
is_native
python
plasma-umass/scalene
scalene/scalene_analysis.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_analysis.py
Apache-2.0
def get_imported_modules(source: str) -> List[str]:
    """
    Extracts a list of imported modules from the given source code.

    Parameters:
    - source (str): The source code to be analyzed.

    Returns:
    - imported_modules (list[str]): A list of import statements.
    """
    # Parse the source code into an abstract syntax tree
    source = ScaleneAnalysis.strip_magic_line(source)
    tree = ast.parse(source)
    imported_modules = []
    # Iterate through the nodes in the syntax tree
    for node in ast.walk(tree):
        # Check if the node represents an import statement
        if isinstance(node, (ast.Import, ast.ImportFrom)):
            imported_modules.append(ast.unparse(node))
    return imported_modules
Extracts a list of imported modules from the given source code. Parameters: - source (str): The source code to be analyzed. Returns: - imported_modules (list[str]): A list of import statements.
get_imported_modules
python
plasma-umass/scalene
scalene/scalene_analysis.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_analysis.py
Apache-2.0
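The core of get_imported_modules is an ast walk; a self-contained illustration (requires Python 3.9+ for ast.unparse; the source string is made up):

import ast

source = "import os\nfrom typing import List\nx = 1\n"
tree = ast.parse(source)
imports = [ast.unparse(node) for node in ast.walk(tree)
           if isinstance(node, (ast.Import, ast.ImportFrom))]
print(imports)  # ['import os', 'from typing import List']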
def get_native_imported_modules(source: str) -> List[str]:
    """
    Extracts a list of **native** imported modules from the given source code.

    Parameters:
    - source (str): The source code to be analyzed.

    Returns:
    - imported_modules (list[str]): A list of import statements.
    """
    # Parse the source code into an abstract syntax tree
    source = ScaleneAnalysis.strip_magic_line(source)
    tree = ast.parse(source)
    imported_modules = []
    # Add the module name to the list if it's native.
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            # Iterate through the imported modules in the statement
            for alias in node.names:
                if ScaleneAnalysis.is_native(alias.name):
                    imported_modules.append(ast.unparse(node))
        # Check if the node represents an import from statement
        elif isinstance(node, ast.ImportFrom):
            node.module = cast(str, node.module)
            if ScaleneAnalysis.is_native(node.module):
                imported_modules.append(ast.unparse(node))
    return imported_modules
Extracts a list of **native** imported modules from the given source code. Parameters: - source (str): The source code to be analyzed. Returns: - imported_modules (list[str]): A list of import statements.
get_native_imported_modules
python
plasma-umass/scalene
scalene/scalene_analysis.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_analysis.py
Apache-2.0
def find_regions(src: str) -> Dict[int, Tuple[int, int]]:
    """This function collects the start and end lines of all loops and
    functions in the AST, and then uses these to determine the narrowest
    region containing each line in the source code (that is, loops take
    precedence over functions)."""
    # Filter out the first line if in a Jupyter notebook and it starts with a magic (% or %%).
    src = ScaleneAnalysis.strip_magic_line(src)
    srclines = src.split("\n")
    tree = ast.parse(src)
    regions = {}
    loops = {}
    functions = {}
    classes = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.ClassDef):
            assert node.end_lineno
            for line in range(node.lineno, node.end_lineno + 1):
                classes[line] = (node.lineno, node.end_lineno)
        if isinstance(node, (ast.For, ast.While)):
            assert node.end_lineno
            for line in range(node.lineno, node.end_lineno + 1):
                loops[line] = (node.lineno, node.end_lineno)
        if isinstance(node, ast.FunctionDef):
            assert node.end_lineno
            for line in range(node.lineno, node.end_lineno + 1):
                functions[line] = (node.lineno, node.end_lineno)
    for lineno, _ in enumerate(srclines, 1):
        if lineno in loops:
            regions[lineno] = loops[lineno]
        elif lineno in functions:
            regions[lineno] = functions[lineno]
        elif lineno in classes:
            regions[lineno] = classes[lineno]
        else:
            regions[lineno] = (lineno, lineno)
    return regions
This function collects the start and end lines of all loops and functions in the AST, and then uses these to determine the narrowest region containing each line in the source code (that is, loops take precedence over functions).
find_regions
python
plasma-umass/scalene
scalene/scalene_analysis.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_analysis.py
Apache-2.0
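For intuition about find_regions, here is a tiny snippet and its hand-computed region map under the precedence above (loops over functions over classes); line numbers are 1-based:

src = (
    "def f():\n"                # line 1: function spans 1-3
    "    for i in range(3):\n"  # line 2: loop spans 2-3 and takes precedence
    "        print(i)\n"        # line 3
    "x = 1\n"                   # line 4: a plain line maps to itself
)
# Expected: {1: (1, 3), 2: (2, 3), 3: (2, 3), 4: (4, 4)}
# (a trailing empty line produced by split("\n") would also map to itself)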
def close(self) -> None:
    """Close the map file."""
    self._signal_fd.close()
    self._lock_fd.close()
Close the map file.
close
python
plasma-umass/scalene
scalene/scalene_mapfile.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_mapfile.py
Apache-2.0
def cleanup(self) -> None:
    """Remove all map files."""
    try:
        os.remove(self._init_filename)
        os.remove(self._signal_filename)
    except FileNotFoundError:
        pass
Remove all map files.
cleanup
python
plasma-umass/scalene
scalene/scalene_mapfile.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_mapfile.py
Apache-2.0
def read(self) -> Any:
    """Read a line from the map file."""
    if sys.platform == "win32":
        return False
    if not self._signal_mmap:
        return False
    return get_line_atomic.get_line_atomic(
        self._lock_mmap, self._signal_mmap, self._buf, self._lastpos
    )
Read a line from the map file.
read
python
plasma-umass/scalene
scalene/scalene_mapfile.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_mapfile.py
Apache-2.0
def get_str(self) -> str:
    """Get the string from the buffer."""
    map_str = self._buf.rstrip(b"\x00").split(b"\n")[0].decode("ascii")
    return map_str
Get the string from the buffer.
get_str
python
plasma-umass/scalene
scalene/scalene_mapfile.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_mapfile.py
Apache-2.0
def replacement_poll_selector(scalene: Scalene) -> None:
    """
    A replacement for selectors.PollSelector that periodically
    wakes up to accept signals
    """

    class ReplacementPollSelector(selectors.PollSelector):
        def select(
            self, timeout: Optional[float] = -1
        ) -> List[Tuple[selectors.SelectorKey, int]]:
            tident = threading.get_ident()
            start_time = time.perf_counter()
            if not timeout or timeout < 0:
                interval = sys.getswitchinterval()
            else:
                interval = min(timeout, sys.getswitchinterval())
            while True:
                scalene.set_thread_sleeping(tident)
                selected = super().select(interval)
                scalene.reset_thread_sleeping(tident)
                if selected or timeout == 0:
                    return selected
                end_time = time.perf_counter()
                if timeout and timeout != -1:
                    if end_time - start_time >= timeout:
                        return []  # None

    ReplacementPollSelector.__qualname__ = (
        "replacement_poll_selector.ReplacementPollSelector"
    )
    selectors.PollSelector = ReplacementPollSelector  # type: ignore
A replacement for selectors.PollSelector that periodically wakes up to accept signals
replacement_poll_selector
python
plasma-umass/scalene
scalene/replacement_poll_selector.py
https://github.com/plasma-umass/scalene/blob/master/scalene/replacement_poll_selector.py
Apache-2.0
def is_port_available(port: int) -> bool:
    """
    Check if a given TCP port is available to start a server on the local machine.

    :param port: Port number as an integer.
    :return: True if the port is available, False otherwise.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        try:
            s.bind(("localhost", port))
            return True
        except socket.error:
            return False
Check if a given TCP port is available to start a server on the local machine. :param port: Port number as an integer. :return: True if the port is available, False otherwise.
is_port_available
python
plasma-umass/scalene
scalene/launchbrowser.py
https://github.com/plasma-umass/scalene/blob/master/scalene/launchbrowser.py
Apache-2.0
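A hypothetical check with is_port_available (assuming the function above is in scope; the port number is arbitrary):

if is_port_available(8181):
    print("port 8181 is free")
else:
    print("port 8181 is taken")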
def generate_html(profile_fname: Filename, output_fname: Filename) -> None:
    """Apply a template to generate a single HTML payload containing the current profile."""
    try:
        # Load the profile
        profile_file = pathlib.Path(profile_fname)
        profile = profile_file.read_text()
    except FileNotFoundError:
        assert profile_fname == "demo"
        profile = "{}"
        # return
    # Load the GUI JavaScript file.
    scalene_dir = os.path.dirname(__file__)
    file_contents = {
        "scalene_gui_js_text": read_file_content(
            scalene_dir, "scalene-gui", "scalene-gui-bundle.js"
        ),
        "prism_css_text": read_file_content(
            scalene_dir, "scalene-gui", "prism.css"
        ),
    }
    # Put the profile and everything else into the template.
    environment = Environment(
        loader=FileSystemLoader(os.path.join(scalene_dir, "scalene-gui"))
    )
    template = environment.get_template("index.html.template")
    try:
        import scalene_config
    except ModuleNotFoundError:
        import scalene.scalene_config as scalene_config
    rendered_content = template.render(
        profile=profile,
        gui_js=file_contents["scalene_gui_js_text"],
        prism_css=file_contents["prism_css_text"],
        scalene_version=scalene_config.scalene_version,
        scalene_date=scalene_config.scalene_date,
    )
    # Write the rendered content to the specified output file.
    try:
        with open(output_fname, "w", encoding="utf-8") as f:
            f.write(rendered_content)
    except OSError:
        pass
Apply a template to generate a single HTML payload containing the current profile.
generate_html
python
plasma-umass/scalene
scalene/launchbrowser.py
https://github.com/plasma-umass/scalene/blob/master/scalene/launchbrowser.py
Apache-2.0
def replacement_exit(scalene: Scalene) -> None:
    """
    Shims out the unconditional exit with the "neat exit"
    (which raises the SystemExit error and allows Scalene to exit neatly)
    """
    # Note: MyPy doesn't like this, but it works because passing an int
    # to sys.exit does the right thing
    os._exit = sys.exit  # type: ignore
Shims out the unconditional exit with the "neat exit" (which raises the SystemExit error and allows Scalene to exit neatly)
replacement_exit
python
plasma-umass/scalene
scalene/replacement_exit.py
https://github.com/plasma-umass/scalene/blob/master/scalene/replacement_exit.py
Apache-2.0
def setup_preload(args: argparse.Namespace) -> bool:
    """
    Ensures that Scalene runs with libscalene preloaded, if necessary,
    as well as any other required environment variables.

    Returns true iff we had to run another process.
    """
    # First, check that we are on a supported platform.
    # (x86-64 and ARM only for now.)
    if args.memory and (
        platform.machine() not in ["x86_64", "AMD64", "arm64", "aarch64"]
        or struct.calcsize("P") != 8
    ):
        args.memory = False
        print(
            "Scalene warning: currently only 64-bit x86-64 and ARM platforms are supported for memory and copy profiling."
        )
    with contextlib.suppress(Exception):
        from IPython import get_ipython

        if get_ipython():
            sys.exit = Scalene.clean_exit  # type: ignore
            sys._exit = Scalene.clean_exit  # type: ignore
    # Start a subprocess with the required environment variables,
    # which may include preloading libscalene
    req_env = ScalenePreload.get_preload_environ(args)
    if any(k_v not in os.environ.items() for k_v in req_env.items()):
        os.environ.update(req_env)
        new_args = [
            sys.executable,
            "-m",
            "scalene",
        ] + sys.argv[1:]
        result = subprocess.Popen(new_args, close_fds=True, shell=False)
        with contextlib.suppress(Exception):
            # If running in the background, print the PID.
            if os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()):
                # In the background.
                print(f"Scalene now profiling process {result.pid}")
                print(
                    f" to disable profiling: python3 -m scalene.profile --off --pid {result.pid}"
                )
                print(
                    f" to resume profiling: python3 -m scalene.profile --on --pid {result.pid}"
                )
        try:
            result.wait()
        except subprocess.TimeoutExpired:
            print("Scalene failure. Please try again.")
            return False
        except KeyboardInterrupt:
            result.returncode = 0
        if result.returncode < 0:
            print(
                "Scalene error: received signal",
                signal.Signals(-result.returncode).name,
            )
        sys.exit(result.returncode)
        return True
    return False
Ensures that Scalene runs with libscalene preloaded, if necessary, as well as any other required environment variables. Returns true iff we had to run another process.
setup_preload
python
plasma-umass/scalene
scalene/scalene_preload.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_preload.py
Apache-2.0
def add_stack(
    frame: FrameType,
    should_trace: Callable[[Filename, str], bool],
    stacks: Dict[Any, Any],
    python_time: float,
    c_time: float,
    cpu_samples: float,
) -> None:
    """Add one to the stack starting from this frame."""
    stk: List[Tuple[str, str, int]] = list()
    f: Optional[FrameType] = frame
    while f:
        if should_trace(Filename(f.f_code.co_filename), f.f_code.co_name):
            stk.insert(
                0,
                (f.f_code.co_filename, get_fully_qualified_name(f), f.f_lineno),
            )
        f = f.f_back
    if tuple(stk) not in stacks:
        stacks[tuple(stk)] = (1, python_time, c_time, cpu_samples)
    else:
        (prev_count, prev_python_time, prev_c_time, prev_cpu_samples) = stacks[
            tuple(stk)
        ]
        stacks[tuple(stk)] = (
            prev_count + 1,
            prev_python_time + python_time,
            prev_c_time + c_time,
            prev_cpu_samples + cpu_samples,
        )
Add one to the stack starting from this frame.
add_stack
python
plasma-umass/scalene
scalene/scalene_utility.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_utility.py
Apache-2.0
def on_stack(
    frame: FrameType, fname: Filename, lineno: LineNumber
) -> Optional[FrameType]:
    """Find a frame matching the given filename and line number, if any.

    Used for checking whether we are still executing the same line
    of code or not in invalidate_lines (for per-line memory accounting).
    """
    f = frame
    current_file_and_line = (fname, lineno)
    while f:
        if (f.f_code.co_filename, f.f_lineno) == current_file_and_line:
            return f
        f = cast(FrameType, f.f_back)
    return None
Find a frame matching the given filename and line number, if any. Used for checking whether we are still executing the same line of code or not in invalidate_lines (for per-line memory accounting).
on_stack
python
plasma-umass/scalene
scalene/scalene_utility.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_utility.py
Apache-2.0
def flamegraph_format(stacks: Dict[Tuple[Any], Any]) -> str:
    """Converts stacks to a string suitable for input to Brendan Gregg's flamegraph.pl script."""
    output = ""
    for stk in stacks.keys():
        for item in stk:
            (fname, fn_name, lineno) = item
            output += f"{fname} {fn_name}:{lineno};"
        output += " " + str(stacks[stk][0])
        output += "\n"
    return output
Converts stacks to a string suitable for input to Brendan Gregg's flamegraph.pl script.
flamegraph_format
python
plasma-umass/scalene
scalene/scalene_utility.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_utility.py
Apache-2.0
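A made-up stacks dict and the line flamegraph.pl would receive from flamegraph_format (assuming the function above is in scope):

stacks = {
    (("prog.py", "main", 3), ("prog.py", "work", 10)): (7, 0.5, 0.1, 7),
}
print(flamegraph_format(stacks), end="")
# prog.py main:3;prog.py work:10; 7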
def clear(self) -> None:
    """Reset for new samples"""
    self._n = 0
    self._m1 = self._m2 = self._m3 = self._m4 = 0.0
    self._peak = 0.0
Reset for new samples
clear
python
plasma-umass/scalene
scalene/runningstats.py
https://github.com/plasma-umass/scalene/blob/master/scalene/runningstats.py
Apache-2.0
def push(self, x: float) -> None:
    """Add a sample"""
    if x > self._peak:
        self._peak = x
    n1 = self._n
    self._n += 1
    delta = x - self._m1
    delta_n = delta / self._n
    delta_n2 = delta_n * delta_n
    term1 = delta * delta_n * n1
    self._m1 += delta_n
    self._m4 += (
        term1 * delta_n2 * (self._n * self._n - 3 * self._n + 3)
        + 6 * delta_n2 * self._m2
        - 4 * delta_n * self._m3
    )
    self._m3 += term1 * delta_n * (self._n - 2) - 3 * delta_n * self._m2
    self._m2 += term1
Add a sample
push
python
plasma-umass/scalene
scalene/runningstats.py
https://github.com/plasma-umass/scalene/blob/master/scalene/runningstats.py
Apache-2.0
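push implements a Welford-style running-moments update; expected behavior, assuming a RunningStats class exposing the methods in these records:

rs = RunningStats()
for x in [1.0, 2.0, 3.0, 4.0]:
    rs.push(x)
print(rs.mean())  # 2.5
print(rs.var())   # 1.666...  (sample variance, n-1 denominator)
print(rs.peak())  # 4.0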
def peak(self) -> float:
    """The maximum sample seen."""
    return self._peak
The maximum sample seen.
peak
python
plasma-umass/scalene
scalene/runningstats.py
https://github.com/plasma-umass/scalene/blob/master/scalene/runningstats.py
Apache-2.0
def size(self) -> int:
    """The number of samples"""
    return self._n
The number of samples
size
python
plasma-umass/scalene
scalene/runningstats.py
https://github.com/plasma-umass/scalene/blob/master/scalene/runningstats.py
Apache-2.0
def mean(self) -> float:
    """Arithmetic mean, a.k.a. average"""
    return self._m1
Arithmetic mean, a.k.a. average
mean
python
plasma-umass/scalene
scalene/runningstats.py
https://github.com/plasma-umass/scalene/blob/master/scalene/runningstats.py
Apache-2.0
def var(self) -> float:
    """Variance"""
    return self._m2 / (self._n - 1.0)
Variance
var
python
plasma-umass/scalene
scalene/runningstats.py
https://github.com/plasma-umass/scalene/blob/master/scalene/runningstats.py
Apache-2.0
def std(self) -> float:
    """Standard deviation"""
    return math.sqrt(self.var())
Standard deviation
std
python
plasma-umass/scalene
scalene/runningstats.py
https://github.com/plasma-umass/scalene/blob/master/scalene/runningstats.py
Apache-2.0
def sem(self) -> float:
    """Standard error of the mean"""
    return self.std() / math.sqrt(self._n)
Standard error of the mean
sem
python
plasma-umass/scalene
scalene/runningstats.py
https://github.com/plasma-umass/scalene/blob/master/scalene/runningstats.py
Apache-2.0
def _find_apple_gpu_service() -> io_registry_entry_t:
    """
    Grabs the first service matching "IOAccelerator" (integrated GPU).
    Returns None if not found.
    """
    matching = IOServiceMatching(b"IOAccelerator")
    if not matching:
        return None
    service_obj = IOServiceGetMatchingService(kIOMasterPortDefault, matching)
    # service_obj is automatically retained if found.
    # No need to release 'matching' (it is CFTypeRef, but handled by the system).
    return service_obj
Grabs the first service matching "IOAccelerator" (integrated GPU). Returns None if not found.
_find_apple_gpu_service
python
plasma-umass/scalene
scalene/scalene_apple_gpu.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_apple_gpu.py
Apache-2.0
def _read_gpu_core_count(service_obj: io_registry_entry_t) -> int:
    """
    Reads the top-level "gpu-core-count" from the service.
    (Only needed once, as it shouldn't change.)
    """
    if not service_obj:
        return 0
    cf_core_count = IORegistryEntryCreateCFProperty(
        service_obj, cf_str_gpu_core_count, None, 0
    )
    if not cf_core_count or (CFGetTypeID(cf_core_count) != CFNumberGetTypeID()):
        if cf_core_count:
            IOObjectRelease(cf_core_count)
        return 0
    val_container_64 = ctypes.c_longlong(0)
    success = CFNumberGetValue(
        cf_core_count, kCFNumberSInt64Type, ctypes.byref(val_container_64)
    )
    IOObjectRelease(cf_core_count)
    return val_container_64.value if success else 0
Reads the top-level "gpu-core-count" from the service. (Only needed once, as it shouldn't change.)
_read_gpu_core_count
python
plasma-umass/scalene
scalene/scalene_apple_gpu.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_apple_gpu.py
Apache-2.0
def _read_perf_stats(service_obj: io_registry_entry_t) -> Tuple[float, float]:
    """
    Returns (utilization [0..1], in_use_mem_MB).
    Reads the "PerformanceStatistics" sub-dict via IORegistryEntryCreateCFProperty.
    """
    if not service_obj:
        return (0.0, 0.0)
    # Grab the PerformanceStatistics dictionary
    perf_dict_ref = IORegistryEntryCreateCFProperty(
        service_obj, cf_str_perf_stats, None, 0
    )
    if not perf_dict_ref or (CFGetTypeID(perf_dict_ref) != CFDictionaryGetTypeID()):
        if perf_dict_ref:
            IOObjectRelease(perf_dict_ref)
        return (0.0, 0.0)
    # Device Utilization
    device_util = 0.0
    util_val_ref = CFDictionaryGetValue(perf_dict_ref, cf_str_device_util)
    if util_val_ref and (CFGetTypeID(util_val_ref) == CFNumberGetTypeID()):
        val64 = ctypes.c_longlong(0)
        if CFNumberGetValue(util_val_ref, kCFNumberSInt64Type, ctypes.byref(val64)):
            device_util = val64.value / 100.0
    # In-use memory
    in_use_mem = 0.0
    mem_val_ref = CFDictionaryGetValue(perf_dict_ref, cf_str_inuse_mem)
    if mem_val_ref and (CFGetTypeID(mem_val_ref) == CFNumberGetTypeID()):
        val64 = ctypes.c_longlong(0)
        if CFNumberGetValue(mem_val_ref, kCFNumberSInt64Type, ctypes.byref(val64)):
            in_use_mem = float(val64.value) / 1048576.0  # convert bytes -> MB
    IOObjectRelease(perf_dict_ref)
    return (device_util, in_use_mem)
Returns (utilization [0..1], in_use_mem_MB). Reads the "PerformanceStatistics" sub-dict via IORegistryEntryCreateCFProperty.
_read_perf_stats
python
plasma-umass/scalene
scalene/scalene_apple_gpu.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_apple_gpu.py
Apache-2.0
def has_gpu(self) -> bool:
    """Return True if we found an Apple integrated GPU service."""
    return bool(self._service_obj)
Return True if we found an Apple integrated GPU service.
has_gpu
python
plasma-umass/scalene
scalene/scalene_apple_gpu.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_apple_gpu.py
Apache-2.0
def reinit(self) -> None:
    """No-op for compatibility with other GPU wrappers."""
    pass
No-op for compatibility with other GPU wrappers.
reinit
python
plasma-umass/scalene
scalene/scalene_apple_gpu.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_apple_gpu.py
Apache-2.0
def get_stats(self) -> Tuple[float, float]:
    """Return (util%, memory_in_use_MB)."""
    if not self.has_gpu():
        return (0.0, 0.0)
    try:
        util, mem = _read_perf_stats(self._service_obj)
        return (util, mem)
    except Exception:
        return (0.0, 0.0)
Return (util%, memory_in_use_MB).
get_stats
python
plasma-umass/scalene
scalene/scalene_apple_gpu.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_apple_gpu.py
Apache-2.0
def __del__(self):
    """Release the service object if it exists."""
    if self._service_obj:
        IOObjectRelease(self._service_obj)
        self._service_obj = None
Release the service object if it exists.
__del__
python
plasma-umass/scalene
scalene/scalene_apple_gpu.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_apple_gpu.py
Apache-2.0
def find_browser(browserClass: Optional[str] = None) -> Optional[str]:
    """Find the default system browser, excluding text browsers.

    If you want a specific browser, pass its class as an argument."""
    text_browsers = [
        "browsh",
        "elinks",
        "links",
        "lynx",
        "w3m",
    ]
    try:
        # Get the default browser object
        browser = webbrowser.get(browserClass)
        browser_name = (
            browser.name if browser.name else browser.__class__.__name__
        )
        return browser_name if browser_name not in text_browsers else None
    except AttributeError:
        # https://github.com/plasma-umass/scalene/issues/790
        # https://github.com/python/cpython/issues/105545
        # MacOSXOSAScript._name was deprecated but for pre-Python 3.11,
        # we need to refer to it as such to prevent this error:
        # 'MacOSXOSAScript' object has no attribute 'name'
        browser = webbrowser.get(browserClass)
        return browser._name if browser._name not in text_browsers else None  # type: ignore[attr-defined]
    except webbrowser.Error:
        # Return None if there is an error in getting the browser
        return None
Find the default system browser, excluding text browsers. If you want a specific browser, pass its class as an argument.
find_browser
python
plasma-umass/scalene
scalene/find_browser.py
https://github.com/plasma-umass/scalene/blob/master/scalene/find_browser.py
Apache-2.0
def scalene(self, line: str, cell: str = "") -> None:
    """%%scalene magic: see https://github.com/plasma-umass/scalene for usage info."""
    if line:
        sys.argv = ["scalene", "--ipython", *line.split()]
        (args, _left) = ScaleneParseArgs.parse_args()
        # print(f"{args=}, {_left=}")
    else:
        args = ScaleneArguments()
        # print(f"{args=}")
    if args and cell:
        # Preface with a "\n" to drop the first line (%%scalene).
        self.run_code(args, "\n" + cell)  # type: ignore
%%scalene magic: see https://github.com/plasma-umass/scalene for usage info.
scalene
python
plasma-umass/scalene
scalene/scalene_magics.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_magics.py
Apache-2.0
def scrun(self, line: str = "") -> None:
    """%scrun magic: see https://github.com/plasma-umass/scalene for usage info."""
    if line:
        sys.argv = ["scalene", "--ipython", *line.split()]
        (args, left) = ScaleneParseArgs.parse_args()
        if args:
            self.run_code(args, " ".join(left))  # type: ignore
%scrun magic: see https://github.com/plasma-umass/scalene for usage info.
scrun
python
plasma-umass/scalene
scalene/scalene_magics.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_magics.py
Apache-2.0
def find_available_port(start_port: int, end_port: int) -> Optional[int]:
    """
    Finds an available port within a given range.

    Parameters:
    - start_port (int): the starting port number to search from
    - end_port (int): the ending port number to search up to (inclusive)

    Returns:
    - int: the first available port number found in the given range, or None
      if no ports are available
    """
    for port in range(start_port, end_port + 1):
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.bind(("", port))
                return port
        except OSError:
            continue
    return None
Finds an available port within a given range. Parameters: - start_port (int): the starting port number to search from - end_port (int): the ending port number to search up to (inclusive) Returns: - int: the first available port number found in the given range, or None if no ports are available
find_available_port
python
plasma-umass/scalene
scalene/scalene_jupyter.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_jupyter.py
Apache-2.0
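A hypothetical call to find_available_port (assuming the function above is in scope; the range is arbitrary):

port = find_available_port(8000, 8010)
if port is None:
    print("no free port in range 8000-8010")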
def log_message(self, format: str, *args: Any) -> None:
    """overriding log_message to disable all messages from webserver"""
    pass
overriding log_message to disable all messages from webserver
display_profile.log_message
python
plasma-umass/scalene
scalene/scalene_jupyter.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_jupyter.py
Apache-2.0
def redirect_python(
    preface: str, cmdline: str, python_alias_dir: pathlib.Path
) -> str:
    """
    Redirects Python calls to a different command with a preface and cmdline.

    Args:
        preface: A string to be prefixed to the Python command.
        cmdline: Additional command line arguments to be appended.
        python_alias_dir: The directory where the alias scripts will be stored.
    """
    base_python_extension = ".exe" if sys.platform == "win32" else ""
    all_python_names = [
        "python" + base_python_extension,
        f"python{sys.version_info.major}{base_python_extension}",
        f"python{sys.version_info.major}.{sys.version_info.minor}{base_python_extension}",
    ]
    shebang = "@echo off" if sys.platform == "win32" else "#!/bin/bash"
    all_args = "%*" if sys.platform == "win32" else '"$@"'
    payload = f"{shebang}\n{preface} {sys.executable} -m scalene {cmdline} {all_args}\n"
    for name in all_python_names:
        fname = python_alias_dir / name
        if sys.platform == "win32":
            fname = fname.with_suffix(".bat")
        try:
            with open(fname, "w") as file:
                file.write(payload)
            if sys.platform != "win32":
                os.chmod(fname, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR)
        except IOError as e:
            print(f"Error writing to {fname}: {e}")
    sys.path.insert(0, str(python_alias_dir))
    os.environ["PATH"] = f"{python_alias_dir}{os.pathsep}{os.environ['PATH']}"
    orig_sys_executable = sys.executable
    # Compute the new sys executable path
    sys_executable_path = python_alias_dir / all_python_names[0]
    # On Windows, adjust the path to use a .bat file instead of .exe
    if sys.platform == "win32" and sys_executable_path.suffix == ".exe":
        sys_executable_path = sys_executable_path.with_suffix(".bat")
    sys.executable = str(sys_executable_path)
    return orig_sys_executable
Redirects Python calls to a different command with a preface and cmdline. Args: preface: A string to be prefixed to the Python command. cmdline: Additional command line arguments to be appended. python_alias_dir: The directory where the alias scripts will be stored.
redirect_python
python
plasma-umass/scalene
scalene/redirect_python.py
https://github.com/plasma-umass/scalene/blob/master/scalene/redirect_python.py
Apache-2.0
def _get_module_details(
    mod_name: str,
    error: Type[Exception] = ImportError,
) -> Tuple[str, ModuleSpec, CodeType]:
    """Copy of `runpy._get_module_details`, but not private."""
    if mod_name.startswith("."):
        raise error("Relative module names not supported")
    pkg_name, _, _ = mod_name.rpartition(".")
    if pkg_name:
        # Try importing the parent to avoid catching initialization errors
        try:
            __import__(pkg_name)
        except ImportError as e:
            # If the parent or higher ancestor package is missing, let the
            # error be raised by find_spec() below and then be caught. But do
            # not allow other errors to be caught.
            if e.name is None or (
                e.name != pkg_name and not pkg_name.startswith(e.name + ".")
            ):
                raise
        # Warn if the module has already been imported under its normal name
        existing = sys.modules.get(mod_name)
        if existing is not None and not hasattr(existing, "__path__"):
            from warnings import warn

            msg = (
                "{mod_name!r} found in sys.modules after import of "
                "package {pkg_name!r}, but prior to execution of "
                "{mod_name!r}; this may result in unpredictable "
                "behaviour".format(mod_name=mod_name, pkg_name=pkg_name)
            )
            warn(RuntimeWarning(msg))
    try:
        spec = importlib.util.find_spec(mod_name)
    except (ImportError, AttributeError, TypeError, ValueError) as ex:
        # This hack fixes an impedance mismatch between pkgutil and
        # importlib, where the latter raises other errors for cases where
        # pkgutil previously raised ImportError
        msg = "Error while finding module specification for {!r} ({}: {})"
        if mod_name.endswith(".py"):
            msg += (
                f". Try using '{mod_name[:-3]}' instead of "
                f"'{mod_name}' as the module name."
            )
        raise error(msg.format(mod_name, type(ex).__name__, ex)) from ex
    if spec is None:
        raise error("No module named %s" % mod_name)
    if spec.submodule_search_locations is not None:
        if mod_name == "__main__" or mod_name.endswith(".__main__"):
            raise error("Cannot use package as __main__ module")
        try:
            pkg_main_name = mod_name + ".__main__"
            return _get_module_details(pkg_main_name, error)
        except error as e:
            if mod_name not in sys.modules:
                raise  # No module loaded; being a package is irrelevant
            raise error(
                ("%s; %r is a package and cannot " + "be directly executed")
                % (e, mod_name)
            )
    loader = spec.loader
    # use isinstance instead of `is None` to placate mypy
    if not isinstance(loader, SourceLoader):
        raise error(
            "%r is a namespace package and cannot be executed" % mod_name
        )
    try:
        code = loader.get_code(mod_name)
    except ImportError as e:
        raise error(format(e)) from e
    if code is None:
        raise error("No code object available for %s" % mod_name)
    return mod_name, spec, code
Copy of `runpy._get_module_details`, but not private.
_get_module_details
python
plasma-umass/scalene
scalene/get_module_details.py
https://github.com/plasma-umass/scalene/blob/master/scalene/get_module_details.py
Apache-2.0
def thread_join_replacement(
    self: threading.Thread, timeout: Optional[float] = None
) -> None:
    """We replace threading.Thread.join with this method which always periodically yields."""
    start_time = time.perf_counter()
    interval = sys.getswitchinterval()
    while self.is_alive():
        scalene.set_thread_sleeping(threading.get_ident())
        orig_thread_join(self, interval)
        scalene.reset_thread_sleeping(threading.get_ident())
        # If a timeout was specified, check to see if it's expired.
        if timeout is not None:
            end_time = time.perf_counter()
            if end_time - start_time >= timeout:
                return None
    return None
We replace threading.Thread.join with this method which always periodically yields.
replacement_thread_join.thread_join_replacement
python
plasma-umass/scalene
scalene/replacement_thread_join.py
https://github.com/plasma-umass/scalene/blob/master/scalene/replacement_thread_join.py
Apache-2.0
def set_timer_signals(self, use_virtual_time: bool = True) -> None:
    """
    Set up timer signals for CPU profiling.

    use_virtual_time: bool, default True
        If True, sets virtual timer signals, otherwise sets real timer signals.
    """
    if sys.platform == "win32":
        self.cpu_timer_signal = signal.SIGBREAK
        # Note: on Windows, this is unused, so any signal will do
        self.cpu_signal = signal.SIGBREAK
        return
    if use_virtual_time:
        self.cpu_timer_signal = signal.ITIMER_VIRTUAL
        self.cpu_signal = signal.SIGVTALRM
    else:
        self.cpu_timer_signal = signal.ITIMER_REAL
        self.cpu_signal = signal.SIGALRM
Set up timer signals for CPU profiling. use_virtual_time: bool, default True If True, sets virtual timer signals, otherwise sets real timer signals.
set_timer_signals
python
plasma-umass/scalene
scalene/scalene_signals.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_signals.py
Apache-2.0
def get_timer_signals(self) -> Tuple[int, signal.Signals]:
    """
    Returns a 2-tuple of the integers representing the CPU timer signal and the CPU signal.
    """
    return self.cpu_timer_signal, self.cpu_signal
Returns a 2-tuple of the integers representing the CPU timer signal and the CPU signal.
get_timer_signals
python
plasma-umass/scalene
scalene/scalene_signals.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_signals.py
Apache-2.0
def get_all_signals(self) -> List[signal.Signals]:
    """
    Return all the signals used for controlling profiling, except the CPU timer.
    """
    return [
        self.start_profiling_signal,
        self.stop_profiling_signal,
        self.memcpy_signal,
        self.malloc_signal,
        self.free_signal,
        self.cpu_signal,
    ]
Return all the signals used for controlling profiling, except the CPU timer.
get_all_signals
python
plasma-umass/scalene
scalene/scalene_signals.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_signals.py
Apache-2.0
def memory_consumed_str(size_in_mb: float) -> str:
    """Return a string corresponding to amount of memory consumed."""
    gigabytes = size_in_mb // 1024
    terabytes = gigabytes // 1024
    if terabytes > 0:
        return f"{(size_in_mb / 1048576):3.3f} TB"
    elif gigabytes > 0:
        return f"{(size_in_mb / 1024):3.3f} GB"
    else:
        return f"{size_in_mb:3.3f} MB"
Return a string corresponding to amount of memory consumed.
memory_consumed_str
python
plasma-umass/scalene
scalene/scalene_json.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_json.py
Apache-2.0
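The unit thresholds in memory_consumed_str, traced by hand from the code above:

memory_consumed_str(512.0)       # '512.000 MB'
memory_consumed_str(2048.0)      # '2.000 GB'
memory_consumed_str(2097152.0)   # '2.000 TB'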
def replacement_fork(scalene: Scalene) -> None:
    """
    Executes Scalene fork() handling.

    Works just like os.register_at_fork(), but unlike that
    also provides the child PID.
    """
    orig_fork = os.fork

    def fork_replacement() -> int:
        scalene.before_fork()
        child_pid = orig_fork()
        if child_pid == 0:
            scalene.after_fork_in_child()
        else:
            scalene.after_fork_in_parent(child_pid)
        return child_pid

    os.fork = fork_replacement
Executes Scalene fork() handling. Works just like os.register_at_fork(), but unlike that also provides the child PID.
replacement_fork
python
plasma-umass/scalene
scalene/replacement_fork.py
https://github.com/plasma-umass/scalene/blob/master/scalene/replacement_fork.py
Apache-2.0
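The same wrap-and-delegate shape in isolation (a sketch; the hook names below are hypothetical): capture the original os.fork, run callbacks around it, and hand the parent the child PID that os.register_at_fork does not expose.

import os
from typing import Callable

def install_fork_hooks(
    before: Callable[[], None],
    in_child: Callable[[], None],
    in_parent: Callable[[int], None],
) -> None:
    orig_fork = os.fork  # keep a reference to the real fork

    def fork_with_hooks() -> int:
        before()
        pid = orig_fork()
        if pid == 0:
            in_child()        # we are in the child process
        else:
            in_parent(pid)    # the parent learns the child PID
        return pid

    os.fork = fork_with_hooks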
def _in_wsl() -> bool: """Are we in Windows Subsystem for Linux?""" return "WSL_DISTRO_NAME" in os.environ
Are we in Windows Subsystem for Linux?
_in_wsl
python
plasma-umass/scalene
scalene/sparkline.py
https://github.com/plasma-umass/scalene/blob/master/scalene/sparkline.py
Apache-2.0
def _in_windows_terminal() -> bool: """Are we in Windows Terminal? https://aka.ms/windowsterminal """ return "WT_PROFILE_ID" in os.environ
Are we in Windows Terminal? https://aka.ms/windowsterminal
_in_windows_terminal
python
plasma-umass/scalene
scalene/sparkline.py
https://github.com/plasma-umass/scalene/blob/master/scalene/sparkline.py
Apache-2.0
def clean_exit(code: object = 0) -> NoReturn: """Replacement for sys.exit that exits cleanly from within Jupyter notebooks.""" raise StopJupyterExecution
Replacement for sys.exit that exits cleanly from within Jupyter notebooks.
clean_exit
python
plasma-umass/scalene
scalene/scalene_parseargs.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_parseargs.py
Apache-2.0
def parse_args() -> Tuple[argparse.Namespace, List[str]]: # In IPython, intercept exit cleanly (because sys.exit triggers a backtrace). with contextlib.suppress(BaseException): from IPython import get_ipython if get_ipython(): sys.exit = ScaleneParseArgs.clean_exit sys._exit = ScaleneParseArgs.clean_exit # type: ignore defaults = ScaleneArguments() usage = dedent( rf"""[b]Scalene[/b]: a high-precision CPU and memory profiler, version {scalene_version} ({scalene_date}) [link=https://github.com/plasma-umass/scalene]https://github.com/plasma-umass/scalene[/link] command-line: % [b]scalene \[options] your_program.py \[--- --your_program_args] [/b] or % [b]python3 -m scalene \[options] your_program.py \[--- --your_program_args] [/b] in Jupyter, line mode: [b] %scrun \[options] statement[/b] in Jupyter, cell mode: [b] %%scalene \[options] your code here [/b] """ ) # NOTE: below is only displayed on non-Windows platforms. epilog = dedent( """When running Scalene in the background, you can suspend/resume profiling for the process ID that Scalene reports. For example: % python3 -m scalene [options] yourprogram.py & Scalene now profiling process 12345 to suspend profiling: python3 -m scalene.profile --off --pid 12345 to resume profiling: python3 -m scalene.profile --on --pid 12345 """ ) parser = RichArgParser( # argparse.ArgumentParser( prog="scalene", description=usage, epilog=epilog if sys.platform != "win32" else "", formatter_class=argparse.RawTextHelpFormatter, allow_abbrev=False, ) parser.add_argument( "--version", dest="version", action="store_const", const=True, help="prints the version number for this release of Scalene and exits", ) parser.add_argument( "--column-width", dest="column_width", type=int, default=defaults.column_width, help=f"Column width for profile output (default: [blue]{defaults.column_width}[/blue])", ) parser.add_argument( "--outfile", type=str, default=defaults.outfile, help="file to hold profiler output (default: [blue]" + ("stdout" if not defaults.outfile else defaults.outfile) + "[/blue])", ) parser.add_argument( "--html", dest="html", action="store_const", const=True, default=defaults.html, help="output as HTML (default: [blue]" + str("html" if defaults.html else "web") + "[/blue])", ) parser.add_argument( "--json", dest="json", action="store_const", const=True, default=defaults.json, help="output as JSON (default: [blue]" + str("json" if defaults.json else "web") + "[/blue])", ) parser.add_argument( "--cli", dest="cli", action="store_const", const=True, default=defaults.cli, help="forces use of the command-line", ) parser.add_argument( "--stacks", dest="stacks", action="store_const", const=True, default=defaults.stacks, help="collect stack traces", ) parser.add_argument( "--web", dest="web", action="store_const", const=True, default=defaults.web, help="opens a web tab to view the profile (saved as 'profile.html')", ) parser.add_argument( "--no-browser", dest="no_browser", action="store_const", const=True, default=defaults.no_browser, help="doesn't open a web tab; just saves the profile ('profile.html')", ) parser.add_argument( "--viewer", dest="viewer", action="store_const", const=True, default=False, help="opens the Scalene web UI.", ) parser.add_argument( "--reduced-profile", dest="reduced_profile", action="store_const", const=True, default=defaults.reduced_profile, help=f"generate a reduced profile, with non-zero lines only (default: [blue]{defaults.reduced_profile}[/blue])", ) parser.add_argument( "--profile-interval", type=float, 
default=defaults.profile_interval, help=f"output profiles every so many seconds (default: [blue]{defaults.profile_interval}[/blue])", ) parser.add_argument( "--cpu", dest="cpu", action="store_const", const=True, default=None, help="profile CPU time (default: [blue] True [/blue])", ) parser.add_argument( "--cpu-only", dest="cpu", action="store_const", const=True, default=None, help="profile CPU time ([red]deprecated: use --cpu [/red])", ) parser.add_argument( "--gpu", dest="gpu", action="store_const", const=True, default=None, help="profile GPU time and memory (default: [blue]" + (str(defaults.gpu)) + " [/blue])", ) if sys.platform == "win32": memory_profile_message = ( "profile memory (not supported on this platform)" ) else: memory_profile_message = ( "profile memory (default: [blue]" + (str(defaults.memory)) + " [/blue])" ) parser.add_argument( "--memory", dest="memory", action="store_const", const=True, default=None, help=memory_profile_message, ) parser.add_argument( "--profile-all", dest="profile_all", action="store_const", const=True, default=defaults.profile_all, help="profile all executed code, not just the target program (default: [blue]" + ( "all code" if defaults.profile_all else "only the target program" ) + "[/blue])", ) parser.add_argument( "--profile-only", dest="profile_only", type=str, default=defaults.profile_only, help="profile only code in filenames that contain the given strings, separated by commas (default: [blue]" + ( "no restrictions" if not defaults.profile_only else defaults.profile_only ) + "[/blue])", ) parser.add_argument( "--profile-exclude", dest="profile_exclude", type=str, default=defaults.profile_exclude, help="do not profile code in filenames that contain the given strings, separated by commas (default: [blue]" + ( "no restrictions" if not defaults.profile_exclude else defaults.profile_exclude ) + "[/blue])", ) parser.add_argument( "--use-virtual-time", dest="use_virtual_time", action="store_const", const=True, default=defaults.use_virtual_time, help=f"measure only CPU time, not time spent in I/O or blocking (default: [blue]{defaults.use_virtual_time}[/blue])", ) parser.add_argument( "--cpu-percent-threshold", dest="cpu_percent_threshold", type=float, default=defaults.cpu_percent_threshold, help=f"only report profiles with at least this percent of CPU time (default: [blue]{defaults.cpu_percent_threshold}%%[/blue])", ) parser.add_argument( "--cpu-sampling-rate", dest="cpu_sampling_rate", type=float, default=defaults.cpu_sampling_rate, help=f"CPU sampling rate (default: every [blue]{defaults.cpu_sampling_rate}s[/blue])", ) parser.add_argument( "--allocation-sampling-window", dest="allocation_sampling_window", type=int, default=defaults.allocation_sampling_window, help=f"Allocation sampling window size, in bytes (default: [blue]{defaults.allocation_sampling_window} bytes[/blue])", ) parser.add_argument( "--malloc-threshold", dest="malloc_threshold", type=int, default=defaults.malloc_threshold, help=f"only report profiles with at least this many allocations (default: [blue]{defaults.malloc_threshold}[/blue])", ) parser.add_argument( "--program-path", dest="program_path", type=str, default="", help="The directory containing the code to profile (default: [blue]the path to the profiled program[/blue])", ) parser.add_argument( "--memory-leak-detector", dest="memory_leak_detector", action="store_true", default=defaults.memory_leak_detector, help="EXPERIMENTAL: report likely memory leaks (default: [blue]" + (str(defaults.memory_leak_detector)) + "[/blue])", ) 
parser.add_argument( "--ipython", dest="ipython", action="store_const", const=True, default=False, help=argparse.SUPPRESS, ) if sys.platform != "win32": # Turning profiling on and off from another process is currently not supported on Windows. group = parser.add_mutually_exclusive_group(required=False) group.add_argument( "--on", action="store_true", help="start with profiling on (default)", ) group.add_argument( "--off", action="store_true", help="start with profiling off" ) # the PID of the profiling process (for internal use only) parser.add_argument( "--pid", type=int, default=0, help=argparse.SUPPRESS ) # collect all arguments after "---", which Scalene will ignore parser.add_argument( "---", dest="unused_args", default=[], help=argparse.SUPPRESS, nargs=argparse.REMAINDER, ) # Parse out all Scalene arguments. # https://stackoverflow.com/questions/35733262/is-there-any-way-to-instruct-argparse-python-2-7-to-remove-found-arguments-fro args, left = parser.parse_known_args() # Validate file/directory arguments if args.outfile and os.path.isdir(args.outfile): parser.error(f"outfile {args.outfile} is a directory") # Hack to simplify functionality for Windows platforms. if sys.platform == "win32": args.on = True args.pid = 0 left += args.unused_args import re # Launch the UI if `--viewer` was selected. if args.viewer: if find_browser(): assert not args.no_browser dir = os.path.dirname(__file__) import scalene.scalene_config import subprocess subprocess.Popen( [ sys.executable, f"{dir}{os.sep}launchbrowser.py", "demo", str(scalene.scalene_config.SCALENE_PORT), ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, ) sys.exit(0) pass else: print( "Scalene: could not open a browser." ) # {scalene_gui_url}.") sys.exit(0) # If any of the individual profiling metrics were specified, # disable the unspecified ones (set as None). if args.cpu or args.gpu or args.memory: if not args.memory: args.memory = False if not args.gpu: args.gpu = False else: # Nothing specified; use defaults. args.cpu = defaults.cpu args.gpu = defaults.gpu args.memory = defaults.memory args.cpu = True # Always true in_jupyter_notebook = len(sys.argv) >= 1 and re.match( r"_ipython-input-([0-9]+)-.*", sys.argv[0] ) # If the user did not enter any commands (just `scalene` or `python3 -m scalene`), # print the usage information and bail. if not in_jupyter_notebook and (len(sys.argv) + len(left) == 1): parser.print_help(sys.stderr) sys.exit(-1) if args.version: print(f"Scalene version {scalene_version} ({scalene_date})") if not args.ipython: sys.exit(-1) # Clear out the namespace. We do this to indicate that we should not run further in IPython. for arg in list(args.__dict__): delattr(args, arg) # was: # args = ( # [] # ) # We use this to indicate that we should not run further in IPython. return args, left
When running Scalene in the background, you can suspend/resume profiling for the process ID that Scalene reports. For example:

% python3 -m scalene [options] yourprogram.py &
Scalene now profiling process 12345
  to suspend profiling: python3 -m scalene.profile --off --pid 12345
  to resume profiling:  python3 -m scalene.profile --on  --pid 12345
parse_args
python
plasma-umass/scalene
scalene/scalene_parseargs.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_parseargs.py
Apache-2.0
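One detail worth isolating from the parser above is the "---" separator: an option literally named "---" with nargs=argparse.REMAINDER swallows everything after it verbatim, so the profiled program's own flags pass through untouched. A minimal sketch:

import argparse

parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument("--outfile", default=None)
# Everything after "---" is collected verbatim for the target program.
parser.add_argument("---", dest="unused_args", default=[], nargs=argparse.REMAINDER)

args, left = parser.parse_known_args(
    ["--outfile", "prof.json", "target.py", "---", "--target-flag", "42"]
)
print(args.unused_args)  # ['--target-flag', '42']
print(left)              # ['target.py']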
def put(self, item: Optional[T]) -> None: """Add an item to the queue.""" self.queue.put(item)
Add an item to the queue.
put
python
plasma-umass/scalene
scalene/scalene_sigqueue.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_sigqueue.py
Apache-2.0
def get(self) -> Optional[T]: """Get one item from the queue.""" return self.queue.get()
Get one item from the queue.
get
python
plasma-umass/scalene
scalene/scalene_sigqueue.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_sigqueue.py
Apache-2.0
def start(self) -> None: """Start processing.""" # We use a daemon thread to defensively avoid hanging if we never join with it if not self.thread: self.thread = threading.Thread(target=self.run, daemon=True) self.thread.start()
Start processing.
start
python
plasma-umass/scalene
scalene/scalene_sigqueue.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_sigqueue.py
Apache-2.0
def stop(self) -> None: """Stop processing.""" if self.thread: self.queue.put(None) # We need to join all threads before a fork() to avoid an inconsistent # state, locked mutexes, etc. self.thread.join() self.thread = None
Stop processing.
stop
python
plasma-umass/scalene
scalene/scalene_sigqueue.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_sigqueue.py
Apache-2.0
def run(self) -> None: """Run the function processing items until stop is called. Executed in a separate thread.""" while True: item = self.queue.get() if item is None: # None => stop request break with self.lock: self.process(*item)
Run the function processing items until stop is called. Executed in a separate thread.
run
python
plasma-umass/scalene
scalene/scalene_sigqueue.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_sigqueue.py
Apache-2.0
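Taken together, put/get/start/stop/run above form a classic sentinel-terminated worker queue. A condensed standalone sketch of the same shape (not Scalene's actual class):

import queue
import threading
from typing import Any, Callable, Optional

class WorkerQueue:
    def __init__(self, process: Callable[..., None]) -> None:
        self.queue = queue.SimpleQueue()
        self.process = process
        self.thread: Optional[threading.Thread] = None

    def put(self, item: Any) -> None:
        self.queue.put(item)

    def start(self) -> None:
        if not self.thread:
            # Daemon thread: a missed join cannot hang interpreter exit.
            self.thread = threading.Thread(target=self._run, daemon=True)
            self.thread.start()

    def stop(self) -> None:
        if self.thread:
            self.put(None)       # None is the stop sentinel
            self.thread.join()   # join before any fork to avoid stale state
            self.thread = None

    def _run(self) -> None:
        while (item := self.queue.get()) is not None:
            self.process(*item)

wq = WorkerQueue(print)
wq.start()
wq.put(("hello", "queue"))
wq.stop()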
def scalene_redirect_profile(func: Any) -> Any: """Handle @profile decorators. If Scalene encounters any functions decorated by @profile, it will only report stats for those functions. """ return Scalene.profile(func)
Handle @profile decorators. If Scalene encounters any functions decorated by @profile, it will only report stats for those functions.
scalene_redirect_profile
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def start() -> None: """Start profiling.""" Scalene.start()
Start profiling.
start
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def stop() -> None: """Stop profiling.""" Scalene.stop()
Stop profiling.
stop
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def enable_profiling() -> Generator[None, None, None]:
    """Context manager that starts and stops profiling around a block."""
    start()
    try:
        yield
    finally:
        # Stop profiling even if the body raises.
        stop()
Context manager that starts and stops profiling around a block.
enable_profiling
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
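Usage of the context manager is straightforward; a hedged sketch, assuming scalene is installed and the import path below is current:

from scalene.scalene_profiler import enable_profiling

def hot_loop() -> int:
    return sum(i * i for i in range(1_000_000))

with enable_profiling():
    hot_loop()  # only this block is profiled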
def signal_blocking_wrapper(func: Union[BuiltinFunctionType, FunctionType]) -> Any: """Wrap a function to block the specified signal during its execution.""" @functools.wraps(func) def wrapped(*args: Any, **kwargs: Any) -> Any: # Block the specified signal temporarily original_sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, [signal_to_block]) try: return func(*args, **kwargs) finally: # Restore original signal mask signal.pthread_sigmask(signal.SIG_SETMASK, original_sigmask) return wrapped
Wrap a function to block the specified signal during its execution.
patch_module_functions_with_signal_blocking.signal_blocking_wrapper
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def patch_module_functions_with_signal_blocking(module: ModuleType, signal_to_block: signal.Signals) -> None: """Patch all functions in the given module to block the specified signal during execution.""" def signal_blocking_wrapper(func: Union[BuiltinFunctionType, FunctionType]) -> Any: """Wrap a function to block the specified signal during its execution.""" @functools.wraps(func) def wrapped(*args: Any, **kwargs: Any) -> Any: # Block the specified signal temporarily original_sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, [signal_to_block]) try: return func(*args, **kwargs) finally: # Restore original signal mask signal.pthread_sigmask(signal.SIG_SETMASK, original_sigmask) return wrapped # Iterate through all attributes of the module for attr_name in dir(module): attr = getattr(module, attr_name) if isinstance(attr, BuiltinFunctionType) or isinstance(attr, FunctionType): wrapped_attr = signal_blocking_wrapper(attr) setattr(module, attr_name, wrapped_attr)
Patch all functions in the given module to block the specified signal during execution.
patch_module_functions_with_signal_blocking
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
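The primitive doing the work above is pthread_sigmask. A minimal POSIX-only demonstration of deferring a signal across a critical section and then restoring the previous mask:

import signal
import sys

if sys.platform != "win32":
    # Block SIGPROF for the duration of a critical section.
    old_mask = signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGPROF])
    try:
        pass  # critical section: SIGPROF delivery is deferred here
    finally:
        # Restore the previous mask; any pending signal is delivered now.
        signal.pthread_sigmask(signal.SIG_SETMASK, old_mask)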
def last_profiled_tuple() -> Tuple[Filename, LineNumber, ByteCodeIndex]: """Helper function to type last profiled information.""" return cast( Tuple[Filename, LineNumber, ByteCodeIndex], Scalene.__last_profiled )
Helper function to type last profiled information.
last_profiled_tuple
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def get_original_lock() -> threading.Lock: """Return the true lock, which we shim in replacement_lock.py.""" return Scalene.__original_lock()
Return the true lock, which we shim in replacement_lock.py.
get_original_lock
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def get_all_signals_set() -> Set[int]: """Return the set of all signals currently set. Used by replacement_signal_fns.py to shim signals used by the client program. """ return set(Scalene.__signals.get_all_signals())
Return the set of all signals currently set. Used by replacement_signal_fns.py to shim signals used by the client program.
get_all_signals_set
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def get_timer_signals() -> Tuple[int, signal.Signals]: """Return the set of all TIMER signals currently set. Used by replacement_signal_fns.py to shim timers used by the client program. """ return Scalene.__signals.get_timer_signals()
Return the set of all TIMER signals currently set. Used by replacement_signal_fns.py to shim timers used by the client program.
get_timer_signals
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def set_in_jupyter() -> None: """Tell Scalene that it is running inside a Jupyter notebook.""" Scalene.__in_jupyter = True
Tell Scalene that it is running inside a Jupyter notebook.
set_in_jupyter
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def in_jupyter() -> bool: """Return whether Scalene is running inside a Jupyter notebook.""" return Scalene.__in_jupyter
Return whether Scalene is running inside a Jupyter notebook.
in_jupyter
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def interruption_handler( signum: Union[ Callable[[signal.Signals, FrameType], None], int, signal.Handlers, None, ], this_frame: Optional[FrameType], ) -> None: """Handle keyboard interrupts (e.g., Ctrl-C).""" raise KeyboardInterrupt
Handle keyboard interrupts (e.g., Ctrl-C).
interruption_handler
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def update_line() -> None: """Mark a new line by allocating the trigger number of bytes.""" bytearray(scalene.scalene_config.NEWLINE_TRIGGER_LENGTH)
Mark a new line by allocating the trigger number of bytes.
update_line
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def clear_metrics(cls) -> None: """Clear the various states for forked processes.""" cls.__stats.clear() cls.child_pids.clear()
Clear the various states for forked processes.
clear_metrics
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def add_child_pid(cls, pid: int) -> None: """Add this pid to the set of children. Used when forking.""" cls.child_pids.add(pid)
Add this pid to the set of children. Used when forking.
add_child_pid
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def remove_child_pid(cls, pid: int) -> None: """Remove a child once we have joined with it (used by replacement_pjoin.py).""" with contextlib.suppress(KeyError): cls.child_pids.remove(pid)
Remove a child once we have joined with it (used by replacement_pjoin.py).
remove_child_pid
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def profile(func: Any) -> Any:
    """Record the file and function name.

    Replacement @profile decorator function. Scalene tracks which
    functions - in which files - have been decorated; if any have,
    it then reports stats only for those.
    """
    Scalene.__files_to_profile.add(func.__code__.co_filename)
    Scalene.__functions_to_profile[func.__code__.co_filename].add(func)
    if Scalene.__args.memory:
        Scalene.register_files_to_profile()
    return func
Record the file and function name. Replacement @profile decorator function. Scalene tracks which functions - in which files - have been decorated; if any have, it then reports stats only for those.
profile
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def shim(func: Callable[[Any], Any]) -> Any:
    """Provide a decorator that calls the wrapped function with the Scalene variant.

    Wrapped function must be of type (s: Scalene) -> Any.

    This decorator allows for marking a function in a separate file as a drop-in replacement for an existing library function. The intention is for these functions to replace a function that indefinitely blocks (which interferes with Scalene) with a function that awakens periodically to allow for signals to be delivered.
    """
    func(Scalene)
    # Return the function itself to the calling module so that accidental
    # direct calls still work instead of raising surprising errors.
    @functools.wraps(func)
    def wrapped(*args: Any, **kwargs: Any) -> Any:
        return func(*args, **kwargs)

    return wrapped
Provide a decorator that calls the wrapped function with the Scalene variant. Wrapped function must be of type (s: Scalene) -> Any. This decorator allows for marking a function in a separate file as a drop-in replacement for an existing library function. The intention is for these functions to replace a function that indefinitely blocks (which interferes with Scalene) with a function that awakens periodically to allow for signals to be delivered.
shim
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
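A standalone analogue of the shim pattern (with a stand-in Profiler class, since the real decorator receives the Scalene class itself): the decorated patcher runs once, at decoration time.

import functools
from typing import Any, Callable

class Profiler:  # stand-in for the Scalene class
    pass

def shim(func: Callable[[Any], Any]) -> Any:
    func(Profiler)  # run the patcher immediately, handing it the profiler class
    @functools.wraps(func)
    def wrapped(*args: Any, **kwargs: Any) -> Any:
        return func(*args, **kwargs)
    return wrapped

@shim
def install_replacements(profiler: Any) -> None:
    # Real replacement modules would monkey-patch blocking calls here.
    print(f"patching on behalf of {profiler.__name__}")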
def set_thread_sleeping(tid: int) -> None: """Indicate the given thread is sleeping. Used to attribute CPU time. """ Scalene.__is_thread_sleeping[tid] = True
Indicate the given thread is sleeping. Used to attribute CPU time.
set_thread_sleeping
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def reset_thread_sleeping(tid: int) -> None: """Indicate the given thread is not sleeping. Used to attribute CPU time.""" Scalene.__is_thread_sleeping[tid] = False
Indicate the given thread is not sleeping. Used to attribute CPU time.
reset_thread_sleeping
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def windows_timer_loop() -> None: """For Windows, send periodic timer signals; launch as a background thread.""" assert sys.platform == "win32" Scalene.timer_signals = True while Scalene.timer_signals: Scalene.__windows_queue.get() time.sleep(Scalene.__args.cpu_sampling_rate) Scalene.__orig_raise_signal(Scalene.__signals.cpu_signal)
For Windows, send periodic timer signals; launch as a background thread.
windows_timer_loop
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
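Because Windows has no setitimer, the loop above emulates a periodic timer by raising a signal from a background thread; Python still runs the handler on the main thread. A small cross-platform sketch (using SIGINT only because it exists everywhere; a busy main thread is used so pending handlers run promptly between bytecodes):

import signal
import threading
import time

ticks = []
signal.signal(signal.SIGINT, lambda s, f: ticks.append(time.perf_counter()))

def timer_loop(stop: threading.Event, interval: float) -> None:
    while not stop.is_set():
        time.sleep(interval)
        signal.raise_signal(signal.SIGINT)  # handler runs on the main thread

stop = threading.Event()
threading.Thread(target=timer_loop, args=(stop, 0.05), daemon=True).start()
deadline = time.perf_counter() + 0.25
while time.perf_counter() < deadline:
    pass  # busy main thread: pending handlers are serviced here
stop.set()
print(f"{len(ticks)} tick(s) observed")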
def start_signal_queues() -> None: """Start the signal processing queues (i.e., their threads).""" for sigq in Scalene.__sigqueues: sigq.start()
Start the signal processing queues (i.e., their threads).
start_signal_queues
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def stop_signal_queues() -> None: """Stop the signal processing queues (i.e., their threads).""" for sigq in Scalene.__sigqueues: sigq.stop()
Stop the signal processing queues (i.e., their threads).
stop_signal_queues
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def term_signal_handler( signum: Union[ Callable[[signal.Signals, FrameType], None], int, signal.Handlers, None, ], this_frame: Optional[FrameType], ) -> None: """Handle terminate signals.""" Scalene.stop() Scalene.output_profile() Scalene.__orig_exit(Scalene.__sigterm_exit_code)
Handle terminate signals.
term_signal_handler
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def malloc_signal_handler( signum: Union[ Callable[[signal.Signals, FrameType], None], int, signal.Handlers, None, ], this_frame: Optional[FrameType], ) -> None: """Handle allocation signals.""" if not Scalene.__args.memory: # This should never happen, but we fail gracefully. return from scalene import pywhere # type: ignore if this_frame: Scalene.enter_function_meta(this_frame, Scalene.__stats) # Walk the stack till we find a line of code in a file we are tracing. found_frame = False f = this_frame while f: if found_frame := Scalene.should_trace( f.f_code.co_filename, f.f_code.co_name ): break f = cast(FrameType, f.f_back) if not found_frame: return assert f # Start tracing until we execute a different line of # code in a file we are tracking. # First, see if we have now executed a different line of code. # If so, increment. invalidated = pywhere.get_last_profiled_invalidated() (fname, lineno, lasti) = Scalene.last_profiled_tuple() if ( not invalidated and this_frame and not (on_stack(this_frame, fname, lineno)) ): Scalene.update_profiled() pywhere.set_last_profiled_invalidated_false() # In the setprofile callback, we rely on # __last_profiled always having the same memory address. # This is an optimization to not have to traverse the Scalene profiler # object's dictionary every time we want to update the last profiled line. # # A previous change to this code set Scalene.__last_profiled = [fname, lineno, lasti], # which created a new list object and set the __last_profiled attribute to the new list. This # made the object held in `pywhere.cpp` out of date, and caused the profiler to not update the last profiled line. Scalene.__last_profiled[:] = [ Filename(f.f_code.co_filename), LineNumber(f.f_lineno), ByteCodeIndex(f.f_lasti), ] Scalene.__alloc_sigq.put([0]) pywhere.enable_settrace(this_frame) del this_frame
Handle allocation signals.
malloc_signal_handler
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def free_signal_handler( signum: Union[ Callable[[signal.Signals, FrameType], None], int, signal.Handlers, None, ], this_frame: Optional[FrameType], ) -> None: """Handle free signals.""" if this_frame: Scalene.enter_function_meta(this_frame, Scalene.__stats) Scalene.__alloc_sigq.put([0]) del this_frame
Handle free signals.
free_signal_handler
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def memcpy_signal_handler( signum: Union[ Callable[[signal.Signals, FrameType], None], int, signal.Handlers, None, ], this_frame: Optional[FrameType], ) -> None: """Handle memcpy signals.""" Scalene.__memcpy_sigq.put((signum, this_frame)) del this_frame
Handle memcpy signals.
memcpy_signal_handler
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def enable_signals() -> None: """Set up the signal handlers to handle interrupts for profiling and start the timer interrupts.""" if sys.platform == "win32": Scalene.enable_signals_win32() return Scalene.start_signal_queues() # Set signal handlers for various events. for sig, handler in [ (Scalene.__signals.malloc_signal, Scalene.malloc_signal_handler), (Scalene.__signals.free_signal, Scalene.free_signal_handler), (Scalene.__signals.memcpy_signal, Scalene.memcpy_signal_handler), (signal.SIGTERM, Scalene.term_signal_handler), (Scalene.__signals.cpu_signal, Scalene.cpu_signal_handler), ]: Scalene.__orig_signal(sig, handler) # Set every signal to restart interrupted system calls. for s in Scalene.__signals.get_all_signals(): Scalene.__orig_siginterrupt(s, False) Scalene.__orig_setitimer( Scalene.__signals.cpu_timer_signal, Scalene.__args.cpu_sampling_rate, )
Set up the signal handlers to handle interrupts for profiling and start the timer interrupts.
enable_signals
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def cpu_signal_handler( signum: Union[ Callable[[signal.Signals, FrameType], None], int, signal.Handlers, None, ], this_frame: Optional[FrameType], ) -> None: """Handle CPU signals.""" try: # Get current time stats. now_sys, now_user = get_times() now_virtual = time.process_time() now_wallclock = time.perf_counter() if ( Scalene.__last_signal_time.virtual == 0 or Scalene.__last_signal_time.wallclock == 0 ): # Initialization: store values and update on the next pass. Scalene.__last_signal_time.virtual = now_virtual Scalene.__last_signal_time.wallclock = now_wallclock Scalene.__last_signal_time.sys = now_sys Scalene.__last_signal_time.user = now_user if sys.platform != "win32": Scalene.__orig_setitimer( Scalene.__signals.cpu_timer_signal, Scalene.__args.cpu_sampling_rate, ) return if Scalene.__accelerator: (gpu_load, gpu_mem_used) = Scalene.__accelerator.get_stats() else: (gpu_load, gpu_mem_used) = (0.0, 0.0) # Process this CPU sample. Scalene.process_cpu_sample( signum, Scalene.compute_frames_to_record(), now_virtual, now_wallclock, now_sys, now_user, gpu_load, gpu_mem_used, Scalene.__last_signal_time.virtual, Scalene.__last_signal_time.wallclock, Scalene.__last_signal_time.sys, Scalene.__last_signal_time.user, Scalene.__is_thread_sleeping, ) elapsed = now_wallclock - Scalene.__last_signal_time.wallclock # Store the latest values as the previously recorded values. Scalene.__last_signal_time.virtual = now_virtual Scalene.__last_signal_time.wallclock = now_wallclock Scalene.__last_signal_time.sys = now_sys Scalene.__last_signal_time.user = now_user # Restart the timer while handling any timers set by the client. if sys.platform != "win32": if Scalene.client_timer.is_set: ( should_raise, remaining_time, ) = Scalene.client_timer.yield_next_delay(elapsed) if should_raise: Scalene.__orig_raise_signal(signal.SIGUSR1) # NOTE-- 0 will only be returned if the 'seconds' have elapsed # and there is no interval to_wait: float if remaining_time > 0: to_wait = min( remaining_time, Scalene.__args.cpu_sampling_rate ) else: to_wait = Scalene.__args.cpu_sampling_rate Scalene.client_timer.reset() Scalene.__orig_setitimer( Scalene.__signals.cpu_timer_signal, to_wait, ) else: Scalene.__orig_setitimer( Scalene.__signals.cpu_timer_signal, Scalene.__args.cpu_sampling_rate, ) finally: if sys.platform == "win32": Scalene.__windows_queue.put(None)
Handle CPU signals.
cpu_signal_handler
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def output_profile(program_args: Optional[List[str]] = None) -> bool: """Output the profile. Returns true iff there was any info reported the profile.""" # sourcery skip: inline-immediately-returned-variable # print(flamegraph_format(Scalene.__stats.stacks)) if Scalene.__args.json: json_output = Scalene.__json.output_profiles( Scalene.__program_being_profiled, Scalene.__stats, Scalene.__pid, Scalene.profile_this_code, Scalene.__python_alias_dir, Scalene.__program_path, Scalene.__entrypoint_dir, program_args, profile_memory=Scalene.__args.memory, reduced_profile=Scalene.__args.reduced_profile, ) # Since the default value returned for "there are no samples" # is `{}`, we use a sentinel value `{"is_child": True}` # when inside a child process to indicate that there are samples, but they weren't # turned into a JSON file because they'll later # be used by the parent process if "is_child" in json_output: return True outfile = Scalene.__output.output_file if Scalene.__args.outfile: outfile = os.path.join( os.path.dirname(Scalene.__args.outfile), os.path.splitext(os.path.basename(Scalene.__args.outfile))[0] + ".json" ) # outfile = Scalene.__args.outfile # If there was no output file specified, print to the console. if not outfile: if sys.platform == "win32": outfile = "CON" else: outfile = "/dev/stdout" # Write the JSON to the output file (or console). with open(outfile, "w") as f: f.write( json.dumps(json_output, sort_keys=True, indent=4) + "\n" ) return json_output != {} else: output = Scalene.__output column_width = Scalene.__args.column_width if not Scalene.__args.html: # Get column width of the terminal and adjust to fit. with contextlib.suppress(Exception): # If we are in a Jupyter notebook, stick with 132 if "ipykernel" in sys.modules: column_width = 132 else: import shutil column_width = shutil.get_terminal_size().columns did_output: bool = output.output_profiles( column_width, Scalene.__stats, Scalene.__pid, Scalene.profile_this_code, Scalene.__python_alias_dir, Scalene.__program_path, program_args, profile_memory=Scalene.__args.memory, reduced_profile=Scalene.__args.reduced_profile, ) return did_output
Output the profile. Returns true iff there was any info reported in the profile.
output_profile
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def profile_this_code(fname: Filename, lineno: LineNumber) -> bool: # sourcery skip: inline-immediately-returned-variable """When using @profile, only profile files & lines that have been decorated.""" if not Scalene.__files_to_profile: return True if fname not in Scalene.__files_to_profile: return False # Now check to see if it's the right line range. line_info = Scalene.get_line_info(fname) found_function = any( line_start <= lineno < line_start + len(lines) for (lines, line_start) in line_info ) return found_function
When using @profile, only profile files & lines that have been decorated.
profile_this_code
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def process_cpu_sample( _signum: Union[ Callable[[signal.Signals, FrameType], None], int, signal.Handlers, None, ], new_frames: List[Tuple[FrameType, int, FrameType]], now_virtual: float, now_wallclock: float, now_sys: float, now_user: float, gpu_load: float, gpu_mem_used: float, prev_virtual: float, prev_wallclock: float, _prev_sys: float, prev_user: float, is_thread_sleeping: Dict[int, bool], ) -> None: """Handle interrupts for CPU profiling.""" # We have recorded how long it has been since we received a timer # before. See the logic below. # If it's time to print some profiling info, do so. if now_wallclock >= Scalene.__next_output_time: # Print out the profile. Set the next output time, stop # signals, print the profile, and then start signals # again. Scalene.__next_output_time += Scalene.__args.profile_interval stats = Scalene.__stats # pause (lock) all the queues to prevent updates while we output with contextlib.ExitStack() as stack: _ = [stack.enter_context(s.lock) for s in Scalene.__sigqueues] stats.stop_clock() Scalene.output_profile() stats.start_clock() if not new_frames: # No new frames, so nothing to update. return # Here we take advantage of an ostensible limitation of Python: # it only delivers signals after the interpreter has given up # control. This seems to mean that sampling is limited to code # running purely in the interpreter, and in fact, that was a limitation # of the first version of Scalene, meaning that native code was entirely ignored. # # (cf. https://docs.python.org/3.9/library/signal.html#execution-of-python-signal-handlers) # # However: lemons -> lemonade: this "problem" is in fact # an effective way to separate out time spent in # Python vs. time spent in native code "for free"! If we get # the signal immediately, we must be running in the # interpreter. On the other hand, if it was delayed, that means # we are running code OUTSIDE the interpreter, e.g., # native code (be it inside of Python or in a library). We # account for this time by tracking the elapsed (process) time # and compare it to the interval, and add any computed delay # (as if it were sampled) to the C counter. elapsed_virtual = now_virtual - prev_virtual elapsed_wallclock = now_wallclock - prev_wallclock # CPU utilization is the fraction of time spent on the CPU # over the total time. elapsed_user = now_user - prev_user if any([elapsed_virtual < 0, elapsed_wallclock < 0, elapsed_user < 0]): # If we get negative values, which appear to arise in some # multi-process settings (seen in gunicorn), skip this # sample. return cpu_utilization = 0.0 if elapsed_wallclock != 0: cpu_utilization = elapsed_user / elapsed_wallclock # On multicore systems running multi-threaded native code, CPU # utilization can exceed 1; that is, elapsed user time is # longer than elapsed wallclock time. If this occurs, set # wall clock time to user time and set CPU utilization to 100%. core_utilization = cpu_utilization / Scalene.__availableCPUs if cpu_utilization > 1.0: cpu_utilization = 1.0 elapsed_wallclock = elapsed_user # Deal with an odd case reported here: https://github.com/plasma-umass/scalene/issues/124 # (Note: probably obsolete now that Scalene is using the nvidia wrappers, but just in case...) # We don't want to report 'nan', so turn the load into 0. 
if math.isnan(gpu_load): gpu_load = 0.0 assert gpu_load >= 0.0 and gpu_load <= 1.0 gpu_time = gpu_load * elapsed_wallclock Scalene.__stats.total_gpu_samples += gpu_time python_time = Scalene.__args.cpu_sampling_rate c_time = elapsed_virtual - python_time c_time = max(c_time, 0) # Now update counters (weighted) for every frame we are tracking. total_time = python_time + c_time # First, find out how many frames are not sleeping. We need # to know this number so we can parcel out time appropriately # (equally to each running thread). total_frames = sum( not is_thread_sleeping[tident] for frame, tident, orig_frame in new_frames ) if total_frames == 0: total_frames = 1 normalized_time = total_time / total_frames # Now attribute execution time. main_thread_frame = new_frames[0][0] average_python_time = python_time / total_frames average_c_time = c_time / total_frames average_cpu_time = (python_time + c_time) / total_frames if Scalene.__args.stacks: add_stack( main_thread_frame, Scalene.should_trace, Scalene.__stats.stacks, average_python_time, average_c_time, average_cpu_time, ) # First, handle the main thread. Scalene.enter_function_meta(main_thread_frame, Scalene.__stats) fname = Filename(main_thread_frame.f_code.co_filename) lineno = LineNumber(main_thread_frame.f_lineno) # print(main_thread_frame) # print(fname, lineno) main_tid = cast(int, threading.main_thread().ident) if not is_thread_sleeping[main_tid]: Scalene.__stats.cpu_samples_python[fname][ lineno ] += average_python_time Scalene.__stats.cpu_samples_c[fname][lineno] += average_c_time Scalene.__stats.cpu_samples[fname] += average_cpu_time Scalene.__stats.cpu_utilization[fname][lineno].push( cpu_utilization ) Scalene.__stats.core_utilization[fname][lineno].push( core_utilization ) Scalene.__stats.gpu_samples[fname][lineno] += ( gpu_load * elapsed_wallclock ) Scalene.__stats.n_gpu_samples[fname][lineno] += elapsed_wallclock Scalene.__stats.gpu_mem_samples[fname][lineno].push(gpu_mem_used) # Now handle the rest of the threads. for frame, tident, orig_frame in new_frames: if frame == main_thread_frame: continue add_stack( frame, Scalene.should_trace, Scalene.__stats.stacks, average_python_time, average_c_time, average_cpu_time, ) # In a thread. fname = Filename(frame.f_code.co_filename) lineno = LineNumber(frame.f_lineno) Scalene.enter_function_meta(frame, Scalene.__stats) # We can't play the same game here of attributing # time, because we are in a thread, and threads don't # get signals in Python. Instead, we check if the # bytecode instruction being executed is a function # call. If so, we attribute all the time to native. # NOTE: for now, we don't try to attribute GPU time to threads. if is_thread_sleeping[tident]: # Ignore sleeping threads. continue # Check if the original caller is stuck inside a call. if ScaleneFuncUtils.is_call_function( orig_frame.f_code, ByteCodeIndex(orig_frame.f_lasti), ): # It is. Attribute time to native. Scalene.__stats.cpu_samples_c[fname][lineno] += normalized_time else: # Not in a call function so we attribute the time to Python. Scalene.__stats.cpu_samples_python[fname][ lineno ] += normalized_time Scalene.__stats.cpu_samples[fname] += normalized_time Scalene.__stats.cpu_utilization[fname][lineno].push( cpu_utilization ) Scalene.__stats.core_utilization[fname][lineno].push( core_utilization ) # Clean up all the frames del new_frames[:] del new_frames del is_thread_sleeping Scalene.__stats.total_cpu_samples += total_time
Handle interrupts for CPU profiling.
process_cpu_sample
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
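The attribution arithmetic described in the long comment reduces to a few lines. A hedged sketch of the accounting (not Scalene's exact code): a signal that arrives on schedule implies the interval was spent in the interpreter, and any extra elapsed virtual time is credited to native code.

def split_cpu_time(elapsed_virtual: float, sampling_rate: float):
    # On-schedule delivery => the interval was spent in the interpreter.
    python_time = sampling_rate
    # Any delay beyond the interval => time spent in native code.
    c_time = max(elapsed_virtual - python_time, 0.0)
    return python_time, c_time

print(split_cpu_time(elapsed_virtual=0.03, sampling_rate=0.01))  # (0.01, 0.02)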
def compute_frames_to_record() -> List[Tuple[FrameType, int, FrameType]]: """Collect all stack frames that Scalene actually processes.""" frames: List[Tuple[FrameType, int]] = [ ( cast( FrameType, sys._current_frames().get(cast(int, t.ident), None), ), cast(int, t.ident), ) for t in threading.enumerate() if t != threading.main_thread() ] # Put the main thread in the front. tid = cast(int, threading.main_thread().ident) frames.insert( 0, ( sys._current_frames().get(tid, cast(FrameType, None)), tid, ), ) # Process all the frames to remove ones we aren't going to track. new_frames: List[Tuple[FrameType, int, FrameType]] = [] for frame, tident in frames: orig_frame = frame if not frame: continue fname = frame.f_code.co_filename func = frame.f_code.co_name # Record samples only for files we care about. if not fname: # 'eval/compile' gives no f_code.co_filename. We have # to look back into the outer frame in order to check # the co_filename. back = cast(FrameType, frame.f_back) fname = Filename(back.f_code.co_filename) func = back.f_code.co_name while not Scalene.should_trace(fname, func): # Walk the stack backwards until we hit a frame that # IS one we should trace (if there is one). i.e., if # it's in the code being profiled, and it is just # calling stuff deep in libraries. if frame: frame = cast(FrameType, frame.f_back) else: break if frame: fname = frame.f_code.co_filename func = frame.f_code.co_name if frame: new_frames.append((frame, tident, orig_frame)) del frames[:] return new_frames
Collect all stack frames that Scalene actually processes.
compute_frames_to_record
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
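The primitive underneath is sys._current_frames(), which maps thread ids to their topmost frames. A minimal sketch of taking the same kind of snapshot:

import sys
import threading

def snapshot_frames() -> None:
    frames = sys._current_frames()  # thread id -> topmost frame
    for t in threading.enumerate():
        frame = frames.get(t.ident) if t.ident is not None else None
        if frame is not None:
            print(t.name, frame.f_code.co_filename, frame.f_lineno)

snapshot_frames()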
def enter_function_meta(
    frame: FrameType, stats: ScaleneStatistics
) -> None:
    """Update tracking info so we can correctly report line number info later."""
    fname = Filename(frame.f_code.co_filename)
    lineno = LineNumber(frame.f_lineno)
    f = frame
    try:
        while "<" in Filename(f.f_code.co_name):
            f = cast(FrameType, f.f_back)
            # Handle the case where the function whose name is wrapped
            # in angle brackets is at the bottom of the stack
            if f is None:
                return
    except Exception:
        return
    if not Scalene.should_trace(f.f_code.co_filename, f.f_code.co_name):
        return

    fn_name = get_fully_qualified_name(f)
    firstline = f.f_code.co_firstlineno

    stats.function_map[fname][lineno] = fn_name
    stats.firstline_map[fn_name] = LineNumber(firstline)
Update tracking info so we can correctly report line number info later.
enter_function_meta
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def alloc_sigqueue_processor(x: Optional[List[int]]) -> None: """Handle interrupts for memory profiling (mallocs and frees).""" stats = Scalene.__stats curr_pid = os.getpid() # Process the input array from where we left off reading last time. arr: List[ Tuple[ int, str, float, float, str, Filename, LineNumber, ByteCodeIndex, ] ] = [] with contextlib.suppress(FileNotFoundError): while Scalene.__malloc_mapfile.read(): count_str = Scalene.__malloc_mapfile.get_str() if count_str.strip() == "": break ( action, alloc_time_str, count_str, python_fraction_str, pid, pointer, reported_fname, reported_lineno, bytei_str, ) = count_str.split(",") if int(curr_pid) != int(pid): continue arr.append( ( int(alloc_time_str), action, float(count_str), float(python_fraction_str), pointer, Filename(reported_fname), LineNumber(int(reported_lineno)), ByteCodeIndex(int(bytei_str)), ) ) stats.alloc_samples += len(arr) # Iterate through the array to compute the new current footprint # and update the global __memory_footprint_samples. Since on some systems, # we get free events before mallocs, force `before` to always be at least 0. before = max(stats.current_footprint, 0) prevmax = stats.max_footprint freed_last_trigger = 0 for item in arr: ( _alloc_time, action, count, python_fraction, pointer, fname, lineno, bytei, ) = item is_malloc = action == Scalene.MALLOC_ACTION if count == scalene.scalene_config.NEWLINE_TRIGGER_LENGTH + 1: continue # in previous implementations, we were adding NEWLINE to the footprint. # We should not account for this in the user-facing profile. count /= Scalene.BYTES_PER_MB if is_malloc: stats.current_footprint += count if stats.current_footprint > stats.max_footprint: stats.max_footprint = stats.current_footprint stats.max_footprint_python_fraction = python_fraction stats.max_footprint_loc = (fname, lineno) else: assert action in [ Scalene.FREE_ACTION, Scalene.FREE_ACTION_SAMPLED, ] stats.current_footprint -= count # Force current footprint to be non-negative; this # code is needed because Scalene can miss some initial # allocations at startup. stats.current_footprint = max(0, stats.current_footprint) if ( action == Scalene.FREE_ACTION_SAMPLED and stats.last_malloc_triggered[2] == pointer ): freed_last_trigger += 1 timestamp = time.monotonic_ns() - Scalene.__start_time stats.memory_footprint_samples.append( [ timestamp, stats.current_footprint, ] ) after = stats.current_footprint if freed_last_trigger: if freed_last_trigger <= 1: # We freed the last allocation trigger. Adjust scores. this_fn, this_ln, _this_ptr = stats.last_malloc_triggered if this_ln != 0: mallocs, frees = stats.leak_score[this_fn][this_ln] stats.leak_score[this_fn][this_ln] = ( mallocs, frees + 1, ) stats.last_malloc_triggered = ( Filename(""), LineNumber(0), Address("0x0"), ) allocs = 0.0 last_malloc = (Filename(""), LineNumber(0), Address("0x0")) malloc_pointer = "0x0" curr = before # Go through the array again and add each updated current footprint. 
for item in arr: ( _alloc_time, action, count, python_fraction, pointer, fname, lineno, bytei, ) = item is_malloc = action == Scalene.MALLOC_ACTION if ( is_malloc and count == scalene.scalene_config.NEWLINE_TRIGGER_LENGTH + 1 ): with Scalene.__invalidate_mutex: last_file, last_line = Scalene.__invalidate_queue.pop(0) stats.memory_malloc_count[last_file][last_line] += 1 stats.memory_aggregate_footprint[last_file][ last_line ] += stats.memory_current_highwater_mark[last_file][last_line] stats.memory_current_footprint[last_file][last_line] = 0 stats.memory_current_highwater_mark[last_file][last_line] = 0 continue # Add the byte index to the set for this line (if it's not there already). stats.bytei_map[fname][lineno].add(bytei) count /= Scalene.BYTES_PER_MB if is_malloc: allocs += count curr += count assert curr <= stats.max_footprint malloc_pointer = pointer stats.memory_malloc_samples[fname][lineno] += count stats.memory_python_samples[fname][lineno] += ( python_fraction * count ) stats.malloc_samples[fname] += 1 stats.total_memory_malloc_samples += count # Update current and max footprints for this file & line. stats.memory_current_footprint[fname][lineno] += count stats.memory_current_highwater_mark[fname][lineno] = max( stats.memory_current_highwater_mark[fname][lineno], stats.memory_current_footprint[fname][lineno], ) assert stats.current_footprint <= stats.max_footprint stats.memory_max_footprint[fname][lineno] = max( stats.memory_current_footprint[fname][lineno], stats.memory_max_footprint[fname][lineno], ) # Ensure that the max footprint never goes above the true max footprint. # This is a work-around for a condition that in theory should never happen, but... stats.memory_max_footprint[fname][lineno] = min( stats.max_footprint, stats.memory_max_footprint[fname][lineno], ) assert stats.current_footprint <= stats.max_footprint assert ( stats.memory_max_footprint[fname][lineno] <= stats.max_footprint ) else: assert action in [ Scalene.FREE_ACTION, Scalene.FREE_ACTION_SAMPLED, ] curr -= count stats.memory_free_samples[fname][lineno] += count stats.memory_free_count[fname][lineno] += 1 stats.total_memory_free_samples += count stats.memory_current_footprint[fname][lineno] -= count # Ensure that we never drop the current footprint below 0. stats.memory_current_footprint[fname][lineno] = max( 0, stats.memory_current_footprint[fname][lineno] ) stats.per_line_footprint_samples[fname][lineno].append( [time.monotonic_ns() - Scalene.__start_time, max(0, curr)] ) # If we allocated anything, then mark this as the last triggering malloc if allocs > 0: last_malloc = ( Filename(fname), LineNumber(lineno), Address(malloc_pointer), ) stats.allocation_velocity = ( stats.allocation_velocity[0] + (after - before), stats.allocation_velocity[1] + allocs, ) if ( Scalene.__args.memory_leak_detector and prevmax < stats.max_footprint and stats.max_footprint > 100 ): stats.last_malloc_triggered = last_malloc fname, lineno, _ = last_malloc mallocs, frees = stats.leak_score[fname][lineno] stats.leak_score[fname][lineno] = (mallocs + 1, frees)
Handle interrupts for memory profiling (mallocs and frees).
alloc_sigqueue_processor
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0
def before_fork() -> None: """The parent process should invoke this function just before a fork. Invoked by replacement_fork.py. """ Scalene.stop_signal_queues()
The parent process should invoke this function just before a fork. Invoked by replacement_fork.py.
before_fork
python
plasma-umass/scalene
scalene/scalene_profiler.py
https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py
Apache-2.0