Dataset columns:
content: string, 85 to 101k characters
title: string, 0 to 150 characters
question: string, 15 to 48k characters
answers: sequence
answers_scores: sequence
non_answers: sequence
non_answers_scores: sequence
tags: sequence
name: string, 35 to 137 characters
Q: How to set a foreignkey field in views? I'm trying to save the customer field on the Test model, I'm not getting any errors but it's not saving the field either, how do I fix it? Models class Test(models.Model): customer = models.ForeignKey(Customer, on_delete=models.SET_NULL, blank=True, null=True) email = models.EmailField(max_length=200, blank=False) Forms class TestForm(forms.Form): email = forms.EmailField(required=True) class Meta: model = Test fields = ("email") def save(self, commit=False): # Creating the customer object Test.objects.create(email=self.cleaned_data['email']) Views def test_view(request): customer = request.user.customer if form.is_valid(): email = form.cleaned_data['email'] customer = customer form.save() A: You can use cleaned_data to save the ModelForm. forms.py class TestForm(forms.ModelForm): class Meta: model = Test fields = ["email"] Assuming, you have request method POST. views.py def test_view(request): if request.method=="POST": form=TestForm(request.POST) customer = request.user.customer if form.is_valid(): email = form.cleaned_data['email'] test=Test(customer=customer,email=email) test.save() A: You need to use a ModelForm, then save the object without commiting, edit the customer of the object, then commit. class TestForm(forms.ModelForm): class Meta: model = Test fields = ["email", ] def test_view(request): customer = request.user.customer #I'm not sure this line is right, but I can't see all your models if form.is_valid(): test = form.save(commit=False) test.customer = customer test.save()
How to set a foreignkey field in views?
I'm trying to save the customer field on the Test model, I'm not getting any errors but it's not saving the field either, how do I fix it? Models class Test(models.Model): customer = models.ForeignKey(Customer, on_delete=models.SET_NULL, blank=True, null=True) email = models.EmailField(max_length=200, blank=False) Forms class TestForm(forms.Form): email = forms.EmailField(required=True) class Meta: model = Test fields = ("email") def save(self, commit=False): # Creating the customer object Test.objects.create(email=self.cleaned_data['email']) Views def test_view(request): customer = request.user.customer if form.is_valid(): email = form.cleaned_data['email'] customer = customer form.save()
[ "You can use cleaned_data to save the ModelForm.\nforms.py\nclass TestForm(forms.ModelForm):\n\n class Meta:\n model = Test\n fields = [\"email\"]\n\nAssuming, you have request method POST.\nviews.py\ndef test_view(request):\n if request.method==\"POST\":\n form=TestForm(request.POST)\n \n customer = request.user.customer\n\n if form.is_valid():\n email = form.cleaned_data['email']\n test=Test(customer=customer,email=email)\n test.save()\n\n", "You need to use a ModelForm, then save the object without commiting, edit the customer of the object, then commit.\nclass TestForm(forms.ModelForm):\n\n class Meta:\n model = Test\n fields = [\"email\", ]\n\n\ndef test_view(request):\n customer = request.user.customer #I'm not sure this line is right, but I can't see all your models\n\n if form.is_valid():\n test = form.save(commit=False)\n test.customer = customer\n test.save()\n\n" ]
[ 2, 0 ]
[]
[]
[ "django", "django_forms", "django_models", "django_views", "python" ]
stackoverflow_0074598722_django_django_forms_django_models_django_views_python.txt
Q: How to print logger from a xml file which have null value? i am trying to capture some fields from a xml file. Using "logger.info" i have successfully printing log of my code. the below is my code: #providing the path for client fr & counting the total files processed D_DIR = Path(directory[0]) client_id = directory[0].split(os.sep)[-2] files = sorted(D_DIR.glob("*.xml")) totalFiles[client_id] = len(files) total_files += len(files) logger.info ("Processing for client '{}'".format(clients[0])) logger.info(f"Processing {len(files)} number of files") #the code for capturing the neccessary data from the xml files for i in directory: D_DIR = Path(i) files = sorted(D_DIR.glob("*.xml")) for file in files: tree = ET.parse(file) root = tree.getroot() RowCount = 0 for obj in root.findall("object"): for i in obj.findall("record"): Data_Capture_Date = i.find("invoice_capture_date").text Case_Id = i.find("case_id").text Organization = i.find("organization").text Supplier_number = i.find("supplier_number").text Invoice_Number = i.find("invoice_number").text Document_Type = i.find("document_type").text Invoice_Source = i.find("invoice_source").text Recieved_Date = time.ctime(os.path.getctime(file)) for rows in root.iter("rows"): RowCount =+ len(rows) logger.info("Data_Capture_Date:"+Data_Capture_Date + " : " +"Case_Id:" + Case_Id + " : " +"Organization:" +Organization + " : " +"Supplier_number:" + (Supplier_number)+ " : " +"Invoice_Number:" +(Invoice_Number)+ " : " +"Document_Type:" +Document_Type+ " : " +"Invoice_Source:" +Invoice_Source+ " : " +"Recieved_Date:" + Recieved_Date + " : "+"RowCount:" + str(RowCount)) #insert record into activity table if not functions.writeActivityFile('captuissue', 'success', logger.handlers[0].baseFilename, cursor): logger.info("Write to Activity Table Failed.") #closing the connection dbconn.commit() cursor.close() dbconn.close() logger.info("Information capture issue process completed") logger.info("Total files processed : {}".format(total_files)) logger.info("Information Capture : END") print("Check logfile '{}' for details".format(logger.handlers[0].baseFilename)) here i haven't added the logger initiation process as it is not needed. The main process are the above code. When my files have a null value in any field it shows error. TypeError: can only concatenate str (not "NoneType") to str how can i take none type value also also in my logger? A: I found out the method to do it. we need to write the logger in ".format" way below the capturing process code. logger.info("data_capture_date:{} | case_id:{} | organization:{} | supplier_number:{} | invoice_Number:{} | document_Type:{} | invoice_Source:{} | rowcount:{}".format(data_capture_date, case_id,organization,supplier_number,invoice_Number,document_Type,invoice_Source,rowcount)) this method shows nonetype in the logger
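The underlying issue is that ElementTree's .find(tag).text is None when a tag has no text, and None cannot be concatenated to a str. A small hedged sketch, not the poster's code, of a helper that tolerates missing or empty tags and pairs naturally with %s-style logging calls:

import logging
import xml.etree.ElementTree as ET

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def field(record, tag, default=""):
    # Return the tag's text, or the default when the tag is missing or has no text.
    node = record.find(tag)
    return node.text if node is not None and node.text is not None else default

# Hypothetical record where <organization> is present but empty.
record = ET.fromstring("<record><case_id>123</case_id><organization/></record>")

logger.info("case_id:%s | organization:%s",
            field(record, "case_id"), field(record, "organization"))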
How to print logger from a xml file which have null value?
i am trying to capture some fields from a xml file. Using "logger.info" i have successfully printing log of my code. the below is my code: #providing the path for client fr & counting the total files processed D_DIR = Path(directory[0]) client_id = directory[0].split(os.sep)[-2] files = sorted(D_DIR.glob("*.xml")) totalFiles[client_id] = len(files) total_files += len(files) logger.info ("Processing for client '{}'".format(clients[0])) logger.info(f"Processing {len(files)} number of files") #the code for capturing the neccessary data from the xml files for i in directory: D_DIR = Path(i) files = sorted(D_DIR.glob("*.xml")) for file in files: tree = ET.parse(file) root = tree.getroot() RowCount = 0 for obj in root.findall("object"): for i in obj.findall("record"): Data_Capture_Date = i.find("invoice_capture_date").text Case_Id = i.find("case_id").text Organization = i.find("organization").text Supplier_number = i.find("supplier_number").text Invoice_Number = i.find("invoice_number").text Document_Type = i.find("document_type").text Invoice_Source = i.find("invoice_source").text Recieved_Date = time.ctime(os.path.getctime(file)) for rows in root.iter("rows"): RowCount =+ len(rows) logger.info("Data_Capture_Date:"+Data_Capture_Date + " : " +"Case_Id:" + Case_Id + " : " +"Organization:" +Organization + " : " +"Supplier_number:" + (Supplier_number)+ " : " +"Invoice_Number:" +(Invoice_Number)+ " : " +"Document_Type:" +Document_Type+ " : " +"Invoice_Source:" +Invoice_Source+ " : " +"Recieved_Date:" + Recieved_Date + " : "+"RowCount:" + str(RowCount)) #insert record into activity table if not functions.writeActivityFile('captuissue', 'success', logger.handlers[0].baseFilename, cursor): logger.info("Write to Activity Table Failed.") #closing the connection dbconn.commit() cursor.close() dbconn.close() logger.info("Information capture issue process completed") logger.info("Total files processed : {}".format(total_files)) logger.info("Information Capture : END") print("Check logfile '{}' for details".format(logger.handlers[0].baseFilename)) here i haven't added the logger initiation process as it is not needed. The main process are the above code. When my files have a null value in any field it shows error. TypeError: can only concatenate str (not "NoneType") to str how can i take none type value also also in my logger?
[ "I found out the method to do it. we need to write the logger in \".format\" way below the capturing process code.\n logger.info(\"data_capture_date:{} | case_id:{} | organization:{} | supplier_number:{} | invoice_Number:{} | document_Type:{} | invoice_Source:{} | rowcount:{}\".format(data_capture_date, case_id,organization,supplier_number,invoice_Number,document_Type,invoice_Source,rowcount))\n\nthis method shows nonetype in the logger\n" ]
[ 1 ]
[]
[]
[ "python", "xml" ]
stackoverflow_0074596672_python_xml.txt
Q: How do I write the time from datetime to a file in Python? I'm trying to have my Python code write everything it does to a log, with a timestamp. But it doesn't seem to work. this is my current code: filePath= Path('.') time=datetime.datetime.now() bot_log = ["","Set up the file path thingy"] with open ('bot.log', 'a') as f: f.write('\n'.join(bot_log)% datetime.datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)")) print(bot_log[0]) but when I run it it says: Traceback (most recent call last): File "c:\Users\Name\Yuna-Discord-Bot\Yuna Discord Bot.py", line 15, in <module> f.write('\n'.join(bot_log)% TypeError: not all arguments converted during string formatting I have tried multiple things to fix it, and this is the latest one. is there something I'm doing wrong or missing? I also want the time to be in front of the log message, but I don't think it would do that (if it worked). A: You need to put "%s" somewhere in the input string before string formatting. Here's more detailed explanation. Try this: filePath= Path('.') time=datetime.datetime.now() bot_log = "%s Set up the file path thingy\n" with open ('bot.log', 'a') as f: f.write(bot_log % datetime.datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)")) print(bot_log) A: It looks like you want to write three strings to your file as separate lines. I've rearranged your code to create a single list to pass to writelines, which expects an iterable: filePath= Path('.') time=datetime.datetime.now() bot_log = ["","Set up the file path thingy"] with open ('bot.log', 'a') as f: bot_log.append(datetime.datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)")) f.writelines('\n'.join(bot_log)) print(bot_log[0]) EDIT: From the comments the desire is to prepend the timestamp to the message and keep it on the same line. I've used f-strings as I prefer the clarity they provide: import datetime from pathlib import Path filePath = Path('.') with open('bot.log', 'a') as f: time = datetime.datetime.now() msg = "Set up the file path thingy" f.write(f"""{time.strftime("%d-%b-%Y (%H:%M:%S.%f)")} {msg}\n""") You could also look at the logging module which does a lot of this for you.
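As the second answer notes at the end, the logging module can add the timestamp for you. A minimal sketch, assuming the same bot.log file and message; the date format here leaves out the sub-second part, since logging's datefmt does not expand %f:

import logging

# One-time setup: every logging call below is written to bot.log with a timestamp prefix.
logging.basicConfig(
    filename="bot.log",
    level=logging.INFO,
    format="%(asctime)s %(message)s",
    datefmt="%d-%b-%Y (%H:%M:%S)",
)

logging.info("Set up the file path thingy")
# bot.log then contains a line such as: 27-Nov-2022 (10:15:42) Set up the file path thingy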
How do I write the time from datetime to a file in Python?
I'm trying to have my Python code write everything it does to a log, with a timestamp. But it doesn't seem to work. this is my current code: filePath= Path('.') time=datetime.datetime.now() bot_log = ["","Set up the file path thingy"] with open ('bot.log', 'a') as f: f.write('\n'.join(bot_log)% datetime.datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)")) print(bot_log[0]) but when I run it it says: Traceback (most recent call last): File "c:\Users\Name\Yuna-Discord-Bot\Yuna Discord Bot.py", line 15, in <module> f.write('\n'.join(bot_log)% TypeError: not all arguments converted during string formatting I have tried multiple things to fix it, and this is the latest one. is there something I'm doing wrong or missing? I also want the time to be in front of the log message, but I don't think it would do that (if it worked).
[ "You need to put \"%s\" somewhere in the input string before string formatting. Here's more detailed explanation.\nTry this:\nfilePath= Path('.')\ntime=datetime.datetime.now()\nbot_log = \"%s Set up the file path thingy\\n\"\nwith open ('bot.log', 'a') as f:\n f.write(bot_log % datetime.datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S.%f)\"))\n print(bot_log)\n\n", "It looks like you want to write three strings to your file as separate lines. I've rearranged your code to create a single list to pass to writelines, which expects an iterable:\nfilePath= Path('.')\ntime=datetime.datetime.now()\nbot_log = [\"\",\"Set up the file path thingy\"]\nwith open ('bot.log', 'a') as f:\n bot_log.append(datetime.datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S.%f)\"))\n f.writelines('\\n'.join(bot_log))\n print(bot_log[0])\n\nEDIT: From the comments the desire is to prepend the timestamp to the message and keep it on the same line. I've used f-strings as I prefer the clarity they provide:\nimport datetime\nfrom pathlib import Path\n\nfilePath = Path('.')\n\nwith open('bot.log', 'a') as f:\n time = datetime.datetime.now()\n msg = \"Set up the file path thingy\"\n f.write(f\"\"\"{time.strftime(\"%d-%b-%Y (%H:%M:%S.%f)\")} {msg}\\n\"\"\")\n\nYou could also look at the logging module which does a lot of this for you.\n" ]
[ 2, 1 ]
[]
[]
[ "python", "python_datetime" ]
stackoverflow_0074599505_python_python_datetime.txt
Q: TensorFlow model subclassing API with vars doesn't show parameters or layers I wrote following code for VGG block, and I want to show the summary of the block: import tensorflow as tf from keras.layers import Conv2D, MaxPool2D, Input class VggBlock(tf.keras.Model): def __init__(self, filters, repetitions): super(VggBlock, self).__init__() self.repetitions = repetitions for i in range(repetitions): vars(self)[f'conv2D_{i}'] = Conv2D(filters=filters, kernel_size=(3, 3), padding='same', activation='relu') self.max_pool = MaxPool2D(pool_size=(2, 2)) def call(self, inputs): x = vars(self)['conv2D_0'](inputs) for i in range(1, self.repetitions): x = vars(self)[f'conv2D_{i}'](x) return self.max_pool(x) test_block = VggBlock(64, 2) temp_inputs = Input(shape=(224, 224, 3)) test_block(temp_inputs) test_block.summary() Then, this code gives following output: Model: "vgg_block" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= max_pooling2d (MaxPooling2D multiple 0 ) ================================================================= Total params: 0 Trainable params: 0 Non-trainable params: 0 _________________________________________________________________ I tried to check explicitly the layers: for layer in test_block.layers: print(layer) This output shows only one layer: <keras.layers.pooling.max_pooling2d.MaxPooling2D object at 0x7f6c18377f50> However, the conv layers exist well in the dictionary form: print(vars(test_block)) {'_self_setattr_tracking': True, '_is_model_for_instrumentation': True, '_instrumented_keras_api': True, '_instrumented_keras_layer_class': False, '_instrumented_keras_model_class': True, '_trainable': True, '_stateful': False, 'built': True, '_input_spec': None, '_build_input_shape': None, '_saved_model_inputs_spec': TensorSpec(shape=(None, 224, 224, 3), dtype=tf.float32, name='input_10'), '_saved_model_arg_spec': ([TensorSpec(shape=(None, 224, 224, 3), dtype=tf.float32, name='input_10')], {}), '_supports_masking': False, '_name': 'vgg_block_46', '_activity_regularizer': None, '_trainable_weights': [], '_non_trainable_weights': [], '_updates': [], '_thread_local': <_thread._local object at 0x7fb9084d9ef0>, '_callable_losses': [], '_losses': [], '_metrics': [], '_metrics_lock': <unlocked _thread.lock object at 0x7fb90d88abd0>, '_dtype_policy': <Policy "float32">, '_compute_dtype_object': tf.float32, '_autocast': True, '_self_tracked_trackables': [<keras.layers.pooling.max_pooling2d.MaxPooling2D object at 0x7fb9084e2510>], '_inbound_nodes_value': [<keras.engine.node.Node object at 0x7fb9087146d0>], '_outbound_nodes_value': [], '_expects_training_arg': False, '_default_training_arg': None, '_expects_mask_arg': False, '_dynamic': False, '_initial_weights': None, '_auto_track_sub_layers': True, '_preserve_input_structure_in_config': False, '_name_scope_on_declaration': '', '_captured_weight_regularizer': [], '_is_graph_network': False, 'inputs': None, 'outputs': None, 'input_names': None, 'output_names': None, 'stop_training': False, 'history': None, 'compiled_loss': None, 'compiled_metrics': None, '_compute_output_and_mask_jointly': False, '_is_compiled': False, 'optimizer': None, '_distribution_strategy': None, '_cluster_coordinator': None, '_run_eagerly': None, 'train_function': None, 'test_function': None, 'predict_function': None, 'train_tf_function': None, '_compiled_trainable_state': <WeakKeyDictionary at 0x7fb9084b0790>, '_training_state': None, 
'_self_unconditional_checkpoint_dependencies': [TrackableReference(name=max_pool, ref=<keras.layers.pooling.max_pooling2d.MaxPooling2D object at 0x7fb9084e2510>)], '_self_unconditional_dependency_names': {'max_pool': <keras.layers.pooling.max_pooling2d.MaxPooling2D object at 0x7fb9084e2510>}, '_self_unconditional_deferred_dependencies': {}, '_self_update_uid': -1, '_self_name_based_restores': set(), '_self_saveable_object_factories': {}, '_checkpoint': <tensorflow.python.training.tracking.util.Checkpoint object at 0x7fb9084b0910>, '_steps_per_execution': None, '_train_counter': <tf.Variable 'Variable:0' shape=() dtype=int64, numpy=0>, '_test_counter': <tf.Variable 'Variable:0' shape=() dtype=int64, numpy=0>, '_predict_counter': <tf.Variable 'Variable:0' shape=() dtype=int64, numpy=0>, '_base_model_initialized': True, '_jit_compile': None, '_layout_map': None, '_obj_reference_counts_dict': ObjectIdentityDictionary({<_ObjectIdentityWrapper wrapping 3>: 1, <_ObjectIdentityWrapper wrapping <keras.layers.pooling.max_pooling2d.MaxPooling2D object at 0x7fb9084e2510>>: 1}), 'repetitions': 3, 'conv2D_0': <keras.layers.convolutional.conv2d.Conv2D object at 0x7fb90852e390>, 'conv2D_1': <keras.layers.convolutional.conv2d.Conv2D object at 0x7fb90852ed90>, 'conv2D_2': <keras.layers.convolutional.conv2d.Conv2D object at 0x7fb9084dac90>, 'max_pool': <keras.layers.pooling.max_pooling2d.MaxPooling2D object at 0x7fb9084e2510>} Does vars() make the layer some weird? How can I show the layers or parameters correctly? A: Presumably, setting class attributes like this circumvents the usual housekeeping done by a Keras Layer (such as registering variables, sub-layers etc.), so you should avoid doing this. Rather do something like this: class VggBlock(tf.keras.Model): def __init__(self, filters, repetitions): super(VggBlock, self).__init__() self.repetitions = repetitions self.conv_layers = [Conv2D(filters=filters, kernel_size=(3, 3), padding='same', activation='relu') for _ in range(repetitions)] self.max_pool = MaxPool2D(pool_size=(2, 2)) def call(self, inputs): x = inputs for layer in self.conv_layers: x = layer(x) return self.max_pool(x) test_block = VggBlock(64, 2) temp_inputs = Input(shape=(224, 224, 3)) test_block(temp_inputs) test_block.summary() Here, we use a list to store the layers, and can still do it using a loop. 
This prints Model: "vgg_block_2" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_4 (Conv2D) multiple 1792 conv2d_5 (Conv2D) multiple 36928 max_pooling2d_2 (MaxPooling multiple 0 2D) ================================================================= Total params: 38,720 Trainable params: 38,720 Non-trainable params: 0 _________________________________________________________________ If you don't need to have the layers given explicitly in the summary, you can use Sequential to simplify the call method: class VggBlock(tf.keras.Model): def __init__(self, filters, repetitions): super(VggBlock, self).__init__() self.repetitions = repetitions self.conv_layers = tf.keras.Sequential([Conv2D(filters=filters, kernel_size=(3, 3), padding='same', activation='relu') for _ in range(repetitions)]) self.max_pool = MaxPool2D(pool_size=(2, 2)) def call(self, inputs): x = self.conv_layers(inputs) return self.max_pool(x) test_block = VggBlock(64, 2) temp_inputs = Input(shape=(224, 224, 3)) test_block(temp_inputs) test_block.summary() which is functionally identical, but displays this summary which you might not want: Model: "vgg_block_3" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= sequential (Sequential) (None, 224, 224, 64) 38720 max_pooling2d_3 (MaxPooling multiple 0 2D) ================================================================= Total params: 38,720 Trainable params: 38,720 Non-trainable params: 0 _________________________________________________________________
TensorFlow model subclassing API with vars doesn't show parameters or layers
I wrote following code for VGG block, and I want to show the summary of the block: import tensorflow as tf from keras.layers import Conv2D, MaxPool2D, Input class VggBlock(tf.keras.Model): def __init__(self, filters, repetitions): super(VggBlock, self).__init__() self.repetitions = repetitions for i in range(repetitions): vars(self)[f'conv2D_{i}'] = Conv2D(filters=filters, kernel_size=(3, 3), padding='same', activation='relu') self.max_pool = MaxPool2D(pool_size=(2, 2)) def call(self, inputs): x = vars(self)['conv2D_0'](inputs) for i in range(1, self.repetitions): x = vars(self)[f'conv2D_{i}'](x) return self.max_pool(x) test_block = VggBlock(64, 2) temp_inputs = Input(shape=(224, 224, 3)) test_block(temp_inputs) test_block.summary() Then, this code gives following output: Model: "vgg_block" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= max_pooling2d (MaxPooling2D multiple 0 ) ================================================================= Total params: 0 Trainable params: 0 Non-trainable params: 0 _________________________________________________________________ I tried to check explicitly the layers: for layer in test_block.layers: print(layer) This output shows only one layer: <keras.layers.pooling.max_pooling2d.MaxPooling2D object at 0x7f6c18377f50> However, the conv layers exist well in the dictionary form: print(vars(test_block)) {'_self_setattr_tracking': True, '_is_model_for_instrumentation': True, '_instrumented_keras_api': True, '_instrumented_keras_layer_class': False, '_instrumented_keras_model_class': True, '_trainable': True, '_stateful': False, 'built': True, '_input_spec': None, '_build_input_shape': None, '_saved_model_inputs_spec': TensorSpec(shape=(None, 224, 224, 3), dtype=tf.float32, name='input_10'), '_saved_model_arg_spec': ([TensorSpec(shape=(None, 224, 224, 3), dtype=tf.float32, name='input_10')], {}), '_supports_masking': False, '_name': 'vgg_block_46', '_activity_regularizer': None, '_trainable_weights': [], '_non_trainable_weights': [], '_updates': [], '_thread_local': <_thread._local object at 0x7fb9084d9ef0>, '_callable_losses': [], '_losses': [], '_metrics': [], '_metrics_lock': <unlocked _thread.lock object at 0x7fb90d88abd0>, '_dtype_policy': <Policy "float32">, '_compute_dtype_object': tf.float32, '_autocast': True, '_self_tracked_trackables': [<keras.layers.pooling.max_pooling2d.MaxPooling2D object at 0x7fb9084e2510>], '_inbound_nodes_value': [<keras.engine.node.Node object at 0x7fb9087146d0>], '_outbound_nodes_value': [], '_expects_training_arg': False, '_default_training_arg': None, '_expects_mask_arg': False, '_dynamic': False, '_initial_weights': None, '_auto_track_sub_layers': True, '_preserve_input_structure_in_config': False, '_name_scope_on_declaration': '', '_captured_weight_regularizer': [], '_is_graph_network': False, 'inputs': None, 'outputs': None, 'input_names': None, 'output_names': None, 'stop_training': False, 'history': None, 'compiled_loss': None, 'compiled_metrics': None, '_compute_output_and_mask_jointly': False, '_is_compiled': False, 'optimizer': None, '_distribution_strategy': None, '_cluster_coordinator': None, '_run_eagerly': None, 'train_function': None, 'test_function': None, 'predict_function': None, 'train_tf_function': None, '_compiled_trainable_state': <WeakKeyDictionary at 0x7fb9084b0790>, '_training_state': None, '_self_unconditional_checkpoint_dependencies': [TrackableReference(name=max_pool, 
ref=<keras.layers.pooling.max_pooling2d.MaxPooling2D object at 0x7fb9084e2510>)], '_self_unconditional_dependency_names': {'max_pool': <keras.layers.pooling.max_pooling2d.MaxPooling2D object at 0x7fb9084e2510>}, '_self_unconditional_deferred_dependencies': {}, '_self_update_uid': -1, '_self_name_based_restores': set(), '_self_saveable_object_factories': {}, '_checkpoint': <tensorflow.python.training.tracking.util.Checkpoint object at 0x7fb9084b0910>, '_steps_per_execution': None, '_train_counter': <tf.Variable 'Variable:0' shape=() dtype=int64, numpy=0>, '_test_counter': <tf.Variable 'Variable:0' shape=() dtype=int64, numpy=0>, '_predict_counter': <tf.Variable 'Variable:0' shape=() dtype=int64, numpy=0>, '_base_model_initialized': True, '_jit_compile': None, '_layout_map': None, '_obj_reference_counts_dict': ObjectIdentityDictionary({<_ObjectIdentityWrapper wrapping 3>: 1, <_ObjectIdentityWrapper wrapping <keras.layers.pooling.max_pooling2d.MaxPooling2D object at 0x7fb9084e2510>>: 1}), 'repetitions': 3, 'conv2D_0': <keras.layers.convolutional.conv2d.Conv2D object at 0x7fb90852e390>, 'conv2D_1': <keras.layers.convolutional.conv2d.Conv2D object at 0x7fb90852ed90>, 'conv2D_2': <keras.layers.convolutional.conv2d.Conv2D object at 0x7fb9084dac90>, 'max_pool': <keras.layers.pooling.max_pooling2d.MaxPooling2D object at 0x7fb9084e2510>} Does vars() make the layer some weird? How can I show the layers or parameters correctly?
[ "Presumably, setting class attributes like this circumvents the usual housekeeping done by a Keras Layer (such as registering variables, sub-layers etc.), so you should avoid doing this. Rather do something like this:\nclass VggBlock(tf.keras.Model):\n def __init__(self, filters, repetitions):\n super(VggBlock, self).__init__() \n self.repetitions = repetitions\n \n self.conv_layers = [Conv2D(filters=filters, kernel_size=(3, 3), padding='same', activation='relu') for _ in range(repetitions)]\n self.max_pool = MaxPool2D(pool_size=(2, 2))\n \n def call(self, inputs):\n x = inputs\n for layer in self.conv_layers:\n x = layer(x)\n return self.max_pool(x)\n\n\ntest_block = VggBlock(64, 2)\ntemp_inputs = Input(shape=(224, 224, 3))\ntest_block(temp_inputs)\ntest_block.summary()\n\nHere, we use a list to store the layers, and can still do it using a loop. This prints\nModel: \"vgg_block_2\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n conv2d_4 (Conv2D) multiple 1792 \n \n conv2d_5 (Conv2D) multiple 36928 \n \n max_pooling2d_2 (MaxPooling multiple 0 \n 2D) \n \n=================================================================\nTotal params: 38,720\nTrainable params: 38,720\nNon-trainable params: 0\n_________________________________________________________________\n\nIf you don't need to have the layers given explicitly in the summary, you can use Sequential to simplify the call method:\nclass VggBlock(tf.keras.Model):\n def __init__(self, filters, repetitions):\n super(VggBlock, self).__init__() \n self.repetitions = repetitions\n \n self.conv_layers = tf.keras.Sequential([Conv2D(filters=filters, kernel_size=(3, 3), padding='same', activation='relu') for _ in range(repetitions)])\n self.max_pool = MaxPool2D(pool_size=(2, 2))\n \n def call(self, inputs):\n x = self.conv_layers(inputs)\n return self.max_pool(x)\n\n\ntest_block = VggBlock(64, 2)\ntemp_inputs = Input(shape=(224, 224, 3))\ntest_block(temp_inputs)\ntest_block.summary()\n\nwhich is functionally identical, but displays this summary which you might not want:\nModel: \"vgg_block_3\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n sequential (Sequential) (None, 224, 224, 64) 38720 \n \n max_pooling2d_3 (MaxPooling multiple 0 \n 2D) \n \n=================================================================\nTotal params: 38,720\nTrainable params: 38,720\nNon-trainable params: 0\n_________________________________________________________________\n\n" ]
[ 2 ]
[]
[]
[ "model", "python", "subclass", "tensorflow" ]
stackoverflow_0074596313_model_python_subclass_tensorflow.txt
Q: Rename first two character based on condition in python Folder contains images in format jpg, and png. Here we need to achieve: Image files name start with 11BHBHHJJKKKKK.JPG, 11BCBHHJJKKKKK.JPG, 11BKBHHJJKKKKK.JPG, 33GFHJJKKKKJK.JPG, 33JHNNHHJJJJJ.JPG, 44HJFHJFHJFHF.PNG, 44HJFHJFKKHF.JPG So here we need to change image name using following conditions: image name start with "11" change to "AA", "11BHBHHJJKKKKK.JPG" to "AABHBHHJJKKKKK.JPG" image name start with "33" change to "BB", "33GFHJJKKKKJK.JPG" to "BBJHNNHHJJJJJ.JPG" image name start with "44" change to "CC", "44HJFHJFKKHF.JPG" to "CCHJFHJFKKHF.JPG" Any suggestion please. I have tried from PIL import Image import glob import os import re image_list = [] imagepath = 'C/dataimg/*.jpg' for filename in glob.glob(imagepath): #assuming gif head, tail = os.path.split(filename) print(tail) print(str(tail)[:2]) newimage=tail.replace("11", "AA") newimage.save(path) A: You could do something like below, where you check every single condition you're looking for. from pathlib import Path for filename in glob.glob(imagepath): #assuming gif path = Path(filename) head = os.path.split(filename)[1] if head.startswith('11'): os.rename(filename, os.path.join(path.parent, head.replace('11', 'AA'))) elif head.startswith('33'): os.rename(filename, os.path.join(path.parent, head.replace('33', 'BB'))) elif head.startswith('44'): os.rename(filename, os.path.join(path.parent, head.replace('44', 'CC')))
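A compact variant of the same idea, offered as a sketch rather than a drop-in fix: a prefix-to-prefix mapping plus pathlib, assuming the images live in a folder such as C:/dataimg and that only files starting with 11, 33 or 44 should be renamed:

from pathlib import Path

prefix_map = {"11": "AA", "33": "BB", "44": "CC"}
folder = Path("C:/dataimg")  # assumed image folder

for path in folder.iterdir():
    if path.suffix.lower() in {".jpg", ".png"}:
        new_prefix = prefix_map.get(path.name[:2])
        if new_prefix:
            # Rename in place, keeping the rest of the filename unchanged.
            path.rename(path.with_name(new_prefix + path.name[2:]))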
Rename first two character based on condition in python
Folder contains images in format jpg, and png. Here we need to achieve: Image files name start with 11BHBHHJJKKKKK.JPG, 11BCBHHJJKKKKK.JPG, 11BKBHHJJKKKKK.JPG, 33GFHJJKKKKJK.JPG, 33JHNNHHJJJJJ.JPG, 44HJFHJFHJFHF.PNG, 44HJFHJFKKHF.JPG So here we need to change image name using following conditions: image name start with "11" change to "AA", "11BHBHHJJKKKKK.JPG" to "AABHBHHJJKKKKK.JPG" image name start with "33" change to "BB", "33GFHJJKKKKJK.JPG" to "BBJHNNHHJJJJJ.JPG" image name start with "44" change to "CC", "44HJFHJFKKHF.JPG" to "CCHJFHJFKKHF.JPG" Any suggestion please. I have tried from PIL import Image import glob import os import re image_list = [] imagepath = 'C/dataimg/*.jpg' for filename in glob.glob(imagepath): #assuming gif head, tail = os.path.split(filename) print(tail) print(str(tail)[:2]) newimage=tail.replace("11", "AA") newimage.save(path)
[ "You could do something like below, where you check every single condition you're looking for.\nfrom pathlib import Path\nfor filename in glob.glob(imagepath): #assuming gif\n path = Path(filename)\n head = os.path.split(filename)[1]\n if head.startswith('11'):\n os.rename(filename, os.path.join(path.parent, head.replace('11', 'AA')))\n elif head.startswith('33'):\n os.rename(filename, os.path.join(path.parent, head.replace('33', 'BB')))\n elif head.startswith('44'):\n os.rename(filename, os.path.join(path.parent, head.replace('44', 'CC')))\n\n\n" ]
[ 0 ]
[]
[]
[ "python" ]
stackoverflow_0074599432_python.txt
Q: Save an image to RAM I want to send an image through socket server. After sending it, I want to show this image on ram (I mean without saving as a file). I made some amazing array changes and finally I reached my original array but I still have an error to show this image. And my array progress is really slow. Is there any suggestion for it and How can I overcome this error? Traceback (most recent call last): File "C:\Users\ENES\Desktop\python projects\opencvv\opencv_test_1.py", line 44, in <module> cv2.imshow("hi",arr2) cv2.error: OpenCV(4.6.0) D:/a/opencv-python/opencv-python/opencv/modules/highgui/src/precomp.hpp:155: error: (-215:Assertion failed) src_depth != CV_16F && src_depth != CV_32S in function 'convertToShow' And my code: import cv2 import numpy as np import time img= cv2.imread("resim/tfm.jpg") img1= cv2.imread("resim/tfm.jpg") print(type(img)) y, x = len(img), len(img[0]) print(y, x) print(img) for y1 in range(y): i=img[y1] print(y1) for x1 in range(x): data=i[x1] imgstr=np.array_str(data) #print(img) imgstr=imgstr.replace("[","") imgstr=imgstr.replace("]","") #print(img) """ for a1 in range(3): img=i[a1] print(img)""" imgstr=np.fromstring(imgstr, dtype=int, sep=" ") #print(img) if x1==0: arr1=imgstr else: arr1=np.vstack((arr1, imgstr)) if y1==0: arr2=arr1 else: arr2=np.vstack((arr2, arr1)) #arr1=np.array(arr1) print(img.shape, arr2.shape) #time.sleep(100) arr2=np.array(arr2.reshape(353,616,3)) print(arr2==img) print(type(arr2), type(img)) print(img.shape, arr2.shape) #print(img1) cv2.imshow("hi",arr2) A: I solved my problem changing only 1 line of code. We need to declare datatype of array. Here is the code: arr2=np.array(arr2.reshape(353,616,3), dtype=np.uint8) However, I found a new function cv2.imencode() This function is exactly what I wanted and it is really faster than my code. So, I am going to use this function. Thanks to all your answers! Here is an example of it: import cv2 import numpy as np import time img= cv2.imread("resim/tfm.jpg") img=cv2.imencode(".jpg", img)[1] print(img) byte_img = img.tobytes() print(len(byte_img)) recvd_img=np.array(bytearray(byte_img), dtype="uint8") recvd_img=cv2.imdecode(recvd_img, cv2.IMREAD_COLOR) cv2.imshow("sa", recvd_img)
Save an image to RAM
I want to send an image through socket server. After sending it, I want to show this image on ram (I mean without saving as a file). I made some amazing array changes and finally I reached my original array but I still have an error to show this image. And my array progress is really slow. Is there any suggestion for it and How can I overcome this error? Traceback (most recent call last): File "C:\Users\ENES\Desktop\python projects\opencvv\opencv_test_1.py", line 44, in <module> cv2.imshow("hi",arr2) cv2.error: OpenCV(4.6.0) D:/a/opencv-python/opencv-python/opencv/modules/highgui/src/precomp.hpp:155: error: (-215:Assertion failed) src_depth != CV_16F && src_depth != CV_32S in function 'convertToShow' And my code: import cv2 import numpy as np import time img= cv2.imread("resim/tfm.jpg") img1= cv2.imread("resim/tfm.jpg") print(type(img)) y, x = len(img), len(img[0]) print(y, x) print(img) for y1 in range(y): i=img[y1] print(y1) for x1 in range(x): data=i[x1] imgstr=np.array_str(data) #print(img) imgstr=imgstr.replace("[","") imgstr=imgstr.replace("]","") #print(img) """ for a1 in range(3): img=i[a1] print(img)""" imgstr=np.fromstring(imgstr, dtype=int, sep=" ") #print(img) if x1==0: arr1=imgstr else: arr1=np.vstack((arr1, imgstr)) if y1==0: arr2=arr1 else: arr2=np.vstack((arr2, arr1)) #arr1=np.array(arr1) print(img.shape, arr2.shape) #time.sleep(100) arr2=np.array(arr2.reshape(353,616,3)) print(arr2==img) print(type(arr2), type(img)) print(img.shape, arr2.shape) #print(img1) cv2.imshow("hi",arr2)
[ "I solved my problem changing only 1 line of code. We need to declare datatype of array. Here is the code:\narr2=np.array(arr2.reshape(353,616,3), dtype=np.uint8)\n\nHowever, I found a new function cv2.imencode()\nThis function is exactly what I wanted and it is really faster than my code. So, I am going to use this function.\nThanks to all your answers!\nHere is an example of it:\nimport cv2\nimport numpy as np\nimport time\n\nimg= cv2.imread(\"resim/tfm.jpg\")\n\nimg=cv2.imencode(\".jpg\", img)[1]\nprint(img)\nbyte_img = img.tobytes()\nprint(len(byte_img))\n\nrecvd_img=np.array(bytearray(byte_img), dtype=\"uint8\")\nrecvd_img=cv2.imdecode(recvd_img, cv2.IMREAD_COLOR)\ncv2.imshow(\"sa\", recvd_img)\n\n" ]
[ 0 ]
[]
[]
[ "cv2", "image", "numpy", "python", "sockets" ]
stackoverflow_0074593809_cv2_image_numpy_python_sockets.txt
Q: What is the time complexity of this code for Best Time to Buy and Sell Stock I am a beginner in coding, I was doing a leetcode problem "121. Best Time to Buy and Sell Stock". I wrote a code that works pretty well but when I try to run it, it says Time Limit Exceeded. Looking at this code, this would be O(n) time complexity and for the space complexity it would be O(1). I have seen other solutions using a while loop (kadane's algorithm) and it runs perfectly. l = 0 r = 1 maxx = 0 if len(prices) <= 1: return 0 while l <= r: profit = prices[r] - prices[l] if r != len(prices) - 1: r += 1 elif l == len(prices) - 2: if maxx < profit: maxx = profit break else: l += 1 r = l + 1 if maxx < profit: maxx = profit return maxx A: Some observations: Either l is updated and r is set to one more, or l is not updated and r does not diminish. This means that the while condition is always satisfied. The only way to exit the loop is via the break. This means the loop header could also have been written as while True: The r index visits the same index multiple times, as r = l + 1 generally decreases its value The algorithm produces all possible pairs of [l, r] where l < r. There are n(n-1)/2 of those pairs, so the algorithm has a complexity of O(n²). This complexity is not optimal -- as you know. We can get a "feel" of that inefficiency by realising the following: when we have found the optimal r for a given l, it makes no sense to look at lesser values of r when l has been increased. Surely that optimal r for that previous l is still going to be the optimal one for the next value of l. Only when that optimum was at l + 1 we need a "fresh" one. So your algorithm is wasting time on values of r that we already know cannot be optimal. Similarly, when the price at l is greater than a previous one, there is no way that the optimal r (for that l) will improve the best profit we already had. So then there should be no reason at all to have r iterate over the rest of the array: it will be fruitless. Optimal algorithm You have already mentioned Kadane's algorithm (based on day-to-day price differences) I find the following one-pass algorithm quite intuitive: When at a certain index during that single pass, let's assume you have correctly identified the following, using only the part of the list that was already processed: The best profit in that list. If this sub list would be all there was, this would be the final answer. The minimum price in that list. This is a good candidate to buy and try to make even better profits when going further. Then, when reading the next price from the list, there are the following cases: Maybe that price is high enough to improve on the best profit (considering a buy at the minimum price we had so far, and selling at this current price): if so, we just have to update that best profit. Maybe that price is lower than the minimum price we opted for. In that case we can forget about the previous minimum we had, since any future improvement we could get by using that previous minimum price would immediately be improved by taking the current price as buying price instead, so we don't need the previous minimum price anymore. In all other cases we have a price that is non interesting, nor as buying price, nor as selling price. After this step we are again sure we have determined the best profit for the range of prices that was processed, knowing also the minimum price in that range. So by induction this process will give the correct answer. 
Code: class Solution: def maxProfit(self, prices: List[int]) -> int: maxprofit = 0 minprice = prices[0] for price in prices: if price < minprice: minprice = price elif price - minprice > maxprofit: maxprofit = price - minprice # buy at minprice & sell at price return maxprofit
What is the time complexity of this code for Best Time to Buy and Sell Stock
I am a beginner in coding, I was doing a leetcode problem "121. Best Time to Buy and Sell Stock". I wrote a code that works pretty well but when I try to run it, it says Time Limit Exceeded. Looking at this code, this would be O(n) time complexity and for the space complexity it would be O(1). I have seen other solutions using a while loop (kadane's algorithm) and it runs perfectly. l = 0 r = 1 maxx = 0 if len(prices) <= 1: return 0 while l <= r: profit = prices[r] - prices[l] if r != len(prices) - 1: r += 1 elif l == len(prices) - 2: if maxx < profit: maxx = profit break else: l += 1 r = l + 1 if maxx < profit: maxx = profit return maxx
[ "Some observations:\n\nEither l is updated and r is set to one more, or l is not updated and r does not diminish. This means that the while condition is always satisfied. The only way to exit the loop is via the break. This means the loop header could also have been written as while True:\n\nThe r index visits the same index multiple times, as r = l + 1 generally decreases its value The algorithm produces all possible pairs of [l, r] where l < r. There are n(n-1)/2 of those pairs, so the algorithm has a complexity of O(n²).\n\n\nThis complexity is not optimal -- as you know. We can get a \"feel\" of that inefficiency by realising the following: when we have found the optimal r for a given l, it makes no sense to look at lesser values of r when l has been increased. Surely that optimal r for that previous l is still going to be the optimal one for the next value of l. Only when that optimum was at l + 1 we need a \"fresh\" one. So your algorithm is wasting time on values of r that we already know cannot be optimal.\nSimilarly, when the price at l is greater than a previous one, there is no way that the optimal r (for that l) will improve the best profit we already had. So then there should be no reason at all to have r iterate over the rest of the array: it will be fruitless.\nOptimal algorithm\nYou have already mentioned Kadane's algorithm (based on day-to-day price differences)\nI find the following one-pass algorithm quite intuitive:\nWhen at a certain index during that single pass, let's assume you have correctly identified the following, using only the part of the list that was already processed:\n\nThe best profit in that list. If this sub list would be all there was, this would be the final answer.\n\nThe minimum price in that list. This is a good candidate to buy and try to make even better profits when going further.\n\n\nThen, when reading the next price from the list, there are the following cases:\n\nMaybe that price is high enough to improve on the best profit (considering a buy at the minimum price we had so far, and selling at this current price): if so, we just have to update that best profit.\n\nMaybe that price is lower than the minimum price we opted for. In that case we can forget about the previous minimum we had, since any future improvement we could get by using that previous minimum price would immediately be improved by taking the current price as buying price instead, so we don't need the previous minimum price anymore.\n\nIn all other cases we have a price that is non interesting, nor as buying price, nor as selling price.\n\n\nAfter this step we are again sure we have determined the best profit for the range of prices that was processed, knowing also the minimum price in that range.\nSo by induction this process will give the correct answer.\nCode:\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n maxprofit = 0\n minprice = prices[0]\n for price in prices:\n if price < minprice:\n minprice = price\n elif price - minprice > maxprofit:\n maxprofit = price - minprice # buy at minprice & sell at price\n \n return maxprofit\n\n" ]
[ 0 ]
[]
[]
[ "algorithm", "data_structures", "python" ]
stackoverflow_0074597778_algorithm_data_structures_python.txt
Q: Remove emoji from a specific column
I want to remove emoji from one column while special characters, e.g. (@ # . : / , .), remain in that column. How can I clean the data?

A: You can remove emojis from your data columns using the following code:
df.astype(str).apply(lambda x: x.str.encode('ascii', 'ignore').str.decode('ascii'))
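A minimal sketch of how that approach behaves, assuming a DataFrame df with a hypothetical text column named comment; ASCII characters such as @ # . : / , . survive the encode/decode round trip while emoji are dropped:

import pandas as pd

# Hypothetical sample data: one column mixing emoji with punctuation we want to keep.
df = pd.DataFrame({"comment": ["great product 😀 @user #sale", "rated 5/5, will buy again 🎉"]})

# Encode to ASCII, ignore anything that cannot be represented (emoji),
# then decode back to a regular string. Plain punctuation is unaffected.
df["comment"] = df["comment"].astype(str).apply(
    lambda s: s.encode("ascii", "ignore").decode("ascii")
)

print(df["comment"].tolist())
# ['great product  @user #sale', 'rated 5/5, will buy again ']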
Remove emoji from a specific column
I want to remove emoji from one column while special characters, e.g. (@ # . : / , .), remain in that column. How can I clean the data?
[ "You can remove emojis from your data columns using the following code\ndf.astype(str).apply(lambda x: x.str.encode('ascii', 'ignore').str.decode('ascii'))\n\n" ]
[ 0 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074598854_dataframe_pandas_python.txt
Q: How do I split the 'time_taken' column of a dataframe?
I am trying to split the time_taken attribute (e.g., 02h 10m) into numbers only, using the code below. I have checked earlier posts and this code seemed to work for others, but it is not working for me.
t = pd.to_timedelta(df3['time_taken'])
df3['hours'] = t.dt.components['hours']
df3['minutes'] = t.dt.components['minutes']
df3.head()
I am getting the following error:
ValueError: invalid unit abbreviation: hm
I am unable to understand the error. Can anyone help me split the column into hours and minutes? It would be of great help. Thanks in advance.

A: You can try this code. Since you mentioned that your time_taken attribute looks like this: 02h 10m, I have written example code which you can try out.
import pandas as pd

# initializing example time data
time_taken = ['1h 10m', '2h 20m', '3h 30m', '4h 40m', '5h 50m']

# inserting the time data into a pandas DataFrame
data = pd.DataFrame(time_taken, columns=['time_taken'])

# see how the data looks
print(data)

# initializing "Hours" and "Minutes" columns
# and assigning the value 0 to both for now
data['Hours'] = 0
data['Minutes'] = 0

# when I ran this code, the data type for the elements
# in the time_taken column was numpy.int64,
# so we convert it into string type
data['time_taken'] = data['time_taken'].apply(str)

# loop through the elements to split into Hours and Minutes
for i in range(len(data)):
    temp = data.iat[i, 0]
    hours, minutes = temp.split()  # use Python's .split() method for strings
    data.iat[i, 1] = hours.translate({ord('h'): None})
    data.iat[i, 2] = minutes.translate({ord('m'): None})

# the correct data is here
print(data)
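A shorter, vectorized sketch of the same split, assuming the values always follow the '02h 10m' pattern; the DataFrame and column names here are illustrative:

import pandas as pd

df3 = pd.DataFrame({"time_taken": ["02h 10m", "1h 05m", "12h 30m"]})  # sample values

# Pull the digits in front of 'h' and 'm' into two new integer columns.
parts = df3["time_taken"].str.extract(r"(?P<hours>\d+)h\s*(?P<minutes>\d+)m").astype(int)
df3["hours"] = parts["hours"]
df3["minutes"] = parts["minutes"]

print(df3)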
How do I split the 'time_taken' column of a dataframe?
I am trying to split the time_taken attribute (e.g., 02h 10m) into numbers only, using the code below. I have checked earlier posts and this code seemed to work for others, but it is not working for me.
t = pd.to_timedelta(df3['time_taken'])
df3['hours'] = t.dt.components['hours']
df3['minutes'] = t.dt.components['minutes']
df3.head()
I am getting the following error:
ValueError: invalid unit abbreviation: hm
I am unable to understand the error. Can anyone help me split the column into hours and minutes? It would be of great help. Thanks in advance.
[ "You can try this code. Since you mentioned that your time_taken attribute looks like this: 02h 10m. I have written an example code which you can try out.\nimport pandas as pd\n\n# initializing example time data\ntime_taken = ['1h 10m', '2h 20m', '3h 30m', '4h 40m', '5h 50m']\n\n#inserting the time data into a pandas DataFrame\ndata = pd.DataFrame(time_taken, columns = ['time_taken'])\n\n# see how the data looks like\nprint(data)\n\n# initializing \"Hours\" and \"Minutes\" columns\"\n# and assigning the value 0 to both for now.\ndata['Hours'] = 0\ndata['Minutes'] = 0\n\n# when I ran this code, the data type for the elements \n# in time_taken column was numpy.int64\n# so we convert it into string type\n\ndata['time_taken'] = data['time_taken'].apply(str)\n\n# loop through the elements to split into Hours and minutes\nfor i in range(len(data)):\n temp = data.iat[i,0]\n hours, minutes = temp.split() # use python .split() function for strings\n data.iat[i,1] = hours.translate({ord('h'): None})\n data.iat[i,2] = minutes.translate({ord('m'): None})\n\n\n# the correct data is here\nprint(data)\n\n" ]
[ 0 ]
[]
[]
[ "duration", "pandas", "python", "timedelta", "valueerror" ]
stackoverflow_0074595396_duration_pandas_python_timedelta_valueerror.txt
Q: Alternative to Using Repeated Stratified K Fold with Multiple Outputs? I am exploring the number of features that would be best to use for my models. I understand that a Repeated Stratified K Fold requires 1 1D array output while I am trying to evaluate the number of features for an output that has multiple outputs. Is there a way to use the Repeated Stratified K Fold with multiple outputs? Or is there an alternative to accomplish what I need? from sklearn import datasets from numpy import mean, std from sklearn.datasets import make_classification from sklearn.model_selection import cross_val_score, RepeatedStratifiedKFold, KFold from sklearn.feature_selection import RFE from sklearn.tree import DecisionTreeClassifier from sklearn.pipeline import Pipeline from matplotlib import pyplot def get_models(): models = dict() for i in range(4,20): rfe = RFE(estimator = DecisionTreeClassifier(), n_features_to_select = i) model = DecisionTreeClassifier() models[str(i)] = Pipeline(steps=[('s', rfe), ('m', model)]) return models from sklearn.utils.multiclass import type_of_target x = imp_data.iloc[:,:34] y = imp_data.iloc[:,39] model = DecisionTreeClassifier() def evaluate_model(model,x,y): cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=0) scores = cross_val_score(model, x, y, scoring='accuracy', cv=cv, n_jobs=-1, error_score = 'raise') return scores models = get_models() results, names = list(), list() for name,model in models.items(): scores = evaluate_model(model,x,y) results.append(scores) names.append(name) print('>%s %.3f (%.3f)' % (name, mean(scores), std(scores))) A: as i know, you can use cross_validate() as alternative of StratifiedKFold with multiple output. You can define cross validation technique with StratifiedKFold and scoring metrics as your preference. You can check link below for more detail ! https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html
Alternative to Using Repeated Stratified K Fold with Multiple Outputs?
I am exploring the number of features that would be best to use for my models. I understand that a Repeated Stratified K Fold requires 1 1D array output while I am trying to evaluate the number of features for an output that has multiple outputs. Is there a way to use the Repeated Stratified K Fold with multiple outputs? Or is there an alternative to accomplish what I need? from sklearn import datasets from numpy import mean, std from sklearn.datasets import make_classification from sklearn.model_selection import cross_val_score, RepeatedStratifiedKFold, KFold from sklearn.feature_selection import RFE from sklearn.tree import DecisionTreeClassifier from sklearn.pipeline import Pipeline from matplotlib import pyplot def get_models(): models = dict() for i in range(4,20): rfe = RFE(estimator = DecisionTreeClassifier(), n_features_to_select = i) model = DecisionTreeClassifier() models[str(i)] = Pipeline(steps=[('s', rfe), ('m', model)]) return models from sklearn.utils.multiclass import type_of_target x = imp_data.iloc[:,:34] y = imp_data.iloc[:,39] model = DecisionTreeClassifier() def evaluate_model(model,x,y): cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=0) scores = cross_val_score(model, x, y, scoring='accuracy', cv=cv, n_jobs=-1, error_score = 'raise') return scores models = get_models() results, names = list(), list() for name,model in models.items(): scores = evaluate_model(model,x,y) results.append(scores) names.append(name) print('>%s %.3f (%.3f)' % (name, mean(scores), std(scores)))
[ "as i know, you can use cross_validate() as alternative of StratifiedKFold with multiple output. You can define cross validation technique with StratifiedKFold and scoring metrics as your preference. You can check link below for more detail !\nhttps://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html\n" ]
[ 0 ]
[]
[]
[ "feature_selection", "k_fold", "python" ]
stackoverflow_0073281240_feature_selection_k_fold_python.txt
Q: How to Fix Attribute Error: module 'graph' has no attribute 'get_user_token' I have imported graph already, any idea why it can't identify the attribute? def display_access_token(graph: Graph): token = graph.get_user_token() print('User token:', token, '\n') if user_input == '0': display_access_token(graph) I am not sure what to do to fix it. Thanks in advance. A: I have referred the sample code from the official documentation. After reproducing from my end, this was working fine after adding get_user_token() in the graph.py. Below is the complete code in my graph.py. import json from configparser import SectionProxy from azure.identity import DeviceCodeCredential, ClientSecretCredential from msgraph.core import GraphClient class Graph: settings: SectionProxy device_code_credential: DeviceCodeCredential user_client: GraphClient client_credential: ClientSecretCredential app_client: GraphClient def __init__(self, config: SectionProxy): self.settings = config client_id = self.settings['clientId'] tenant_id = self.settings['authTenant'] graph_scopes = self.settings['graphUserScopes'].split(' ') self.device_code_credential = DeviceCodeCredential(client_id, tenant_id = tenant_id) self.user_client = GraphClient(credential=self.device_code_credential, scopes=graph_scopes) def get_user_token(self): a="Return from get_user_token" return a RESULTS:
How to Fix Attribute Error: module 'graph' has no attribute 'get_user_token'
I have imported graph already, any idea why it can't identify the attribute? def display_access_token(graph: Graph): token = graph.get_user_token() print('User token:', token, '\n') if user_input == '0': display_access_token(graph) I am not sure what to do to fix it. Thanks in advance.
[ "I have referred the sample code from the official documentation. After reproducing from my end, this was working fine after adding get_user_token() in the graph.py. Below is the complete code in my graph.py.\nimport json\nfrom configparser import SectionProxy\nfrom azure.identity import DeviceCodeCredential, ClientSecretCredential\nfrom msgraph.core import GraphClient\n\nclass Graph:\n settings: SectionProxy\n device_code_credential: DeviceCodeCredential\n user_client: GraphClient\n client_credential: ClientSecretCredential\n app_client: GraphClient\n\n def __init__(self, config: SectionProxy):\n self.settings = config\n client_id = self.settings['clientId']\n tenant_id = self.settings['authTenant']\n graph_scopes = self.settings['graphUserScopes'].split(' ')\n\n self.device_code_credential = DeviceCodeCredential(client_id, tenant_id = tenant_id)\n self.user_client = GraphClient(credential=self.device_code_credential, scopes=graph_scopes)\n \n def get_user_token(self):\n a=\"Return from get_user_token\"\n return a\n\nRESULTS:\n\n" ]
[ 0 ]
[]
[]
[ "azure", "microsoft_graph_api", "python" ]
stackoverflow_0074593631_azure_microsoft_graph_api_python.txt
Q: Why does my function not give me the expected output?
Given a year, determine whether it is a leap year. If it is a leap year, return the Boolean True, otherwise return False.
Note that the code stub provided reads from STDIN and passes arguments to the is_leap function. It is only necessary to complete the is_leap function.
In the Gregorian calendar, three conditions are used to identify leap years:
The year can be evenly divided by 4, is a leap year, unless:
The year can be evenly divided by 100, it is NOT a leap year, unless:
The year is also evenly divisible by 400. Then it is a leap year.
def is_leap(year):
    leap = False

    if (year % 400 == 0) and (year % 100 == 0):
        leap = True
    elif (year % 4 == 0) and (year % 100 != 0):
        leap = True
    else:
        pass

year = int(input())
print(is_leap(year))
Input 2100 and expected output False.

A: As @Grismar pointed out, your function is not returning anything, so just add return leap to the end.
def is_leap(year):
    leap = False

    if (year % 400 == 0) and (year % 100 == 0):
        leap = True
    elif (year % 4 == 0) and (year % 100 != 0):
        leap = True
    else:
        pass
    return leap

year = int(input())
print(is_leap(year))
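A hedged aside: the standard library already provides this check, so a short sketch using calendar.isleap gives the same result for the sample input:

import calendar

def is_leap(year):
    # calendar.isleap applies the divisible-by-4 / by-100 / by-400 rules for us.
    return calendar.isleap(year)

print(is_leap(2100))  # False
print(is_leap(2000))  # True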
Why does my function not give me the expected output
Given a year, determine whether it is a leap year. If it is a leap year, return the Boolean True, otherwise return False. Note that the code stub provided reads from STDIN and passes arguments to the is_leap function. It is only necessary to complete the is_leap function. In the Gregorian calendar, three conditions are used to identify leap years: The year can be evenly divided by 4, is a leap year, unless: The year can be evenly divided by 100, it is NOT a leap year, unless: The year is also evenly divisible by 400. Then it is a leap year. def is_leap(year): leap= False if (year % 400 == 0) and (year % 100 == 0): leap = True elif (year % 4 ==0) and (year % 100 != 0): leap=True else: pass year = int(input()) print(is_leap(year)) Input 2100 and expected output False
[ "As @Grismar pointed your function is not returning anything so just add return leap to the end.\ndef is_leap(year):\n leap= False\n\n if (year % 400 == 0) and (year % 100 == 0):\n leap = True\n elif (year % 4 ==0) and (year % 100 != 0):\n leap=True\n else:\n pass\n return leap\n\nyear = int(input())\nprint(is_leap(year))\n\n" ]
[ 0 ]
[]
[]
[ "function", "python" ]
stackoverflow_0074588991_function_python.txt
Q: How to add new data to existing dataframe I've created empty dataframe that I have to fill. d = {'A': [], 'B': [], 'C': []} dataframe = pd.DataFrame(data=d) Then I am assigning data like this: dataframe['A'] = some_list_1a dataframe['B'] = some_list_1b dataframe['C'] = some_list_1c So my dataframe is filled like this: A B C ---------------- val1 val1 val1 val1 val1 val1 val1 val1 val1 Then I have to add new values from list but the previous way is not working: dataframe['A'] = some_list_2a etc. That's what I want: A B C ---------------- val1 val1 val1 val1 val1 val1 val1 val1 val1 val2 val2 val2 val2 val1 val2 val2 val2 val2 (val1 - values from first lists, val2 - values from second lists) I know I can make second dataframe and use concat method, but is there another way of doing it? A: Create dictionary with all joined lists first and then call DataFrame is fastest and recommended way, check this: d = {'A': some_list_1a + some_list_2a, 'B': some_list_1b + some_list_2b, 'C': some_list_1c + some_list_2c} dataframe = pd.DataFrame(data=d) If need append dict of list in loop: from collections import defaultdict d = defaultdict(list) #some loop for x in iter: d[col_name].append(sublist) dataframe = pd.DataFrame(data=d) A: append() function is used to append rows of other dataframe to the end of the given dataframe, returning a new dataframe object. Columns not in the original dataframes are added as new columns and the new cells are populated with NONE value.
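A further illustration, not part of the original answers: in recent pandas versions DataFrame.append is deprecated, so the usual way to add a second batch of rows is pd.concat. A minimal sketch, assuming the new values live in lists named some_list_2a, some_list_2b and some_list_2c as in the question:

import pandas as pd

dataframe = pd.DataFrame({'A': some_list_1a, 'B': some_list_1b, 'C': some_list_1c})

# Build a frame from the second set of lists and stack it under the first one.
new_rows = pd.DataFrame({'A': some_list_2a, 'B': some_list_2b, 'C': some_list_2c})
dataframe = pd.concat([dataframe, new_rows], ignore_index=True)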
How to add new data to existing dataframe
I've created empty dataframe that I have to fill. d = {'A': [], 'B': [], 'C': []} dataframe = pd.DataFrame(data=d) Then I am assigning data like this: dataframe['A'] = some_list_1a dataframe['B'] = some_list_1b dataframe['C'] = some_list_1c So my dataframe is filled like this: A B C ---------------- val1 val1 val1 val1 val1 val1 val1 val1 val1 Then I have to add new values from list but the previous way is not working: dataframe['A'] = some_list_2a etc. That's what I want: A B C ---------------- val1 val1 val1 val1 val1 val1 val1 val1 val1 val2 val2 val2 val2 val1 val2 val2 val2 val2 (val1 - values from first lists, val2 - values from second lists) I know I can make second dataframe and use concat method, but is there another way of doing it?
[ "Create dictionary with all joined lists first and then call DataFrame is fastest and recommended way, check this:\nd = {'A': some_list_1a + some_list_2a, \n 'B': some_list_1b + some_list_2b,\n 'C': some_list_1c + some_list_2c}\ndataframe = pd.DataFrame(data=d)\n\nIf need append dict of list in loop:\nfrom collections import defaultdict\n\nd = defaultdict(list)\n\n#some loop\nfor x in iter:\n d[col_name].append(sublist)\n \ndataframe = pd.DataFrame(data=d)\n\n", "append() function is used to append rows of other dataframe to the end of the given dataframe, returning a new dataframe object. Columns not in the original dataframes are added as new columns and the new cells are populated with NONE value.\n" ]
[ 1, 0 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074599828_dataframe_pandas_python.txt
Q: Writing a constraint with gurobipy I shared the parameters, variables and notation of the model: I have difficulty in writing equation 7, which is one of the constraints of the model, with gurobipy. The code block I wrote is as follows: mdl2.addConstrs(T[i, j, k] >= quicksum(p[l]*y[i, l, s] + s[l]*x[i, l, s] for l in N for s in ???)- d[j] - 100000*(1 - x[i, j, k]) for i in M for j in N for k in N) #7 Could you please help me about this? It will be very welcome. If desired, I can also share all the model code I wrote. A: You should be good with writing s in range(k) - the sum depends on the index k from the outer loop. To make this a bit easier to read and comprehend, you might want to switch around the for loops like this: for i in M: for j in N: for k in N: mdl2.addConstr( T[i, j, k] >= quicksum( p[l] * y[i, l, s] + s[l] * x[i, l, s] for l in N for s in range(k) ) - d[j] - 100000 * (1 - x[i, j, k]) ) This assumes that M is range(m) and N is range(n). And please note that I am using addConstr because there is just a single constraint in every iteration of the triple loop. In any case, you should write out the LP file of the problem and check whether what you are modeling actually corresponds to the mathematical formulation.
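To make the answer's last suggestion concrete (an added sketch, not from the original post): a Gurobi model can be dumped to a human-readable LP file with Model.write, which makes it easy to check whether constraint (7) was generated as intended.

mdl2.update()           # flush any pending model modifications
mdl2.write("model.lp")  # inspect the generated constraints in a text editor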
Writing a constraint with gurobipy
I shared the parameters, variables and notation of the model: I have difficulty in writing equation 7, which is one of the constraints of the model, with gurobipy. The code block I wrote is as follows: mdl2.addConstrs(T[i, j, k] >= quicksum(p[l]*y[i, l, s] + s[l]*x[i, l, s] for l in N for s in ???)- d[j] - 100000*(1 - x[i, j, k]) for i in M for j in N for k in N) #7 Could you please help me about this? It will be very welcome. If desired, I can also share all the model code I wrote.
[ "You should be good with writing s in range(k) - the sum depends on the index k from the outer loop.\nTo make this a bit easier to read and comprehend, you might want to switch around the for loops like this:\nfor i in M:\n for j in N:\n for k in N:\n mdl2.addConstr(\n T[i, j, k]\n >= quicksum(\n p[l] * y[i, l, s] + s[l] * x[i, l, s] for l in N for s in range(k)\n )\n - d[j]\n - 100000 * (1 - x[i, j, k])\n )\n\n\nThis assumes that M is range(m) and N is range(n). And please note that I am using addConstr because there is just a single constraint in every iteration of the triple loop.\nIn any case, you should write out the LP file of the problem and check whether what you are modeling actually corresponds to the mathematical formulation.\n" ]
[ 1 ]
[]
[]
[ "constraints", "gurobi", "linear_programming", "optimization", "python" ]
stackoverflow_0074577839_constraints_gurobi_linear_programming_optimization_python.txt
Q: List of tables in a Database I achieved to have a DataFrame with all the columns and their type of all the tables in my database of Databricks. Database Table Column ColumnType default table1 column1 string default table1 column2 boolean default table2 column3 integer default table2 column4 string default table2 column5 string Could anybody help me to add two extra columns, one column that indicates the number of null values of each column in each table and another column that indicates the percentage of null values of each column in each table? Database Table Column ColumnType Nulls Percentage default table1 column1 string 345 5% default table1 column2 boolean 0 0% default table2 column3 integer 98760 90% default table2 column4 string 56721 52% default table2 column5 string 1512 1% Thanks in advance! Python-Code: table_name = 'table1' df = spark.sql("SELECT * FROM {}".format(table_name)) col_null_cnt_df = df.select([count(when(col(c).isNull(),c)).alias(c) for c in df.columns]) col_null_cnt_df.show() A: I have the following data in my 2 tables t1 and t2. The following is how I have the data in my dataframe: I have used the following code to get the desired output. First, I have created 2 columns containing respective queries: from pyspark.sql.functions import lit,col,concat_ws df = df.withColumn('count_of_nulls',concat(lit('select * from '),concat_ws('.',*['Database','Table']),lit(' where isnull('),col('Column'),lit(')'))) df = df.withColumn('no_of_rows',concat(lit('select * from '),concat_ws('.',*['Database','Table']))) Now, I converted this dataframe to pandas dataframe on spark API to perform respective operations using loops and update the dataframe: pdf = df.to_pandas_on_spark() #pdf null_count = [] total_count = [] for i in pdf['count_of_nulls'].to_numpy(): null_count.append(spark.sql(i).count()) for i in pdf['no_of_rows'].to_numpy(): total_count.append(spark.sql(i).count()) print(null_count,total_count) pdf['count_of_nulls'] = null_count pdf['no_of_rows'] = total_count #pdf I converted it back to pyspark dataframe and then calculated percentage. df = pdf.to_spark() df.withColumn('percentage_of_nulls', col('count_of_nulls')/col('no_of_rows')*100).show()
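An added sketch, not part of the original answer: the null counts and percentages can also be computed without building SQL strings, by reusing the count(when(isNull)) pattern from the question together with each table's row count. The database and table names below are illustrative.

from pyspark.sql.functions import col, count, when

rows = []
for table in ["table1", "table2"]:  # illustrative table names
    sdf = spark.table(f"default.{table}")
    total = sdf.count()
    null_counts = sdf.select(
        [count(when(col(c).isNull(), c)).alias(c) for c in sdf.columns]
    ).first().asDict()
    for column, nulls in null_counts.items():
        rows.append(("default", table, column, nulls, 100.0 * nulls / total if total else 0.0))

summary = spark.createDataFrame(rows, ["Database", "Table", "Column", "Nulls", "Percentage"])
summary.show()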
List of tables in a Database
I achieved to have a DataFrame with all the columns and their type of all the tables in my database of Databricks. Database Table Column ColumnType default table1 column1 string default table1 column2 boolean default table2 column3 integer default table2 column4 string default table2 column5 string Could anybody help me to add two extra columns, one column that indicates the number of null values of each column in each table and another column that indicates the percentage of null values of each column in each table? Database Table Column ColumnType Nulls Percentage default table1 column1 string 345 5% default table1 column2 boolean 0 0% default table2 column3 integer 98760 90% default table2 column4 string 56721 52% default table2 column5 string 1512 1% Thanks in advance! Python-Code: table_name = 'table1' df = spark.sql("SELECT * FROM {}".format(table_name)) col_null_cnt_df = df.select([count(when(col(c).isNull(),c)).alias(c) for c in df.columns]) col_null_cnt_df.show()
[ "\nI have the following data in my 2 tables t1 and t2.\n\n\n\nThe following is how I have the data in my dataframe:\n\n\n\nI have used the following code to get the desired output. First, I have created 2 columns containing respective queries:\n\nfrom pyspark.sql.functions import lit,col,concat_ws \ndf = df.withColumn('count_of_nulls',concat(lit('select * from '),concat_ws('.',*['Database','Table']),lit(' where isnull('),col('Column'),lit(')')))\n\ndf = df.withColumn('no_of_rows',concat(lit('select * from '),concat_ws('.',*['Database','Table'])))\n\n\n\nNow, I converted this dataframe to pandas dataframe on spark API to perform respective operations using loops and update the dataframe:\n\npdf = df.to_pandas_on_spark()\n#pdf\n\nnull_count = []\ntotal_count = []\nfor i in pdf['count_of_nulls'].to_numpy():\n null_count.append(spark.sql(i).count())\n\n\nfor i in pdf['no_of_rows'].to_numpy():\n total_count.append(spark.sql(i).count())\n \nprint(null_count,total_count)\npdf['count_of_nulls'] = null_count\npdf['no_of_rows'] = total_count\n#pdf\n\n\n\nI converted it back to pyspark dataframe and then calculated percentage.\n\ndf = pdf.to_spark()\ndf.withColumn('percentage_of_nulls', col('count_of_nulls')/col('no_of_rows')*100).show()\n\n\n" ]
[ 0 ]
[]
[]
[ "azure_databricks", "database", "databricks", "null", "python" ]
stackoverflow_0074503462_azure_databricks_database_databricks_null_python.txt
Q: I'm trying to find the maximum of a function using scipy.optimize.minimize. Can someone help me to find out the mistake? price = pd.read_csv('C:\\Users\\mypath\\price.csv', index_col= [0,1], usecols=[0,5,6]) yt = price.loc['AUS'] yt = yt.pct_change().dropna().values def p(u, sigma, pi): d = pi / (2*np.pi*sigma)**0.5 * np.exp(-(yt-u)**2 / (2*sigma**2)) return d def Lf(u, sigma, pi): prob = p(u[0], sigma[0], pi[0]) + p(u[1], sigma[1], pi[1]) L = np.sum(-np.log(prob)) return L pi_init = (0.5,0.5) sigma_init = (0.1,0.1) u_init = (0.1,0.1) res = opt.minimize(Lf, (u_init, sigma_init, pi_init), method='L-BFGS-B') If i run Lf() i get a real number but when i run minimize i get the following error message: TypeError: Lf() missing 2 required positional arguments: 'sigma' and 'pi' This message doesn't make sense to me... A: x0 = np.array([...]) # suitably shaped numpy array of your init values res = opt.minimize(Lf, x0, args=(u_init, sigma_init, pi_init), method='L-BFGS-B') May be you can try calling like this
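A hedged addendum, not part of the original answer: scipy.optimize.minimize optimises a single flat parameter vector, so another common fix is to pack u, sigma and pi into one array and unpack them inside the objective. A sketch under that assumption, reusing yt and the density expression from the question:

import numpy as np
from scipy import optimize as opt

def neg_log_likelihood(theta):
    # theta = [u1, u2, sigma1, sigma2, pi1, pi2]
    u, sigma, pi = theta[0:2], theta[2:4], theta[4:6]
    prob = sum(
        pi[k] / (2 * np.pi * sigma[k]) ** 0.5 * np.exp(-(yt - u[k]) ** 2 / (2 * sigma[k] ** 2))
        for k in range(2)
    )
    return np.sum(-np.log(prob))

x0 = np.array([0.1, 0.1, 0.1, 0.1, 0.5, 0.5])
res = opt.minimize(neg_log_likelihood, x0, method='L-BFGS-B')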
I'm trying to find the maximum of a function using scipy.optimize.minimize. Can someone help me find the mistake?
price = pd.read_csv('C:\\Users\\mypath\\price.csv', index_col= [0,1], usecols=[0,5,6]) yt = price.loc['AUS'] yt = yt.pct_change().dropna().values def p(u, sigma, pi): d = pi / (2*np.pi*sigma)**0.5 * np.exp(-(yt-u)**2 / (2*sigma**2)) return d def Lf(u, sigma, pi): prob = p(u[0], sigma[0], pi[0]) + p(u[1], sigma[1], pi[1]) L = np.sum(-np.log(prob)) return L pi_init = (0.5,0.5) sigma_init = (0.1,0.1) u_init = (0.1,0.1) res = opt.minimize(Lf, (u_init, sigma_init, pi_init), method='L-BFGS-B') If i run Lf() i get a real number but when i run minimize i get the following error message: TypeError: Lf() missing 2 required positional arguments: 'sigma' and 'pi' This message doesn't make sense to me...
[ "x0 = np.array([...]) # suitably shaped numpy array of your init values\nres = opt.minimize(Lf, x0, args=(u_init, sigma_init, pi_init), method='L-BFGS-B')\n\nMay be you can try calling like this\n" ]
[ 0 ]
[]
[]
[ "optimization", "python", "scipy", "scipy_optimize_minimize" ]
stackoverflow_0074599892_optimization_python_scipy_scipy_optimize_minimize.txt
Q: Why does my values in a list doesn't calculate sum odd and even numbers differently and return it in a new list? I have list named li with values [1,2,4,5] and I want to return a new list with sum of odd numbers and even numbers like new_list = [6,6] where values add as 1+5 = 6 and 2+4 = 6. But, the output that I am receiving is [1]. Below is my code. class Solution(object): def calculate_odd_even(self, li): even = 0 odd = 0 sum_num = [] for i in range(len(li)): if li[i] % 2 == 0: even += 1 sum_num.append(even) else: odd += 1 sum_num.append(odd) return sum_num if __name__ == "__main__": p = Solution() lit = [1, 2, 4, 5] print(p.calculate_odd_even(lit)) A: There are several issues in your code. you return too early (in the loop) you add +1 instead of the value you try to append to each loop (do it only in the end) the order of the odd/even values depends on the input data (first one seen of odd/even will be first) Other "minor" issue: don't loop over the indices, loop over the values class Solution(object): def calculate_odd_even(self, li): even = 0 odd = 0 sum_num = [] for x in li: if x % 2 == 0: even += x else: odd += x sum_num.append(odd) sum_num.append(even) return sum_num if __name__ == "__main__": p = Solution() lit = [1, 2, 4, 5] print(p.calculate_odd_even(lit)) More simple variant: class Solution(object): def calculate_odd_even(self, li): sum_num = [0, 0] for x in li: sum_num[1-x%2] += x # use sum_num[x%2] += x for even/odd order return sum_num if __name__ == "__main__": p = Solution() lit = [1, 2, 4, 5] print(p.calculate_odd_even(lit))
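A further variant added for illustration: the two sums can also be produced directly with generator expressions, which avoids tracking indices or counters altogether.

def calculate_odd_even(li):
    odd_sum = sum(x for x in li if x % 2)        # 1 + 5 = 6 for the sample input
    even_sum = sum(x for x in li if x % 2 == 0)  # 2 + 4 = 6
    return [odd_sum, even_sum]

print(calculate_odd_even([1, 2, 4, 5]))  # [6, 6]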
Why doesn't my function calculate the sums of the odd and even numbers in a list separately and return them in a new list?
I have list named li with values [1,2,4,5] and I want to return a new list with sum of odd numbers and even numbers like new_list = [6,6] where values add as 1+5 = 6 and 2+4 = 6. But, the output that I am receiving is [1]. Below is my code. class Solution(object): def calculate_odd_even(self, li): even = 0 odd = 0 sum_num = [] for i in range(len(li)): if li[i] % 2 == 0: even += 1 sum_num.append(even) else: odd += 1 sum_num.append(odd) return sum_num if __name__ == "__main__": p = Solution() lit = [1, 2, 4, 5] print(p.calculate_odd_even(lit))
[ "There are several issues in your code.\n\nyou return too early (in the loop)\nyou add +1 instead of the value\nyou try to append to each loop (do it only in the end)\nthe order of the odd/even values depends on the input data (first one seen of odd/even will be first)\n\nOther \"minor\" issue:\n\ndon't loop over the indices, loop over the values\n\nclass Solution(object):\n\n def calculate_odd_even(self, li):\n even = 0\n odd = 0\n\n sum_num = []\n for x in li:\n if x % 2 == 0:\n even += x\n else:\n odd += x\n \n sum_num.append(odd)\n sum_num.append(even)\n return sum_num\n\n\nif __name__ == \"__main__\":\n p = Solution()\n lit = [1, 2, 4, 5]\n print(p.calculate_odd_even(lit))\n\nMore simple variant:\nclass Solution(object):\n\n def calculate_odd_even(self, li):\n sum_num = [0, 0]\n for x in li:\n sum_num[1-x%2] += x # use sum_num[x%2] += x for even/odd order\n return sum_num\n\n\nif __name__ == \"__main__\":\n p = Solution()\n lit = [1, 2, 4, 5]\n print(p.calculate_odd_even(lit))\n\n" ]
[ 2 ]
[]
[]
[ "list", "python" ]
stackoverflow_0074599901_list_python.txt
Q: Use PMML models in Python I've found many topics related to this on the Internet but I could find no solutions. Suppose I want to download any PMML model from this examples list, and run them in Python (Python 3 preferably). Is there any way to do this? I'm looking for a way to import a PMML that was deployed OUTSIDE Python and import it to use it with this language. A: You could use PyPMML to apply PMML in Python, for example: from pypmml import Model model = Model.fromFile('DecisionTreeIris.pmml') result = model.predict({ "Sepal_Length" : 5.1, "Sepal_Width" : 3.5, "Petal_Length" : 1.4, "Petal_Width" : 0.2 }) For more info about other PMML libraries, be free to see: https://github.com/autodeployai A: After some research I found the solution to this: the 'openscoring' library. Using it is very simple: import subprocess from openscoring import Openscoring import numpy as np p = subprocess.Popen('java -jar openscoring-server-executable-1.4.3.jar', shell=True) os = Openscoring("http://localhost:8080/openscoring") # Deploying a PMML document DecisionTreeIris.pmml as an Iris model: os.deployFile("Iris", "DecisionTreeIris.pmml") # Evaluating the Iris model with a data record: arguments = { "Sepal_Length" : 5.1, "Sepal_Width" : 3.5, "Petal_Length" : 1.4, "Petal_Width" : 0.2 } result = os.evaluate("Iris", arguments) print(result) This returns the value of the target variable in a dictionary. There is no need to go outside of Python to use PMML models anymore (you just have to run the server with Java, which can be done with Python as well as I showed above). A: Isn't it like trying to host H2O models in python apps? Looks like a bridge from python to Java is required here too. Such bridges are not stable at all, been there, tested them. Just a general suggestion: do not mix languages between ML algos and apps code, train in python, serve in python, re-validate also in python. Legacy R and H2O models can be always re-fitted in python.
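One practical wrinkle with the openscoring approach above (an added note, not from the original answers): subprocess.Popen returns immediately, so the Java server may not be listening yet when deployFile is called. A small standard-library sketch that waits for the port before deploying:

import socket
import time

def wait_for_port(host="localhost", port=8080, timeout=30):
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((host, port), timeout=1):
                return
        except OSError:
            time.sleep(0.5)
    raise RuntimeError(f"Openscoring server did not start on {host}:{port}")

# Call this after subprocess.Popen(...) and before os.deployFile(...).
wait_for_port()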
Use PMML models in Python
I've found many topics related to this on the Internet but I could find no solutions. Suppose I want to download any PMML model from this examples list, and run them in Python (Python 3 preferably). Is there any way to do this? I'm looking for a way to import a PMML that was deployed OUTSIDE Python and import it to use it with this language.
[ "You could use PyPMML to apply PMML in Python, for example:\nfrom pypmml import Model\n\nmodel = Model.fromFile('DecisionTreeIris.pmml')\nresult = model.predict({\n \"Sepal_Length\" : 5.1,\n \"Sepal_Width\" : 3.5,\n \"Petal_Length\" : 1.4,\n \"Petal_Width\" : 0.2\n})\n\nFor more info about other PMML libraries, be free to see:\nhttps://github.com/autodeployai\n", "After some research I found the solution to this: the 'openscoring' library.\nUsing it is very simple:\nimport subprocess\nfrom openscoring import Openscoring\nimport numpy as np\n\np = subprocess.Popen('java -jar openscoring-server-executable-1.4.3.jar',\n shell=True)\n\nos = Openscoring(\"http://localhost:8080/openscoring\")\n\n# Deploying a PMML document DecisionTreeIris.pmml as an Iris model: \nos.deployFile(\"Iris\", \"DecisionTreeIris.pmml\")\n\n# Evaluating the Iris model with a data record:\narguments = {\n \"Sepal_Length\" : 5.1,\n \"Sepal_Width\" : 3.5,\n \"Petal_Length\" : 1.4,\n \"Petal_Width\" : 0.2\n}\nresult = os.evaluate(\"Iris\", arguments)\nprint(result)\n\nThis returns the value of the target variable in a dictionary. There is no need to go outside of Python to use PMML models anymore (you just have to run the server with Java, which can be done with Python as well as I showed above).\n", "Isn't it like trying to host H2O models in python apps? Looks like a bridge from python to Java is required here too. Such bridges are not stable at all, been there, tested them. Just a general suggestion: do not mix languages between ML algos and apps code, train in python, serve in python, re-validate also in python. Legacy R and H2O models can be always re-fitted in python.\n" ]
[ 5, 1, 0 ]
[]
[]
[ "pmml", "prediction", "python", "python_3.x", "xml" ]
stackoverflow_0052393301_pmml_prediction_python_python_3.x_xml.txt
Q: Keras model prediction gives opposite results I trained a model called model_2 in Keras and made predictions using model.predict but I notice as I rerun the code the results are completely different. For example, first time column 0 has all probability values close to 1, but next time it has probability values all close to 0. Has it to do with the memory or the stateful parameter which I have seen mentioned in other posts? X = df.iloc[:,1:10161] X = X.to_numpy() X = X.reshape([X.shape[0], X.shape[1],1]) X_train_1 = X[:,0:10080,:] X_train_2 = X[:,10080:10160,:].reshape(17,80) inputs_1 = keras.Input(shape=(10080, 1)) layer1 = Conv1D(64, 14)(inputs_1) layer2 = layers.MaxPool1D(5)(layer1) layer3 = Conv1D(64, 14)(layer2) layer4 = layers.GlobalMaxPooling1D()(layer3) layer5 = layers.Dropout(0.2)(layer4) inputs_2 = keras.Input(shape=(80,)) layer6 = layers.concatenate([layer5, inputs_2]) layer7 = Dense(128, activation='relu')(layer6) layer8 = layers.Dropout(0.5)(layer7) layer9 = Dense(2, activation='softmax')(layer8) model_2 = keras.models.Model(inputs = [inputs_1, inputs_2], outputs = [layer9]) adam = keras.optimizers.Adam(lr = 0.0001) model_2.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['acc']) prediction = pd.DataFrame(model_2.predict([X_train_1,X_train_2]),index = df.iloc[:,0]) pred = np.argmax(model_2.predict([X_train_1,X_train_2]), axis=1) display(prediction, pred) Examples of the contradictory results: trial 1: 0 1 id 11 1.131853e-07 1.000000 22 1.003963e-06 0.999999 33 1.226156e-07 1.000000 44 9.985497e-08 1.000000 55 1.234705e-07 1.000000 66 1.189311e-07 1.000000 77 6.631822e-08 1.000000 88 9.586067e-08 1.000000 99 9.494666e-08 1.000000 trial 2: 0 1 id 11 0.183640 0.816360 22 0.487814 0.512187 33 0.151600 0.848400 44 0.135977 0.864023 55 0.120982 0.879018 66 0.171371 0.828629 77 0.199774 0.800226 88 0.133711 0.866289 99 0.125785 0.874215 trial 3: 0 1 id 11 0.900128 0.099872 22 0.573520 0.426480 33 0.948409 0.051591 44 0.955184 0.044816 55 0.959075 0.040925 66 0.945758 0.054242 77 0.956582 0.043418 88 0.954180 0.045820 99 0.964601 0.035399 trial 4: 0 1 id 11 1.0 4.697790e-08 22 1.0 2.018885e-07 33 1.0 2.911827e-08 44 1.0 2.904826e-08 55 1.0 1.368165e-08 66 1.0 2.742492e-08 77 1.0 1.461449e-08 88 1.0 2.302636e-08 99 1.0 2.099636e-08 Model was been trained with: n_folds = 10 skf = StratifiedKFold(n_splits=n_folds, shuffle=True) skf = skf.split(X_train_1, Y_cat) cv_score = [] for i, (train, test) in enumerate(skf): model_2 = my_model() history = model_2.fit([X_train_1[train], X_train_2[train]], Y[train], validation_data=([X_train_1[test], X_train_2[test]], Y[test]), epochs=120, batch_size=10) result = model_2.evaluate([X_train_1[test], X_train_2[test]], Y[test]) keras.backend.clear_session() A: After you train a model, you have store the weights of that model in a file. If you don't do this, you don't keep the trained models if you run your program (or any other) again. You can store the model weights during training by using the ModelCheckpoint callback. https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/ModelCheckpoint You can load the weights before inference (prediction) using model.load_weights(...) https://www.tensorflow.org/tutorials/keras/save_and_load To clarify why your results are random. You keep initializing a new model with random weights and then directly try to predict the answer with the untrained model. [EDIT] The reason your untrained model keeps giving the opposite results, may be due to the inputs combined with the weight initialization. 
The weight initialization is random, but there is some "smarts" behind the initialization to prevent dead connections. Here you can find more details on all weight initializers: https://www.tensorflow.org/api_docs/python/tf/keras/initializers Good Luck! A: U need to fix the random generator seeds for both tensorflow and sklearn. Try the following: tf.random.set_seed(42) and skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=42) This will allow you to replicate your results. A: It's completely normal, when you create a new model, its weights are initialized as random, so the prediction will change each time you run this code.
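To make the first answer's point concrete (an added sketch, not from the original thread): weights trained inside the cross-validation loop can be saved to disk and restored before calling predict, so inference never runs on a freshly initialised, untrained model. The file name is illustrative.

# After training a fold, persist the learned weights.
model_2.save_weights('model_2_fold0.h5')

# Later (e.g. in a new session): rebuild the same architecture, then restore the weights.
model_2 = my_model()
model_2.load_weights('model_2_fold0.h5')
prediction = model_2.predict([X_train_1, X_train_2])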
Keras model prediction gives opposite results
I trained a model called model_2 in Keras and made predictions using model.predict but I notice as I rerun the code the results are completely different. For example, first time column 0 has all probability values close to 1, but next time it has probability values all close to 0. Has it to do with the memory or the stateful parameter which I have seen mentioned in other posts? X = df.iloc[:,1:10161] X = X.to_numpy() X = X.reshape([X.shape[0], X.shape[1],1]) X_train_1 = X[:,0:10080,:] X_train_2 = X[:,10080:10160,:].reshape(17,80) inputs_1 = keras.Input(shape=(10080, 1)) layer1 = Conv1D(64, 14)(inputs_1) layer2 = layers.MaxPool1D(5)(layer1) layer3 = Conv1D(64, 14)(layer2) layer4 = layers.GlobalMaxPooling1D()(layer3) layer5 = layers.Dropout(0.2)(layer4) inputs_2 = keras.Input(shape=(80,)) layer6 = layers.concatenate([layer5, inputs_2]) layer7 = Dense(128, activation='relu')(layer6) layer8 = layers.Dropout(0.5)(layer7) layer9 = Dense(2, activation='softmax')(layer8) model_2 = keras.models.Model(inputs = [inputs_1, inputs_2], outputs = [layer9]) adam = keras.optimizers.Adam(lr = 0.0001) model_2.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['acc']) prediction = pd.DataFrame(model_2.predict([X_train_1,X_train_2]),index = df.iloc[:,0]) pred = np.argmax(model_2.predict([X_train_1,X_train_2]), axis=1) display(prediction, pred) Examples of the contradictory results: trial 1: 0 1 id 11 1.131853e-07 1.000000 22 1.003963e-06 0.999999 33 1.226156e-07 1.000000 44 9.985497e-08 1.000000 55 1.234705e-07 1.000000 66 1.189311e-07 1.000000 77 6.631822e-08 1.000000 88 9.586067e-08 1.000000 99 9.494666e-08 1.000000 trial 2: 0 1 id 11 0.183640 0.816360 22 0.487814 0.512187 33 0.151600 0.848400 44 0.135977 0.864023 55 0.120982 0.879018 66 0.171371 0.828629 77 0.199774 0.800226 88 0.133711 0.866289 99 0.125785 0.874215 trial 3: 0 1 id 11 0.900128 0.099872 22 0.573520 0.426480 33 0.948409 0.051591 44 0.955184 0.044816 55 0.959075 0.040925 66 0.945758 0.054242 77 0.956582 0.043418 88 0.954180 0.045820 99 0.964601 0.035399 trial 4: 0 1 id 11 1.0 4.697790e-08 22 1.0 2.018885e-07 33 1.0 2.911827e-08 44 1.0 2.904826e-08 55 1.0 1.368165e-08 66 1.0 2.742492e-08 77 1.0 1.461449e-08 88 1.0 2.302636e-08 99 1.0 2.099636e-08 Model was been trained with: n_folds = 10 skf = StratifiedKFold(n_splits=n_folds, shuffle=True) skf = skf.split(X_train_1, Y_cat) cv_score = [] for i, (train, test) in enumerate(skf): model_2 = my_model() history = model_2.fit([X_train_1[train], X_train_2[train]], Y[train], validation_data=([X_train_1[test], X_train_2[test]], Y[test]), epochs=120, batch_size=10) result = model_2.evaluate([X_train_1[test], X_train_2[test]], Y[test]) keras.backend.clear_session()
[ "After you train a model, you have store the weights of that model in a file. If you don't do this, you don't keep the trained models if you run your program (or any other) again.\nYou can store the model weights during training by using the ModelCheckpoint callback. https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/ModelCheckpoint\nYou can load the weights before inference (prediction) using model.load_weights(...)\nhttps://www.tensorflow.org/tutorials/keras/save_and_load\nTo clarify why your results are random. You keep initializing a new model with random weights and then directly try to predict the answer with the untrained model.\n[EDIT]\nThe reason your untrained model keeps giving the opposite results, may be due to the inputs combined with the weight initialization. The weight initialization is random, but there is some \"smarts\" behind the initialization to prevent dead connections. Here you can find more details on all weight initializers: https://www.tensorflow.org/api_docs/python/tf/keras/initializers\nGood Luck!\n", "U need to fix the random generator seeds for both tensorflow and sklearn.\nTry the following:\ntf.random.set_seed(42)\n\nand\nskf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=42)\n\n\nThis will allow you to replicate your results.\n", "It's completely normal, when you create a new model, its weights are initialized as random, so the prediction will change each time you run this code.\n" ]
[ 1, 1, 0 ]
[]
[]
[ "keras", "machine_learning", "prediction", "python", "tensorflow" ]
stackoverflow_0066665742_keras_machine_learning_prediction_python_tensorflow.txt
Q: Maya Python scale picture How to scale a picture to fit a window/layout? With the code below the original image is not actually enlarged to 300px, it is displayed with the original image size instead. import maya.cmds as cmds if (cmds.window(window1, exists=True)): cmds.deleteUI(window1) window1 = cmds.window(w=300, h=300) layout = cmds.columnLayout(w=300, h=300) cmds.picture( image='image.png', w=300, h=300) cmds.showWindow( window1 ) A: Try this import maya.OpenMaya as om def resize_image(source_image, output_image, width, height): image = om.MImage() image.readFromFile(source_image) image.resize( width, height ) image.writeToFile(output_image, 'png') resizeImage('<source_image.png>','<output_image.png>', 300, 300) A: I have used iconTextButton to set an image with different width and height. Use adjustableColumn=False to set width/height of your image. You can also try iconTextStaticLabel cmd. Hope this will work for you.. import maya.cmds as cmds if cmds.window('Test',exists=True): cmds.deleteUI('Test') cmds.window('Test',title='Test Image', bgc=[0.266, 0.266, 0.266], widthHeight=[600,400], sizeable = True) parentLayout = cmds.columnLayout(adjustableColumn=False) cmds.iconTextButton(style="iconOnly",image1='Capture.png',p=parentLayout,w=150,h=150) cmds.setParent('..') cmds.showWindow('Test')
Maya Python scale picture
How to scale a picture to fit a window/layout? With the code below the original image is not actually enlarged to 300px, it is displayed with the original image size instead. import maya.cmds as cmds if (cmds.window(window1, exists=True)): cmds.deleteUI(window1) window1 = cmds.window(w=300, h=300) layout = cmds.columnLayout(w=300, h=300) cmds.picture( image='image.png', w=300, h=300) cmds.showWindow( window1 )
[ "Try this\nimport maya.OpenMaya as om\n\ndef resize_image(source_image, output_image, width, height):\n\n image = om.MImage()\n image.readFromFile(source_image)\n\n image.resize( width, height )\n image.writeToFile(output_image, 'png')\n\n\nresizeImage('<source_image.png>','<output_image.png>', 300, 300)\n\n", "I have used iconTextButton to set an image with different width and height.\nUse adjustableColumn=False to set width/height of your image.\nYou can also try iconTextStaticLabel cmd.\nHope this will work for you..\nimport maya.cmds as cmds\n\nif cmds.window('Test',exists=True):\n cmds.deleteUI('Test')\n \ncmds.window('Test',title='Test Image', bgc=[0.266, 0.266, 0.266], widthHeight=[600,400], sizeable = True)\nparentLayout = cmds.columnLayout(adjustableColumn=False)\ncmds.iconTextButton(style=\"iconOnly\",image1='Capture.png',p=parentLayout,w=150,h=150)\ncmds.setParent('..')\ncmds.showWindow('Test')\n\n" ]
[ 1, 0 ]
[]
[]
[ "maya", "python" ]
stackoverflow_0064632275_maya_python.txt
Q: A list of lists, substract the values in each sub-list and store the results in new sub-lists I have a list, that contains many sub-lists. Each sub-list, has two values. I want to substract the first value from the second value in each sub-list, and store the results in new lists. Now those new lists are also sub-lists, of another list of lists. So for example, lists_of_lists1 is something like this: lists_of_lists1 = [ran_list1, ran_list2, ran_list3, ran_list4, ran_list5, ran_list6, ran_list7,ran_list8] And this is ran_list1, a sub-list. All sub-lists look similar to this. [[34.39460533995712, 47.84539466004288], [33.095772478005635, 46.50422752199436], [36.66750709361337, 44.44360401749775], [33.33459042563053, 42.14689105585095], [36.638367322851444, 43.62250224236595], [36.465767572400296, 49.200899094266376], [32.220702473831686, 42.65929752616831], [34.31937169660605, 41.14216676493242], [31.198269305510344, 42.801730694489656], [31.216878962221035, 40.6092079943007], [28.465488368524227, 38.793770890735026], [34.50342917911651, 45.32990415421682]] Now substract ran_list1[1] - ran_list1[0] (for each sublist in this manner), and the results store in here: list_of_lists2 = [ran_subresult1 , ran_subresult2 , ran_subresult3 , ran_subresult4 , ran_subresult5 , ran_subresult6 , ran_subresult7, ran_subresult8] So ran_subresult1, is an empty list that the results of ran_list1[1] - ran_list1[0] would be store in it, and ran_subresult2 would store the resuls of ran_list2[1] - ran_list2[0], and so on... My try of this look like this: for i in lists_of_lists1: for j in range(len(i)): list_of_lists2[j].append(lists_of_lists1[j][1] - lists_of_lists1[j][0]) I got a bit lost with the i and j, I guess I'm in the right direction but I'm still unable to do it. I'll appreciate some help with this. Thanks! EDIT - This is the expected output. From lists_of_lists1, let's take the first sub-list as an example, which is ran_list1. The values inside ran_list1 are pairs of numbers: [[34.39460533995712, 47.84539466004288], [33.095772478005635, 46.50422752199436], [36.66750709361337, 44.44360401749775], [33.33459042563053, 42.14689105585095], [36.638367322851444, 43.62250224236595], [36.465767572400296, 49.200899094266376], [32.220702473831686, 42.65929752616831], [34.31937169660605, 41.14216676493242], [31.198269305510344, 42.801730694489656], [31.216878962221035, 40.6092079943007], [28.465488368524227, 38.793770890735026], [34.50342917911651, 45.32990415421682]] Now substract in this manner: 47.84539466004288 - 34.39460533995712 = 13.451 46.50422752199436 - 33.095772478005635 = 13.409 And so on... Now those results will be stored inside ran_subresult1, which is the first sub-list inside list_of_lists2. Hence, ran_subresult1 would be [13.451, 13.409.....] And so on for each sub-list. 
A: This is an alternative approach for your MWE: main_list = [ [ [34.39460533995712, 47.84539466004288], [33.095772478005635, 46.50422752199436], [36.66750709361337, 44.44360401749775], [33.33459042563053, 42.14689105585095], [36.638367322851444, 43.62250224236595], [36.465767572400296, 49.200899094266376], [32.220702473831686, 42.65929752616831], [34.31937169660605, 41.14216676493242], [31.198269305510344, 42.801730694489656], [31.216878962221035, 40.6092079943007], [28.465488368524227, 38.793770890735026], [34.50342917911651, 45.32990415421682], ] ] res_list = main_list.copy() for i, main_i in enumerate(main_list): for j, main_i_j in enumerate(main_i): res_list[i][j] = [main_i_j[1] - main_i_j[0]] A: Use numpy for this, it makes dealing with multidimensional arrays so much easier and it is far more efficient (because it is written in C). To start off, you have to install numpy. Then, in your code, import it. The convention is to import it as np: import numpy as np Next we want to turn your list of lists into a numpy array. If your different ran_lists have different dimensions, you will have to treat them independently, which is what I'll do here. (If they all have the same shape, you can treat it as one 3-d array.) : list_of_lists2 = [] for sub_list in lists_of_lists1: new_sub_list = np.array(sub_list) new_sub_list = new_sub_list[:,1] - new_sub_list[:,0] list_of_lists2.append(new_sub_list.tolist()) Here I have turned the numpy arrays back into nested lists but if you want to continue to do calculations with them it's probably better to keep them as numpy arrays.
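An additional variant, not part of the original answers: since every inner element is just a [first, second] pair, a nested list comprehension yields the per-sub-list differences directly.

lists_of_lists1 = [ran_list1, ran_list2]  # ... and so on for the remaining sub-lists

list_of_lists2 = [
    [second - first for first, second in sub_list]
    for sub_list in lists_of_lists1
]
# list_of_lists2[0] plays the role of ran_subresult1, list_of_lists2[1] of ran_subresult2, ...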
A list of lists, subtract the values in each sub-list and store the results in new sub-lists
I have a list, that contains many sub-lists. Each sub-list, has two values. I want to substract the first value from the second value in each sub-list, and store the results in new lists. Now those new lists are also sub-lists, of another list of lists. So for example, lists_of_lists1 is something like this: lists_of_lists1 = [ran_list1, ran_list2, ran_list3, ran_list4, ran_list5, ran_list6, ran_list7,ran_list8] And this is ran_list1, a sub-list. All sub-lists look similar to this. [[34.39460533995712, 47.84539466004288], [33.095772478005635, 46.50422752199436], [36.66750709361337, 44.44360401749775], [33.33459042563053, 42.14689105585095], [36.638367322851444, 43.62250224236595], [36.465767572400296, 49.200899094266376], [32.220702473831686, 42.65929752616831], [34.31937169660605, 41.14216676493242], [31.198269305510344, 42.801730694489656], [31.216878962221035, 40.6092079943007], [28.465488368524227, 38.793770890735026], [34.50342917911651, 45.32990415421682]] Now substract ran_list1[1] - ran_list1[0] (for each sublist in this manner), and the results store in here: list_of_lists2 = [ran_subresult1 , ran_subresult2 , ran_subresult3 , ran_subresult4 , ran_subresult5 , ran_subresult6 , ran_subresult7, ran_subresult8] So ran_subresult1, is an empty list that the results of ran_list1[1] - ran_list1[0] would be store in it, and ran_subresult2 would store the resuls of ran_list2[1] - ran_list2[0], and so on... My try of this look like this: for i in lists_of_lists1: for j in range(len(i)): list_of_lists2[j].append(lists_of_lists1[j][1] - lists_of_lists1[j][0]) I got a bit lost with the i and j, I guess I'm in the right direction but I'm still unable to do it. I'll appreciate some help with this. Thanks! EDIT - This is the expected output. From lists_of_lists1, let's take the first sub-list as an example, which is ran_list1. The values inside ran_list1 are pairs of numbers: [[34.39460533995712, 47.84539466004288], [33.095772478005635, 46.50422752199436], [36.66750709361337, 44.44360401749775], [33.33459042563053, 42.14689105585095], [36.638367322851444, 43.62250224236595], [36.465767572400296, 49.200899094266376], [32.220702473831686, 42.65929752616831], [34.31937169660605, 41.14216676493242], [31.198269305510344, 42.801730694489656], [31.216878962221035, 40.6092079943007], [28.465488368524227, 38.793770890735026], [34.50342917911651, 45.32990415421682]] Now substract in this manner: 47.84539466004288 - 34.39460533995712 = 13.451 46.50422752199436 - 33.095772478005635 = 13.409 And so on... Now those results will be stored inside ran_subresult1, which is the first sub-list inside list_of_lists2. Hence, ran_subresult1 would be [13.451, 13.409.....] And so on for each sub-list.
[ "This is an alternative approach for your MWE:\nmain_list = [\n [\n [34.39460533995712, 47.84539466004288],\n [33.095772478005635, 46.50422752199436],\n [36.66750709361337, 44.44360401749775],\n [33.33459042563053, 42.14689105585095],\n [36.638367322851444, 43.62250224236595],\n [36.465767572400296, 49.200899094266376],\n [32.220702473831686, 42.65929752616831],\n [34.31937169660605, 41.14216676493242],\n [31.198269305510344, 42.801730694489656],\n [31.216878962221035, 40.6092079943007],\n [28.465488368524227, 38.793770890735026],\n [34.50342917911651, 45.32990415421682],\n ]\n]\n\nres_list = main_list.copy()\nfor i, main_i in enumerate(main_list):\n for j, main_i_j in enumerate(main_i):\n res_list[i][j] = [main_i_j[1] - main_i_j[0]]\n\n", "Use numpy for this, it makes dealing with multidimensional arrays so much easier and it is far more efficient (because it is written in C). To start off, you have to install numpy. Then, in your code, import it. The convention is to import it as np:\nimport numpy as np\n\nNext we want to turn your list of lists into a numpy array. If your different ran_lists have different dimensions, you will have to treat them independently, which is what I'll do here. (If they all have the same shape, you can treat it as one 3-d array.) :\nlist_of_lists2 = []\nfor sub_list in lists_of_lists1:\n new_sub_list = np.array(sub_list)\n new_sub_list = new_sub_list[:,1] - new_sub_list[:,0]\n list_of_lists2.append(new_sub_list.tolist())\n\nHere I have turned the numpy arrays back into nested lists but if you want to continue to do calculations with them it's probably better to keep them as numpy arrays.\n" ]
[ 0, 0 ]
[ "This should work. We first get each element of the list of lists (a list with length 2) and then append the difference between the latter lists (i) elements.\nlist = [[1, 23], [3, 2], [32, 213], [2321, 23]]\nres_list = []\nfor i in list:\n res_list.append((i[1]-i[0]))\n\nprint(res_list)\n\n" ]
[ -1 ]
[ "for_loop", "list", "python" ]
stackoverflow_0074599719_for_loop_list_python.txt
Q: How to zip files on s3 using lambda and python I need to archive multiply files that exists on s3 and then upload the archive back to s3. I am trying to use lambda and python. As some of the files have more than 500MB, downloading in the '/tmp' is not an option. Is there any way to stream files one by one and put them in archive? A: AWS Lambda code: create zip from files by ext in bucket/filePath. def createZipFileStream(bucketName, bucketFilePath, jobKey, fileExt, createUrl=False): response = {} bucket = s3.Bucket(bucketName) filesCollection = bucket.objects.filter(Prefix=bucketFilePath).all() archive = BytesIO() with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as zip_archive: for file in filesCollection: if file.key.endswith('.' + fileExt): with zip_archive.open(file.key, 'w') as file1: file1.write(file.get()['Body'].read()) archive.seek(0) s3.Object(bucketName, bucketFilePath + '/' + jobKey + '.zip').upload_fileobj(archive) archive.close() response['fileUrl'] = None if createUrl is True: s3Client = boto3.client('s3') response['fileUrl'] = s3Client.generate_presigned_url('get_object', Params={'Bucket': bucketName, 'Key': '' + bucketFilePath + '/' + jobKey + '.zip'}, ExpiresIn=3600) return response A: Do not write to disk, stream to and from S3 Stream the Zip file from the source bucket and read and write its contents on the fly using Python back to another S3 bucket. This method does not use up disk space and therefore is not limited by size. The basic steps are: Read the zip file from S3 using the Boto3 S3 resource Object into a BytesIO buffer object Open the object using the zipfile module Iterate over each file in the zip file using the namelist method Write the file back to another bucket in S3 using the resource meta.client.upload_fileobj method The Code Python 3.6 using Boto3 s3_resource = boto3.resource('s3') zip_obj = s3_resource.Object(bucket_name="bucket_name_here", key=zip_key) buffer = BytesIO(zip_obj.get()["Body"].read()) z = zipfile.ZipFile(buffer) for filename in z.namelist(): file_info = z.getinfo(filename) s3_resource.meta.client.upload_fileobj( z.open(filename), Bucket=bucket, Key=f'{filename}' ) Note: AWS Execution time limit has a maximum of 15 minutes so can you process your HUGE files in this amount of time? You can only know by testing. A: The /tmp/ directory is limited to 512MB for AWS Lambda functions. If you search StackOverflow, you'll see some code from people who have created Zip files on-the-fly without saving files to disk. It becomes pretty complicated. An alternative would be to attach an EFS filesystem to the Lambda function. It takes a bit of effort to setup, but the cost would be practically zero if you delete the files after use and you'll have plenty of disk space so your code will be more reliable and easier to maintain. A: # For me below code worked for single file in Glue job to take single .txt file form AWS S3 and make it zipped and upload back to AWS S3. 
import boto3 import zipfile from io import BytesIO import logging logger = logging.getLogger() s3_client = boto3.client('s3') s3_resource= boto3.resource('s3') # ZipFileStream function declaration self._createZipFileStream( bucketName="My_AWS_S3_bucket_name", bucketFilePath="My_txt_object_prefix", bucketfileobject="My_txt_Object_prefix + txt_file_name", zipKey="My_zip_file_prefix") # ZipFileStream function Defination def _createZipFileStream(self, bucketName: str, bucketFilePath: str, bucketfileobject: str, zipKey: str, ) -> None: try: obj = s3_resource.Object(bucket_name=bucketName, key=bucketfileobject) archive = BytesIO() with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as zip_archive: with zip_archive.open(zipKey, 'w') as file1: file1.write(obj.get()['Body'].read()) archive.seek(0) s3_client.upload_fileobj(archive, bucketName, bucketFilePath + '/' + zipKey + '.zip') archive.close() # If you would like to delete the .txt after zipped from AWS S3 below code will work. self._delete_object( bucket=bucketName, key=bucketfileobject) except Exception as e: logger.error(f"Failed to zip the txt file for {bucketName}/{bucketfileobject}: str{e}") # Delete AWS S3 funcation defination. def _delete_object(bucket: str, key: str) -> None: try: logger.info(f"Deleting: {bucket}/{key}") S3.delete_object( Bucket=bucket, Key=key ) except Exception as e: logger.error(f"Failed to delete {bucket}/{key}: str{e}")`enter code here`
How to zip files on s3 using lambda and python
I need to archive multiply files that exists on s3 and then upload the archive back to s3. I am trying to use lambda and python. As some of the files have more than 500MB, downloading in the '/tmp' is not an option. Is there any way to stream files one by one and put them in archive?
[ "AWS Lambda code: create zip from files by ext in bucket/filePath.\n\ndef createZipFileStream(bucketName, bucketFilePath, jobKey, fileExt, createUrl=False):\n response = {} \n bucket = s3.Bucket(bucketName)\n filesCollection = bucket.objects.filter(Prefix=bucketFilePath).all() \n archive = BytesIO()\n\n with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as zip_archive:\n for file in filesCollection:\n if file.key.endswith('.' + fileExt): \n with zip_archive.open(file.key, 'w') as file1:\n file1.write(file.get()['Body'].read()) \n\n archive.seek(0)\n s3.Object(bucketName, bucketFilePath + '/' + jobKey + '.zip').upload_fileobj(archive)\n archive.close()\n\n response['fileUrl'] = None\n\n if createUrl is True:\n s3Client = boto3.client('s3')\n response['fileUrl'] = s3Client.generate_presigned_url('get_object', Params={'Bucket': bucketName,\n 'Key': '' + bucketFilePath + '/' + jobKey + '.zip'},\n ExpiresIn=3600)\n\n return response\n \n\n", "Do not write to disk, stream to and from S3\nStream the Zip file from the source bucket and read and write its contents on the fly using Python back to another S3 bucket.\nThis method does not use up disk space and therefore is not limited by size.\nThe basic steps are:\n\nRead the zip file from S3 using the Boto3 S3 resource Object into a BytesIO buffer object\nOpen the object using the zipfile module\nIterate over each file in the zip file using the namelist method\nWrite the file back to another bucket in S3 using the resource meta.client.upload_fileobj method\n\nThe Code\nPython 3.6 using Boto3\ns3_resource = boto3.resource('s3')\nzip_obj = s3_resource.Object(bucket_name=\"bucket_name_here\", key=zip_key)\nbuffer = BytesIO(zip_obj.get()[\"Body\"].read())\n\nz = zipfile.ZipFile(buffer)\nfor filename in z.namelist():\n file_info = z.getinfo(filename)\n s3_resource.meta.client.upload_fileobj(\n z.open(filename),\n Bucket=bucket,\n Key=f'{filename}'\n )\n\nNote: AWS Execution time limit has a maximum of 15 minutes so can you process your HUGE files in this amount of time? You can only know by testing.\n", "The /tmp/ directory is limited to 512MB for AWS Lambda functions.\nIf you search StackOverflow, you'll see some code from people who have created Zip files on-the-fly without saving files to disk. It becomes pretty complicated.\nAn alternative would be to attach an EFS filesystem to the Lambda function. It takes a bit of effort to setup, but the cost would be practically zero if you delete the files after use and you'll have plenty of disk space so your code will be more reliable and easier to maintain.\n", "# For me below code worked for single file in Glue job to take single .txt file form AWS S3 and make it zipped and upload back to AWS S3. 
\nimport boto3\nimport zipfile\nfrom io import BytesIO\nimport logging\nlogger = logging.getLogger()\n\ns3_client = boto3.client('s3')\ns3_resource= boto3.resource('s3')\n\n# ZipFileStream function declaration\nself._createZipFileStream(\n bucketName=\"My_AWS_S3_bucket_name\",\n bucketFilePath=\"My_txt_object_prefix\", \n bucketfileobject=\"My_txt_Object_prefix + txt_file_name\",\n zipKey=\"My_zip_file_prefix\")\n\n# ZipFileStream function Defination\ndef _createZipFileStream(self, bucketName: str, bucketFilePath: str, bucketfileobject: str, zipKey: str, ) -> None:\n try:\n obj = s3_resource.Object(bucket_name=bucketName, key=bucketfileobject)\n archive = BytesIO()\n\n with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as zip_archive:\n with zip_archive.open(zipKey, 'w') as file1:\n file1.write(obj.get()['Body'].read()) \n\n archive.seek(0)\n\n s3_client.upload_fileobj(archive, bucketName, bucketFilePath + '/' + zipKey + '.zip')\n archive.close()\n \n # If you would like to delete the .txt after zipped from AWS S3 below code will work. \n self._delete_object(\n bucket=bucketName, key=bucketfileobject)\n\n except Exception as e:\n logger.error(f\"Failed to zip the txt file for {bucketName}/{bucketfileobject}: str{e}\")\n\n# Delete AWS S3 funcation defination.\ndef _delete_object(bucket: str, key: str) -> None:\n try:\n logger.info(f\"Deleting: {bucket}/{key}\")\n S3.delete_object(\n Bucket=bucket,\n Key=key\n )\n except Exception as e:\n logger.error(f\"Failed to delete {bucket}/{key}: str{e}\")`enter code here`\n\n" ]
[ 4, 3, 0, 0 ]
[]
[]
[ "amazon_web_services", "aws_lambda", "python" ]
stackoverflow_0068065587_amazon_web_services_aws_lambda_python.txt
Q: SELENIUM (Python) : How to retrieve the URL to which an element redirects me to (opens a new tab) after clicking? Element has tag but no href I am trying to scrape a website with product listings that if clicked on redirect the user to a new tab with further information/contact the seller details. I am trying to retrieve said URL without actually having to click on each listing in the catalog and wait for the page to load as this would take a lot of time. I have searched in web inspector for the "href" but the only link available is to the image source of each listing. However, I noticed that after clicking each element, a GET request method gets sent and this is the URL (https://api.wallapop.com/api/v3/items/v6g2v4y045ze?language=es) it contains pretty much all the information I need, I'm not sure if it's of any use, but its the furthest I've gotten. UPDATE: I tried the code I was suggested (with modifications to specifically find the 'href' attributes in the clickable elements), but I get None returning. I have been looking into finding an 'onclick' element or something similar that might have what I'm looking for but so far it looks like the solution will end up being clicking each element and extracting all the information from there. elements123 = driver.find_elements(By.XPATH, '//a[contains(@class,"ItemCardList__item")]') for e in elements123: print(e.get_attribute('href')) I appreciate any insights, thank you in advance. A: You need something like this: from selenium import webdriver from selenium.webdriver.common.by import By driver = webdriver.Chrome() driver.get("https://google.com") # Get all the elements available with tag name 'a' elements = driver.find_elements(By.TAG_NAME, 'a') for e in elements: print(e.get_attribute('href'))
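An added sketch of the click-and-read-URL route discussed in the question (not from the original answer): if the cards really expose no href, one fallback is to click a card, switch to the tab it opens, read driver.current_url, then close the tab and switch back. The XPath reuses the class name from the question and may need adjusting; an explicit wait for the new tab may also be needed on slow pages.

from selenium.webdriver.common.by import By

cards = driver.find_elements(By.XPATH, '//a[contains(@class, "ItemCardList__item")]')
original = driver.current_window_handle
urls = []

for card in cards:
    card.click()  # the site opens the listing in a new tab
    new_tab = [h for h in driver.window_handles if h != original][0]
    driver.switch_to.window(new_tab)
    urls.append(driver.current_url)  # URL of the detail page
    driver.close()
    driver.switch_to.window(original)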
SELENIUM (Python) : How to retrieve the URL to which an element redirects me to (opens a new tab) after clicking? Element has tag but no href
I am trying to scrape a website with product listings that if clicked on redirect the user to a new tab with further information/contact the seller details. I am trying to retrieve said URL without actually having to click on each listing in the catalog and wait for the page to load as this would take a lot of time. I have searched in web inspector for the "href" but the only link available is to the image source of each listing. However, I noticed that after clicking each element, a GET request method gets sent and this is the URL (https://api.wallapop.com/api/v3/items/v6g2v4y045ze?language=es) it contains pretty much all the information I need, I'm not sure if it's of any use, but its the furthest I've gotten. UPDATE: I tried the code I was suggested (with modifications to specifically find the 'href' attributes in the clickable elements), but I get None returning. I have been looking into finding an 'onclick' element or something similar that might have what I'm looking for but so far it looks like the solution will end up being clicking each element and extracting all the information from there. elements123 = driver.find_elements(By.XPATH, '//a[contains(@class,"ItemCardList__item")]') for e in elements123: print(e.get_attribute('href')) I appreciate any insights, thank you in advance.
[ "You need something like this:\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\ndriver = webdriver.Chrome()\ndriver.get(\"https://google.com\")\n\n# Get all the elements available with tag name 'a'\nelements = driver.find_elements(By.TAG_NAME, 'a')\nfor e in elements:\n print(e.get_attribute('href'))\n\n\n" ]
[ 0 ]
[]
[]
[ "python", "selenium", "selenium_webdriver", "web_scraping" ]
stackoverflow_0074600094_python_selenium_selenium_webdriver_web_scraping.txt
Q: Python product frequently bought with I have retail store transactional data and want to see what categories are bought together. The data is in the below format: transaction_no product_id category 1 100012 A 1 121111 A 1 121127 B 1 121127 G 2 465222 N 2 121127 M 3 121127 F 3 121127 G 3 121127 F 4 465222 M 4 121127 N Rules: The result should be aggregated based on unique transaction numbers. Also, the order shouldn't matter in this case (e.g. A bought with B is the same as B bought with A). If a category is repeated within the same transaction, it should be counted as 1 only (e.g. in transaction_no = 1, category A is counted once) Expected output: bucket count A, B, G 1 N, M 2 F, G 1 How do I achieve this? A: Use GroupBy.agg for aggregate frozenset, then count values by Series.value_counts and last create DataFrame with join for strings from frozensets: df1 = (df.groupby('transaction_no')['category'] .agg(frozenset) .value_counts() .rename(lambda x: ', '.join(sorted(x))) .rename_axis('bucket') .reset_index(name='count')) print (df1) bucket count 0 M, N 2 1 F, G 1 2 A, B, G 1 Another idea: df1 = (df.groupby('transaction_no')['category'] .agg(lambda x: ', '.join(sorted(set((x))))) .value_counts() .rename_axis('bucket') .reset_index(name='count') ) print (df1) bucket count 0 M, N 2 1 F, G 1 2 A, B, G 1
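A small usage illustration (added, not from the original answer): building the sample frame from the question and running the second variant end to end reproduces the expected buckets.

import pandas as pd

df = pd.DataFrame({
    "transaction_no": [1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4],
    "product_id": [100012, 121111, 121127, 121127, 465222, 121127,
                   121127, 121127, 121127, 465222, 121127],
    "category": list("AABGNMFGFMN"),
})

df1 = (df.groupby("transaction_no")["category"]
         .agg(lambda x: ", ".join(sorted(set(x))))
         .value_counts()
         .rename_axis("bucket")
         .reset_index(name="count"))
print(df1)  # buckets "A, B, G" and "F, G" with count 1, "M, N" with count 2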
Python product frequently bought with
I have retail store transactional data and want to see what categories are bought together. The data is in the below format: transaction_no product_id category 1 100012 A 1 121111 A 1 121127 B 1 121127 G 2 465222 N 2 121127 M 3 121127 F 3 121127 G 3 121127 F 4 465222 M 4 121127 N Rules: The result should be aggregated based on unique transaction numbers. Also, the order shouldn't matter in this case (e.g. A bought with B is the same as B bought with A). If a category is repeated within the same transaction, it should be counted as 1 only (e.g. in transaction_no = 1, category A is counted once) Expected output: bucket count A, B, G 1 N, M 2 F, G 1 How do I achieve this?
[ "Use GroupBy.agg for aggregate frozenset, then count values by Series.value_counts and last create DataFrame with join for strings from frozensets:\ndf1 = (df.groupby('transaction_no')['category']\n .agg(frozenset)\n .value_counts()\n .rename(lambda x: ', '.join(sorted(x)))\n .rename_axis('bucket')\n .reset_index(name='count'))\nprint (df1)\n bucket count\n0 M, N 2\n1 F, G 1\n2 A, B, G 1\n\nAnother idea:\ndf1 = (df.groupby('transaction_no')['category']\n .agg(lambda x: ', '.join(sorted(set((x)))))\n .value_counts()\n .rename_axis('bucket')\n .reset_index(name='count')\n )\nprint (df1)\n bucket count\n0 M, N 2\n1 F, G 1\n2 A, B, G 1\n\n" ]
[ 2 ]
[]
[]
[ "combinations", "group_by", "pandas", "python" ]
stackoverflow_0074600010_combinations_group_by_pandas_python.txt
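For the frequently-bought-together question above, the same aggregation can also be written without pandas; a small sketch with collections.Counter, using the rows from the question's table, in case the grouping logic itself is what needs checking.

from collections import Counter

# (transaction_no, category) pairs copied from the question's table
rows = [(1, "A"), (1, "A"), (1, "B"), (1, "G"),
        (2, "N"), (2, "M"),
        (3, "F"), (3, "G"), (3, "F"),
        (4, "M"), (4, "N")]

baskets = {}
for txn, cat in rows:
    baskets.setdefault(txn, set()).add(cat)      # repeats inside a transaction collapse to one

counts = Counter(frozenset(cats) for cats in baskets.values())
for bucket, n in counts.items():
    print(", ".join(sorted(bucket)), n)          # A, B, G 1 / M, N 2 / F, G 1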
Q: Two dictionaries nested inside a list returns an ValueError - How to I nested the two dictionaries data_dict_var_1 and data_dict_var_2 inside the list data_dicts. The two dictionaries both include three keys interoception exteroception and cognitive. Each key contains an array of numeric values, such as {'interoception': array([-1.10037122, -1.12865588, -0.70395085,... ]. My aim is as follows: I would like to take the arrays of the three keys and merge them together into one list. This is achieved by the following part of the code: all_rois = np.array([ data_dict["interoception"], data_dict["exteroception"], data_dict["cognitive"] ]) all_rois = np.hstack(all_rois) Using a for loop, I would like to merge the three keys’ arrays for the two dictionaries instead of repeating the code individually for each dictionary. Furthermore, I would like to append the merged arrays of data_dict_var_1 to var_1_list = [], while the merged arrays of data_dict_var_2 have to be appended to var_2_list = []. Here is the full code that makes the problem described below easier to understand: var_1_list = [] var_2_list = [] data_dicts = [data_dict_var_1, data_dict_var_2] for data_dict in data_dicts: all_rois = np.array([ data_dict["interoception"], data_dict["exteroception"], data_dict["cognitive"] ]) all_rois = np.hstack(all_rois) if data_dict == data_dict_var_1: var_1_list.append(all_rois) elif data_dict == data_dict_var_2: var_2_list.append(all_rois) The problem is as follows: Python returns ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all() for the following if line of the code: if data_dict == data_dict_var_1:. How can I best solve this problem? I read about this ValueError in other questions, but the suggested solutions I tried do not work for my specific case. The problem appears to be that Python does not understand that I refer to data_dict_var_1 in if data_dict == data_dict_var_1:. I have to somehow more specify this line of code for both dictionaries. A: I think you cannot check for dictionary equality. To solve you problem you can try the following: var_1_list = [] var_2_list = [] var_dict = {'var_1_list': [], 'var_2_list': []} data_dicts = [data_dict_var_1, data_dict_var_2] for i, data_dict in enumerate(data_dicts): all_rois = np.array([ data_dict["interoception"], data_dict["exteroception"], data_dict["cognitive"] ]) all_rois = np.hstack(all_rois) var_dict[f'var_{i+1}_list'].append(all_rois)
Two dictionaries nested inside a list returns an ValueError - How to
I nested the two dictionaries data_dict_var_1 and data_dict_var_2 inside the list data_dicts. The two dictionaries both include three keys interoception exteroception and cognitive. Each key contains an array of numeric values, such as {'interoception': array([-1.10037122, -1.12865588, -0.70395085,... ]. My aim is as follows: I would like to take the arrays of the three keys and merge them together into one list. This is achieved by the following part of the code: all_rois = np.array([ data_dict["interoception"], data_dict["exteroception"], data_dict["cognitive"] ]) all_rois = np.hstack(all_rois) Using a for loop, I would like to merge the three keys’ arrays for the two dictionaries instead of repeating the code individually for each dictionary. Furthermore, I would like to append the merged arrays of data_dict_var_1 to var_1_list = [], while the merged arrays of data_dict_var_2 have to be appended to var_2_list = []. Here is the full code that makes the problem described below easier to understand: var_1_list = [] var_2_list = [] data_dicts = [data_dict_var_1, data_dict_var_2] for data_dict in data_dicts: all_rois = np.array([ data_dict["interoception"], data_dict["exteroception"], data_dict["cognitive"] ]) all_rois = np.hstack(all_rois) if data_dict == data_dict_var_1: var_1_list.append(all_rois) elif data_dict == data_dict_var_2: var_2_list.append(all_rois) The problem is as follows: Python returns ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all() for the following if line of the code: if data_dict == data_dict_var_1:. How can I best solve this problem? I read about this ValueError in other questions, but the suggested solutions I tried do not work for my specific case. The problem appears to be that Python does not understand that I refer to data_dict_var_1 in if data_dict == data_dict_var_1:. I have to somehow more specify this line of code for both dictionaries.
[ "I think you cannot check for dictionary equality. To solve you problem you can try the following:\nvar_1_list = []\nvar_2_list = []\nvar_dict = {'var_1_list': [],\n 'var_2_list': []}\ndata_dicts = [data_dict_var_1, data_dict_var_2]\nfor i, data_dict in enumerate(data_dicts):\n all_rois = np.array([\n data_dict[\"interoception\"],\n data_dict[\"exteroception\"],\n data_dict[\"cognitive\"]\n ])\n all_rois = np.hstack(all_rois)\n \n var_dict[f'var_{i+1}_list'].append(all_rois)\n\n" ]
[ 1 ]
[]
[]
[ "dictionary", "python", "valueerror" ]
stackoverflow_0074600034_dictionary_python_valueerror.txt
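For the nested-dictionaries question above, an even smaller change keeps the original two-list structure: pair each dictionary with its destination list (or compare with `is` instead of `==`), so no dictionary comparison, and therefore no element-wise array comparison, happens at all. The arrays below are made-up stand-ins for the real data.

import numpy as np

data_dict_var_1 = {"interoception": np.array([1.0, 2.0]),
                   "exteroception": np.array([3.0, 4.0]),
                   "cognitive": np.array([5.0, 6.0])}
data_dict_var_2 = {"interoception": np.array([7.0, 8.0]),
                   "exteroception": np.array([9.0, 1.0]),
                   "cognitive": np.array([2.0, 3.0])}

var_1_list, var_2_list = [], []

# Pair each dict with its target list, so no dict comparison is needed at all.
for data_dict, target in ((data_dict_var_1, var_1_list), (data_dict_var_2, var_2_list)):
    all_rois = np.hstack([data_dict["interoception"],
                          data_dict["exteroception"],
                          data_dict["cognitive"]])
    target.append(all_rois)

print(var_1_list)
print(var_2_list)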
Q: Redact and remove password from URL I have an URL like this: https://user:[email protected]/path?key=value#hash The result should be: https://user:[email protected]/path?key=value#hash I could use a regex, but instead I would like to parse the URL a high level data structure, then operate on this data structure, then serializing to a string. Is this possible with Python? A: You can use the built in urlparse to query out the password from a url. It is available in both Python 2 and 3, but under different locations. Python 2 import urlparse Python 3 from urllib.parse import urlparse Example from urllib.parse import urlparse parsed = urlparse("https://user:[email protected]/path?key=value#hash") parsed.password # 'password' replaced = parsed._replace(netloc="{}:{}@{}".format(parsed.username, "???", parsed.hostname)) replaced.geturl() # 'https://user:[email protected]/path?key=value#hash' See also this question: Changing hostname in a url A: from urllib.parse import urlparse def redact_url(url: str) -> str: url_components = urlparse(url) if url_components.username or url_components.password: url_components = url_components._replace( netloc=f"{url_components.username}:???@{url_components.hostname}", ) return url_components.geturl() A: The pip module already have an internal utility function which does exactly this. >>> from pip._internal.utils.misc import redact_auth_from_url >>> >>> redact_auth_from_url("https://user:[email protected]/path?key=value#hash") 'https://user:****@example.com/path?key=value#hash' >>> redact_auth_from_url.__doc__ 'Replace the password in a given url with ****.' This will provide the expected result even if the url does not contain username or password. >>> redact_auth_from_url("https://example.com/path?key=value#hash") 'https://example.com/path?key=value#hash'
Redact and remove password from URL
I have a URL like this: https://user:password@example.com/path?key=value#hash The result should be: https://user:???@example.com/path?key=value#hash I could use a regex, but instead I would like to parse the URL into a high-level data structure, operate on this data structure, and then serialize it to a string. Is this possible with Python?
[ "You can use the built in urlparse to query out the password from a url. It is available in both Python 2 and 3, but under different locations.\nPython 2 import urlparse\nPython 3 from urllib.parse import urlparse\nExample\nfrom urllib.parse import urlparse\n\nparsed = urlparse(\"https://user:[email protected]/path?key=value#hash\")\nparsed.password # 'password'\n\nreplaced = parsed._replace(netloc=\"{}:{}@{}\".format(parsed.username, \"???\", parsed.hostname))\nreplaced.geturl() # 'https://user:[email protected]/path?key=value#hash'\n\nSee also this question: Changing hostname in a url\n", "from urllib.parse import urlparse\n\ndef redact_url(url: str) -> str:\n url_components = urlparse(url)\n if url_components.username or url_components.password:\n url_components = url_components._replace(\n netloc=f\"{url_components.username}:???@{url_components.hostname}\",\n )\n\n return url_components.geturl()\n\n", "The pip module already have an internal utility function which does exactly this.\n>>> from pip._internal.utils.misc import redact_auth_from_url\n>>> \n>>> redact_auth_from_url(\"https://user:[email protected]/path?key=value#hash\")\n'https://user:****@example.com/path?key=value#hash'\n>>> redact_auth_from_url.__doc__\n'Replace the password in a given url with ****.'\n\nThis will provide the expected result even if the url does not contain username or password.\n>>> redact_auth_from_url(\"https://example.com/path?key=value#hash\") \n'https://example.com/path?key=value#hash'\n\n" ]
[ 15, 1, 0 ]
[]
[]
[ "python", "url_parsing" ]
stackoverflow_0046905367_python_url_parsing.txt
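One detail worth knowing about the _replace(netloc=...) recipe above: rebuilding the netloc from username and hostname alone silently drops an explicit port. A small sketch that preserves it, using nothing beyond urllib.

from urllib.parse import urlparse

def redact_password(url: str) -> str:
    parts = urlparse(url)
    if parts.password is None:          # nothing to hide
        return url
    netloc = f"{parts.username}:???@{parts.hostname}"
    if parts.port is not None:          # keep an explicit port such as :8443
        netloc += f":{parts.port}"
    return parts._replace(netloc=netloc).geturl()

print(redact_password("https://user:password@example.com:8443/path?key=value#hash"))
# https://user:???@example.com:8443/path?key=value#hash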
Q: Sorting np.array of dates I'm having this matrix of dates that I would like to sort by dates and then have back in the same format as it started data = np.array( [[2015, 1, 1, 23, 4, 59], [2015, 4, 30, 23, 5, 1], [2015, 1, 1, 23, 5, 25], [2015, 2, 15, 58,5, 0], [2015, 5, 20, 50, 27, 37], [2015, 6, 21, 25, 27, 29]]) I tried datetime.datetime, but couldn't convert the data back to this format A: You can sort your data without the conversion to datetime, since the date/time components already appear in sorted order (year, month, etc.). So a np.sort(data, axis=0) should do: import numpy as np data = np.array( [[2015, 1, 1, 23, 4, 59], [2015, 4, 30, 23, 5, 1], [2015, 1, 1, 23, 5, 25], [2015, 2, 15, 58,5, 0], [2015, 5, 20, 50, 27, 37], [2015, 6, 21, 25, 27, 29]]) np.sort(data, axis=0) array([[2015, 1, 1, 23, 4, 0], [2015, 1, 1, 23, 5, 1], [2015, 2, 15, 23, 5, 25], [2015, 4, 20, 25, 5, 29], [2015, 5, 21, 50, 27, 37], [2015, 6, 30, 58, 27, 59]])
Sorting np.array of dates
I'm having this matrix of dates that I would like to sort by dates and then have back in the same format as it started data = np.array( [[2015, 1, 1, 23, 4, 59], [2015, 4, 30, 23, 5, 1], [2015, 1, 1, 23, 5, 25], [2015, 2, 15, 58,5, 0], [2015, 5, 20, 50, 27, 37], [2015, 6, 21, 25, 27, 29]]) I tried datetime.datetime, but couldn't convert the data back to this format
[ "You can sort your data without the conversion to datetime, since the date/time components already appear in sorted order (year, month, etc.). So a np.sort(data, axis=0) should do:\nimport numpy as np\n\ndata = np.array(\n [[2015, 1, 1, 23, 4, 59],\n [2015, 4, 30, 23, 5, 1],\n [2015, 1, 1, 23, 5, 25],\n [2015, 2, 15, 58,5, 0],\n [2015, 5, 20, 50, 27, 37],\n [2015, 6, 21, 25, 27, 29]])\n\nnp.sort(data, axis=0)\narray([[2015, 1, 1, 23, 4, 0],\n [2015, 1, 1, 23, 5, 1],\n [2015, 2, 15, 23, 5, 25],\n [2015, 4, 20, 25, 5, 29],\n [2015, 5, 21, 50, 27, 37],\n [2015, 6, 30, 58, 27, 59]])\n\n" ]
[ 0 ]
[]
[]
[ "arrays", "date", "datetime", "python", "sorting" ]
stackoverflow_0074595193_arrays_date_datetime_python_sorting.txt
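A caution on the answer above: np.sort(data, axis=0) sorts each column on its own, so the printed rows are no longer the original timestamps (the output already shows rows that never existed in the input). Here is a sketch that orders whole rows chronologically instead; it relies on the fact that [year, month, day, hour, minute, second] rows compare in date order.

import numpy as np

data = np.array(
    [[2015, 1, 1, 23, 4, 59],
     [2015, 4, 30, 23, 5, 1],
     [2015, 1, 1, 23, 5, 25],
     [2015, 2, 15, 58, 5, 0],
     [2015, 5, 20, 50, 27, 37],
     [2015, 6, 21, 25, 27, 29]])

# lexsort treats its last key as the primary one, so feed columns from seconds up to years.
order = np.lexsort(data.T[::-1])
print(data[order])    # same rows as the input, ordered earliest to latest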
Q: Faster way of adding results of computed medians to list without for cycle? I would be interested in another, faster, way of adding the median values ​​to the sheet without using a for loop. Suppose I have the following matrix: data_matrix = [ [2,4,5,6,4] [5,6,5,6,4] . . . [etc.,..,etc.] ] I want to calculate the median from each row and insert the results into a 1D list medians[]. The list median[] will therefore contain the calculated values ​​of the medians. I did it with for cycle and it works without problem: medians = [] for data_row in data_matrix: medians.append(median(delta_row)) Ok, it works fine but I´m not sure if this method isn´t slow for more computations. Is anoter way which does the same as code above but speed of code would be faster? A: If you don't want to loop (python loop), use numpy: import numpy as np data_matrix = [ [2,4,5,6,4], [5,6,5,6,4], ] out = np.median(data_matrix, axis=1).tolist() Output: [4.0, 5.0]
Faster way of adding results of computed medians to list without for cycle?
I would be interested in another, faster, way of adding the median values to the list without using a for loop. Suppose I have the following matrix: data_matrix = [ [2,4,5,6,4], [5,6,5,6,4], . . . [etc.,..,etc.] ] I want to calculate the median of each row and insert the results into a 1D list medians[]. The list medians[] will therefore contain the calculated values of the medians. I did it with a for loop and it works without problem: medians = [] for data_row in data_matrix: medians.append(median(data_row)) Ok, it works fine, but I'm not sure whether this method isn't slow for larger computations. Is there another way that does the same as the code above but runs faster?
[ "If you don't want to loop (python loop), use numpy:\nimport numpy as np\n\ndata_matrix = [\n[2,4,5,6,4],\n[5,6,5,6,4],\n]\n\nout = np.median(data_matrix, axis=1).tolist()\n\nOutput: [4.0, 5.0]\n" ]
[ 2 ]
[]
[]
[ "performance", "python" ]
stackoverflow_0074600196_performance_python.txt
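Following the medians answer above, the original loop can also be collapsed into a list comprehension; that mainly shortens the code, while np.median(..., axis=1) is the version that actually moves the work into NumPy. No timing claim is made here; measure on the real matrix.

import numpy as np
from statistics import median

data_matrix = [[2, 4, 5, 6, 4],
               [5, 6, 5, 6, 4]]

medians_py = [median(row) for row in data_matrix]        # same result as the original loop
medians_np = np.median(data_matrix, axis=1).tolist()     # vectorised over all rows at once

print(medians_py, medians_np)    # [4, 5] [4.0, 5.0]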
Q: visualising data with python of time series and float colmn i have the following quastion- What can you tell about the relationship between time and speed? Is there a best time of day to connect? Has it changed throughout the years? this is my dataframedataframe my columns data does any one have any suggestion on how i would aprouch this question ? import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline df = pd.read_csv('/Users/dimagoroh/Desktop/data_vis/big_file.csv', low_memory=False) sns.lmplot(x="hours",y="speed",data=df) im trying to do a plot but get this error i think i need to manipulate the hour column to a diffrent data type right now it is set as object A: Please post the error you get. From the data I think you need to pass x="hour" and not x="hours". Also try df.hour = pd.to_datetime(df.hour)
visualising data with python of time series and float colmn
I have the following question: What can you tell about the relationship between time and speed? Is there a best time of day to connect? Has it changed throughout the years? This is my dataframe [dataframe screenshot] and my columns data [screenshot]. Does anyone have any suggestions on how I should approach this question? import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline df = pd.read_csv('/Users/dimagoroh/Desktop/data_vis/big_file.csv', low_memory=False) sns.lmplot(x="hours",y="speed",data=df) I'm trying to do a plot but get this error. I think I need to convert the hour column to a different data type; right now it is set as object.
[ "Please post the error you get. From the data I think you need to pass x=\"hour\" and not x=\"hours\". Also try\ndf.hour = pd.to_datetime(df.hour)\n\n" ]
[ 0 ]
[]
[]
[ "dataframe", "python" ]
stackoverflow_0074600156_dataframe_python.txt
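For the time-versus-speed question above, a sketch of one way to look at it once the hour column is parsed: average speed per hour of day, split by year. The column names and the CSV path are assumptions based on the question, since the screenshots are not reproduced here.

import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv("big_file.csv", low_memory=False)         # placeholder path
df["hour"] = pd.to_datetime(df["hour"], errors="coerce")   # column was stored as object
df["hour_of_day"] = df["hour"].dt.hour
df["year"] = df["hour"].dt.year

# Mean speed per hour of day, one line per year, to see whether the pattern changed.
pivot = df.pivot_table(index="hour_of_day", columns="year", values="speed", aggfunc="mean")
pivot.plot(marker="o")
plt.xlabel("hour of day")
plt.ylabel("mean speed")
plt.show()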
Q: Is there any way I can shorten this roman to int program? (python) I'm writing a roman numeral to integers program and was testing some preexisting code with a few modifications I made. list1={'I':1,'IV':4,'V':5,'IX':9,'X':10,'XL':40,'L':50,'XC':90,'C':100,'CD':400,'D':500,'CM':900,'M':1000} def romanint(str): result=0 count=0 while (count < len(str)): value1 = list1[str[count]] if (count + 1 < len(str)): value2 = list1[str[count + 1]] if (value1 >= value2): result = result + value1 count = count + 1 else: result = result + value2 - value1 count = count + 2 else: result = result + value1 count = count + 1 return result x=input("Please enter a Roman numeral: ") print(romanint(x)) It works fine but I feel like there's a way to shorten it. I've tried to delete lines I've felt were unnecessary but errors always pop up. Is there a way to modify it or is it fine the way it is?
Is there any way I can shorten this roman to int program? (python)
I'm writing a roman numeral to integers program and was testing some preexisting code with a few modifications I made. list1={'I':1,'IV':4,'V':5,'IX':9,'X':10,'XL':40,'L':50,'XC':90,'C':100,'CD':400,'D':500,'CM':900,'M':1000} def romanint(str): result=0 count=0 while (count < len(str)): value1 = list1[str[count]] if (count + 1 < len(str)): value2 = list1[str[count + 1]] if (value1 >= value2): result = result + value1 count = count + 1 else: result = result + value2 - value1 count = count + 2 else: result = result + value1 count = count + 1 return result x=input("Please enter a Roman numeral: ") print(romanint(x)) It works fine but I feel like there's a way to shorten it. I've tried to delete lines I've felt were unnecessary but errors always pop up. Is there a way to modify it or is it fine the way it is?
[]
[]
[ "Check out the roman library, pip install roman the info is here on their gitgub. Assuming that you are simply trying to optimize a roman numeral converter.\nAlso, check twitter, you'll find help there with regards to recommendations for help on your code. I'm learning that StackOverflow has a set of rules that discourages asking for help, on a site built around... asking for help..\n" ]
[ -1 ]
[ "python" ]
stackoverflow_0074600166_python.txt
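Since the roman-numeral question above asks specifically for something shorter, here is one compact variant: it keeps only single-character values and applies the usual subtraction rule, assuming the input is a valid uppercase numeral.

VALUES = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}

def roman_to_int(numeral: str) -> int:
    total = 0
    for i, ch in enumerate(numeral):
        value = VALUES[ch]
        # A smaller value directly before a larger one (IV, IX, XL, ...) is subtracted.
        if i + 1 < len(numeral) and value < VALUES[numeral[i + 1]]:
            total -= value
        else:
            total += value
    return total

print(roman_to_int("MCMXCIV"))   # 1994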
Q: Glue database connection update username aws cli/boto3 Trying to update Glue database JDBC connection username and keep failing. choices are CLI or boto3. CLI docs are so limited. https://docs.aws.amazon.com/cli/latest/reference/glue/update-connection.html update-connection [--catalog-id <value>] --name <value> --connection-input <value> [--cli-input-json <value>] [--generate-cli-skeleton <value>] Can someone guide, how to pass username to update here. Also similar in boto3. Throwing exception of invalid parameter. response = client.update_connection( Name='test-db', ConnectionInput={ 'Name': 'test-db', 'ConnectionType': 'JDBC' , 'ConnectionProperties': { 'Username': username } } ) A: Try: 'ConnectionProperties': { 'USER_NAME': 'your_user_name', 'PASSWORD' : 'your_user_password' } Caution: Above is not tested. Its based on Glue Boto3 documentation from here. A: So it supposed to be like this. 'USERNAME': username, 'PASSWORD': password }, 'PhysicalConnectionRequirements': PhysicalConnectionRequirements } ) A: You have to add all of the details for the update_connection: glue_client = boto3.client('glue') glue_client.update_connection( Name='Redshift-Rosko', ConnectionInput={ 'Name': 'Redshift-Rosko', 'ConnectionType': 'JDBC', 'ConnectionProperties': { "USERNAME": username, "PASSWORD": password, "JDBC_ENFORCE_SSL": "true", "JDBC_CONNECTION_URL": "jdbc:redshift://...:5439/rosko_db", "KAFKA_SSL_ENABLED": "false" }, 'PhysicalConnectionRequirements': { 'SubnetId': 'subnet-......', 'SecurityGroupIdList': [ 'sg-......', 'sg-......', ], "AvailabilityZone": "eu-central-1a" } } )
Glue database connection update username aws cli/boto3
Trying to update Glue database JDBC connection username and keep failing. choices are CLI or boto3. CLI docs are so limited. https://docs.aws.amazon.com/cli/latest/reference/glue/update-connection.html update-connection [--catalog-id <value>] --name <value> --connection-input <value> [--cli-input-json <value>] [--generate-cli-skeleton <value>] Can someone guide, how to pass username to update here. Also similar in boto3. Throwing exception of invalid parameter. response = client.update_connection( Name='test-db', ConnectionInput={ 'Name': 'test-db', 'ConnectionType': 'JDBC' , 'ConnectionProperties': { 'Username': username } } )
[ "Try:\n'ConnectionProperties': {\n 'USER_NAME': 'your_user_name',\n 'PASSWORD' : 'your_user_password'\n }\n\nCaution: Above is not tested. Its based on Glue Boto3 documentation from here.\n", "So it supposed to be like this.\n 'USERNAME': username,\n 'PASSWORD': password\n },\n 'PhysicalConnectionRequirements': PhysicalConnectionRequirements\n }\n )\n\n\n", "You have to add all of the details for the update_connection:\n glue_client = boto3.client('glue')\n glue_client.update_connection(\n Name='Redshift-Rosko',\n ConnectionInput={\n 'Name': 'Redshift-Rosko',\n 'ConnectionType': 'JDBC',\n 'ConnectionProperties': {\n \"USERNAME\": username,\n \"PASSWORD\": password,\n \"JDBC_ENFORCE_SSL\": \"true\",\n \"JDBC_CONNECTION_URL\": \"jdbc:redshift://...:5439/rosko_db\",\n \"KAFKA_SSL_ENABLED\": \"false\"\n },\n 'PhysicalConnectionRequirements': {\n 'SubnetId': 'subnet-......',\n 'SecurityGroupIdList': [\n 'sg-......',\n 'sg-......',\n ],\n \"AvailabilityZone\": \"eu-central-1a\"\n }\n }\n )\n\n" ]
[ 0, 0, 0 ]
[]
[]
[ "amazon_web_services", "aws_cli", "aws_glue", "boto3", "python" ]
stackoverflow_0069748595_amazon_web_services_aws_cli_aws_glue_boto3_python.txt
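Because UpdateConnection replaces the whole ConnectionInput, a common pattern for the Glue question above is to read the existing connection first and change only the credentials. A sketch under the assumption that the caller has glue:GetConnection permission; the connection name and credentials below are placeholders.

import boto3

glue = boto3.client("glue")

def update_glue_credentials(name: str, username: str, password: str) -> None:
    # Fetch the current definition so untouched properties survive the update.
    current = glue.get_connection(Name=name, HidePassword=False)["Connection"]
    props = dict(current["ConnectionProperties"])
    props["USERNAME"] = username
    props["PASSWORD"] = password

    connection_input = {
        "Name": name,
        "ConnectionType": current["ConnectionType"],
        "ConnectionProperties": props,
    }
    if "PhysicalConnectionRequirements" in current:
        connection_input["PhysicalConnectionRequirements"] = current["PhysicalConnectionRequirements"]

    glue.update_connection(Name=name, ConnectionInput=connection_input)

update_glue_credentials("test-db", "new_user", "new_password")   # placeholder values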
Q: Plotting empty data in a plotly graph I have a situation where I have a series of data, with some values missing in the middle. Like below: If you see the data, 2 is missing in the series. I wish to plot a box plot or a violin plot where, I can have a placeholder for the 2 series, which would mean no data is present for it. Right now I can plot by inserting 2 and substituting NaNs and it gives a plot like below: Is there a better way to plot without manipulating the data, either by use of texts on X Axis or by just having a placeholder? A: You can combine a Categorical and seaborn.boxplot: import seaborn as sns df = pd.DataFrame({'X': [1,1,1,1,1,3,3,3,3,3], 'Y': [1,2,3,4,5,6,7,8,9,10] }) df['X'] = pd.Categorical(df['X'], categories=[1, 2, 3]) sns.boxplot(data=df, x='X', y='Y') Output: annotating the missing categories: ax = sns.boxplot(data=df, x='X', y='Y') # positions of the categories in the X-axis cats = {c: i for i,c in enumerate(df['X'].cat.categories)} missing = set(df['X'].cat.categories)-set(df['X']) # {2} # mid-point of the Y-axis y_pos = np.mean(ax.get_ylim()) for x in missing: ax.annotate('N/A', (cats[x], y_pos), ha='center') Output:
Plotting empty data in a plotly graph
I have a situation where I have a series of data, with some values missing in the middle. Like below: If you see the data, 2 is missing in the series. I wish to plot a box plot or a violin plot where, I can have a placeholder for the 2 series, which would mean no data is present for it. Right now I can plot by inserting 2 and substituting NaNs and it gives a plot like below: Is there a better way to plot without manipulating the data, either by use of texts on X Axis or by just having a placeholder?
[ "You can combine a Categorical and seaborn.boxplot:\nimport seaborn as sns\n\ndf = pd.DataFrame({'X': [1,1,1,1,1,3,3,3,3,3],\n 'Y': [1,2,3,4,5,6,7,8,9,10]\n })\ndf['X'] = pd.Categorical(df['X'], categories=[1, 2, 3])\n\nsns.boxplot(data=df, x='X', y='Y')\n\nOutput:\n\nannotating the missing categories:\nax = sns.boxplot(data=df, x='X', y='Y')\n\n# positions of the categories in the X-axis\ncats = {c: i for i,c in enumerate(df['X'].cat.categories)}\nmissing = set(df['X'].cat.categories)-set(df['X'])\n# {2}\n\n# mid-point of the Y-axis\ny_pos = np.mean(ax.get_ylim())\n\nfor x in missing:\n ax.annotate('N/A', (cats[x], y_pos), ha='center')\n\nOutput:\n\n" ]
[ 2 ]
[]
[]
[ "matplotlib", "plotly", "python", "python_3.x" ]
stackoverflow_0074600302_matplotlib_plotly_python_python_3.x.txt
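A shorter route to the same placeholder effect as the answer above: seaborn's order argument reserves a slot for every listed category, so the Categorical conversion is not strictly needed. The toy data mirrors the answer.

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.DataFrame({"X": [1, 1, 1, 1, 1, 3, 3, 3, 3, 3],
                   "Y": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})

# Listing 2 in `order` keeps an empty position for it even though no rows have X == 2.
ax = sns.boxplot(data=df, x="X", y="Y", order=[1, 2, 3])
plt.show()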
Q: Search for a value anywhere in a pandas DataFrame This seems like a simple question, but I couldn't find it asked before (this and this are close but the answers aren't great). The question is: if I want to search for a value somewhere in my df (I don't know which column it's in) and return all rows with a match. What's the most Pandaic way to do it? Is there anything better than: for col in list(df): try: df[col] == var return df[df[col] == var] except TypeError: continue ? A: You can perform equality comparison on the entire DataFrame: df[df.eq(var1).any(1)] A: You should using isin , this is return the column , is want row check cold' answer :-) df.isin(['bal1']).any() A False B True C False CLASS False dtype: bool Or df[df.isin(['bal1'])].stack() # level 0 index is row index , level 1 index is columns which contain that value 0 B bal1 1 B bal1 dtype: object A: You can try the code below: import pandas as pd x = pd.read_csv(r"filePath") x.columns = x.columns.str.lower().str.replace(' ', '_') y = x.columns.values z = y.tolist() print("Note: It take Case Sensitive Values.") keyWord = input("Type a Keyword to Search: ") try: for k in range(len(z)-1): l = x[x[z[k]].str.match(keyWord)] print(l.head(10)) k = k+1 except: print("") A: This is a solution which will return the actual column you need. df.columns[df.isin(['Yes']).any()]
Search for a value anywhere in a pandas DataFrame
This seems like a simple question, but I couldn't find it asked before (this and this are close but the answers aren't great). The question is: if I want to search for a value somewhere in my df (I don't know which column it's in) and return all rows with a match. What's the most Pandaic way to do it? Is there anything better than: for col in list(df): try: df[col] == var return df[df[col] == var] except TypeError: continue ?
[ "You can perform equality comparison on the entire DataFrame:\ndf[df.eq(var1).any(1)]\n\n", "You should using isin , this is return the column , is want row check cold' answer :-) \ndf.isin(['bal1']).any()\nA False\nB True\nC False\nCLASS False\ndtype: bool\n\nOr \ndf[df.isin(['bal1'])].stack() # level 0 index is row index , level 1 index is columns which contain that value \n0 B bal1\n1 B bal1\ndtype: object\n\n", "You can try the code below:\nimport pandas as pd\nx = pd.read_csv(r\"filePath\")\nx.columns = x.columns.str.lower().str.replace(' ', '_')\ny = x.columns.values\nz = y.tolist()\nprint(\"Note: It take Case Sensitive Values.\")\nkeyWord = input(\"Type a Keyword to Search: \")\ntry:\n for k in range(len(z)-1):\n l = x[x[z[k]].str.match(keyWord)]\n print(l.head(10))\n k = k+1\nexcept:\n print(\"\")\n\n", "This is a solution which will return the actual column you need.\ndf.columns[df.isin(['Yes']).any()]\n\n" ]
[ 60, 30, 2, 0 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0053979403_dataframe_pandas_python.txt
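A small variation on the df.eq(...).any(...) approach above that also reports which column each hit sits in, with axis=1 spelled out because recent pandas versions warn about the positional form any(1). The frame below is a made-up example.

import pandas as pd

df = pd.DataFrame({"A": [1, 2, 3], "B": ["x", "y", "x"], "C": [4, 5, 6]})
var = "x"

mask = df.eq(var)
matching_rows = df[mask.any(axis=1)]     # every row containing the value somewhere

stacked = mask.stack()
hits = stacked[stacked].index            # (row, column) pairs of the actual matches

print(matching_rows)
print(list(hits))                        # [(0, 'B'), (2, 'B')]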
Q: What is the right approach to solve a differential equation at every timestep? Does any equation solver work for a timestep case? I've been implementing ODEint, Solve_ivp and even sympy to solve a first order diff.eq like this : dTsdt = Ts* A - B + C # Set up in a function. This is sort the mathematical model. where A,B,C are vectors that depend on time(e.g. A[1,3,4,5 ...]). tloop=[t[i-1],t[i]] Sol_Ts = solve_ivp(dTsdt,tloop,[Ts0],args=(A[i],B[i],C[i],)) I just wonder, if this approach is correct to solve the equation at every timestep. As I am replacing the value of those constants at every time and thus asking for result at that specific time which is then stored in a variable. I'm not sure if these solvers are suitable for the task or if in fact, I should be using a different method like "Finite Difference Method", although, the latter would take more time and is prone to time issues. The results are so far obtained out of spec. Any advice would be really appreciate ! A: Yes, that is a valid strategy for i in range(N): Sol_Ts = solve_ivp(dTsdt,t[[i,i+1]],[Ts0],args=(A[i],B[i],C[i],)) Ts_arr.append(Sol_Ts.y.copy()) time_arr.append(Sol_Ts.t.copy) Ts0 = Sol_Ts.y[:,-1] Ts_arr = np.concatenate(Ts_arr, axis=1) time_arr = np.concatenate(time_arr) You could also integrate over the full interval using the sympy.interpolate.interp1d function generator for interpolation functions of various types, here you would use the "zero-order hold", the piece-wise constant extension of the given function table.
What is the right approach to solve a differential equation at every timestep?
Does any equation solver work for a timestep case? I've been using odeint, solve_ivp and even sympy to solve a first-order differential equation like this: dTsdt = Ts* A - B + C # Set up in a function. This is roughly the mathematical model. where A, B, C are vectors that depend on time (e.g. A[1,3,4,5 ...]). tloop=[t[i-1],t[i]] Sol_Ts = solve_ivp(dTsdt,tloop,[Ts0],args=(A[i],B[i],C[i],)) I just wonder whether this approach is correct for solving the equation at every timestep, as I am replacing the value of those constants at every step and asking for the result at that specific time, which is then stored in a variable. I'm not sure if these solvers are suitable for the task or if, in fact, I should be using a different method like the "Finite Difference Method", although the latter would take more time and is prone to timing issues. The results obtained so far are out of spec. Any advice would be really appreciated!
[ "Yes, that is a valid strategy\nfor i in range(N):\n Sol_Ts = solve_ivp(dTsdt,t[[i,i+1]],[Ts0],args=(A[i],B[i],C[i],))\n Ts_arr.append(Sol_Ts.y.copy())\n time_arr.append(Sol_Ts.t.copy)\n Ts0 = Sol_Ts.y[:,-1]\n\nTs_arr = np.concatenate(Ts_arr, axis=1)\ntime_arr = np.concatenate(time_arr)\n\nYou could also integrate over the full interval using the sympy.interpolate.interp1d function generator for interpolation functions of various types, here you would use the \"zero-order hold\", the piece-wise constant extension of the given function table.\n" ]
[ 0 ]
[]
[]
[ "equation_solving", "ode", "python" ]
stackoverflow_0074598903_equation_solving_ode_python.txt
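The full-interval alternative mentioned at the end of the answer above, written out: build piece-wise constant interpolants for the coefficients and let solve_ivp run over the whole horizon in one call. It assumes A, B and C are sampled on the same grid t; the sample values below are random placeholders.

import numpy as np
from scipy.integrate import solve_ivp
from scipy.interpolate import interp1d

t = np.linspace(0.0, 10.0, 11)                 # placeholder time grid
A = np.random.uniform(-1.0, 0.0, t.size)       # placeholder coefficient samples
B = np.random.uniform(0.0, 1.0, t.size)
C = np.random.uniform(0.0, 1.0, t.size)
Ts0 = 20.0

# Zero-order hold: each coefficient keeps its sampled value until the next sample point.
A_t = interp1d(t, A, kind="previous")
B_t = interp1d(t, B, kind="previous")
C_t = interp1d(t, C, kind="previous")

def dTsdt(time, Ts):
    return Ts * A_t(time) - B_t(time) + C_t(time)

sol = solve_ivp(dTsdt, (t[0], t[-1]), [Ts0], t_eval=t)
print(sol.y[0])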
Q: How to use template when creating new python file on VScode? In my python file I always start with the following lines import sys import matplotlib as mpl sys.append('C:\\MyPackages') rc_fonts = { "text.usetex": True, 'font.size': 20, 'text.latex.preamble': r"\usepackage{bm}", } mpl.rcParams.update(rc_fonts) Is there a way to indicate to VScode that each time I create a new file.py, it will start with the previous lines ? For now, I copy/paste a 'template.py' but this is not really convenient. And because I work with Windows, I also tried to add 'C:\MyPackages' to the user variables Path but it didn't work. A: For Doing this kind of repetitive task we can use snippets in VSCode. Step 1 : Hit > shift+ctrl+p open command palette. Step 2 : Select Snippets: Configure User Snippets Step 3 : Select Python Step 4 : paste below code in python.json file. change prefix value. like "prefix": "hedwin" so now when you type hedwin vscode will paste our code snippet "": { "prefix": "", "body": [ "import sys", "import matplotlib as mpl", "sys.append('C:\\\\MyPackages')", "", "rc_fonts = {", " \"text.usetex\": True,", " 'font.size': 20,", " 'text.latex.preamble': r\"\\usepackage{bm}\",", "}", "mpl.rcParams.update(rc_fonts)" ], "description": "" } For making snippet : snippet generator A: You can use the extension File Templates. Save a file python.py with your start content in the default template location. (see extension page, you have to set it in the settings) Then use Ctrl+N and select Python, give it a name and the file is created. You can add variables and snippets to customize the instantiation.
How to use template when creating new python file on VScode?
In my python file I always start with the following lines import sys import matplotlib as mpl sys.append('C:\\MyPackages') rc_fonts = { "text.usetex": True, 'font.size': 20, 'text.latex.preamble': r"\usepackage{bm}", } mpl.rcParams.update(rc_fonts) Is there a way to indicate to VScode that each time I create a new file.py, it will start with the previous lines ? For now, I copy/paste a 'template.py' but this is not really convenient. And because I work with Windows, I also tried to add 'C:\MyPackages' to the user variables Path but it didn't work.
[ "For Doing this kind of repetitive task we can use snippets in VSCode.\nStep 1 : Hit > shift+ctrl+p open command palette.\nStep 2 : Select Snippets: Configure User Snippets\nStep 3 : Select Python\nStep 4 : paste below code in python.json file. change prefix value. like \"prefix\": \"hedwin\" so now when you type hedwin vscode will paste our code snippet\n\"\": {\n \"prefix\": \"\",\n \"body\": [\n \"import sys\",\n \"import matplotlib as mpl\",\n \"sys.append('C:\\\\\\\\MyPackages')\",\n \"\",\n \"rc_fonts = {\",\n \" \\\"text.usetex\\\": True,\",\n \" 'font.size': 20,\",\n \" 'text.latex.preamble': r\\\"\\\\usepackage{bm}\\\",\",\n \"}\",\n \"mpl.rcParams.update(rc_fonts)\"\n ],\n \"description\": \"\"\n}\n\nFor making snippet : snippet generator\n", "You can use the extension File Templates.\nSave a file python.py with your start content in the default template location. (see extension page, you have to set it in the settings)\nThen use Ctrl+N and select Python, give it a name and the file is created.\nYou can add variables and snippets to customize the instantiation.\n" ]
[ 0, 0 ]
[]
[]
[ "python", "visual_studio_code" ]
stackoverflow_0074599665_python_visual_studio_code.txt
Q: How to paginate in django for filtered datas views.py import datetime from .filters import MyModelFilter from django.shortcuts import render import pymysql from django.http import HttpResponseRedirect from facligoapp.models import Scrapper from django.db.models import Q from django.utils import timezone import pytz from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger users = "" def index(request): if request.method == "POST": from_date = request.POST.get("from_date") f_date = datetime.datetime.strptime(from_date,'%Y-%m-%d') print(f_date) to_date = request.POST.get("to_date") t_date = datetime.datetime.strptime(to_date, '%Y-%m-%d') print(t_date) get_records_by_date = Scrapper.objects.all().filter(Q(start_time__date=f_date)|Q(end_time__date=t_date)) print(get_records_by_date) filtered_dates = MyModelFilter(request.GET,queryset=get_records_by_date) page = request.GET.get('page', 1) paginator = Paginator(filtered_dates.qs, 5) global users try: users = paginator.get_page(page) except PageNotAnInteger: users = paginator.page(1) except EmptyPage: users = paginator.page(paginator.num_pages) else: roles = Scrapper.objects.all() page = request.GET.get('page', 1) paginator = Paginator(roles, 5) try: users = paginator.page(page) except PageNotAnInteger: users = paginator.page(1) except EmptyPage: users = paginator.page(paginator.num_pages) return render(request, "home.html", {"users": users}) return render(request, "home.html", {"users": users}) filters.py: import django_filters from.models import Scrapper class MyModelFilter(django_filters.FilterSet): class Meta: model = Scrapper # Declare all your model fields by which you will filter # your queryset here: fields = ['start_time', 'end_time'] home.html <!DOCTYPE html> <html> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> <link href="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/css/bootstrap.min.css" rel="stylesheet" id="bootstrap-css"> <script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/js/bootstrap.min.js"></script> <script src="http://code.jquery.com/jquery-1.11.1.min.js"></script> <body> <style> h2 {text-align: center;} </style> <h1>Facilgo Completed Jobs</h1> <form action="" method="post"> {% csrf_token %} <label for="from_date">From Date:</label> <input type="date" id="from_date" name="from_date"> <label for="to_date">To Date:</label> <input type="date" id="to_date" name="to_date"> <input type="submit"><br> </form> <div class="container"> <div class="row"> <div class="col-md-12"> <h2>Summary Details</h2> <table id="bootstrapdatatable" class="table table-striped table-bordered" width="100%"> <thead> <tr> <th>scrapper_id</th> <th>scrapper_jobs_log_id</th> <th>external_job_source_id</th> <th>start_time</th> <th>end_time</th> <th>scrapper_status</th> <th>processed_records</th> <th>new_records</th> <th>skipped_records</th> <th>error_records</th> </tr> </thead> <tbody> {% for stud in users %} {% csrf_token %} <tr> <td>{{stud.scrapper_id}}</td> <td>{{stud.scrapper_jobs_log_id}}</td> <td>{{stud.external_job_source_id}}</td> <td>{{stud.start_time}}</td> <td>{{stud.end_time}}</td> <td>{{stud.scrapper_status}}</td> <td>{{stud.processed_records}}</td> <td>{{stud.new_records}}</td> <td>{{stud.skipped_records}}</td> <td>{{stud.error_records}}</td> </tr> {% endfor %} </tbody> </table> {% if users.has_other_pages %} <ul class="pagination"> {% if users.has_previous %} <li><a href="?page={{ users.previous_page_number }}">«</a></li> {% else %} <li class="disabled"><span>«</span></li> {% 
endif %} {% if user.number|add:'-4' > 1 %} <li><a href="?page={{ page_obj.number|add:'-5' }}">&hellip;</a></li> {% endif %} {% for i in users.paginator.page_range %} {% if users.number == i %} <li class="active"><span>{{ i }} <span class="sr-only">(current)</span></span></li> {% elif i > users.number|add:'-5' and i < users.number|add:'5' %} <li><a href="?page={{ i }}">{{ i }}</a></li> {% endif %} {% endfor %} {% if users.has_next %} <li><a href="?page={{ users.next_page_number }}">»</a></li> {% else %} <li class="disabled"><span>»</span></li> {% endif %} </ul> {% endif %} </div> </div> </div> </body> </html> I need to get only the datas which I have filtered et_records_by_date = Scrapper.objects.all().filter(Q(start_time__date=f_date)|Q(end_time__date=t_date)) in pagination. But when I click the next page its showing different datas. Is there any solution to get only the datas for the particular query. When I post the datas the of dates the 1st pages is showing the correct details but when I click page 2 its showing the other datas A: When you click on 'next page' you're performing a GET request, and not a POST request. Which means it will go into the else block, which has no filtering but just returns all the Scrapper objects. You're better off including the from_date and to_date in a GET request and not using a POST request. If you're using a form you can simply set the method: <form method="GET" action="..." /> A: Shared method def paginate(request,obj,total=25): paginator = Paginator(obj,total) try: page = int(request.GET.get('page', 1)) except: page = 1 try: obj_list = paginator.page(page) except(EmptyPage,InvalidPage): obj_list = paginator.page(paginator.num_pages) return obj_list View users = UserInfo.objects.all() data = { 'users': paginate(request,users,15), } return render(request,self.template_name,data) HTML {% include './pagination.html' with obj=users %} pagination.html file {% if obj.paginator.num_pages > 1 %} <div class="text-center"> <ul class="pagination"> <li class="{% if not obj.has_previous %} disabled{% endif %}"> {% if obj.has_previous %} <a data-page="{{obj.previous_page_number}}" href="?page={{obj.previous_page_number}}{% for key, value in request.GET.items %}{% if key != 'page' %}&{{ key }}={{ value }}{% endif %}{% endfor %}" aria-label="{% trans 'Previous' %}" tabindex="-1"> &laquo; </a> {% else %} <a data-page="0" class="" href="javascript:void(0);" tabindex="-1">&laquo;</a> {% endif %} </li> {% for i in obj.paginator.page_range %} {% if obj.number == i %} <li class="active"> <a data-page="0" class="" href="javascript:void(0)">{{ i }}</a> </li> {% elif i > obj.number|add:'-5' and i < obj.number|add:'5' %} <li class=""><a data-page="{{i}}" class="" href="?page={{i}}{% for key, value in request.GET.items %}{% if key != 'page' %}&{{ key }}={{ value }}{% endif %}{% endfor %}">{{ i }}</a> </li> {% endif %} {% endfor %} <li class="{% if not obj.has_next %} disabled{% endif %}"> {% if obj.has_next %} <a data-page="{{ obj.next_page_number }}" class="" href="?page={{obj.next_page_number}}{% for key, value in request.GET.items %}{% if key != 'page' %}&{{ key }}={{ value }}{% endif %}{% endfor %}" tabindex="-1">&raquo;</a> {% else %} <a data-page="0" class="" href="javascript:void(0);" tabindex="-1">&raquo;</a> {% endif %} </li> </ul> </div> {% endif %}
How to paginate in django for filtered datas
views.py import datetime from .filters import MyModelFilter from django.shortcuts import render import pymysql from django.http import HttpResponseRedirect from facligoapp.models import Scrapper from django.db.models import Q from django.utils import timezone import pytz from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger users = "" def index(request): if request.method == "POST": from_date = request.POST.get("from_date") f_date = datetime.datetime.strptime(from_date,'%Y-%m-%d') print(f_date) to_date = request.POST.get("to_date") t_date = datetime.datetime.strptime(to_date, '%Y-%m-%d') print(t_date) get_records_by_date = Scrapper.objects.all().filter(Q(start_time__date=f_date)|Q(end_time__date=t_date)) print(get_records_by_date) filtered_dates = MyModelFilter(request.GET,queryset=get_records_by_date) page = request.GET.get('page', 1) paginator = Paginator(filtered_dates.qs, 5) global users try: users = paginator.get_page(page) except PageNotAnInteger: users = paginator.page(1) except EmptyPage: users = paginator.page(paginator.num_pages) else: roles = Scrapper.objects.all() page = request.GET.get('page', 1) paginator = Paginator(roles, 5) try: users = paginator.page(page) except PageNotAnInteger: users = paginator.page(1) except EmptyPage: users = paginator.page(paginator.num_pages) return render(request, "home.html", {"users": users}) return render(request, "home.html", {"users": users}) filters.py: import django_filters from.models import Scrapper class MyModelFilter(django_filters.FilterSet): class Meta: model = Scrapper # Declare all your model fields by which you will filter # your queryset here: fields = ['start_time', 'end_time'] home.html <!DOCTYPE html> <html> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> <link href="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/css/bootstrap.min.css" rel="stylesheet" id="bootstrap-css"> <script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/js/bootstrap.min.js"></script> <script src="http://code.jquery.com/jquery-1.11.1.min.js"></script> <body> <style> h2 {text-align: center;} </style> <h1>Facilgo Completed Jobs</h1> <form action="" method="post"> {% csrf_token %} <label for="from_date">From Date:</label> <input type="date" id="from_date" name="from_date"> <label for="to_date">To Date:</label> <input type="date" id="to_date" name="to_date"> <input type="submit"><br> </form> <div class="container"> <div class="row"> <div class="col-md-12"> <h2>Summary Details</h2> <table id="bootstrapdatatable" class="table table-striped table-bordered" width="100%"> <thead> <tr> <th>scrapper_id</th> <th>scrapper_jobs_log_id</th> <th>external_job_source_id</th> <th>start_time</th> <th>end_time</th> <th>scrapper_status</th> <th>processed_records</th> <th>new_records</th> <th>skipped_records</th> <th>error_records</th> </tr> </thead> <tbody> {% for stud in users %} {% csrf_token %} <tr> <td>{{stud.scrapper_id}}</td> <td>{{stud.scrapper_jobs_log_id}}</td> <td>{{stud.external_job_source_id}}</td> <td>{{stud.start_time}}</td> <td>{{stud.end_time}}</td> <td>{{stud.scrapper_status}}</td> <td>{{stud.processed_records}}</td> <td>{{stud.new_records}}</td> <td>{{stud.skipped_records}}</td> <td>{{stud.error_records}}</td> </tr> {% endfor %} </tbody> </table> {% if users.has_other_pages %} <ul class="pagination"> {% if users.has_previous %} <li><a href="?page={{ users.previous_page_number }}">«</a></li> {% else %} <li class="disabled"><span>«</span></li> {% endif %} {% if user.number|add:'-4' > 1 %} 
<li><a href="?page={{ page_obj.number|add:'-5' }}">&hellip;</a></li> {% endif %} {% for i in users.paginator.page_range %} {% if users.number == i %} <li class="active"><span>{{ i }} <span class="sr-only">(current)</span></span></li> {% elif i > users.number|add:'-5' and i < users.number|add:'5' %} <li><a href="?page={{ i }}">{{ i }}</a></li> {% endif %} {% endfor %} {% if users.has_next %} <li><a href="?page={{ users.next_page_number }}">»</a></li> {% else %} <li class="disabled"><span>»</span></li> {% endif %} </ul> {% endif %} </div> </div> </div> </body> </html> I need to get only the datas which I have filtered et_records_by_date = Scrapper.objects.all().filter(Q(start_time__date=f_date)|Q(end_time__date=t_date)) in pagination. But when I click the next page its showing different datas. Is there any solution to get only the datas for the particular query. When I post the datas the of dates the 1st pages is showing the correct details but when I click page 2 its showing the other datas
[ "When you click on 'next page' you're performing a GET request, and not a POST request. Which means it will go into the else block, which has no filtering but just returns all the Scrapper objects.\nYou're better off including the from_date and to_date in a GET request and not using a POST request.\nIf you're using a form you can simply set the method:\n<form method=\"GET\" action=\"...\" />\n", "Shared method\ndef paginate(request,obj,total=25):\n paginator = Paginator(obj,total) \n try:\n page = int(request.GET.get('page', 1))\n except:\n page = 1\n\n try:\n obj_list = paginator.page(page)\n except(EmptyPage,InvalidPage):\n obj_list = paginator.page(paginator.num_pages)\n return obj_list\n\nView\nusers = UserInfo.objects.all()\ndata = {\n 'users': paginate(request,users,15),\n}\nreturn render(request,self.template_name,data)\n\nHTML\n{% include './pagination.html' with obj=users %}\n\npagination.html file\n {% if obj.paginator.num_pages > 1 %}\n <div class=\"text-center\">\n <ul class=\"pagination\">\n \n <li class=\"{% if not obj.has_previous %} disabled{% endif %}\">\n \n\n {% if obj.has_previous %}\n <a data-page=\"{{obj.previous_page_number}}\" href=\"?page={{obj.previous_page_number}}{% for key, value in request.GET.items %}{% if key != 'page' %}&{{ key }}={{ value }}{% endif %}{% endfor %}\" aria-label=\"{% trans 'Previous' %}\" tabindex=\"-1\"> &laquo; </a>\n {% else %}\n <a data-page=\"0\" class=\"\" href=\"javascript:void(0);\" tabindex=\"-1\">&laquo;</a>\n {% endif %}\n </li>\n \n {% for i in obj.paginator.page_range %}\n {% if obj.number == i %}\n <li class=\"active\">\n <a data-page=\"0\" class=\"\" href=\"javascript:void(0)\">{{ i }}</a>\n </li>\n {% elif i > obj.number|add:'-5' and i < obj.number|add:'5' %}\n <li class=\"\"><a data-page=\"{{i}}\" class=\"\" href=\"?page={{i}}{% for key, value in request.GET.items %}{% if key != 'page' %}&{{ key }}={{ value }}{% endif %}{% endfor %}\">{{ i }}</a>\n </li>\n {% endif %}\n {% endfor %}\n\n <li class=\"{% if not obj.has_next %} disabled{% endif %}\">\n {% if obj.has_next %}\n <a data-page=\"{{ obj.next_page_number }}\" class=\"\" href=\"?page={{obj.next_page_number}}{% for key, value in request.GET.items %}{% if key != 'page' %}&{{ key }}={{ value }}{% endif %}{% endfor %}\" tabindex=\"-1\">&raquo;</a>\n {% else %}\n <a data-page=\"0\" class=\"\" href=\"javascript:void(0);\" tabindex=\"-1\">&raquo;</a>\n {% endif %}\n </li>\n </ul>\n </div>\n{% endif %}\n\n" ]
[ 0, 0 ]
[]
[]
[ "django", "python" ]
stackoverflow_0074597111_django_python.txt
Q: Activate Conda environment inside R script I am new to R coding. I want to run an R script called from a Python script. The Python script will use a Conda environment, env1, while the R script will use a different Conda environment, env2, in Linux. So, I activate env1 before running the python script: conda activate /condaenv/env1/ Then I run the python script python testpy.py (this python script will call the R script testr.R). My python script (testpy.py) is as follow: import subprocess subprocess.call(['Rscript','testr.R','hello']) My Rscript testr.R will be as follows: #!/condaenv/env2/bin/Rscript library(Peaks) library(httr) I want to import all the R library inside the testr.R script from /condaenv/env2. But I don't know how can an R script activate a Conda environment (env2) and will the R script run using the package installed in env2? A: A note regarding the title ("Activate Conda environment inside R script"), just as you activate python env before executing your the python script, R environment should be activated before invoking the R script. Setting up 2 conda enviruonments and using conda run for executing Python script and invoking R from Python: Conda envs # base python env: conda create -n conda_python -c conda-forge python # base r env + httr: conda create -n conda_r -c conda-forge r-base r-httr Test scripts # testpy.py import os import subprocess print('CONDA_PREFIX (py) :', os.environ['CONDA_PREFIX']) # subprocess.call('conda run -n conda_r Rscript testr.R', shell=True) # or subprocess.call(['conda', 'run', '-n', 'conda_r', 'Rscript', 'testr.R'] ) # testr.R message("-- R --") message("CONDA_PREFIX (R) : ", Sys.getenv("CONDA_PREFIX")) message("R .libPaths() : ", .libPaths()) resp <- httr::GET("https://api.stackexchange.com/2.3/info?site=stackoverflow") message('Questions in SO : ', httr::content(resp, as ="parsed")$items[[1]]$total_questions) Executing testpy.py (when conda_python is not currently activated environment) # conda run -n conda_python python testpy.py CONDA_PREFIX (py) : /home/marguslt/miniconda3/envs/conda_python -- R -- CONDA_PREFIX (R) : /home/marguslt/miniconda3/envs/conda_r R .libPaths() : /home/marguslt/miniconda3/envs/conda_r/lib/R/library Questions in SO : 23249251 As an alternative, using conda nested activation : py script replaced with: # testpy_noconda.py import os import subprocess print('CONDA_PREFIX (py) :', os.environ['CONDA_PREFIX']) subprocess.call(['Rscript', 'testr.R'] ) Activating both environments with --stack option and running testpy_noconda.py: (base): conda activate conda_python (conda_python): conda activate --stack conda_r (conda_r): python testpy_noconda.py CONDA_PREFIX (py) : /home/marguslt/miniconda3/envs/conda_r -- R -- CONDA_PREFIX (R) : /home/marguslt/miniconda3/envs/conda_r R .libPaths() : /home/marguslt/miniconda3/envs/conda_r/lib/R/library Questions in SO : 23249325 Environment: (conda_r): env | grep CONDA_ CONDA_EXE=/home/marguslt/miniconda3/bin/conda CONDA_PYTHON_EXE=/home/marguslt/miniconda3/bin/python CONDA_SHLVL=3 CONDA_PREFIX=/home/marguslt/miniconda3/envs/conda_r CONDA_DEFAULT_ENV=conda_r CONDA_PROMPT_MODIFIER=(conda_r) CONDA_PREFIX_1=/home/marguslt/miniconda3 CONDA_PREFIX_2=/home/marguslt/miniconda3/envs/conda_python CONDA_STACKED_3=true
Activate Conda environment inside R script
I am new to R coding. I want to run an R script called from a Python script. The Python script will use a Conda environment, env1, while the R script will use a different Conda environment, env2, in Linux. So, I activate env1 before running the python script: conda activate /condaenv/env1/ Then I run the python script python testpy.py (this python script will call the R script testr.R). My python script (testpy.py) is as follow: import subprocess subprocess.call(['Rscript','testr.R','hello']) My Rscript testr.R will be as follows: #!/condaenv/env2/bin/Rscript library(Peaks) library(httr) I want to import all the R library inside the testr.R script from /condaenv/env2. But I don't know how can an R script activate a Conda environment (env2) and will the R script run using the package installed in env2?
[ "A note regarding the title (\"Activate Conda environment inside R script\"), just as you activate python env before executing your the python script, R environment should be activated before invoking the R script.\nSetting up 2 conda enviruonments and using conda run for executing Python script and invoking R from Python:\nConda envs\n# base python env:\nconda create -n conda_python -c conda-forge python\n# base r env + httr:\nconda create -n conda_r -c conda-forge r-base r-httr\n\nTest scripts\n# testpy.py\n\nimport os\nimport subprocess\nprint('CONDA_PREFIX (py) :', os.environ['CONDA_PREFIX'])\n# subprocess.call('conda run -n conda_r Rscript testr.R', shell=True)\n# or\nsubprocess.call(['conda', 'run', '-n', 'conda_r', 'Rscript', 'testr.R'] )\n\n# testr.R\n\nmessage(\"-- R --\")\nmessage(\"CONDA_PREFIX (R) : \", Sys.getenv(\"CONDA_PREFIX\"))\nmessage(\"R .libPaths() : \", .libPaths())\nresp <- httr::GET(\"https://api.stackexchange.com/2.3/info?site=stackoverflow\")\nmessage('Questions in SO : ', httr::content(resp, as =\"parsed\")$items[[1]]$total_questions)\n\nExecuting testpy.py\n(when conda_python is not currently activated environment)\n# conda run -n conda_python python testpy.py\nCONDA_PREFIX (py) : /home/marguslt/miniconda3/envs/conda_python\n-- R --\nCONDA_PREFIX (R) : /home/marguslt/miniconda3/envs/conda_r\nR .libPaths() : /home/marguslt/miniconda3/envs/conda_r/lib/R/library\nQuestions in SO : 23249251\n\nAs an alternative, using conda nested activation :\npy script replaced with:\n# testpy_noconda.py\n\nimport os\nimport subprocess\nprint('CONDA_PREFIX (py) :', os.environ['CONDA_PREFIX'])\nsubprocess.call(['Rscript', 'testr.R'] )\n\nActivating both environments with --stack option and running testpy_noconda.py:\n(base): conda activate conda_python\n(conda_python): conda activate --stack conda_r\n(conda_r): python testpy_noconda.py\nCONDA_PREFIX (py) : /home/marguslt/miniconda3/envs/conda_r\n-- R --\nCONDA_PREFIX (R) : /home/marguslt/miniconda3/envs/conda_r\nR .libPaths() : /home/marguslt/miniconda3/envs/conda_r/lib/R/library\nQuestions in SO : 23249325\n\nEnvironment:\n(conda_r): env | grep CONDA_\nCONDA_EXE=/home/marguslt/miniconda3/bin/conda\nCONDA_PYTHON_EXE=/home/marguslt/miniconda3/bin/python\nCONDA_SHLVL=3\nCONDA_PREFIX=/home/marguslt/miniconda3/envs/conda_r\nCONDA_DEFAULT_ENV=conda_r\nCONDA_PROMPT_MODIFIER=(conda_r)\nCONDA_PREFIX_1=/home/marguslt/miniconda3\nCONDA_PREFIX_2=/home/marguslt/miniconda3/envs/conda_python\nCONDA_STACKED_3=true\n\n" ]
[ 1 ]
[]
[]
[ "conda", "python", "r" ]
stackoverflow_0074597051_conda_python_r.txt
Q: How can I break out of multiple loops? Given the following code (that doesn't work): while True: # Snip: print out current state while True: ok = get_input("Is this ok? (y/n)") if ok.lower() == "y": break 2 # This doesn't work :( if ok.lower() == "n": break # Do more processing with menus and stuff Is there a way to make this work? Or do I have do one check to break out of the input loop, then another, more limited, check in the outside loop to break out all together if the user is satisfied? A: My first instinct would be to refactor the nested loop into a function and use return to break out. A: Here's another approach that is short. The disadvantage is that you can only break the outer loop, but sometimes it's exactly what you want. for a in xrange(10): for b in xrange(20): if something(a, b): # Break the inner loop... break else: # Continue if the inner loop wasn't broken. continue # Inner loop was broken, break the outer. break This uses the for / else construct explained at: Why does python use 'else' after for and while loops? Key insight: It only seems as if the outer loop always breaks. But if the inner loop doesn't break, the outer loop won't either. The continue statement is the magic here. It's in the for-else clause. By definition that happens if there's no inner break. In that situation continue neatly circumvents the outer break. A: PEP 3136 proposes labeled break/continue. Guido rejected it because "code so complicated to require this feature is very rare". The PEP does mention some workarounds, though (such as the exception technique), while Guido feels refactoring to use return will be simpler in most cases. A: First, ordinary logic is helpful. If, for some reason, the terminating conditions can't be worked out, exceptions are a fall-back plan. class GetOutOfLoop( Exception ): pass try: done= False while not done: isok= False while not (done or isok): ok = get_input("Is this ok? (y/n)") if ok in ("y", "Y") or ok in ("n", "N") : done= True # probably better raise GetOutOfLoop # other stuff except GetOutOfLoop: pass For this specific example, an exception may not be necessary. On other other hand, we often have "Y", "N" and "Q" options in character-mode applications. For the "Q" option, we want an immediate exit. That's more exceptional. A: I tend to agree that refactoring into a function is usually the best approach for this sort of situation, but for when you really need to break out of nested loops, here's an interesting variant of the exception-raising approach that @S.Lott described. It uses Python's with statement to make the exception raising look a bit nicer. Define a new context manager (you only have to do this once) with: from contextlib import contextmanager @contextmanager def nested_break(): class NestedBreakException(Exception): pass try: yield NestedBreakException except NestedBreakException: pass Now you can use this context manager as follows: with nested_break() as mylabel: while True: print "current state" while True: ok = raw_input("Is this ok? (y/n)") if ok == "y" or ok == "Y": raise mylabel if ok == "n" or ok == "N": break print "more processing" Advantages: (1) it's slightly cleaner (no explicit try-except block), and (2) you get a custom-built Exception subclass for each use of nested_break; no need to declare your own Exception subclass each time. A: Introduce a new variable that you'll use as a 'loop breaker'. 
First assign something to it(False,0, etc.), and then, inside the outer loop, before you break from it, change the value to something else(True,1,...). Once the loop exits make the 'parent' loop check for that value. Let me demonstrate: breaker = False #our mighty loop exiter! while True: while True: if conditionMet: #insert code here... breaker = True break if breaker: # the interesting part! break # <--- ! If you have an infinite loop, this is the only way out; for other loops execution is really a lot faster. This also works if you have many nested loops. You can exit all, or just a few. Endless possibilities! Hope this helped! A: First, you may also consider making the process of getting and validating the input a function; within that function, you can just return the value if its correct, and keep spinning in the while loop if not. This essentially obviates the problem you solved, and can usually be applied in the more general case (breaking out of multiple loops). If you absolutely must keep this structure in your code, and really don't want to deal with bookkeeping booleans... You may also use goto in the following way (using an April Fools module from here): #import the stuff from goto import goto, label while True: #snip: print out current state while True: ok = get_input("Is this ok? (y/n)") if ok == "y" or ok == "Y": goto .breakall if ok == "n" or ok == "N": break #do more processing with menus and stuff label .breakall I know, I know, "thou shalt not use goto" and all that, but it works well in strange cases like this. A: To break out of multiple nested loops, without refactoring into a function, make use of a "simulated goto statement" with the built-in StopIteration exception: try: for outer in range(100): for inner in range(100): if break_early(): raise StopIteration except StopIteration: pass See this discussion on the use of goto statements for breaking out of nested loops. A: keeplooping = True while keeplooping: # Do stuff while keeplooping: # Do some other stuff if finisheddoingstuff(): keeplooping = False or something like that. You could set a variable in the inner loop, and check it in the outer loop immediately after the inner loop exits, breaking if appropriate. I kind of like the GOTO method, provided you don't mind using an April Fool's joke module - it’s not Pythonic, but it does make sense. A: This isn't the prettiest way to do it, but in my opinion, it's the best way. def loop(): while True: #snip: print out current state while True: ok = get_input("Is this ok? (y/n)") if ok == "y" or ok == "Y": return if ok == "n" or ok == "N": break #do more processing with menus and stuff I'm pretty sure you could work out something using recursion here as well, but I don't know if that's a good option for you. A: Keep looping if two conditions are true. I think this is a more Pythonic way: dejaVu = True while dejaVu: while True: ok = raw_input("Is this ok? (y/n)") if ok == "y" or ok == "Y" or ok == "n" or ok == "N": dejaVu = False break A: There is no way to do this from a language level. Some languages have a goto others have a break that takes an argument, python does not. The best options are: Set a flag which is checked by the outer loop, or set the outer loops condition. Put the loop in a function and use return to break out of all the loops at once. Reformulate your logic. 
Credit goes to Vivek Nagarajan, Programmer since 1987 Using Function def doMywork(data): for i in data: for e in i: return Using flag is_break = False for i in data: if is_break: break # outer loop break for e in i: is_break = True break # inner loop break A: Factor your loop logic into an iterator that yields the loop variables and returns when done -- here is a simple one that lays out images in rows/columns until we're out of images or out of places to put them: def it(rows, cols, images): i = 0 for r in xrange(rows): for c in xrange(cols): if i >= len(images): return yield r, c, images[i] i += 1 for r, c, image in it(rows=4, cols=4, images=['a.jpg', 'b.jpg', 'c.jpg']): ... do something with r, c, image ... This has the advantage of splitting up the complicated loop logic and the processing... A: There is a hidden trick in the Python while ... else structure which can be used to simulate the double break without much code changes/additions. In essence if the while condition is false, the else block is triggered. Neither exceptions, continue or break trigger the else block. For more information see answers to "Else clause on Python while statement", or Python doc on while (v2.7). while True: #snip: print out current state ok = "" while ok != "y" and ok != "n": ok = get_input("Is this ok? (y/n)") if ok == "n" or ok == "N": break # Breaks out of inner loop, skipping else else: break # Breaks out of outer loop #do more processing with menus and stuff The only downside is that you need to move the double breaking condition into the while condition (or add a flag variable). Variations of this exists also for the for loop, where the else block is triggered after loop completion. A: An easy way to turn multiple loops into a single, breakable loop is to use numpy.ndindex for i in range(n): for j in range(n): val = x[i, j] break # still inside the outer loop! for i, j in np.ndindex(n, n): val = x[i, j] break # you left the only loop there was! You do have to index into your objects, as opposed to being able to iterate through the values explicitly, but at least in simple cases it seems to be approximately 2-20 times simpler than most of the answers suggested. A: In this case, as pointed out by others as well, functional decomposition is the way to go. Code in Python 3: def user_confirms(): while True: answer = input("Is this OK? (y/n) ").strip().lower() if answer in "yn": return answer == "y" def main(): while True: # do stuff if user_confirms(): break A: Another way of reducing your iteration to a single-level loop would be via the use of generators as also specified in the python reference for i, j in ((i, j) for i in A for j in B): print(i , j) if (some_condition): break You could scale it up to any number of levels for the loop The downside is that you can no longer break only a single level. It's all or nothing. Another downside is that it doesn't work with a while loop. I originally wanted to post this answer on Python - `break` out of all loops but unfortunately that's closed as a duplicate of this one A: I'd like to remind you that functions in Python can be created right in the middle of the code and can access the surrounding variables transparently for reading and with nonlocal or global declaration for writing. 
So you can use a function as a "breakable control structure", defining a place you want to return to: def is_prime(number): foo = bar = number def return_here(): nonlocal foo, bar init_bar = bar while foo > 0: bar = init_bar while bar >= foo: if foo*bar == number: return bar -= 1 foo -= 1 return_here() if foo == 1: print(number, 'is prime') else: print(number, '=', bar, '*', foo) >>> is_prime(67) 67 is prime >>> is_prime(117) 117 = 13 * 9 >>> is_prime(16) 16 = 4 * 4 A: By using a function: def myloop(): for i in range(1,6,1): # 1st loop print('i:',i) for j in range(1,11,2): # 2nd loop print(' i, j:' ,i, j) for k in range(1,21,4): # 3rd loop print(' i,j,k:', i,j,k) if i%3==0 and j%3==0 and k%3==0: return # getting out of all loops myloop() Try running the above codes by commenting out the return as well. Without using any function: done = False for i in range(1,6,1): # 1st loop print('i:', i) for j in range(1,11,2): # 2nd loop print(' i, j:' ,i, j) for k in range(1,21,4): # 3rd loop print(' i,j,k:', i,j,k) if i%3==0 and j%3==0 and k%3==0: done = True break # breaking from 3rd loop if done: break # breaking from 2nd loop if done: break # breaking from 1st loop Now, run the above codes as is first and then try running by commenting out each line containing break one at a time from the bottom. A: Try using an infinite generator. from itertools import repeat inputs = (get_input("Is this ok? (y/n)") for _ in repeat(None)) response = (i.lower()=="y" for i in inputs if i.lower() in ("y", "n")) while True: #snip: print out current state if next(response): break #do more processing with menus and stuff A: # this version uses a level counter to choose how far to break out break_levels = 0 while True: # snip: print out current state while True: ok = get_input("Is this ok? (y/n)") if ok == "y" or ok == "Y": break_levels = 1 # how far nested, excluding this break break if ok == "n" or ok == "N": break # normal break if break_levels: break_levels -= 1 break # pop another level if break_levels: break_levels -= 1 break # ...and so on A: # this version breaks up to a certain label break_label = None while True: # snip: print out current state while True: ok = get_input("Is this ok? (y/n)") if ok == "y" or ok == "Y": break_label = "outer" # specify label to break to break if ok == "n" or ok == "N": break if break_label: if break_label != "inner": break # propagate up break_label = None # we have arrived! if break_label: if break_label != "outer": break # propagate up break_label = None # we have arrived! #do more processing with menus and stuff A: Solutions in two ways With an example: Are these two matrices equal/same? matrix1 and matrix2 are the same size, n, two-dimensional matrices. First solution, without a function same_matrices = True inner_loop_broken_once = False n = len(matrix1) for i in range(n): for j in range(n): if matrix1[i][j] != matrix2[i][j]: same_matrices = False inner_loop_broken_once = True break if inner_loop_broken_once: break Second solution, with a function This is the final solution for my case. def are_two_matrices_the_same (matrix1, matrix2): n = len(matrix1) for i in range(n): for j in range(n): if matrix1[i][j] != matrix2[i][j]: return False return True A: Here's an implementation that seems to work: break_ = False for i in range(10): if break_: break for j in range(10): if j == 3: break_ = True break else: print(i, j) The only draw back is that you have to define break_ before the loops. 
A: What I would personally do is use a boolean that toggles when I am ready to break out the outer loop. For example while True: #snip: print out current state quit = False while True: ok = input("Is this ok? (y/n)") if ok.lower() == "y": quit = True break # this should work now :-) if ok.lower() == "n": quit = True break # This should work too :-) if quit: break #do more processing with menus and stuff A: My reason for coming here is that i had an outer loop and an inner loop like so: for x in array: for y in dont_use_these_values: if x.value==y: array.remove(x) # fixed, was array.pop(x) in my original answer continue do some other stuff with x As you can see, it won't actually go to the next x, but will go to the next y instead. what i found to solve this simply was to run through the array twice instead: for x in array: for y in dont_use_these_values: if x.value==y: array.remove(x) # fixed, was array.pop(x) in my original answer continue for x in array: do some other stuff with x I know this was a specific case of OP's question, but I am posting it in the hope that it will help someone think about their problem differently while keeping things simple. A: probably little trick like below will do if not prefer to refactorial into function added 1 break_level variable to control the while loop condition break_level = 0 # while break_level < 3: # if we have another level of nested loop here while break_level < 2: #snip: print out current state while break_level < 1: ok = get_input("Is this ok? (y/n)") if ok == "y" or ok == "Y": break_level = 2 # break 2 level if ok == "n" or ok == "N": break_level = 1 # break 1 level A: You can define a variable( for example break_statement ), then change it to a different value when two-break condition occurs and use it in if statement to break from second loop also. while True: break_statement=0 while True: ok = raw_input("Is this ok? (y/n)") if ok == "n" or ok == "N": break if ok == "y" or ok == "Y": break_statement=1 break if break_statement==1: break A: Trying to minimal changes to the OP's question, I just added a flag before breaking the 1st for loop and check that flag on the outer loop to see if we need to brake once again. break_2 = False while True: # Snip: print out current state if break_2: break while True: ok = get_input("Is this ok? (y/n)") if ok.lower() == "y": break_2 = True if break_2: break if ok.lower() == "n": break # Do more processing with menus and stuff A: I came across this recently and, wanting to avoid a duplicate return statement, which can conceal logical errors, looked at @yak's idea. This works well within nested for loops but is not very elegant. An alternative is to check for the condition before the next loop: b = None for a in range(10): if something(a, b): # should never = True if b is None break for b in range(20): pass This might not work everywhere but is adaptable and, if required, has the advantage of allowing the condition to be duplicated rather than a potential result. A: while True: # Snip: print out current state while True: ok = get_input("Is this ok? (y/n)") if ok.lower() == "y": break_2 = True if ok.lower() == "n": break if break_2: break A: If you just need to test an edge case inside a complex nest of for loops, you can throw in a 1/0 to raise an exception. I promise I won't tell anyone. This comes in handy when you quickly want to test a single iteration of a deeply nested for loop, and you don't want to track down a large amount of break statements or comment out a significant amount of code. 
Yes, you could wrap it in a function and use return, but in some contexts that can be infeasibly cumbersome. example for entry level programmers: for i in first_iter: for j in second_iter: for k in third_iter: print(i_want_to_run_this_once_and_stop_executing(i,j,k)) 1/0 code_that_takes_a_long_time() expensive_code() in big jupyter notebook scripts that do some heavy data pre-processing, this is especially handy.
How can I break out of multiple loops?
Given the following code (that doesn't work): while True: # Snip: print out current state while True: ok = get_input("Is this ok? (y/n)") if ok.lower() == "y": break 2 # This doesn't work :( if ok.lower() == "n": break # Do more processing with menus and stuff Is there a way to make this work? Or do I have to do one check to break out of the input loop, then another, more limited, check in the outside loop to break out all together if the user is satisfied?
[ "My first instinct would be to refactor the nested loop into a function and use return to break out. \n", "Here's another approach that is short. The disadvantage is that you can only break the outer loop, but sometimes it's exactly what you want.\nfor a in xrange(10):\n for b in xrange(20):\n if something(a, b):\n # Break the inner loop...\n break\n else:\n # Continue if the inner loop wasn't broken.\n continue\n # Inner loop was broken, break the outer.\n break\n\nThis uses the for / else construct explained at: Why does python use 'else' after for and while loops?\nKey insight: It only seems as if the outer loop always breaks. But if the inner loop doesn't break, the outer loop won't either. \nThe continue statement is the magic here. It's in the for-else clause. By definition that happens if there's no inner break. In that situation continue neatly circumvents the outer break.\n", "PEP 3136 proposes labeled break/continue. Guido rejected it because \"code so complicated to require this feature is very rare\". The PEP does mention some workarounds, though (such as the exception technique), while Guido feels refactoring to use return will be simpler in most cases.\n", "First, ordinary logic is helpful. \nIf, for some reason, the terminating conditions can't be worked out, exceptions are a fall-back plan. \nclass GetOutOfLoop( Exception ):\n pass\n\ntry:\n done= False\n while not done:\n isok= False\n while not (done or isok):\n ok = get_input(\"Is this ok? (y/n)\")\n if ok in (\"y\", \"Y\") or ok in (\"n\", \"N\") : \n done= True # probably better\n raise GetOutOfLoop\n # other stuff\nexcept GetOutOfLoop:\n pass\n\nFor this specific example, an exception may not be necessary. \nOn other other hand, we often have \"Y\", \"N\" and \"Q\" options in character-mode applications. For the \"Q\" option, we want an immediate exit. That's more exceptional.\n", "I tend to agree that refactoring into a function is usually the best approach for this sort of situation, but for when you really need to break out of nested loops, here's an interesting variant of the exception-raising approach that @S.Lott described. It uses Python's with statement to make the exception raising look a bit nicer. Define a new context manager (you only have to do this once) with:\nfrom contextlib import contextmanager\n@contextmanager\ndef nested_break():\n class NestedBreakException(Exception):\n pass\n try:\n yield NestedBreakException\n except NestedBreakException:\n pass\n\nNow you can use this context manager as follows:\nwith nested_break() as mylabel:\n while True:\n print \"current state\"\n while True:\n ok = raw_input(\"Is this ok? (y/n)\")\n if ok == \"y\" or ok == \"Y\": raise mylabel\n if ok == \"n\" or ok == \"N\": break\n print \"more processing\"\n\nAdvantages: (1) it's slightly cleaner (no explicit try-except block), and (2) you get a custom-built Exception subclass for each use of nested_break; no need to declare your own Exception subclass each time.\n", "Introduce a new variable that you'll use as a 'loop breaker'. First assign something to it(False,0, etc.), and then, inside the outer loop, before you break from it, change the value to something else(True,1,...). Once the loop exits make the 'parent' loop check for that value. 
Let me demonstrate:\nbreaker = False #our mighty loop exiter!\nwhile True:\n while True:\n if conditionMet:\n #insert code here...\n breaker = True \n break\n if breaker: # the interesting part!\n break # <--- !\n\nIf you have an infinite loop, this is the only way out; for other loops execution is really a lot faster. This also works if you have many nested loops. You can exit all, or just a few. Endless possibilities! Hope this helped!\n", "First, you may also consider making the process of getting and validating the input a function; within that function, you can just return the value if its correct, and keep spinning in the while loop if not. This essentially obviates the problem you solved, and can usually be applied in the more general case (breaking out of multiple loops). If you absolutely must keep this structure in your code, and really don't want to deal with bookkeeping booleans...\nYou may also use goto in the following way (using an April Fools module from here):\n#import the stuff\nfrom goto import goto, label\n\nwhile True:\n #snip: print out current state\n while True:\n ok = get_input(\"Is this ok? (y/n)\")\n if ok == \"y\" or ok == \"Y\": goto .breakall\n if ok == \"n\" or ok == \"N\": break\n #do more processing with menus and stuff\nlabel .breakall\n\nI know, I know, \"thou shalt not use goto\" and all that, but it works well in strange cases like this.\n", "To break out of multiple nested loops, without refactoring into a function, make use of a \"simulated goto statement\" with the built-in StopIteration exception:\ntry:\n for outer in range(100):\n for inner in range(100):\n if break_early():\n raise StopIteration\n\nexcept StopIteration: pass\n\nSee this discussion on the use of goto statements for breaking out of nested loops.\n", "keeplooping = True\nwhile keeplooping:\n # Do stuff\n while keeplooping:\n # Do some other stuff\n if finisheddoingstuff():\n keeplooping = False\n\nor something like that.\nYou could set a variable in the inner loop, and check it in the outer loop immediately after the inner loop exits, breaking if appropriate. I kind of like the GOTO method, provided you don't mind using an April Fool's joke module - it’s not Pythonic, but it does make sense.\n", "This isn't the prettiest way to do it, but in my opinion, it's the best way.\ndef loop():\n while True:\n #snip: print out current state\n while True:\n ok = get_input(\"Is this ok? (y/n)\")\n if ok == \"y\" or ok == \"Y\": return\n if ok == \"n\" or ok == \"N\": break\n #do more processing with menus and stuff\n\nI'm pretty sure you could work out something using recursion here as well, but I don't know if that's a good option for you.\n", "Keep looping if two conditions are true.\nI think this is a more Pythonic way:\ndejaVu = True\n\nwhile dejaVu:\n while True:\n ok = raw_input(\"Is this ok? (y/n)\")\n if ok == \"y\" or ok == \"Y\" or ok == \"n\" or ok == \"N\":\n dejaVu = False\n break\n\n", "\nThere is no way to do this from a language level. 
Some languages have\n a goto others have a break that takes an argument, python does not.\nThe best options are:\n\nSet a flag which is checked by the outer loop, or set the outer\n loops condition.\nPut the loop in a function and use return to break out of all the loops at once.\nReformulate your logic.\n\n\nCredit goes to Vivek Nagarajan, Programmer since 1987\n\nUsing Function \ndef doMywork(data):\n for i in data:\n for e in i:\n return \n\nUsing flag\nis_break = False\nfor i in data:\n if is_break:\n break # outer loop break\n for e in i:\n is_break = True\n break # inner loop break\n\n", "Factor your loop logic into an iterator that yields the loop variables and returns when done -- here is a simple one that lays out images in rows/columns until we're out of images or out of places to put them:\ndef it(rows, cols, images):\n i = 0\n for r in xrange(rows):\n for c in xrange(cols):\n if i >= len(images):\n return\n yield r, c, images[i]\n i += 1 \n\nfor r, c, image in it(rows=4, cols=4, images=['a.jpg', 'b.jpg', 'c.jpg']):\n ... do something with r, c, image ...\n\nThis has the advantage of splitting up the complicated loop logic and the processing...\n", "There is a hidden trick in the Python while ... else structure which can be used to simulate the double break without much code changes/additions. In essence if the while condition is false, the else block is triggered. Neither exceptions, continue or break trigger the else block. For more information see answers to \"Else clause on Python while statement\", or Python doc on while (v2.7).\nwhile True:\n #snip: print out current state\n ok = \"\"\n while ok != \"y\" and ok != \"n\":\n ok = get_input(\"Is this ok? (y/n)\")\n if ok == \"n\" or ok == \"N\":\n break # Breaks out of inner loop, skipping else\n\n else:\n break # Breaks out of outer loop\n\n #do more processing with menus and stuff\n\nThe only downside is that you need to move the double breaking condition into the while condition (or add a flag variable). Variations of this exists also for the for loop, where the else block is triggered after loop completion.\n", "An easy way to turn multiple loops into a single, breakable loop is to use numpy.ndindex\nfor i in range(n):\n for j in range(n):\n val = x[i, j]\n break # still inside the outer loop!\n\nfor i, j in np.ndindex(n, n):\n val = x[i, j]\n break # you left the only loop there was!\n\nYou do have to index into your objects, as opposed to being able to iterate through the values explicitly, but at least in simple cases it seems to be approximately 2-20 times simpler than most of the answers suggested.\n", "In this case, as pointed out by others as well, functional decomposition is the way to go. Code in Python 3:\ndef user_confirms():\n while True:\n answer = input(\"Is this OK? (y/n) \").strip().lower()\n if answer in \"yn\":\n return answer == \"y\"\n\ndef main():\n while True:\n # do stuff\n if user_confirms():\n break\n\n", "Another way of reducing your iteration to a single-level loop would be via the use of generators as also specified in the python reference\nfor i, j in ((i, j) for i in A for j in B):\n print(i , j)\n if (some_condition):\n break\n\nYou could scale it up to any number of levels for the loop\nThe downside is that you can no longer break only a single level. It's all or nothing.\nAnother downside is that it doesn't work with a while loop. 
I originally wanted to post this answer on Python - `break` out of all loops but unfortunately that's closed as a duplicate of this one \n", "I'd like to remind you that functions in Python can be created right in the middle of the code and can access the surrounding variables transparently for reading and with nonlocal or global declaration for writing.\nSo you can use a function as a \"breakable control structure\", defining a place you want to return to:\ndef is_prime(number):\n\n foo = bar = number\n\n def return_here():\n nonlocal foo, bar\n init_bar = bar\n while foo > 0:\n bar = init_bar\n while bar >= foo:\n if foo*bar == number:\n return\n bar -= 1\n foo -= 1\n\n return_here()\n\n if foo == 1:\n print(number, 'is prime')\n else:\n print(number, '=', bar, '*', foo)\n\n\n>>> is_prime(67)\n67 is prime\n>>> is_prime(117)\n117 = 13 * 9\n>>> is_prime(16)\n16 = 4 * 4\n\n", "By using a function:\ndef myloop():\n for i in range(1,6,1): # 1st loop\n print('i:',i)\n for j in range(1,11,2): # 2nd loop\n print(' i, j:' ,i, j)\n for k in range(1,21,4): # 3rd loop\n print(' i,j,k:', i,j,k)\n if i%3==0 and j%3==0 and k%3==0:\n return # getting out of all loops\n\nmyloop()\n\nTry running the above codes by commenting out the return as well.\nWithout using any function:\ndone = False\nfor i in range(1,6,1): # 1st loop\n print('i:', i)\n for j in range(1,11,2): # 2nd loop\n print(' i, j:' ,i, j)\n for k in range(1,21,4): # 3rd loop\n print(' i,j,k:', i,j,k)\n if i%3==0 and j%3==0 and k%3==0:\n done = True\n break # breaking from 3rd loop\n if done: break # breaking from 2nd loop\n if done: break # breaking from 1st loop\n\nNow, run the above codes as is first and then try running by commenting out each line containing break one at a time from the bottom.\n", "Try using an infinite generator.\nfrom itertools import repeat\ninputs = (get_input(\"Is this ok? (y/n)\") for _ in repeat(None))\nresponse = (i.lower()==\"y\" for i in inputs if i.lower() in (\"y\", \"n\"))\n\nwhile True:\n #snip: print out current state\n if next(response):\n break\n #do more processing with menus and stuff\n\n", "# this version uses a level counter to choose how far to break out\n\nbreak_levels = 0\nwhile True:\n # snip: print out current state\n while True:\n ok = get_input(\"Is this ok? (y/n)\")\n if ok == \"y\" or ok == \"Y\":\n break_levels = 1 # how far nested, excluding this break\n break\n if ok == \"n\" or ok == \"N\":\n break # normal break\n if break_levels:\n break_levels -= 1\n break # pop another level\nif break_levels:\n break_levels -= 1\n break\n\n# ...and so on\n\n", "# this version breaks up to a certain label\n\nbreak_label = None\nwhile True:\n # snip: print out current state\n while True:\n ok = get_input(\"Is this ok? (y/n)\")\n if ok == \"y\" or ok == \"Y\":\n break_label = \"outer\" # specify label to break to\n break\n if ok == \"n\" or ok == \"N\":\n break\n if break_label:\n if break_label != \"inner\":\n break # propagate up\n break_label = None # we have arrived!\nif break_label:\n if break_label != \"outer\":\n break # propagate up\n break_label = None # we have arrived!\n\n#do more processing with menus and stuff\n\n", "Solutions in two ways\nWith an example: Are these two matrices equal/same? 
\nmatrix1 and matrix2 are the same size, n, two-dimensional matrices.\nFirst solution, without a function\nsame_matrices = True\ninner_loop_broken_once = False\nn = len(matrix1)\n\nfor i in range(n):\n for j in range(n):\n\n if matrix1[i][j] != matrix2[i][j]:\n same_matrices = False\n inner_loop_broken_once = True\n break\n\n if inner_loop_broken_once:\n break\n\n\nSecond solution, with a function\nThis is the final solution for my case.\ndef are_two_matrices_the_same (matrix1, matrix2):\n n = len(matrix1)\n for i in range(n):\n for j in range(n):\n if matrix1[i][j] != matrix2[i][j]:\n return False\n return True\n\n", "Here's an implementation that seems to work:\nbreak_ = False\nfor i in range(10):\n if break_:\n break\n for j in range(10):\n if j == 3:\n break_ = True\n break\n else:\n print(i, j)\n\nThe only draw back is that you have to define break_ before the loops.\n", "What I would personally do is use a boolean that toggles when I am ready to break out the outer loop. For example\nwhile True:\n #snip: print out current state\n quit = False\n while True:\n ok = input(\"Is this ok? (y/n)\")\n if ok.lower() == \"y\":\n quit = True\n break # this should work now :-)\n if ok.lower() == \"n\":\n quit = True\n break # This should work too :-)\n if quit:\n break\n #do more processing with menus and stuff\n\n", "My reason for coming here is that i had an outer loop and an inner loop like so:\nfor x in array:\n for y in dont_use_these_values:\n if x.value==y:\n array.remove(x) # fixed, was array.pop(x) in my original answer\n continue\n\n do some other stuff with x\n\nAs you can see, it won't actually go to the next x, but will go to the next y instead.\nwhat i found to solve this simply was to run through the array twice instead:\nfor x in array:\n for y in dont_use_these_values:\n if x.value==y:\n array.remove(x) # fixed, was array.pop(x) in my original answer\n continue\n\nfor x in array:\n do some other stuff with x\n\nI know this was a specific case of OP's question, but I am posting it in the hope that it will help someone think about their problem differently while keeping things simple.\n", "probably little trick like below will do if not prefer to refactorial into function\nadded 1 break_level variable to control the while loop condition\nbreak_level = 0\n# while break_level < 3: # if we have another level of nested loop here\nwhile break_level < 2:\n #snip: print out current state\n while break_level < 1:\n ok = get_input(\"Is this ok? (y/n)\")\n if ok == \"y\" or ok == \"Y\": break_level = 2 # break 2 level\n if ok == \"n\" or ok == \"N\": break_level = 1 # break 1 level\n\n", "You can define a variable( for example break_statement ), then change it to a different value when two-break condition occurs and use it in if statement to break from second loop also. \nwhile True:\n break_statement=0\n while True:\n ok = raw_input(\"Is this ok? (y/n)\")\n if ok == \"n\" or ok == \"N\": \n break\n if ok == \"y\" or ok == \"Y\": \n break_statement=1\n break\n if break_statement==1:\n break\n\n", "Trying to minimal changes to the OP's question, I just added a flag before breaking the 1st for loop and check that flag on the outer loop to see if we need to brake once again.\nbreak_2 = False\nwhile True:\n # Snip: print out current state\n if break_2: break\n while True:\n ok = get_input(\"Is this ok? 
(y/n)\")\n if ok.lower() == \"y\": break_2 = True\n if break_2: break\n if ok.lower() == \"n\": break\n # Do more processing with menus and stuff\n\n", "I came across this recently and, wanting to avoid a duplicate return statement, which can conceal logical errors, looked at @yak's idea. This works well within nested for loops but is not very elegant. An alternative is to check for the condition before the next loop:\nb = None\nfor a in range(10):\n if something(a, b): # should never = True if b is None\n break\n for b in range(20):\n pass\n\nThis might not work everywhere but is adaptable and, if required, has the advantage of allowing the condition to be duplicated rather than a potential result.\n", "while True:\n # Snip: print out current state\n while True:\n ok = get_input(\"Is this ok? (y/n)\")\n if ok.lower() == \"y\": \n break_2 = True\n if ok.lower() == \"n\": \n break\n if break_2:\n break\n\n", "If you just need to test an edge case inside a complex nest of for loops, you can throw in a 1/0 to raise an exception. I promise I won't tell anyone. This comes in handy when you quickly want to test a single iteration of a deeply nested for loop, and you don't want to track down a large amount of break statements or comment out a significant amount of code.\nYes, you could wrap it in a function and use return, but in some contexts that can be infeasibly cumbersome.\nexample for entry level programmers:\nfor i in first_iter:\n for j in second_iter:\n for k in third_iter:\n print(i_want_to_run_this_once_and_stop_executing(i,j,k))\n 1/0\n code_that_takes_a_long_time()\n expensive_code()\n\nin big jupyter notebook scripts that do some heavy data pre-processing, this is especially handy.\n" ]
[ 698, 450, 178, 149, 63, 63, 46, 42, 21, 13, 12, 9, 8, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0 ]
[ "Hopefully this helps:\nx = True\ny = True\nwhile x == True:\n while y == True:\n ok = get_input(\"Is this ok? (y/n)\") \n if ok == \"y\" or ok == \"Y\":\n x,y = False,False #breaks from both loops\n if ok == \"n\" or ok == \"N\": \n break #breaks from just one\n\n", "break for outer and inner while loops:\nwhile True:\n while True:\n print('Breaks inner \"while\" loop')\n break # Here\n print('Breaks outer \"while\" loop')\n break # Here\n\nOr, break for outer and inner while loops with if statement:\nwhile True:\n while True:\n if True:\n print('Breaks inner \"while\" loop')\n break # Here\n print('Breaks outer \"while\" loop')\n break # Here\n\nOutput:\nBreaks inner \"while\" loop\nBreaks outer \"while\" loop\n\nbreak for outer and inner for loops:\nfor _ in iter(int, 1):\n for _ in iter(int, 1):\n print('Breaks inner \"for\" loop')\n break # Here\n print('Breaks outer \"for\" loop')\n break # Here\n\nOr, break for outer and inner for loops with if statement:\nfor _ in iter(int, 1):\n for _ in iter(int, 1):\n if True:\n print('Breaks inner \"for\" loop')\n break # Here\n print('Breaks outer \"for\" loop')\n break # Here\n\nOutput:\nBreaks inner \"for\" loop\nBreaks outer \"for\" loop\n\n", "Similar like the one before, but more compact.\n(Booleans are just numbers)\nbreaker = False #our mighty loop exiter!\nwhile True:\n while True:\n ok = get_input(\"Is this ok? (y/n)\")\n breaker+= (ok.lower() == \"y\")\n break\n\n if breaker: # the interesting part!\n break # <--- !\n\n", "Since this question has become a standard question for breaking into a particular loop, I would like to give my answer with example using Exception. \nAlthough there exists no label named breaking of loop in multipally looped construct, we can make use of User-defined Exceptions to break into a particular loop of our choice. Consider the following example where let us print all numbers upto 4 digits in base-6 numbering system: \nclass BreakLoop(Exception):\n def __init__(self, counter):\n Exception.__init__(self, 'Exception 1')\n self.counter = counter\n\nfor counter1 in range(6): # Make it 1000\n try:\n thousand = counter1 * 1000\n for counter2 in range(6): # Make it 100\n try:\n hundred = counter2 * 100\n for counter3 in range(6): # Make it 10\n try:\n ten = counter3 * 10\n for counter4 in range(6):\n try:\n unit = counter4\n value = thousand + hundred + ten + unit\n if unit == 4 :\n raise BreakLoop(4) # Don't break from loop\n if ten == 30: \n raise BreakLoop(3) # Break into loop 3\n if hundred == 500:\n raise BreakLoop(2) # Break into loop 2\n if thousand == 2000:\n raise BreakLoop(1) # Break into loop 1\n\n print('{:04d}'.format(value))\n except BreakLoop as bl:\n if bl.counter != 4:\n raise bl\n except BreakLoop as bl:\n if bl.counter != 3:\n raise bl\n except BreakLoop as bl:\n if bl.counter != 2:\n raise bl\n except BreakLoop as bl:\n pass\n\nWhen we print the output, we will never get any value whose unit place is with 4. In that case, we don't break from any loop as BreakLoop(4) is raised and caught in same loop. Similarly, whenever ten place is having 3, we break into third loop using BreakLoop(3). Whenever hundred place is having 5, we break into second loop using BreakLoop(2) and whenver the thousand place is having 2, we break into first loop using BreakLoop(1).\nIn short, raise your Exception (in-built or user defined) in the inner loops, and catch it in the loop from where you want to resume your control to. If you want to break from all loops, catch the Exception outside all the loops. 
(I have not shown this case in example).\n", "The way I solve this is by defining a variable that is referenced to determine if you break to the next level or not. In this example, this variable is called 'shouldbreak'.\nVariable_That_Counts_To_Three=1\nwhile 1==1:\n shouldbreak='no'\n Variable_That_Counts_To_Five=0\n while 2==2:\n Variable_That_Counts_To_Five+=1\n print(Variable_That_Counts_To_Five)\n if Variable_That_Counts_To_Five == 5:\n if Variable_That_Counts_To_Three == 3:\n shouldbreak='yes'\n break\n print('Three Counter = ' + str(Variable_That_Counts_To_Three))\n Variable_That_Counts_To_Three+=1\n if shouldbreak == 'yes':\n break\n\nprint('''\nThis breaks out of two loops!''')\n\nThis gives a lot of control over how exactly you want the program to break, allowing you to choose when you want to break and how many levels to go down.\n" ]
[ -1, -1, -3, -3, -4 ]
[ "break", "control_flow", "nested_loops", "python" ]
stackoverflow_0000189645_break_control_flow_nested_loops_python.txt
Q: Pyspark - Convert to Timestamp Spark version : 2.1 I'm trying to convert a string datetime column to utc timestamp with the format yyyy-mm-ddThh:mm:ss I first start by changing the format of the string column to yyyy-mm-ddThh:mm:ss and then convert it to timestamp type. Later I would convert the timestamp to UTC using to_utc_timestamp function. df.select( f.to_timestamp( f.date_format(f.col("time"), "yyyy-MM-dd'T'HH:mm:ss"), "yyyy-MM-dd'T'HH:mm:ss" ) ).show(5, False) The date_format works fine by giving me the correct format. But, when I do to_timestamp on top of that result, the format changes to yyyy-MM-dd HH:mm:ss, when it should instead be yyyy-MM-dd'T'HH:mm:ss. Why does this happen? Could someone tell me how I could retain the format given by date_format? What should I do? A: The function to_timestamp returns a string to a timestamp, with the format yyyy-MM-dd HH:mm:ss. The second argument is used to define the format of the DateTime in the string you are trying to parse. You can see a couple of examples in the official documentation. A: The code should be like this, just look at the single 'd' part here, and this is tricky in many cases. data= data.withColumn('date', to_timestamp(col('date'), 'yyyy/MM/d'))
Pyspark - Convert to Timestamp
Spark version : 2.1 I'm trying to convert a string datetime column to utc timestamp with the format yyyy-mm-ddThh:mm:ss I first start by changing the format of the string column to yyyy-mm-ddThh:mm:ss and then convert it to timestamp type. Later I would convert the timestamp to UTC using to_utc_timestamp function. df.select( f.to_timestamp( f.date_format(f.col("time"), "yyyy-MM-dd'T'HH:mm:ss"), "yyyy-MM-dd'T'HH:mm:ss" ) ).show(5, False) The date_format works fine by giving me the correct format. But, when I do to_timestamp on top of that result, the format changes to yyyy-MM-dd HH:mm:ss, when it should instead be yyyy-MM-dd'T'HH:mm:ss. Why does this happen? Could someone tell me how I could retain the format given by date_format? What should I do?
[ "The function to_timestamp returns a string to a timestamp, with the format yyyy-MM-dd HH:mm:ss.\nThe second argument is used to define the format of the DateTime in the string you are trying to parse.\nYou can see a couple of examples in the official documentation.\n", "\nThe code should be like this, just look at the single 'd' part here, and this is tricky in many cases.\ndata= data.withColumn('date', to_timestamp(col('date'), 'yyyy/MM/d'))\n\n\n" ]
[ 0, 0 ]
[]
[]
[ "apache_spark", "apache_spark_sql", "date", "pyspark", "python" ]
stackoverflow_0069894719_apache_spark_apache_spark_sql_date_pyspark_python.txt
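Expanding on the PySpark answer above: a TimestampType column stores no display format at all, so the 'T' can only come back when the value is formatted into a string again. A hedged sketch of the full round trip the question describes (parse, shift to UTC, format for output); the source time zone "Europe/Madrid" and the column names are assumptions for illustration.

from pyspark.sql import functions as f

df2 = (
    df
    # Parse the string into a real timestamp; no format is attached to it.
    .withColumn("ts", f.to_timestamp(f.col("time"), "yyyy-MM-dd'T'HH:mm:ss"))
    # Convert from the (assumed) source time zone to UTC.
    .withColumn("ts_utc", f.to_utc_timestamp(f.col("ts"), "Europe/Madrid"))
    # Re-apply the desired 'T' format only when a string is needed for display/export.
    .withColumn("ts_utc_str", f.date_format(f.col("ts_utc"), "yyyy-MM-dd'T'HH:mm:ss"))
)
df2.show(5, False)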
Q: Error when trying a function that returns the total quantities in the storage unit The goal is to write a function that returns the total quantity of all storage units put together (Madrid, Barcelona and Seville), I do think its better to use a recursion for this problem however i cant seem to work it out! I have this dictionary: Storage = { "Madrid": [ {"name": "pencil", "quantity": 5}, {"name": "cam", "quantity": 11}, {"name": "powder", "quantity": 51} ], "Barcelona": { "Branch 1": [ {"name": "pencil", "quantity": 11}, {"name": "cam", "quantity": 25} ], "Branch 2": [ {"name": "pencil", "quantity": 17}, {"name": "cam", "quantity": 9} ] }, "Seville": { "Branch 1": { "Sub Branch 1": { "Sub sub Branch 1": [ {"name": "powder", "quantity": 11} ] } }, "Branch 2": [ {"name": "pencil", "quantity": 4} ] } } I searched and wrote a lot of codes and this is the one that made the most sense def recursive_sum(n): current_sum = 0 for key in n: if not isinstance(n[key], dict): if not isinstance(n[key], str): current_sum = current_sum + n[key] else: current_sum = current_sum + recursive_sum(n[key]) return current_sum print(recursive_sum(Storage)) but it returns this: Traceback (most recent call last): File "/Users/user/Desktop/pythonProject/main.py", line 85, in <module> print(recursive_sum(Storage)) File "/Users/user/Desktop/pythonProject/main.py", line 79, in recursive_sum current_sum = current_sum + n[key] TypeError: unsupported operand type(s) for +: 'int' and 'list' i searched a lot but i cant seem to understand how am i going to take the values of the list inside the dictionary, am i thinking wrong? Thank you in advance! A: I see two thing going wrong here: You never check for the type "list", which you would need to iterate over Once you are iterating over the list, you will get dictionaries again, of which you need to extract the "quantities" before you try to sum them. I would approach it differently: create an empty output dictionary. Then dig into the source (iterating roughly as you do), and check whether the "quantity" key exists inside the current level. Then check if the corresponding key exists, and add the quantity to the result dictionary. A: Your functions fails when it gets here if not isinstance(n[key], str): current_sum = current_sum + n[key] and n[key] is : [{'name': 'pencil', 'quantity': 5}, {'name': 'cam', 'quantity': 11}, {'name': 'powder', 'quantity': 51}] This can be fixed with a simple loop def recursive_sum(n): current_sum = 0 for key in n: if not isinstance(n[key], dict): if not isinstance(n[key], str): for i in n[key]: current_sum = current_sum + i["quantity"] else: current_sum = current_sum + recursive_sum(n[key]) return current_sum
Error when trying a function that returns the total quantities in the storage unit
The goal is to write a function that returns the total quantity of all storage units put together (Madrid, Barcelona and Seville), I do think its better to use a recursion for this problem however i cant seem to work it out! I have this dictionary: Storage = { "Madrid": [ {"name": "pencil", "quantity": 5}, {"name": "cam", "quantity": 11}, {"name": "powder", "quantity": 51} ], "Barcelona": { "Branch 1": [ {"name": "pencil", "quantity": 11}, {"name": "cam", "quantity": 25} ], "Branch 2": [ {"name": "pencil", "quantity": 17}, {"name": "cam", "quantity": 9} ] }, "Seville": { "Branch 1": { "Sub Branch 1": { "Sub sub Branch 1": [ {"name": "powder", "quantity": 11} ] } }, "Branch 2": [ {"name": "pencil", "quantity": 4} ] } } I searched and wrote a lot of codes and this is the one that made the most sense def recursive_sum(n): current_sum = 0 for key in n: if not isinstance(n[key], dict): if not isinstance(n[key], str): current_sum = current_sum + n[key] else: current_sum = current_sum + recursive_sum(n[key]) return current_sum print(recursive_sum(Storage)) but it returns this: Traceback (most recent call last): File "/Users/user/Desktop/pythonProject/main.py", line 85, in <module> print(recursive_sum(Storage)) File "/Users/user/Desktop/pythonProject/main.py", line 79, in recursive_sum current_sum = current_sum + n[key] TypeError: unsupported operand type(s) for +: 'int' and 'list' i searched a lot but i cant seem to understand how am i going to take the values of the list inside the dictionary, am i thinking wrong? Thank you in advance!
[ "I see two thing going wrong here:\n\nYou never check for the type \"list\", which you would need to iterate over\nOnce you are iterating over the list, you will get dictionaries again, of which you need to extract the \"quantities\" before you try to sum them.\n\nI would approach it differently: create an empty output dictionary. Then dig into the source (iterating roughly as you do), and check whether the \"quantity\" key exists inside the current level. Then check if the corresponding key exists, and add the quantity to the result dictionary.\n", "Your functions fails when it gets here\nif not isinstance(n[key], str):\n current_sum = current_sum + n[key]\n\nand n[key] is :\n[{'name': 'pencil', 'quantity': 5}, {'name': 'cam', 'quantity': 11}, {'name': 'powder', 'quantity': 51}]\n\nThis can be fixed with a simple loop\ndef recursive_sum(n):\ncurrent_sum = 0\nfor key in n:\n if not isinstance(n[key], dict):\n if not isinstance(n[key], str):\n for i in n[key]:\n current_sum = current_sum + i[\"quantity\"]\n \n else:\n current_sum = current_sum + recursive_sum(n[key])\nreturn current_sum\n\n" ]
[ 1, 0 ]
[]
[]
[ "dictionary", "list", "nested_lists", "python" ]
stackoverflow_0074600439_dictionary_list_nested_lists_python.txt
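Pulling the two answers above together, a sketch of a fully recursive total that handles both dict branches and list leaves; it assumes every leaf dict carries a "quantity" key, as in the Storage example from the question.

def total_quantity(node):
    # Leaf item, e.g. {"name": "pencil", "quantity": 5}
    if isinstance(node, dict) and "quantity" in node:
        return node["quantity"]
    # Branch dict (city / branch / sub-branch): recurse into its values.
    if isinstance(node, dict):
        return sum(total_quantity(value) for value in node.values())
    # List of items or branches.
    if isinstance(node, list):
        return sum(total_quantity(item) for item in node)
    return 0

print(total_quantity(Storage))  # 144 for the example data in the question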
Q: Find class name of the function in python I have multiple classes in my code. From input I can extract the function name. But to execute that function I need to know which class is that function belongs to. Is there any way to find the class name of that function (from the list of classes) ? I tried isinstance, but it wont give the class name. And I also tried __class__.name, but it gives class name after we declare it before. Is there any other way for this? A: Not sure if that's what you need, but you can call .__dict__ on a class and then check if the name of your function is present as a key in the returned dictionary. class B: def foo(self):pass B.__dict__ mappingproxy({'__module__': '__main__', 'foo': <function B.foo at 0x7f4861baee80>, '__dict__': <attribute '__dict__' of 'B' objects>, '__weakref__': <attribute '__weakref__' of 'B' objects>, '__doc__': None}) But from the description it seems you have multiple @classmethod, which is most often used to declare customized initializers (sometimes wrongly called "constructors"), otherwise maybe get rid of the class and just declare "normal" functions.
Find class name of the function in python
I have multiple classes in my code. From input I can extract the function name. But to execute that function I need to know which class that function belongs to. Is there any way to find the class name of that function (from the list of classes)? I tried isinstance, but it won't give the class name. And I also tried __class__.name, but it only gives the class name after we have declared it beforehand. Is there any other way for this?
[ "Not sure if that's what you need, but you can call .__dict__ on a class and then check if the name of your function is present as a key in the returned dictionary.\nclass B:\n def foo(self):pass\n\n B.__dict__\nmappingproxy({'__module__': '__main__', 'foo': <function B.foo at 0x7f4861baee80>, '__dict__': <attribute '__dict__' of 'B' objects>, '__weakref__': <attribute '__weakref__' of 'B' objects>, '__doc__': None})\n\nBut from the description it seems you have multiple @classmethod, which is most often used to declare customized initializers (sometimes wrongly called \"constructors\"), otherwise maybe get rid of the class and just declare \"normal\" functions.\n" ]
[ 0 ]
[]
[]
[ "class", "python", "python_class" ]
stackoverflow_0074600482_class_python_python_class.txt
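Building on the __dict__ idea in the answer above, a small sketch that scans a list of candidate classes for the one whose own namespace defines the extracted function name, then calls it dynamically; the classes A and B and the name "bar" are made-up examples.

class A:
    def foo(self):
        return "A.foo"

class B:
    def bar(self):
        return "B.bar"

candidates = [A, B]
func_name = "bar"  # e.g. extracted from the input

# vars(cls) is cls.__dict__, so this only matches methods defined on the class itself.
owner = next(cls for cls in candidates if func_name in vars(cls))
print(owner.__name__)  # -> B

# Call the method dynamically on a fresh instance.
print(getattr(owner(), func_name)())  # -> B.bar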
Q: How to filter the dates based on range for datetime in django views.py def index(request): if request.method == "POST": from_date = request.POST.get("from_date") f_date = datetime.datetime.strptime(from_date,'%Y-%m-%d') print(f_date) to_date = request.POST.get("to_date") t_date = datetime.datetime.strptime(to_date, '%Y-%m-%d') print(t_date) global get_records_by_date get_records_by_date = Scrapper.objects.all().filter(Q(start_time__range=f_date),Q(end_time__range=t_date)) print(get_records_by_date) I need to get the dates from the range start time and end time based on datetime field. When I run the script its showing TypeError at / 'datetime.datetime' object is not iterable. Is there any solution for particular issue A: The __range lookup [Django-doc] expects a 2-tuple with the from and to datetime, so: def index(request): if request.method == 'POST': from_date = request.POST.get('from_date') f_date = datetime.datetime.strptime(from_date, '%Y-%m-%d') to_date = request.POST.get('to_date') t_date = datetime.datetime.strptime(to_date, '%Y-%m-%d') get_records_by_date = Scrapper.objects.filter( some_date_field__range=(f_date, t_date) ) # … I would however advise to work with a form to process and clean the input, and not use a global variable: global state, especially in a webserver is a very bad idea. A: You can put dates in the filter to find our range in between the dates. import datetime from_date=datetime.datetime.today().date()-datetime.timedelta(days=29) to_date=datetime.datetime.today().date() Somemodel.objects.filter(date_created__gte=from_date, date_created__lte=to_date)
How to filter the dates based on range for datetime in django
views.py def index(request): if request.method == "POST": from_date = request.POST.get("from_date") f_date = datetime.datetime.strptime(from_date,'%Y-%m-%d') print(f_date) to_date = request.POST.get("to_date") t_date = datetime.datetime.strptime(to_date, '%Y-%m-%d') print(t_date) global get_records_by_date get_records_by_date = Scrapper.objects.all().filter(Q(start_time__range=f_date),Q(end_time__range=t_date)) print(get_records_by_date) I need to get the dates from the range start time and end time based on datetime field. When I run the script it's showing TypeError at / 'datetime.datetime' object is not iterable. Is there any solution for this particular issue?
[ "The __range lookup [Django-doc] expects a 2-tuple with the from and to datetime, so:\ndef index(request):\n if request.method == 'POST':\n from_date = request.POST.get('from_date')\n f_date = datetime.datetime.strptime(from_date, '%Y-%m-%d')\n to_date = request.POST.get('to_date')\n t_date = datetime.datetime.strptime(to_date, '%Y-%m-%d')\n get_records_by_date = Scrapper.objects.filter(\n some_date_field__range=(f_date, t_date)\n )\n # …\nI would however advise to work with a form to process and clean the input, and not use a global variable: global state, especially in a webserver is a very bad idea.\n", "You can put dates in the filter to find our range in between the dates.\nimport datetime\n\nfrom_date=datetime.datetime.today().date()-datetime.timedelta(days=29)\nto_date=datetime.datetime.today().date()\nSomemodel.objects.filter(date_created__gte=from_date, date_created__lte=to_date)\n\n" ]
[ 1, 1 ]
[]
[]
[ "django", "python" ]
stackoverflow_0074599820_django_python.txt
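Following the first answer's advice to validate the dates with a form rather than reading request.POST by hand, a hedged sketch; the form class, field names and template name are assumptions, while Scrapper and start_time come from the question.

from django import forms
from django.shortcuts import render

class DateRangeForm(forms.Form):
    from_date = forms.DateField()
    to_date = forms.DateField()

def index(request):
    records = Scrapper.objects.none()
    if request.method == 'POST':
        form = DateRangeForm(request.POST)
        if form.is_valid():
            # __date__range compares only the calendar dates of the datetime field.
            records = Scrapper.objects.filter(
                start_time__date__range=(
                    form.cleaned_data['from_date'],
                    form.cleaned_data['to_date'],
                )
            )
    else:
        form = DateRangeForm()
    return render(request, 'index.html', {'form': form, 'records': records})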
Q: How to save Pandas dataframe into a npz file? I have some dataframes which are loaded from different npz files. I combine all the data into a single dataframe and apply some processing to it. Now I want to save the new combined dataframe into a new npz file. How do I do that? Since the dataframe is large (5000 rows, 30 columns) I would also like to know the most efficient way of doing so. I tried to look over the internet for the solution but the results are about how to convert pandas dataframe to numpy data. A: It seems that the best solution for your problem is to convert your dataframe to a numpy array and afterwards save it. np.savez(file, df.to_numpy()) file has to be a file, in which you want to save your data and df is the dataframe in which you have your data.
How to save Pandas dataframe into a npz file?
I have some dataframes which are loaded from different npz files. I combine all the data into a single dataframe and apply some processing to it. Now I want to save the new combined dataframe into a new npz file. How do I do that? Since the dataframe is large (5000 rows, 30 columns) I would also like to know the most efficient way of doing so. I tried to look over the internet for the solution but the results are about how to convert pandas dataframe to numpy data.
[ "It seems that the best solution for your problem is to convert your dataframe to a numpy array and afterwards save it.\nnp.savez(file, df.to_numpy())\n\nfile has to be a file, in which you want to save your data and df is the dataframe in which you have your data.\n" ]
[ 1 ]
[]
[]
[ "numpy", "pandas", "python" ]
stackoverflow_0074600244_numpy_pandas_python.txt
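One caveat worth adding to the answer above: np.savez with a bare array drops the column names and index, so the DataFrame cannot be rebuilt faithfully. A small sketch that stores the labels alongside the values and restores them on load; the file name combined.npz and the toy frame are assumptions.

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})

# Store values and column labels under named keys (np.savez_compressed also works).
np.savez(
    "combined.npz",
    values=df.to_numpy(),
    columns=np.array(df.columns, dtype=str),  # fixed-width strings, no pickling needed
)

# Load and rebuild the DataFrame. Note: a frame mixing numeric and string columns
# falls back to an object array, which would additionally need allow_pickle=True here.
with np.load("combined.npz") as data:
    restored = pd.DataFrame(data["values"], columns=data["columns"])

print(restored)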
Q: How to add the message content to the results in Google Pub/Sub? I have the following code, based on Google's official API def publish_messages_with_error_handler(project_id: str = GOOGLE_CLOUD_PROJECT_ID, topic_id: str = GOOGLE_CLOUD_TOPIC_ID, data: List[str] = []) -> dict: # [START pubsub_publish_with_error_handler] """Publishes multiple messages to a Pub/Sub topic with an error handler.""" publisher = pubsub_v1.PublisherClient() topic_path = publisher.topic_path(project_id, topic_id) publish_futures = [] result = { "succeded": [], "failed": [] } def get_callback(publish_future: pubsub_v1.publisher.futures.Future, data: str) -> Callable[[pubsub_v1.publisher.futures.Future], None]: def callback(publish_future: pubsub_v1.publisher.futures.Future) -> None: try: logger.info(publish_future.result(timeout=0)) except futures.TimeoutError: logger.info(f"Publishing {data} timed out.") return callback if data: for message in data: publish_future = publisher.publish(topic_path, message.encode("utf-8")) publish_future.add_done_callback(get_callback(publish_future, message)) publish_futures.append(publish_future) futures.wait(publish_futures, return_when=futures.ALL_COMPLETED) print(f"Published messages with error handler to {topic_path}.") for future in publish_futures: if future.exception(): result["failed"].append(future.result()) else: result["succeded"].append(future.result()) return result The data variable is just a list of uuid4. If the publish succeeded, I want to append the message's id in results['succeded'], otherwise, I want to append the message's id in results['failed']. How can I achieve that? Thanks in advance. A: Your code is already doing that - captures all success and failure in two separate list and return them as dictionary with keys succeded and failed: # Define a dictionary - 2 keys "succeded" and "failed" with empty list result = { "succeded": [], "failed": []} # append test values to empty list result["succeded"].append("success-uuid") #from future.result() - success result["failed"].append("failed-uuid") #from future.result() - failure result["succeded"].append("868716387") #from future.result() - success result["failed"].append("97234692369") #from future.result() - failure print(result) Output: {'succeded': ['success-uuid', '868716387'], 'failed': ['failed-uuid', '97234692369']}
How to add the message content to the results in Google Pub/Sub?
I have the following code, based on Google's official API def publish_messages_with_error_handler(project_id: str = GOOGLE_CLOUD_PROJECT_ID, topic_id: str = GOOGLE_CLOUD_TOPIC_ID, data: List[str] = []) -> dict: # [START pubsub_publish_with_error_handler] """Publishes multiple messages to a Pub/Sub topic with an error handler.""" publisher = pubsub_v1.PublisherClient() topic_path = publisher.topic_path(project_id, topic_id) publish_futures = [] result = { "succeded": [], "failed": [] } def get_callback(publish_future: pubsub_v1.publisher.futures.Future, data: str) -> Callable[[pubsub_v1.publisher.futures.Future], None]: def callback(publish_future: pubsub_v1.publisher.futures.Future) -> None: try: logger.info(publish_future.result(timeout=0)) except futures.TimeoutError: logger.info(f"Publishing {data} timed out.") return callback if data: for message in data: publish_future = publisher.publish(topic_path, message.encode("utf-8")) publish_future.add_done_callback(get_callback(publish_future, message)) publish_futures.append(publish_future) futures.wait(publish_futures, return_when=futures.ALL_COMPLETED) print(f"Published messages with error handler to {topic_path}.") for future in publish_futures: if future.exception(): result["failed"].append(future.result()) else: result["succeded"].append(future.result()) return result The data variable is just a list of uuid4. If the publish succeeded, I want to append the message's id in results['succeded'], otherwise, I want to append the message's id in results['failed']. How can I achieve that? Thanks in advance.
[ "Your code is already doing that - captures all success and failure in two separate list and return them as dictionary with keys succeded and failed:\n# Define a dictionary - 2 keys \"succeded\" and \"failed\" with empty list\nresult = { \"succeded\": [], \"failed\": []}\n\n# append test values to empty list\nresult[\"succeded\"].append(\"success-uuid\") #from future.result() - success\nresult[\"failed\"].append(\"failed-uuid\") #from future.result() - failure\n\n\nresult[\"succeded\"].append(\"868716387\") #from future.result() - success\nresult[\"failed\"].append(\"97234692369\") #from future.result() - failure\n\nprint(result)\n\nOutput:\n{'succeded': ['success-uuid', '868716387'], 'failed': ['failed-uuid', '97234692369']}\n\n" ]
[ 0 ]
[]
[]
[ "google_cloud_pubsub", "python" ]
stackoverflow_0074599733_google_cloud_pubsub_python.txt
Q: How to determine feature importance of non linear kernals in SVM I am using following code for feature importance calculation. from matplotlib import pyplot as plt from sklearn import svm def features_importances(coef, names): imp = coef imp,names = zip(*sorted(zip(imp,names))) plt.barh(range(len(names)), imp, align='center') plt.yticks(range(len(names)), names) plt.show() features_names = ['input1', 'input2'] svm = svm.SVC(kernel='linear') svm.fit(X, Y) feature_importances(svm.coef_, features_names) How would I be able to calculate featurue importance of a non linear kernal, which doesn't give expected result in the given example. A: Short answer: It's not possible, (at least the present libraries are not able to do it.) The feature importance of linear SVMs could be found out but not for a nonlinear SVMs, the reason being that, when the SVM is non-linear the dataset is mapped into a space of higher dimension, which is quite different from the parent dataset and the hyperplane is obtained and this high dimensional data and hence the property is changed from that of the parent dataset and hence it is not possible to find the feature importance of this SVM in relation to the parent dataset features. A: An N x N kernel result is not invertible, only traceable! Please check, if you do or can use Gradients. Those should normally trace the calculations. For the importance you need the trace after an impulse response I guess. Thus, if you input a bunch of ones. I am not that deep into the implementation of SciKit-Learn and if it ever makes sense to attempt to get access to the traces. But at that point, you traced the response back to the features, it should give you the importance. Nevertheless any gradient descent is not specifically made to directly trace the inputs rather than the parameters which lead to a specific output. You have to find those back-propagated parameters of your kernel w.r.t. the response (The gradients of the kernel params given the response itself). As, because this may be even impossible or is absolutely complex, I would refer to anything which can alternatively bring good results. Such as kernels between the different dimensions of your samples instead of between each of your individual samples. Or some response functions, which give a good dynamic scaling of you features. A: You can't directly extract the feature importance of a SVM. But, you can use the permutation_importance from sklearn to get it. Here is an example: from sklearn.svm import SVC from sklearn.inspection import permutation_importance import numpy as np import matplotlib.pyplot as plt svm = SVC(kernel='poly') svm.fit(X, Y) perm_importance = permutation_importance(svm, X, Y) # Making the sum of feature importance being equal to 1.0, # so feature importance can be understood as percentage perm_importance_normalized = perm_importance.importances_mean/perm_importance.importances_mean.sum() # Feature's name (considering your X a DataFrame) feature_names = X.columns features = np.array(feature_names) # Sort to plot in order of importance sorted_idx = perm_importance_normalized.argsort() # Plotting plt.figure(figsize=(13,5)) plt.title('Feature Importance',fontsize=20) plt.barh(features[sorted_idx], perm_importance_normalized[sorted_idx], color='b', align='center') plt.xlabel('Relative Importance', fontsize=15) plt.xticks(fontsize=15) plt.yticks(fontsize=15) for index, value in enumerate(perm_importance_normalized[sorted_idx]): plt.text(value, index, str(round(value,2)), fontsize=15) plt.show()
How to determine feature importance of non-linear kernels in SVM
I am using the following code for feature importance calculation. from matplotlib import pyplot as plt from sklearn import svm def features_importances(coef, names): imp = coef imp,names = zip(*sorted(zip(imp,names))) plt.barh(range(len(names)), imp, align='center') plt.yticks(range(len(names)), names) plt.show() features_names = ['input1', 'input2'] svm = svm.SVC(kernel='linear') svm.fit(X, Y) feature_importances(svm.coef_, features_names) How would I be able to calculate feature importance for a non-linear kernel, for which the given example doesn't give the expected result?
[ "Short answer: It's not possible, (at least the present libraries are not able to do it.) The feature importance of linear SVMs could be found out but not for a nonlinear SVMs, the reason being that, when the SVM is non-linear the dataset is mapped into a space of higher dimension, which is quite different from the parent dataset and the hyperplane is obtained and this high dimensional data and hence the property is changed from that of the parent dataset and hence it is not possible to find the feature importance of this SVM in relation to the parent dataset features. \n", "An N x N kernel result is not invertible, only traceable!\nPlease check, if you do or can use Gradients.\nThose should normally trace the calculations.\nFor the importance you need the trace after an impulse response I guess.\nThus, if you input a bunch of ones.\nI am not that deep into the implementation of SciKit-Learn and if it ever makes sense to attempt to get access to the traces.\nBut at that point, you traced the response back to the features, it should give you the importance.\nNevertheless any gradient descent is not specifically made to directly trace the inputs rather than the parameters which lead to a specific output.\nYou have to find those back-propagated parameters of your kernel w.r.t. the response (The gradients of the kernel params given the response itself).\nAs, because this may be even impossible or is absolutely complex, I would refer to anything which can alternatively bring good results.\nSuch as kernels between the different dimensions of your samples instead of between each of your individual samples.\nOr some response functions, which give a good dynamic scaling of you features.\n", "You can't directly extract the feature importance of a SVM. But, you can use the permutation_importance from sklearn to get it.\nHere is an example:\nfrom sklearn.svm import SVC\nfrom sklearn.inspection import permutation_importance\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nsvm = SVC(kernel='poly')\nsvm.fit(X, Y)\n\nperm_importance = permutation_importance(svm, X, Y)\n\n# Making the sum of feature importance being equal to 1.0,\n# so feature importance can be understood as percentage\nperm_importance_normalized = perm_importance.importances_mean/perm_importance.importances_mean.sum()\n\n# Feature's name (considering your X a DataFrame)\nfeature_names = X.columns\nfeatures = np.array(feature_names)\n\n# Sort to plot in order of importance\nsorted_idx = perm_importance_normalized.argsort()\n\n# Plotting\nplt.figure(figsize=(13,5))\nplt.title('Feature Importance',fontsize=20)\nplt.barh(features[sorted_idx], perm_importance_normalized[sorted_idx], color='b', align='center')\nplt.xlabel('Relative Importance', fontsize=15)\nplt.xticks(fontsize=15)\nplt.yticks(fontsize=15)\n\nfor index, value in enumerate(perm_importance_normalized[sorted_idx]):\n plt.text(value, index,\n str(round(value,2)), fontsize=15)\n\nplt.show()\n\n\n" ]
[ 1, 0, 0 ]
[]
[]
[ "machine_learning", "python", "scikit_learn", "svm" ]
stackoverflow_0041628264_machine_learning_python_scikit_learn_svm.txt
Q: Plotly two mapbox figures in a single map with different color I want to plot two mapbox figures in a single map. This is what I have right now: fig = px.choropleth_mapbox(geo_df, geojson=geo_df.geometry, locations=geo_df.index, color="TOTAL_POPULATION", color_continuous_scale=px.colors.sequential.Greens, center={"lat": 40.7, "lon": -73.95}, mapbox_style="open-street-map", zoom=10) fig2 = px.scatter_mapbox(geo_df, lat="INTPTLAT", lon="INTPTLON", size="MEDIAN_VALUE", color="MEDIAN_VALUE", color_continuous_scale=px.colors.sequential.Blues, mapbox_style="open-street-map") fig.add_trace(fig2.data[0]) fig.update_layout( autosize=False, width=1400, height=1000, ) Here, I have specified different colors for the two mapbox, but its only picking the first one and applying it to both. How can I print them with different colors to improve visibility? A: Since your question does not present any data, I have combined the reference example with another example to confirm the events. I searched the plotly community for a solution and identified examples that would solve the issue. The way to do this is to add a graph object choropleth map to the graph object and then add an express graph. One issue is that the specified colormap is not valid. We are currently investigating but may not be able to reach a solution. I believe it is compatible with the solution to your question. import plotly.express as px import plotly.graph_objects as go px.set_mapbox_access_token(open("mapbox_api_key.txt").read()) # fig for data df_election = px.data.election() geojson = px.data.election_geojson() # fig2 for data df_car = px.data.carshare() df_car['peak_hour2'] = df_car['peak_hour']*20 fig = go.Figure() fig.add_trace(go.Choroplethmapbox(geojson=geojson, z=df_election["Bergeron"], colorscale='greens', locations=df_election["district"], featureidkey="properties.district", colorbar_x=1.12, colorbar_title='election' )) fig.update_layout(mapbox_style="open-street-map", mapbox_center={"lat": 45.5517, "lon": -73.7073}, mapbox_zoom=10) map_scatter = px.scatter_mapbox(df_car, lat="centroid_lat", lon="centroid_lon", color="peak_hour", size="car_hours", color_continuous_scale=px.colors.sequential.Blues, size_max=15, zoom=9) fig.add_traces(list(map_scatter.select_traces())) fig.update_layout(coloraxis={'colorbar': {'title': {'text': 'peak_hour'}}}) fig.update_layout(autosize=True, height=600, margin={"r":0,"t":0,"l":0,"b":0}) fig.show()
Plotly two mapbox figures in a single map with different color
I want to plot two mapbox figures in a single map. This is what I have right now: fig = px.choropleth_mapbox(geo_df, geojson=geo_df.geometry, locations=geo_df.index, color="TOTAL_POPULATION", color_continuous_scale=px.colors.sequential.Greens, center={"lat": 40.7, "lon": -73.95}, mapbox_style="open-street-map", zoom=10) fig2 = px.scatter_mapbox(geo_df, lat="INTPTLAT", lon="INTPTLON", size="MEDIAN_VALUE", color="MEDIAN_VALUE", color_continuous_scale=px.colors.sequential.Blues, mapbox_style="open-street-map") fig.add_trace(fig2.data[0]) fig.update_layout( autosize=False, width=1400, height=1000, ) Here, I have specified different colors for the two mapbox, but its only picking the first one and applying it to both. How can I print them with different colors to improve visibility?
[ "Since your question does not present any data, I have combined the reference example with another example to confirm the events.\nI searched the plotly community for a solution and identified examples that would solve the issue.\nThe way to do this is to add a graph object choropleth map to the graph object and then add an express graph.\nOne issue is that the specified colormap is not valid. We are currently investigating but may not be able to reach a solution. I believe it is compatible with the solution to your question.\nimport plotly.express as px\nimport plotly.graph_objects as go\n\npx.set_mapbox_access_token(open(\"mapbox_api_key.txt\").read())\n\n# fig for data\ndf_election = px.data.election()\ngeojson = px.data.election_geojson()\n\n# fig2 for data\ndf_car = px.data.carshare()\ndf_car['peak_hour2'] = df_car['peak_hour']*20\n\nfig = go.Figure()\nfig.add_trace(go.Choroplethmapbox(geojson=geojson,\n z=df_election[\"Bergeron\"],\n colorscale='greens',\n locations=df_election[\"district\"],\n featureidkey=\"properties.district\",\n colorbar_x=1.12,\n colorbar_title='election'\n ))\nfig.update_layout(mapbox_style=\"open-street-map\",\n mapbox_center={\"lat\": 45.5517, \"lon\": -73.7073},\n mapbox_zoom=10)\n\nmap_scatter = px.scatter_mapbox(df_car,\n lat=\"centroid_lat\",\n lon=\"centroid_lon\",\n color=\"peak_hour\",\n size=\"car_hours\",\n color_continuous_scale=px.colors.sequential.Blues,\n size_max=15,\n zoom=9)\nfig.add_traces(list(map_scatter.select_traces()))\n\nfig.update_layout(coloraxis={'colorbar': {'title': {'text': 'peak_hour'}}})\nfig.update_layout(autosize=True, height=600, margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0})\nfig.show()\n\n\n" ]
[ 2 ]
[]
[]
[ "plotly", "python" ]
stackoverflow_0074597150_plotly_python.txt
Q: what we mean by using <<< in shell to execute a python code Hello guys i wanna write a Shell script that runs Python code saved in variable called $code. So i save the script in variable $code with this command: $ export CODE='print("Hello world")' To resolve the problem I write the following script in a file called run: #!/bin/bash echo "$CODE" > main.py python3 main.py To running the shell script i use: ./run and its work but I found another answer which I don't understand: python3 <<< $CODE so what do we mean by using <<<? A: In a lot of shells <<< denotes a here string and is a way to pass standard input to commands. <<< is used for strings, e.g. $ python3 <<< 'print("hi there")' hi there It passes the word on the right to the standard input of the command on the left. whereas << denotes a here document, e.g. command <<MultiLineDoc Standard Input That Streches many Lines and preserves indentation and linebreaks which is useful for passing many arguments to a command, e.g. passing text to a program and preserving its indentation. The beginning and ending _MultiLineDoc_ delimiter can be named any way wanted, it can be considered the name of the document. Important is that it repeats identically at both beginning and end and nowhere else in the document, everything between that delimiter is passed. MultiLineDoc < is used for passing the contents of a file, e.g. command < filename.txt As for your example with <<< : You could do the same with | but that's only OK if all your variables are defined in what you are passing. If you do have other variables that you have defined in your environment and which you wish to cross reference you would use a here-string as in your example, that lets you reference other variables within the content you are passing. Please see: https://en.wikipedia.org/wiki/Here_document https://linuxhint.com/bash-heredoc-tutorial/ A: In Bash, zsh (and some other shells) <<< is the here string operator. The code you’ve posted is roughly equivalent to echo "$PYCODE" | python3
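Applied to the exact setup in this question, the three forms below are equivalent ways of feeding the code held in $CODE to the interpreter's standard input. This is a minimal sketch assuming bash or zsh; the <<< here-string is not part of plain POSIX sh.

#!/bin/bash
export CODE='print("Hello world")'

# here-string: the expansion of $CODE becomes python3's stdin
python3 <<< "$CODE"

# equivalent pipe
printf '%s\n' "$CODE" | python3

# equivalent here-document (unquoted EOF, so $CODE is expanded)
python3 <<EOF
$CODE
EOF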
What do we mean by using <<< in shell to execute Python code?
Hello guys, I want to write a shell script that runs Python code saved in a variable called $CODE. I save the script in the variable $CODE with this command: $ export CODE='print("Hello world")' To solve the problem I write the following script in a file called run: #!/bin/bash echo "$CODE" > main.py python3 main.py To run the shell script I use: ./run and it works, but I found another answer which I don't understand: python3 <<< $CODE So what do we mean by using <<<?
[ "In a lot of shells <<< denotes a here string and is a way to pass standard input to commands. <<< is used for strings, e.g.\n$ python3 <<< 'print(\"hi there\")'\nhi there\n\nIt passes the word on the right to the standard input of the command on the left.\nwhereas << denotes a here document, e.g.\ncommand <<MultiLineDoc \nStandard Input\nThat\n Streches many\nLines and preserves \n indentation and \n\nlinebreaks\n\nwhich is useful for passing many arguments to a command, \ne.g. passing text to a program and preserving its indentation.\nThe beginning and ending _MultiLineDoc_ delimiter can be named any way wanted, \nit can be considered the name of the document. \nImportant is that it repeats identically at \nboth beginning and end and nowhere else in the \ndocument, everything between that delimiter is passed.\nMultiLineDoc\n\n< is used for passing the contents of a file, e.g. command < filename.txt\nAs for your example with <<< :\nYou could do the same with | but that's only OK if all your variables are defined in what you are passing. If you do have other variables that you have defined in your environment and which you wish to cross reference you would use a here-string as in your example, that lets you reference other variables within the content you are passing.\nPlease see: https://en.wikipedia.org/wiki/Here_document\nhttps://linuxhint.com/bash-heredoc-tutorial/\n", "In Bash, zsh (and some other shells) <<< is the here string operator. The code you’ve posted is roughly equivalent to\necho \"$PYCODE\" | python3\n\n" ]
[ 1, 0 ]
[]
[]
[ "linux", "python", "python_3.x", "shell" ]
stackoverflow_0074600429_linux_python_python_3.x_shell.txt
Q: How to add background-color only on some part of my page I make a local page that prints me some information. My boos want to add two different colours as a background colour. I know how to add background colour to the whole page, but I don't know how to separate the page into two different background colours. This is what my code looks like for now: <!DOCTYPE html> <html lang="pl"> <head> <title>Raport Transmisji</title> </head> <body bgcolor=”#e6fff5"> <h1 style="text-align: center;"><strong>INSTYTUT NIEZNANY</strong></h1> <div> <h1 style="text-align: center;"><strong>DANE PRZYPADKOWE</strong></h1> <div> <h3 style="text-align: center;">ZAKŁAD NIEZNANY</h3> <div> <div style="text-align: center;">ul. Kozłowska 20, tel. 123-456-789 </div> <p style="text-align: center;">RAPORT TRANSMISJI DANYCH DO ## z dnia <span style="color: #ff0000;">(data)</span></p> <p style="text-align: center;">DO PRZEBIEGU OBLICZEŃ <span style="color: #ff0000;">(liczba)</span>/<span style="color: #ff0000;">(liczba)</span></p> <hr /> <p>Łączna liczba zwierząt w aktualnej transmisji - <span style="color: #ff0000;">(liczba)</span></p> <hr /> <p>CZYSTORASOWE:</p> <p style="text-align: justify; padding-left: 80px;">WBP - (razem/knurki/loszki) - (liczba)/(liczba)/(liczba)</p> <p style="text-align: justify; padding-left: 80px;">PBZ - (razem/knurki/loszki) - (liczba)/(liczba)/(liczba)</p> <p style="text-align: justify; padding-left: 80px;">Puławska - (razem/knurki/loszki) - (liczba)/(liczba)/(liczba)</p> <p style="text-align: justify; padding-left: 80px;">Hampshire - (razem/knurki/loszki) - (liczba)/(liczba)/(liczba)</p> <p style="text-align: justify; padding-left: 80px;">Duroc - (razem/knurki/loszki) - (liczba)/(liczba)/(liczba)</p> <p style="text-align: justify; padding-left: 80px;">Pietrain - (razem/knurki/loszki) - (liczba)/(liczba)/(liczba)</p> <hr /> <p>MIESZAŃCE:</p> <p style="padding-left: 40px;">Mieszańc&oacute;w z rasą WBP - liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa ojca to WBP(razem/knurki/loszki) - liczba/liczba/liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa matki to WBP(razem/knurki/loszki - liczba/liczba/liczba</p> <p style="padding-left: 40px;">&nbsp;</p> <p style="padding-left: 40px;">Mieszańc&oacute;w z rasą PBZ- liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa ojca to PBZ(razem/knurki/loszki) - liczba/liczba/liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa matki to PBZ(razem/knurki/loszki - liczba/liczba/liczba</p> <p style="padding-left: 40px;">&nbsp;</p> <p style="padding-left: 40px;">Mieszańc&oacute;w z rasą Puławską- liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa ojca to Puławska(razem/knurki/loszki) - liczba/liczba/liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa matki to Puławska(razem/knurki/loszki) - liczba/liczba/liczba</p> <p style="padding-left: 40px;">&nbsp;</p> <p style="padding-left: 40px;">Mieszańc&oacute;w z rasą Hampshire- liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa ojca to Hampshire(razem/knurki/loszki) - liczba/liczba/liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa matki to Hampshire(razem/knurki/loszki - liczba/liczba/liczba</p> <p style="padding-left: 40px;">&nbsp;</p> <p style="padding-left: 40px;">Mieszańc&oacute;w z rasą Duroc- liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa ojca to Duroc(razem/knurki/loszki) - liczba/liczba/liczba</p> <p style="padding-left: 
40px;">Mieszańc&oacute;w gdzie rasa matki to Duroc(razem/knurki/loszki) - liczba/liczba/liczba</p> <p style="padding-left: 40px;">&nbsp;</p> <p style="padding-left: 40px;">Mieszańc&oacute;w z rasą Pietrain- liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa ojca to Pietrain(razem/knurki/loszki) - liczba/liczba/liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa matki to Pietrain(razem/knurki/loszki) - liczba/liczba/liczba</p> <p>&nbsp;</p> <p style="text-align: justify;font-size: 12px">DONICE, DNIA&nbsp; ..............&nbsp;&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;...................</p> <p style="font-size: 10px; padding-left: 560px;">(podpis)</p> </body> </html> Every word "liczba" or "data" will be replaced in my python program. This code gives me a good-looking report but I have one colour on my whole page. I need one colour from "RAPORT TRANSMISJI DANYCH DO ## z dnia" to "Pietrain - (razem/knurki/loszki) - (liczba)(liczba)(liczba)" included this both lines. Second colour from "MIESZAŃE" to "Mieszańców gdzie rasa matki to Pietrain(razem/knurki/loszki) - liczba/liczba/liczba" included this both lines. Thanks for all your help. A: You can use two different div class and apply different styles to each div particular div. eg. <div class=one></div> <div class=two></div> and then in the css file .one { background-color: yellow; } .two { background-color: blue; }
How to add background-color only on some part of my page
I make a local page that prints me some information. My boos want to add two different colours as a background colour. I know how to add background colour to the whole page, but I don't know how to separate the page into two different background colours. This is what my code looks like for now: <!DOCTYPE html> <html lang="pl"> <head> <title>Raport Transmisji</title> </head> <body bgcolor=”#e6fff5"> <h1 style="text-align: center;"><strong>INSTYTUT NIEZNANY</strong></h1> <div> <h1 style="text-align: center;"><strong>DANE PRZYPADKOWE</strong></h1> <div> <h3 style="text-align: center;">ZAKŁAD NIEZNANY</h3> <div> <div style="text-align: center;">ul. Kozłowska 20, tel. 123-456-789 </div> <p style="text-align: center;">RAPORT TRANSMISJI DANYCH DO ## z dnia <span style="color: #ff0000;">(data)</span></p> <p style="text-align: center;">DO PRZEBIEGU OBLICZEŃ <span style="color: #ff0000;">(liczba)</span>/<span style="color: #ff0000;">(liczba)</span></p> <hr /> <p>Łączna liczba zwierząt w aktualnej transmisji - <span style="color: #ff0000;">(liczba)</span></p> <hr /> <p>CZYSTORASOWE:</p> <p style="text-align: justify; padding-left: 80px;">WBP - (razem/knurki/loszki) - (liczba)/(liczba)/(liczba)</p> <p style="text-align: justify; padding-left: 80px;">PBZ - (razem/knurki/loszki) - (liczba)/(liczba)/(liczba)</p> <p style="text-align: justify; padding-left: 80px;">Puławska - (razem/knurki/loszki) - (liczba)/(liczba)/(liczba)</p> <p style="text-align: justify; padding-left: 80px;">Hampshire - (razem/knurki/loszki) - (liczba)/(liczba)/(liczba)</p> <p style="text-align: justify; padding-left: 80px;">Duroc - (razem/knurki/loszki) - (liczba)/(liczba)/(liczba)</p> <p style="text-align: justify; padding-left: 80px;">Pietrain - (razem/knurki/loszki) - (liczba)/(liczba)/(liczba)</p> <hr /> <p>MIESZAŃCE:</p> <p style="padding-left: 40px;">Mieszańc&oacute;w z rasą WBP - liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa ojca to WBP(razem/knurki/loszki) - liczba/liczba/liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa matki to WBP(razem/knurki/loszki - liczba/liczba/liczba</p> <p style="padding-left: 40px;">&nbsp;</p> <p style="padding-left: 40px;">Mieszańc&oacute;w z rasą PBZ- liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa ojca to PBZ(razem/knurki/loszki) - liczba/liczba/liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa matki to PBZ(razem/knurki/loszki - liczba/liczba/liczba</p> <p style="padding-left: 40px;">&nbsp;</p> <p style="padding-left: 40px;">Mieszańc&oacute;w z rasą Puławską- liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa ojca to Puławska(razem/knurki/loszki) - liczba/liczba/liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa matki to Puławska(razem/knurki/loszki) - liczba/liczba/liczba</p> <p style="padding-left: 40px;">&nbsp;</p> <p style="padding-left: 40px;">Mieszańc&oacute;w z rasą Hampshire- liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa ojca to Hampshire(razem/knurki/loszki) - liczba/liczba/liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa matki to Hampshire(razem/knurki/loszki - liczba/liczba/liczba</p> <p style="padding-left: 40px;">&nbsp;</p> <p style="padding-left: 40px;">Mieszańc&oacute;w z rasą Duroc- liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa ojca to Duroc(razem/knurki/loszki) - liczba/liczba/liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa matki to 
Duroc(razem/knurki/loszki) - liczba/liczba/liczba</p> <p style="padding-left: 40px;">&nbsp;</p> <p style="padding-left: 40px;">Mieszańc&oacute;w z rasą Pietrain- liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa ojca to Pietrain(razem/knurki/loszki) - liczba/liczba/liczba</p> <p style="padding-left: 40px;">Mieszańc&oacute;w gdzie rasa matki to Pietrain(razem/knurki/loszki) - liczba/liczba/liczba</p> <p>&nbsp;</p> <p style="text-align: justify;font-size: 12px">DONICE, DNIA&nbsp; ..............&nbsp;&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;...................</p> <p style="font-size: 10px; padding-left: 560px;">(podpis)</p> </body> </html> Every word "liczba" or "data" will be replaced in my python program. This code gives me a good-looking report but I have one colour on my whole page. I need one colour from "RAPORT TRANSMISJI DANYCH DO ## z dnia" to "Pietrain - (razem/knurki/loszki) - (liczba)(liczba)(liczba)" included this both lines. Second colour from "MIESZAŃE" to "Mieszańców gdzie rasa matki to Pietrain(razem/knurki/loszki) - liczba/liczba/liczba" included this both lines. Thanks for all your help.
[ "You can use two different div class and apply different styles to each div particular div. eg.\n<div class=one></div>\n<div class=two></div>\n\nand then in the css file\n.one {\n background-color: yellow;\n}\n\n.two {\n background-color: blue;\n}\n\n" ]
[ 1 ]
[]
[]
[ "css", "html", "python" ]
stackoverflow_0074599800_css_html_python.txt
Q: How to avoid duplicates while looking for a minimum value? I am getting duplicate values in my data frame. Sample data: **Fitness Value MSU Locations MSU Range** 1.180694 {17, 38, 15} 2.017782 1.202132 {10, 22, 39} 2.032507 1.179097 {10, 5, 38} 2.048932 1.175793 {27, 20, 36} 1.820395 1.187460 {33, 10, 34} 1.922506 I am trying to find a minimum value in Fitness Value column and keeping the whole row record. Sample Code: df_min_value_in_each_generation = pd.DataFrame() for x in range(0, 2, 1): new_generation = genetic_algorithm(initial_pop_chromosome_fitness) initial_pop_chromosome_fitness = new_generation #print (new_generation) df = new_generation.loc[new_generation['Fitness Value'].idxmin()] df_min_value_in_each_generation = df_min_value_in_each_generation.append(df) #WATTx = df_min_value_in_each_generation.loc[df_min_value_in_each_generation.['Fitness Value'].eq(df['Fitness Value'].min())] WATT = df_min_value_in_each_generation.loc[df_min_value_in_each_generation['Fitness Value'].idxmin()] print (WATT) Output: Fitness Value MSU Locations MSU Range 9 1.158857 {24, 17, 4} 2.06536 9 1.158857 {24, 17, 4} 2.06536 I have tried to use this: df = df_2.loc[[df_2['Fitness Value'].idxmin()]] Also, df.loc[df['Fitness Value'].eq(df['Fitness Value'].min())] But, still it gives me the duplicate smallest number (Repeated smallest value). I do not want to keep duplicates, Any suggestion? A: You can try to use df.reset_index() before using boolean indexing with idxmin: df = df.reset_index().loc[df['Fitness Value'].idxmin()]
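One caveat with the reset_index() approach in the answer: when the appended frame ends up with duplicate index labels (as in the output shown, where label 9 appears twice), label-based .loc can still return several rows. Two hedged alternatives that avoid label lookup entirely are sketched below; the example values are made up to mirror the question:

import pandas as pd

df = pd.DataFrame(
    {"Fitness Value": [1.180694, 1.158857, 1.158857],
     "MSU Locations": [{17, 38, 15}, {24, 17, 4}, {24, 17, 4}],
     "MSU Range": [2.017782, 2.06536, 2.06536]},
    index=[3, 9, 9],  # duplicate labels, as in the question's output
)

# Option 1: positional lookup always yields exactly one row
best_by_position = df.iloc[[df["Fitness Value"].to_numpy().argmin()]]

# Option 2: nsmallest(1, ...) keeps only the first of tied minima
best_by_nsmallest = df.nsmallest(1, "Fitness Value")

print(best_by_position)
print(best_by_nsmallest)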
How to avoid duplicates while looking for a minimum value?
I am getting duplicate values in my data frame. Sample data: **Fitness Value MSU Locations MSU Range** 1.180694 {17, 38, 15} 2.017782 1.202132 {10, 22, 39} 2.032507 1.179097 {10, 5, 38} 2.048932 1.175793 {27, 20, 36} 1.820395 1.187460 {33, 10, 34} 1.922506 I am trying to find a minimum value in Fitness Value column and keeping the whole row record. Sample Code: df_min_value_in_each_generation = pd.DataFrame() for x in range(0, 2, 1): new_generation = genetic_algorithm(initial_pop_chromosome_fitness) initial_pop_chromosome_fitness = new_generation #print (new_generation) df = new_generation.loc[new_generation['Fitness Value'].idxmin()] df_min_value_in_each_generation = df_min_value_in_each_generation.append(df) #WATTx = df_min_value_in_each_generation.loc[df_min_value_in_each_generation.['Fitness Value'].eq(df['Fitness Value'].min())] WATT = df_min_value_in_each_generation.loc[df_min_value_in_each_generation['Fitness Value'].idxmin()] print (WATT) Output: Fitness Value MSU Locations MSU Range 9 1.158857 {24, 17, 4} 2.06536 9 1.158857 {24, 17, 4} 2.06536 I have tried to use this: df = df_2.loc[[df_2['Fitness Value'].idxmin()]] Also, df.loc[df['Fitness Value'].eq(df['Fitness Value'].min())] But, still it gives me the duplicate smallest number (Repeated smallest value). I do not want to keep duplicates, Any suggestion?
[ "You can try to use df.reset_index() before using boolean indexing with idxmin:\ndf = df.reset_index().loc[df['Fitness Value'].idxmin()]\n\n" ]
[ 1 ]
[]
[]
[ "dataframe", "genetic_algorithm", "genetic_programming", "pandas", "python" ]
stackoverflow_0074548310_dataframe_genetic_algorithm_genetic_programming_pandas_python.txt
Q: What's the benefit of a fixture with function scope and no teardown code? What's advantage of a (default) function-scope fixture without teardown code? Why not just call the function at the beginning of the test? For example, what's the benefit of writing: @pytest.fixture def smtp(): return smtplib.SMTP("smtp.gmail.com") def test_ehlo(smtp): response, msg = smtp.ehlo() # ... instead of simply: def create_smtp(): return smtplib.SMTP("smtp.gmail.com") def test_ehlo(): smtp = create_smtp() response, msg = smtp.ehlo() # ... I understand why fixtures are useful when we need teardown code. I also understand why fixtures with scope other than function are useful: we may want to reuse the same "external" object in multiple tests (to save the time it takes to create it; or perhaps even to maintain its state -- although that seems to be rather dangerous since this creates an hard-to-see coupling between separate tests). A: I had a similar question when I started using it. Here's my experience: Fixtures can be set to autouse=True, i.e., trigger automatically that may not be possible with an inline call. This is useful in some cases. Fixtures add readability, at least for me. Looking at the signature of the test, one can figure out what initialisations are a pre-requisite for a given test. In that sense, it also help keep the test and it's initialisation isolated. A: What's advantage of a (default) function-scope fixture without teardown code? Why not just call the function at the beginning of the test? Saving vertical space. Consider something like this, where you have more than one fixture per test: import pytest @pytest.fixture def value1(): return 1 @pytest.fixture def value2(): return 2 @pytest.fixture def value3(): return 3 def test_values(value1, value2, value3): assert value1 == 1 assert value2 == 2 assert value3 == 3 If we were to do this your way: def test_values(): v1 = value1() v2 = value2() v3 = value3() assert v1 == 1 assert v2 == 2 assert v3 == 3 That's three extra lines of code. Not a big deal, but then what if you had 10 tests that needed value1, value2 and value3? Now you have 30 extra lines of vertical space for basically no reason. Obviously, both of our examples are overly simplified (I could just have done the call and assert inline), but I think it's straightforward to see how this could have an impact with real code. A: I believe that one of the most important advantage of function-scoped fixtures is consistency. It is much more readable and logical if all your fixtures (no matter of they scope or tear-down code) are used in exactly same way. Also if in some point in future you'll decided to change the scope of this fixture or to add some tear-down code to it then you would not need to change any test cases, only the code of the fixture. A: Look at this answer: https://stackoverflow.com/a/67636207/11277611 I'm not so liking fixtures, but one advatage - it's reusability. If you have some function with long computtion - you can define it once in fixture and reuse it every time in the tests. Surely you can do this with a regular function, IMO, it's a kind of test.
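The autouse=True point in the first answer is easy to miss without code, so here is a minimal sketch (the fixture name and the environment variable are made up for illustration). An autouse fixture runs for every test in its scope without being named in any signature, which a plain function call at the top of each test cannot do automatically:

import os
import pytest

@pytest.fixture(autouse=True)
def fast_smtp_timeout(monkeypatch):
    # applied to every test in this module, no parameter needed in the tests
    monkeypatch.setenv("SMTP_TIMEOUT", "1")

def test_timeout_is_set():
    assert os.environ["SMTP_TIMEOUT"] == "1"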
What's the benefit of a fixture with function scope and no teardown code?
What's advantage of a (default) function-scope fixture without teardown code? Why not just call the function at the beginning of the test? For example, what's the benefit of writing: @pytest.fixture def smtp(): return smtplib.SMTP("smtp.gmail.com") def test_ehlo(smtp): response, msg = smtp.ehlo() # ... instead of simply: def create_smtp(): return smtplib.SMTP("smtp.gmail.com") def test_ehlo(): smtp = create_smtp() response, msg = smtp.ehlo() # ... I understand why fixtures are useful when we need teardown code. I also understand why fixtures with scope other than function are useful: we may want to reuse the same "external" object in multiple tests (to save the time it takes to create it; or perhaps even to maintain its state -- although that seems to be rather dangerous since this creates an hard-to-see coupling between separate tests).
[ "I had a similar question when I started using it. Here's my experience:\n\nFixtures can be set to autouse=True, i.e., trigger automatically that may not be possible with an inline call. This is useful in some cases.\nFixtures add readability, at least for me. Looking at the signature of the test, one can figure out what initialisations are a pre-requisite for a given test. In that sense, it also help keep the test and it's initialisation isolated.\n\n", "\nWhat's advantage of a (default) function-scope fixture without\n teardown code? Why not just call the function at the beginning of the\n test?\n\nSaving vertical space.\nConsider something like this, where you have more than one fixture per test:\nimport pytest\n\n\[email protected]\ndef value1():\n return 1\n\[email protected]\ndef value2():\n return 2\n\[email protected]\ndef value3():\n return 3\n\n\ndef test_values(value1, value2, value3):\n assert value1 == 1\n assert value2 == 2\n assert value3 == 3\n\nIf we were to do this your way:\ndef test_values():\n v1 = value1()\n v2 = value2()\n v3 = value3()\n\n assert v1 == 1\n assert v2 == 2\n assert v3 == 3\n\nThat's three extra lines of code. Not a big deal, but then what if you had 10 tests that needed value1, value2 and value3? Now you have 30 extra lines of vertical space for basically no reason. \nObviously, both of our examples are overly simplified (I could just have done the call and assert inline), but I think it's straightforward to see how this could have an impact with real code.\n", "I believe that one of the most important advantage of function-scoped fixtures is consistency. It is much more readable and logical if all your fixtures (no matter of they scope or tear-down code) are used in exactly same way.\nAlso if in some point in future you'll decided to change the scope of this fixture or to add some tear-down code to it then you would not need to change any test cases, only the code of the fixture.\n", "Look at this answer:\nhttps://stackoverflow.com/a/67636207/11277611\nI'm not so liking fixtures, but one advatage - it's reusability.\nIf you have some function with long computtion - you can define it once in fixture and reuse it every time in the tests.\nSurely you can do this with a regular function, IMO, it's a kind of test.\n" ]
[ 5, 1, 1, 1 ]
[]
[]
[ "pytest", "python" ]
stackoverflow_0042308799_pytest_python.txt
Q: Pylint doesn't like string.format() and wants me to use f-strings. Is this fixable? I've upgraded to pylint 2.15.2, and suddenly I'm getting lots of consider-using-f-string warnings whenever I run pylint, where I've used % formatting for strings. I understand why Pylint doesn't want to use the old % formatting, but I also get this error when I try to use string.format() instead. Take the following code as an example: """Example module""" def some_long_complicated_function(a, b): """Do something""" return a + b def main(): """Main function""" a = 2 b = 3 percent_string = "The result of %s + %s is %s" % ( a, b, some_long_complicated_function(a, b) ) format_string = "The result of {} + {} is {}".format( a, b, some_long_complicated_function(a, b) ) f_string = f"The result of {a} + {b} is {some_long_complicated_function(a, b)}" print(percent_string) print(format_string) print(f_string) if __name__ == "__main__": main() When I run pylint on this code, I get the following output: ************* Module pyexample ./pyexample.py:11:21: C0209: Formatting a regular string which could be a f-string (consider-using-f-string) ./pyexample.py:15:20: C0209: Formatting a regular string which could be a f-string (consider-using-f-string) ------------------------------------------------------------------ Your code has been rated at 8.46/10 (previous run: 6.15/10, +2.31) There are instances like this where I don't want to use an f-string, because I think it actually hampers - not helps - readability, especially in cases like these where I may be writing long function calls inline within the string. In these places I'd rather use string.format(), because you can nicely separate out the format specifiers {} from the functions to generate the strings I want by putting them on a separate line. With f-strings, my lines may end up being too long and I have to resort to using line continuation characters, which again harms the readability IMO. The problem is, Pylint doesn't like string.format() - it only wants me to use f-strings. I know that this is a 'Convention' not 'Error', but my code has to pass Pylint 100%. I could waive this message, but that's not good practice and there are places in my code where I do want to swap out the %-string formats. My question: Is there a way to configure Pylint so that when I run it, it will not flag a consider-using-f-string warning when I use string.format() (only when I use % strings)? I've had a look in the rc-file but I can't see any obvious setting like this. Or is the only way to fix this to waive the warning entirely? A: If you just want to avoid long line or line continuation character, I usually choose to use parentheses: f_string = (f"The result of {a} + {b} is " f"{some_long_complicated_function(a, b)}")
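On the narrower configuration question: as far as I know, C0209 does not distinguish between %-formatting and str.format(), so it cannot be limited to one of the two styles. What can be done is to silence the message only on the lines where .format() is the deliberate choice, or to disable it project-wide; a sketch assuming pylint 2.x:

format_string = "The result of {} + {} is {}".format(  # pylint: disable=consider-using-f-string
    a, b, some_long_complicated_function(a, b)
)

# or, in .pylintrc, switch the whole message off:
# [MESSAGES CONTROL]
# disable=consider-using-f-string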
Pylint doesn't like string.format() and wants me to use f-strings. Is this fixable?
I've upgraded to pylint 2.15.2, and suddenly I'm getting lots of consider-using-f-string warnings whenever I run pylint, where I've used % formatting for strings. I understand why Pylint doesn't want to use the old % formatting, but I also get this error when I try to use string.format() instead. Take the following code as an example: """Example module""" def some_long_complicated_function(a, b): """Do something""" return a + b def main(): """Main function""" a = 2 b = 3 percent_string = "The result of %s + %s is %s" % ( a, b, some_long_complicated_function(a, b) ) format_string = "The result of {} + {} is {}".format( a, b, some_long_complicated_function(a, b) ) f_string = f"The result of {a} + {b} is {some_long_complicated_function(a, b)}" print(percent_string) print(format_string) print(f_string) if __name__ == "__main__": main() When I run pylint on this code, I get the following output: ************* Module pyexample ./pyexample.py:11:21: C0209: Formatting a regular string which could be a f-string (consider-using-f-string) ./pyexample.py:15:20: C0209: Formatting a regular string which could be a f-string (consider-using-f-string) ------------------------------------------------------------------ Your code has been rated at 8.46/10 (previous run: 6.15/10, +2.31) There are instances like this where I don't want to use an f-string, because I think it actually hampers - not helps - readability, especially in cases like these where I may be writing long function calls inline within the string. In these places I'd rather use string.format(), because you can nicely separate out the format specifiers {} from the functions to generate the strings I want by putting them on a separate line. With f-strings, my lines may end up being too long and I have to resort to using line continuation characters, which again harms the readability IMO. The problem is, Pylint doesn't like string.format() - it only wants me to use f-strings. I know that this is a 'Convention' not 'Error', but my code has to pass Pylint 100%. I could waive this message, but that's not good practice and there are places in my code where I do want to swap out the %-string formats. My question: Is there a way to configure Pylint so that when I run it, it will not flag a consider-using-f-string warning when I use string.format() (only when I use % strings)? I've had a look in the rc-file but I can't see any obvious setting like this. Or is the only way to fix this to waive the warning entirely?
[ "If you just want to avoid long line or line continuation character, I usually choose to use parentheses:\nf_string = (f\"The result of {a} + {b} is \"\n f\"{some_long_complicated_function(a, b)}\")\n\n" ]
[ 0 ]
[]
[]
[ "f_string", "pylint", "python", "string.format" ]
stackoverflow_0074600829_f_string_pylint_python_string.format.txt
Q: Kivy program: how to change focus in pycharm? When I run a kivy program in Pycharm, the kivy window doesn't have the default kivy app title, the kivy logo in top-left nor the 3 control buttons in the top-right. It completely occupies my screen and I can't do anything anymore. I'm on Windows 10. Need help, please. I'm using Python 3.9.0, kivy 2.0.0 from kivy.app import App from kivy.uix.boxlayout import BoxLayout from kivy.uix.widget import Widget from kivy.uix.button import Button from kivy.uix.label import Label class MainWidget(Widget): pass class boxLayoutExample(BoxLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.orientation = "vertical" item1 = Button(text='a button') self.add_widget(item1) item2 = Label(text='a label') self.add_widget(item2) item3 = Button(text='a button') self.add_widget(item3) class myApp(App): pass myApp().run() A: the key is the configuration which can either be modified using the config.ini file or the config object. on my windows system the config file is in C:\users<username>.kivy\config.ini and you may have to change Windows Explorer to show hidden items if you want to see the .kivy directory. inside that configuration file, the borderless property being set to 1 could cause this behavior so set this to 0. The curios thing is that by default this should have been 0 so I don't know why it would have changed. [graphics] borderless = 0 and you may also be interested in these properties width = 1200 height = 600 fullscreen = 0
Kivy program: how to change focus in pycharm?
When I run a kivy program in Pycharm, the kivy window doesn't have the default kivy app title, the kivy logo in top-left nor the 3 control buttons in the top-right. It completely occupies my screen and I can't do anything anymore. I'm on Windows 10. Need help, please. I'm using Python 3.9.0, kivy 2.0.0 from kivy.app import App from kivy.uix.boxlayout import BoxLayout from kivy.uix.widget import Widget from kivy.uix.button import Button from kivy.uix.label import Label class MainWidget(Widget): pass class boxLayoutExample(BoxLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.orientation = "vertical" item1 = Button(text='a button') self.add_widget(item1) item2 = Label(text='a label') self.add_widget(item2) item3 = Button(text='a button') self.add_widget(item3) class myApp(App): pass myApp().run()
[ "the key is the configuration which can either be modified using the config.ini file or the config object.\non my windows system the config file is in C:\\users<username>.kivy\\config.ini\nand you may have to change Windows Explorer to show hidden items if you want to see the .kivy directory.\ninside that configuration file, the borderless property being set to 1 could cause this behavior so set this to 0. The curios thing is that by default this should have been 0 so I don't know why it would have changed.\n[graphics]\nborderless = 0\n\nand you may also be interested in these properties\nwidth = 1200\nheight = 600\nfullscreen = 0\n\n" ]
[ 0 ]
[ "from kivy import Config\nConfig.set(\"graphics\", \"fullscreen\", \"0\")\n\nRTFM -> go here\n" ]
[ -2 ]
[ "kivy", "pycharm", "python" ]
stackoverflow_0070982783_kivy_pycharm_python.txt
Q: AttributeError: 'list' object has no attribute 'lower'. How to fix the code in order to have it convert to upper or lower? ` def removeDigits(str): return str.translate({ord(i): None for i in '0123456789'}) def fileinput(): with open('constant.txt') as f: lines = f.readlines() print('Initial string: ', lines) res = list(map(removeDigits, lines)) print('Final string: ', res) print('Make string upper or lower?') choice = input() if choice.upper() == 'UPPER': print(res.upper()) elif choice.lower() == 'lower': print(res.lower()) else: print('An error has occured') fileinput() AttributeError Traceback (most recent call last) Input In [1], in <cell line: 23>() 19 else: 20 print('An error has occured') ---> 23 fileinput() Input In [1], in fileinput() 15 print(res.upper()) 17 elif choice.lower() == 'lower': ---> 18 print(res.lower()) 19 else: 20 print('An error has occured') AttributeError: 'list' object has no attribute 'lower' ` I wanted the program to pull a string from a file and print it while removing the integers in that string, and then have the user choose whether they want upper or lower and convert the new string without the integers into either upper or lower. The first section where it needs to pull from a text file and remove integers work, but I get an attribute error for converting the text to upper or lower. A: It is because you can't make a list lower or upper case. You have to make the elements in the list lower or upper case. For example: res_lower = [item.lower() for item in res] print(res_lower) Or in one line: print([item.lower() for item in res]) Instead of: print(res.lower()) If you want to print each element of the list individually, use a for loop: for item in res: print(item.lower()) Good Luck!
AttributeError: 'list' object has no attribute 'lower'. How to fix the code in order to have it convert to upper or lower?
` def removeDigits(str): return str.translate({ord(i): None for i in '0123456789'}) def fileinput(): with open('constant.txt') as f: lines = f.readlines() print('Initial string: ', lines) res = list(map(removeDigits, lines)) print('Final string: ', res) print('Make string upper or lower?') choice = input() if choice.upper() == 'UPPER': print(res.upper()) elif choice.lower() == 'lower': print(res.lower()) else: print('An error has occured') fileinput() AttributeError Traceback (most recent call last) Input In [1], in <cell line: 23>() 19 else: 20 print('An error has occured') ---> 23 fileinput() Input In [1], in fileinput() 15 print(res.upper()) 17 elif choice.lower() == 'lower': ---> 18 print(res.lower()) 19 else: 20 print('An error has occured') AttributeError: 'list' object has no attribute 'lower' ` I wanted the program to pull a string from a file and print it while removing the integers in that string, and then have the user choose whether they want upper or lower and convert the new string without the integers into either upper or lower. The first section where it needs to pull from a text file and remove integers work, but I get an attribute error for converting the text to upper or lower.
[ "It is because you can't make a list lower or upper case. You have to make the elements in the list lower or upper case.\nFor example:\nres_lower = [item.lower() for item in res]\nprint(res_lower)\n\nOr in one line:\nprint([item.lower() for item in res])\n\nInstead of:\nprint(res.lower())\n\nIf you want to print each element of the list individually, use a for loop:\nfor item in res:\n print(item.lower())\n\nGood Luck!\n" ]
[ 1 ]
[]
[]
[ "anaconda", "jupyter_notebook", "list", "python", "python_3.x" ]
stackoverflow_0074600872_anaconda_jupyter_notebook_list_python_python_3.x.txt
Q: resample date end of month match with date from original dataframe I have data that i want to resample use end of month based on original df but when i use df.resample('M').last(). the end of month date that i got is different from original df. see the asterix marks. 2005-12-31 should be >> 2005-12-29. any suggestion ? what parameter should i add into .resample() ? orginal df = DATE 2005-12-27 1161.707 2005-12-28 1164.143 *2005-12-29 1162.635* 2006-01-02 1171.709 2006-01-03 1184.690 2006-01-04 1211.699 test_resample = df.resample('M').last() DATE 2005-11-30 1096.641 *2005-12-31 1162.635* 2006-01-31 1232.321 A: You can't directly with resample, you should instead groupby.agg after temporarily resetting the index: (df.reset_index() .groupby(df.index.to_period('M')) .agg({'DATE': 'last', 'value': 'last'}) .set_index('DATE') ) Output: value DATE 2005-12-29 1162.635 2006-01-04 1211.699 A: Example data = {'2005-12-27': 1161.707, '2005-12-28': 1164.143, '2005-12-29': 1162.635, '2006-01-02': 1171.709, '2006-01-03': 1184.69, '2006-01-04': 1211.699} s = pd.Series(data) s.index = pd.to_datetime(s.index) output(s): 2005-12-27 1161.707 2005-12-28 1164.143 2005-12-29 1162.635 2006-01-02 1171.709 2006-01-03 1184.690 2006-01-04 1211.699 dtype: float64 Code s.groupby(s.index.to_period('M')).tail(1) output: 2005-12-29 1162.635 2006-01-04 1211.699 dtype: float64 If s is not sorted by time order, sort index
Resample to end of month and match the date from the original dataframe
I have data that I want to resample to end of month based on the original df, but when I use df.resample('M').last() the end-of-month date that I get is different from the original df. See the asterisk marks: 2005-12-31 should be >> 2005-12-29. Any suggestion? What parameter should I add to .resample()? original df = DATE 2005-12-27 1161.707 2005-12-28 1164.143 *2005-12-29 1162.635* 2006-01-02 1171.709 2006-01-03 1184.690 2006-01-04 1211.699 test_resample = df.resample('M').last() DATE 2005-11-30 1096.641 *2005-12-31 1162.635* 2006-01-31 1232.321
[ "You can't directly with resample, you should instead groupby.agg after temporarily resetting the index:\n(df.reset_index()\n .groupby(df.index.to_period('M'))\n .agg({'DATE': 'last', 'value': 'last'})\n .set_index('DATE')\n)\n\nOutput:\n value\nDATE \n2005-12-29 1162.635\n2006-01-04 1211.699\n\n", "Example\ndata = {'2005-12-27': 1161.707,\n '2005-12-28': 1164.143,\n '2005-12-29': 1162.635,\n '2006-01-02': 1171.709,\n '2006-01-03': 1184.69,\n '2006-01-04': 1211.699}\ns = pd.Series(data)\ns.index = pd.to_datetime(s.index)\n\noutput(s):\n2005-12-27 1161.707\n2005-12-28 1164.143\n2005-12-29 1162.635\n2006-01-02 1171.709\n2006-01-03 1184.690\n2006-01-04 1211.699\ndtype: float64\n\nCode\ns.groupby(s.index.to_period('M')).tail(1)\n\noutput:\n2005-12-29 1162.635\n2006-01-04 1211.699\ndtype: float64\n\nIf s is not sorted by time order, sort index\n" ]
[ 2, 2 ]
[]
[]
[ "pandas", "python", "resample" ]
stackoverflow_0074600712_pandas_python_resample.txt
Q: Embed Python source code in C++ as string I'm writing a C++ program that requires Python (3.11) code to be embedded into it and am using Python.h to try and accomplish this. The general idea is that my a python script, which will be stored by the C++ program as a string, as I'll be performing operations on the source at runtime, will contain a "main()" function which returns an array of known size. I'm aware I can do it via: ... PyObject *pName = PyString_FromString("main"); PyObject *pModule = PyImport_Import(pName) ... However, in order to actually execute the script, I would need to write it to a file just so that python could read it again. This adds extra time to execution that I'd prefer to avoid. Isn't there some way in which I can pass python the source code directly as a string and work from there? Or am I just screwed? EDIT: BTW, PyRun_SimpleString does not do what I want, as it doesn't return anything from the executed code. A: Found the answer thanks to nick in the comments. An example of usage of PyRun_String: https://schneide.blog/2011/10/10/embedding-python-into-cpp/, and extracting list variables from python script https://docs.python.org/3/c-api/list.html The final frankenstein: PyObject *main = PyImport_AddModule("__main__"); PyObject *globalDictionary = PyModule_GetDict(main); PyObject *localDictionary = PyDict_New(); PyRun_String("a=[0, 1, 2, 3, 4, 5]", Py_file_input, globalDictionary, localDictionary); PyObject *result = PyDict_GetItemString(localDictionary, "a"); double a[6]; for (int i = 0; i < PyList_Size(result); i++) { a[i] = PyFloat_AsDouble(PyList_GetItem(result, i)); }
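The snippet in the answer above assumes an interpreter that is already initialised. For completeness, a self-contained sketch of the same idea is below (CPython 3.x C API); the source string and the variable name a are placeholders, and error handling is deliberately minimal:

// build with something like: g++ embed.cpp $(python3-config --embed --cflags --ldflags)
#include <Python.h>
#include <string>
#include <vector>
#include <cstdio>

int main() {
    Py_Initialize();

    std::string source = "a = [x * x for x in range(6)]";

    PyObject *main_module = PyImport_AddModule("__main__");  // borrowed reference
    PyObject *globals = PyModule_GetDict(main_module);       // borrowed reference

    PyObject *run = PyRun_String(source.c_str(), Py_file_input, globals, globals);
    if (run == nullptr) { PyErr_Print(); Py_Finalize(); return 1; }
    Py_DECREF(run);

    PyObject *a = PyDict_GetItemString(globals, "a");         // borrowed reference
    std::vector<double> values;
    for (Py_ssize_t i = 0; i < PyList_Size(a); ++i)
        values.push_back(PyFloat_AsDouble(PyList_GetItem(a, i)));

    for (double v : values) std::printf("%f\n", v);

    Py_Finalize();
    return 0;
}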
Embed Python source code in C++ as string
I'm writing a C++ program that requires Python (3.11) code to be embedded into it and am using Python.h to try and accomplish this. The general idea is that a Python script, which will be stored by the C++ program as a string (since I'll be performing operations on the source at runtime), will contain a "main()" function which returns an array of known size. I'm aware I can do it via: ... PyObject *pName = PyString_FromString("main"); PyObject *pModule = PyImport_Import(pName) ... However, in order to actually execute the script, I would need to write it to a file just so that Python could read it again. This adds extra time to execution that I'd prefer to avoid. Isn't there some way in which I can pass Python the source code directly as a string and work from there? Or am I just screwed? EDIT: BTW, PyRun_SimpleString does not do what I want, as it doesn't return anything from the executed code.
[ "Found the answer thanks to nick in the comments.\nAn example of usage of PyRun_String: https://schneide.blog/2011/10/10/embedding-python-into-cpp/, and extracting list variables from python script https://docs.python.org/3/c-api/list.html\nThe final frankenstein:\nPyObject *main = PyImport_AddModule(\"__main__\");\nPyObject *globalDictionary = PyModule_GetDict(main);\nPyObject *localDictionary = PyDict_New();\nPyRun_String(\"a=[0, 1, 2, 3, 4, 5]\", Py_file_input, globalDictionary, localDictionary);\n \nPyObject *result = PyDict_GetItemString(localDictionary, \"a\");\n\ndouble a[6];\nfor (int i = 0; i < PyList_Size(result); i++) {\n a[i] = PyFloat_AsDouble(PyList_GetItem(result, i));\n}\n\n" ]
[ 1 ]
[]
[]
[ "c++", "python", "python_3.11", "python_embedding" ]
stackoverflow_0074600157_c++_python_python_3.11_python_embedding.txt
Q: Merging two list of dictionaries based on key dict1 = [{'id': 1.0, 'name': 'aa'}, {'id': 4.0, 'name': 'bb'}, {'id': 2.0, 'name': 'cc'}] and dict2 = [{'name': 'aa', 'dtype': 'StringType'}, {'name': 'bb', 'dtype': 'StringType'}, {'name': 'xx', 'dtype': 'StringType'}, {'name': 'cc', 'dtype': 'StringType'}] I would like to merge this two dictionaries based on their common key which is name. I would like to get the following desired result. merged_dict= [{'id': 1.0, 'name': 'aa', 'dtype': 'StringType'}, {'id': 4.0, 'name': 'bb', 'dtype': 'StringType'}, {'id': 2.0, 'name': 'cc', 'dtype': 'StringType'}] I was trying to get this using the following for loop. for i in dict1: for j in dict2: j.update(i) A: To avoid quadratic complexity, better first create a real dictionary (yours are lists of dictionaries), then update: tmp = {d['name']: d for d in dict2} for d in dict1: d.update(tmp.get(d['name'], {})) print(dict1) Output: [{'id': 1.0, 'name': 'aa', 'dtype': 'StringType'}, {'id': 4.0, 'name': 'bb', 'dtype': 'StringType'}, {'id': 2.0, 'name': 'cc', 'dtype': 'StringType'}] Intermediate tmp: {'aa': {'name': 'aa', 'dtype': 'StringType'}, 'bb': {'name': 'bb', 'dtype': 'StringType'}, 'xx': {'name': 'xx', 'dtype': 'StringType'}, 'cc': {'name': 'cc', 'dtype': 'StringType'}} If you want a copy (rather that modifying dict1 in place): tmp = {d['name']: d for d in dict2} merged_dict = [d|tmp.get(d['name'], {}) for d in dict1] A: You can use pandas and try following: import pandas as pd df1 = pd.DataFrame(dict1) df2 = pd.DataFrame(dict2) res = df1.merge(df2, on=['name']) The output: id name dtype 0 1.0 aa StringType 1 4.0 bb StringType 2 2.0 cc StringType If you need a dictionary, you can convert merged result pd.DataFrame() to dict. res.to_dict('records') Final output is: [ {'id': 1.0, 'name': 'aa', 'dtype': 'StringType'}, {'id': 4.0, 'name': 'bb', 'dtype': 'StringType'}, {'id': 2.0, 'name': 'cc', 'dtype': 'StringType'} ]
Merging two list of dictionaries based on key
dict1 = [{'id': 1.0, 'name': 'aa'}, {'id': 4.0, 'name': 'bb'}, {'id': 2.0, 'name': 'cc'}]

and

dict2 = [{'name': 'aa', 'dtype': 'StringType'}, {'name': 'bb', 'dtype': 'StringType'}, {'name': 'xx', 'dtype': 'StringType'}, {'name': 'cc', 'dtype': 'StringType'}]

I would like to merge these two dictionaries based on their common key, which is name. I would like to get the following desired result.

merged_dict = [{'id': 1.0, 'name': 'aa', 'dtype': 'StringType'}, {'id': 4.0, 'name': 'bb', 'dtype': 'StringType'}, {'id': 2.0, 'name': 'cc', 'dtype': 'StringType'}]

I was trying to get this using the following for loop.

for i in dict1:
    for j in dict2:
        j.update(i)
[ "To avoid quadratic complexity, better first create a real dictionary (yours are lists of dictionaries), then update:\ntmp = {d['name']: d for d in dict2}\n\nfor d in dict1:\n d.update(tmp.get(d['name'], {}))\n\nprint(dict1)\n\nOutput:\n[{'id': 1.0, 'name': 'aa', 'dtype': 'StringType'},\n {'id': 4.0, 'name': 'bb', 'dtype': 'StringType'},\n {'id': 2.0, 'name': 'cc', 'dtype': 'StringType'}]\n\nIntermediate tmp:\n{'aa': {'name': 'aa', 'dtype': 'StringType'},\n 'bb': {'name': 'bb', 'dtype': 'StringType'},\n 'xx': {'name': 'xx', 'dtype': 'StringType'},\n 'cc': {'name': 'cc', 'dtype': 'StringType'}}\n\nIf you want a copy (rather that modifying dict1 in place):\ntmp = {d['name']: d for d in dict2}\nmerged_dict = [d|tmp.get(d['name'], {}) for d in dict1]\n\n", "You can use pandas and try following:\nimport pandas as pd\ndf1 = pd.DataFrame(dict1)\ndf2 = pd.DataFrame(dict2)\nres = df1.merge(df2, on=['name'])\n\nThe output:\n id name dtype\n0 1.0 aa StringType\n1 4.0 bb StringType\n2 2.0 cc StringType\n\nIf you need a dictionary, you can convert merged result pd.DataFrame() to dict.\nres.to_dict('records')\n\nFinal output is:\n[\n {'id': 1.0, 'name': 'aa', 'dtype': 'StringType'}, \n {'id': 4.0, 'name': 'bb', 'dtype': 'StringType'}, \n {'id': 2.0, 'name': 'cc', 'dtype': 'StringType'}\n]\n\n" ]
[ 1, 0 ]
[]
[]
[ "dictionary", "python" ]
stackoverflow_0074600779_dictionary_python.txt
Q: Combining multiple rows in pandas dataframe with sparse values After working on a pandas dataframe I have the following sparse situation Name ParamA ParamB ParamC ParamD A 1.0 NULL NULL NULL A NULL NULL 3.0 NULL A NULL NULL NULL 6.0 What I want to have is combining multiple rows under the column 'Name' and substituting the NULL to the value present in the next rows (if multiple rows have something in ParamX then, take the last). The output of the previous example would be: Name ParamA ParamB ParamC ParamD A 1.0 NULL 3.0 6.0 Any hint? A: You can achieve in a simple way, a grouped last(): df.groupby('Name',as_index=False).last() prints: Name ParamA ParamB ParamC ParamD 0 A 1.0 NaN 3.0 6.0 No need for apply.
Combining multiple rows in pandas dataframe with sparse values
After working on a pandas dataframe I have the following sparse situation:

Name ParamA ParamB ParamC ParamD
A    1.0    NULL   NULL   NULL
A    NULL   NULL   3.0    NULL
A    NULL   NULL   NULL   6.0

What I want is to combine the multiple rows under the column 'Name', substituting each NULL with the value present in the next rows (if multiple rows have something in ParamX, then take the last). The output of the previous example would be:

Name ParamA ParamB ParamC ParamD
A    1.0    NULL   3.0    6.0

Any hint?
[ "You can achieve in a simple way, a grouped last():\ndf.groupby('Name',as_index=False).last()\n\nprints:\n Name ParamA ParamB ParamC ParamD\n0 A 1.0 NaN 3.0 6.0\n\nNo need for apply.\n" ]
[ 0 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074600920_dataframe_pandas_python.txt
Q: Jupyter Notebook: Autocomplete has too many suggestions When using autocomplete in Jupyter Notebooks it is super nice that you can use autocomplete out of the box, but the autocomplete makes too many suggestions that are not relevant. E.g. when autocompleting inside a function, then I only want relevant parameters to be autocompleted, not 60 random python values. People have suggested using %config Completer.use_jedi = True, but that turns everything off and is even worse. A: You can try installing nbextension for suggestions in jupyter notebook. for more info plz Click here
Jupyter Notebook: Autocomplete has too many suggestions
When using autocomplete in Jupyter Notebooks it is super nice that you can use autocomplete out of the box, but the autocomplete makes too many suggestions that are not relevant. E.g. when autocompleting inside a function, then I only want relevant parameters to be autocompleted, not 60 random python values. People have suggested using %config Completer.use_jedi = True, but that turns everything off and is even worse.
[ "You can try installing nbextension for suggestions in jupyter notebook.\nfor more info plz Click here\n" ]
[ 0 ]
[]
[]
[ "autocomplete", "jedi", "jupyter", "python" ]
stackoverflow_0074600954_autocomplete_jedi_jupyter_python.txt
Q: scikit-learn column transformer- columns with different discrete values I have dataset with about 10 columns with discrete data and I have troubles with transforming them to the to form where its possible to perform machine learning I was able to transoform one column which contain only YES/NO values in this way: le = LabelEncoder() X['ABC'] = le.fit_transform(X['ABC']) and it seems okay However if i have something different than YES/NO, for example localisation with 10 different values i have only errors from sklearn.feature_extraction import FeatureHasher h = FeatureHasher(n_features=) D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}] f = h.transform(D) f.toarray() I tried using featurehasher bun im not sure if thats good idea, I've changed example code to get data from column but got an error with info: input can be only dict i've also tried something like that: ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1])], remainder='passthrough') X = np.array(ct.fit_transform(X)) X but it also dont work Could someone send me some tip or lonk for good tutorial? I found a lot but they deosnt seem to match my situation A: You are almost there with ColumnTransformer and OneHotEncoder, refer to examples here (https://www.geeksforgeeks.org/prediction-using-columntransformer-onehotencoder-and-pipeline/) as well as their respective docs to get it working. Also when you say it doesn't work, please share what the error was. Use OneHotEncoder for nominal cat features, and OrdinalEncoder for ordinal cat features. There is a somewhat easier option of using pandas.get_dummies() (but typically is only used in notebooks and EDAs, rather than in a production environment) which is simpler syntactically. The lines of code you used for LabelEncoder initially, you can also just apply OneHotEncoder the same way, without having to use ColumnTransformer. So that could work for you as well.
scikit-learn column transformer- columns with different discrete values
I have a dataset with about 10 columns of discrete data and I am having trouble transforming them into a form where it is possible to perform machine learning.
I was able to transform one column which contains only YES/NO values in this way:

le = LabelEncoder()
X['ABC'] = le.fit_transform(X['ABC'])

and it seems okay. However, if I have something different than YES/NO, for example a localisation column with 10 different values, I only get errors.

from sklearn.feature_extraction import FeatureHasher
h = FeatureHasher(n_features=)
D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
f = h.transform(D)
f.toarray()

I tried using FeatureHasher, but I'm not sure if that's a good idea. I've changed the example code to get data from the column but got an error with the info: input can be only dict.
I've also tried something like this:

ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1])], remainder='passthrough')
X = np.array(ct.fit_transform(X))
X

but it also doesn't work.
Could someone send me a tip or a link to a good tutorial? I found a lot but they don't seem to match my situation.
[ "You are almost there with ColumnTransformer and OneHotEncoder, refer to examples here (https://www.geeksforgeeks.org/prediction-using-columntransformer-onehotencoder-and-pipeline/) as well as their respective docs to get it working. Also when you say it doesn't work, please share what the error was.\nUse OneHotEncoder for nominal cat features, and OrdinalEncoder for ordinal cat features.\nThere is a somewhat easier option of using pandas.get_dummies() (but typically is only used in notebooks and EDAs, rather than in a production environment) which is simpler syntactically.\nThe lines of code you used for LabelEncoder initially, you can also just apply OneHotEncoder the same way, without having to use ColumnTransformer. So that could work for you as well.\n" ]
[ 0 ]
[]
[]
[ "column_tansformer", "machine_learning", "pandas", "python", "scikit_learn" ]
stackoverflow_0074507838_column_tansformer_machine_learning_pandas_python_scikit_learn.txt
Q: How can I read multiple text files and save them individually as a Pandas Dataframe? I have multiple txt files and I would like to convert them to a dataframe by creating a new column using header. My data looks like: Person:?,?;F dob. ? MT: ? Z:C NewYork Mon.:S St.? 144 cm/35 Kg/5 YearsOld 45,34,22,26,0 78,74,82,11,0 I use the following code to create a dataframe out of a single text file. with open('file_directory', 'r') as f: heading_rows = [next(f) for _ in range(3)] city = re.findall(pattern = ' \w+ ', string = heading_rows[0])[0].strip() numbers_list = [re.findall(pattern='\d+', string=row) for row in heading_rows if 'cm' and 'kg' in row.lower()][0] height, weight, age = [int(numbers_list[i]) for i in range(3)] df = pd.read_csv('file_directory', sep='\s+|;|,', engine='python', skiprows=8,comment='cm', index_col=None, names=list('ABCDEF')) #df.dropna(inplace=True) df['HEIGHT'] = height df['WEIGHT'] = weight df['AGE'] = age df['CENTER'] = city I tried to put the code (above) in a for loop so that I can read all text files in the folder so that I can convert them into a Pandas dataframe individually and save as a csv file. lst = [] for name in glob.glob('my_directory/*'): with open(name, 'r') as f: heading_rows = [next(f) for _ in range(1)] lst.append(heading_rows) Bu, I end up with StopIteration error in next(f) aprt of my code. How can I obtain the following dataframe while reading multiple text files? Then I would like to save each file as CSV file. My expectation is to have the following dataframe type: A, B, C, D, E, height, weight, age, city 45,34,22,26,0, 144, 35, 5, NewYork 78,74,82,11,0, 144, 35, 5, NewYork A: You should use chardet which articulates encoding readings. Then add the read_Csv part in for loop. import chardet for name in glob.glob('file_directory/*'): with open(name, 'r') as f: heading_rows = [next(f) for _ in range(5)] #print(re.findall(pattern = ' \w+ ', string = heading_rows[0])[0]) # to escape errors try: city = re.findall(pattern = ' \w+ ', string = heading_rows[0])[0].strip() except IndexError: pass numbers_list = [re.findall(pattern='\d+', string=row) for row in heading_rows if 'cm' and 'kg' in row.lower()][0] height, weight, age = [int(numbers_list[i]) for i in range(3)] with open(name, 'rb') as file: encodings = chardet.detect(file.read())["encoding"] df = pd.read_csv(name,sep='\s+|;|,', engine='python', encoding=encodings, skiprows=1,comment='cm', index_col=None, names=list('ABCDEF')) df.to_csv(name+'.csv',index=False) A: Try: import re import pandas as pd text = """\ Person:?,?;F dob. ? MT: ? Z:C NewYork Mon.:S St.? 144 cm/35 Kg/5 YearsOld 45,34,22,26,0 78,74,82,11,0 """ pat = re.compile( r"(?sim)Z:C (\S+).*(\d+)\s*cm\D+(\d+)\s*kg\D+(\d+).*?((?:^[\d,]+\n)+)" ) m = pat.search(text) if m: city, height, weight, age, data = m.groups() all_data = [] for row in data.splitlines(): all_data.append( list(map(int, row.split(","))) + [height, weight, age, city] ) df = pd.DataFrame( all_data, columns=["A", "B", "C", "D", "E", "height", "weight", "age", "city"], ) print(df) Prints: A B C D E height weight age city 0 45 34 22 26 0 4 35 5 NewYork 1 78 74 82 11 0 4 35 5 NewYork
How can I read multiple text files and save them individually as a Pandas Dataframe?
I have multiple txt files and I would like to convert them to a dataframe by creating a new column using header. My data looks like: Person:?,?;F dob. ? MT: ? Z:C NewYork Mon.:S St.? 144 cm/35 Kg/5 YearsOld 45,34,22,26,0 78,74,82,11,0 I use the following code to create a dataframe out of a single text file. with open('file_directory', 'r') as f: heading_rows = [next(f) for _ in range(3)] city = re.findall(pattern = ' \w+ ', string = heading_rows[0])[0].strip() numbers_list = [re.findall(pattern='\d+', string=row) for row in heading_rows if 'cm' and 'kg' in row.lower()][0] height, weight, age = [int(numbers_list[i]) for i in range(3)] df = pd.read_csv('file_directory', sep='\s+|;|,', engine='python', skiprows=8,comment='cm', index_col=None, names=list('ABCDEF')) #df.dropna(inplace=True) df['HEIGHT'] = height df['WEIGHT'] = weight df['AGE'] = age df['CENTER'] = city I tried to put the code (above) in a for loop so that I can read all text files in the folder so that I can convert them into a Pandas dataframe individually and save as a csv file. lst = [] for name in glob.glob('my_directory/*'): with open(name, 'r') as f: heading_rows = [next(f) for _ in range(1)] lst.append(heading_rows) Bu, I end up with StopIteration error in next(f) aprt of my code. How can I obtain the following dataframe while reading multiple text files? Then I would like to save each file as CSV file. My expectation is to have the following dataframe type: A, B, C, D, E, height, weight, age, city 45,34,22,26,0, 144, 35, 5, NewYork 78,74,82,11,0, 144, 35, 5, NewYork
[ "You should use chardet which articulates encoding readings. Then add the read_Csv part in for loop.\nimport chardet\nfor name in glob.glob('file_directory/*'):\n with open(name, 'r') as f:\n heading_rows = [next(f) for _ in range(5)]\n #print(re.findall(pattern = ' \\w+ ', string = heading_rows[0])[0])\n\n# to escape errors\n try:\n city = re.findall(pattern = ' \\w+ ', string = heading_rows[0])[0].strip()\n except IndexError:\n pass\n\n numbers_list = [re.findall(pattern='\\d+', string=row) for row in heading_rows if 'cm' and 'kg' in row.lower()][0]\n\n height, weight, age = [int(numbers_list[i]) for i in range(3)]\n\n with open(name, 'rb') as file:\n encodings = chardet.detect(file.read())[\"encoding\"]\n df = pd.read_csv(name,sep='\\s+|;|,', engine='python', encoding=encodings, skiprows=1,comment='cm', index_col=None, names=list('ABCDEF'))\n\n\n df.to_csv(name+'.csv',index=False)\n\n", "Try:\nimport re\nimport pandas as pd\n\n\ntext = \"\"\"\\\nPerson:?,?;F dob. ? MT: ? Z:C NewYork Mon.:S St.?\n\n144 cm/35 Kg/5 YearsOld\n\n\n45,34,22,26,0\n78,74,82,11,0\n\"\"\"\n\npat = re.compile(\n r\"(?sim)Z:C (\\S+).*(\\d+)\\s*cm\\D+(\\d+)\\s*kg\\D+(\\d+).*?((?:^[\\d,]+\\n)+)\"\n)\n\nm = pat.search(text)\nif m:\n city, height, weight, age, data = m.groups()\n all_data = []\n for row in data.splitlines():\n all_data.append(\n list(map(int, row.split(\",\"))) + [height, weight, age, city]\n )\n\ndf = pd.DataFrame(\n all_data,\n columns=[\"A\", \"B\", \"C\", \"D\", \"E\", \"height\", \"weight\", \"age\", \"city\"],\n)\nprint(df)\n\nPrints:\n A B C D E height weight age city\n0 45 34 22 26 0 4 35 5 NewYork\n1 78 74 82 11 0 4 35 5 NewYork\n\n" ]
[ 1, 0 ]
[]
[]
[ "pandas", "python", "python_re" ]
stackoverflow_0074574356_pandas_python_python_re.txt
Q: Python3 convert json one-line to multi-line format Who will help me with the code? I have a json file that looks like this: {"entries": [{"attributes": {"cn": ["John Doe"], "lastLogon": ["133137573913265630"], "sn": ["Doe"], "userAccountControl": ["4096"]},"dn": "CN=John Doe,OU=Users,OU=UNIVERSUM,DC=universum,DC=local"}, {"attributes": {"cn": ["Jane Doe"], "lastLogon": [], "sn": ["Doe"], "userAccountControl": ["514"]}, "dn": "CN=Jane Doe,OU=Users,OU=UNIVERSUM,DC=universum,DC=local"}]} which I create with the json module for dc in dcList: LDAP_HOST = dc['hostName'] def ldap_server(): return Server(LDAP_HOST, use_ssl=True, tls=tls_configuration, get_info=ALL_ATTRIBUTES) conn = ldap_connection() conn.search(LDAP_BASE_DN, LDAP_OBJECT_FILTER, attributes=user_attr_list) ### write data from addc to JSON file jsonFile = rootPath + dataPath + LDAP_HOST +"-"+ jsonUsersData data = json.loads(conn.response_to_json()) with open(jsonFile, 'w') as f: json.dump(data, f) I would like the file to look more readable, for example: { "entries": [ { "attributes": { "cn": ["John Doe"], "lastLogon": ["133137573913265630"], "sn": ["Doe"], "userAccountControl": ["4096"] }, "dn": "CN=John Doe,OU=Users,OU=UNIVERSUM,DC=universum,DC=local" }, { "attributes": { "cn": ["Jane Doe"], "lastLogon": [], "sn": ["Doe"], "userAccountControl": ["514"] }, "dn": "CN=Jane Doe,OU=Users,OU=UNIVERSUM,DC=universum,DC=local" } ] } and ideally, the file should be converted to the following format: "users": [ { "cn": ["John Doe"], "lastLogon": ["133137573913265630"], "sn": ["Doe"], "userAccountControl": ["4096"] "dn": "CN=John Doe,OU=Users,OU=UNIVERSUM,DC=universum,DC=local" }, { "cn": ["Jane Doe"], "lastLogon": [], "sn": ["Doe"], "userAccountControl": ["514"] "dn": "CN=Jane Doe,OU=Users,OU=UNIVERSUM,DC=universum,DC=local" } ] } A: You can use json.dump arguments like json.dump(data, indent=2). The second ("ideal") format is not a valid JSON, so it's (AFAIK) achievable only using some other string processing methods (if it's a typo, the JSON format might be valid, however it's not possible to change format using json.dump arguments and it will require few lines of Python code in order to change the json structure). EDIT: Also there is more possible using json.dump options, some sorting etc. See docs here. A: You can achieve this using below line of code json.dump(data, f,indent=4,sort_keys=True) A: you already have a few answers but here is the full python code import json json_string='SOME_JSON_HERE' try: parsed_json=json.loads(json_string) out=(json.dumps(parsed_json, indent=4,sort_keys=False)) print(out) except Exception as e: print(repr(e))
Python3 convert json one-line to multi-line format
Who will help me with the code? I have a json file that looks like this: {"entries": [{"attributes": {"cn": ["John Doe"], "lastLogon": ["133137573913265630"], "sn": ["Doe"], "userAccountControl": ["4096"]},"dn": "CN=John Doe,OU=Users,OU=UNIVERSUM,DC=universum,DC=local"}, {"attributes": {"cn": ["Jane Doe"], "lastLogon": [], "sn": ["Doe"], "userAccountControl": ["514"]}, "dn": "CN=Jane Doe,OU=Users,OU=UNIVERSUM,DC=universum,DC=local"}]} which I create with the json module for dc in dcList: LDAP_HOST = dc['hostName'] def ldap_server(): return Server(LDAP_HOST, use_ssl=True, tls=tls_configuration, get_info=ALL_ATTRIBUTES) conn = ldap_connection() conn.search(LDAP_BASE_DN, LDAP_OBJECT_FILTER, attributes=user_attr_list) ### write data from addc to JSON file jsonFile = rootPath + dataPath + LDAP_HOST +"-"+ jsonUsersData data = json.loads(conn.response_to_json()) with open(jsonFile, 'w') as f: json.dump(data, f) I would like the file to look more readable, for example: { "entries": [ { "attributes": { "cn": ["John Doe"], "lastLogon": ["133137573913265630"], "sn": ["Doe"], "userAccountControl": ["4096"] }, "dn": "CN=John Doe,OU=Users,OU=UNIVERSUM,DC=universum,DC=local" }, { "attributes": { "cn": ["Jane Doe"], "lastLogon": [], "sn": ["Doe"], "userAccountControl": ["514"] }, "dn": "CN=Jane Doe,OU=Users,OU=UNIVERSUM,DC=universum,DC=local" } ] } and ideally, the file should be converted to the following format: "users": [ { "cn": ["John Doe"], "lastLogon": ["133137573913265630"], "sn": ["Doe"], "userAccountControl": ["4096"] "dn": "CN=John Doe,OU=Users,OU=UNIVERSUM,DC=universum,DC=local" }, { "cn": ["Jane Doe"], "lastLogon": [], "sn": ["Doe"], "userAccountControl": ["514"] "dn": "CN=Jane Doe,OU=Users,OU=UNIVERSUM,DC=universum,DC=local" } ] }
[ "You can use json.dump arguments like json.dump(data, indent=2).\nThe second (\"ideal\") format is not a valid JSON, so it's (AFAIK) achievable only using some other string processing methods (if it's a typo, the JSON format might be valid, however it's not possible to change format using json.dump arguments and it will require few lines of Python code in order to change the json structure).\nEDIT: Also there is more possible using json.dump options, some sorting etc. See docs here.\n", "You can achieve this using below line of code\njson.dump(data, f,indent=4,sort_keys=True)\n", "you already have a few answers but here is the full python code\nimport json\n\njson_string='SOME_JSON_HERE'\n\ntry:\n parsed_json=json.loads(json_string)\n out=(json.dumps(parsed_json, indent=4,sort_keys=False))\n print(out)\nexcept Exception as e:\n print(repr(e))\n\n" ]
[ 2, 0, 0 ]
[]
[]
[ "json", "python", "python_3.x" ]
stackoverflow_0074600794_json_python_python_3.x.txt
Q: Plot nlargest is showing the inverse output I am trying to plot the feature importance generated using random forest algorithm using the below code. However, the largest values are shown at the bottom. But I want them to be at the top. feat_importances = pd.Series(g_search.best_estimator_.feature_importances_, index=X_train.columns) feat_importances.nlargest(20).plot(kind='barh') You can see the graph below that all large values are at the bottom. But, I want them to appear on top of the graph. Why is the output showing in reverse order? A: You can reverse your y-axis: plt.gca().invert_yaxis()
Plot nlargest is showing the inverse output
I am trying to plot the feature importance generated using random forest algorithm using the below code. However, the largest values are shown at the bottom. But I want them to be at the top. feat_importances = pd.Series(g_search.best_estimator_.feature_importances_, index=X_train.columns) feat_importances.nlargest(20).plot(kind='barh') You can see the graph below that all large values are at the bottom. But, I want them to appear on top of the graph. Why is the output showing in reverse order?
[ "You can reverse your y-axis:\nplt.gca().invert_yaxis()\n\n" ]
[ 1 ]
[]
[]
[ "machine_learning", "matplotlib", "pandas", "plot", "python" ]
stackoverflow_0074600993_machine_learning_matplotlib_pandas_plot_python.txt
Q: Flatten columns with lists in python I have a dataframe with some columns in lists and I would like to flatten these list columns. Below is my dataframe: df = pd.DataFrame({ 'col_1': ['abcd3', 'd4fs3'], 'col_2': ['vfce157', 'dfde28'], 'col_3': [['id_1','id_2'],['id_4','id_6','id_7']], 'col_4': [['p_1','p_2'],['p_3','p_5','p_0']], 'col_5': [['d_1','d_2'],['d_4','d_7','d_8']] }) df col_1 col_2 col_3 col_4 col_5 abcd3 vfce157 [id_1,id_2] [p_1,p_2] [d_1,d_2] d4fs3 dfde28 [id_4,id_6,id_7] [p_3,p_5,p_0] [d_4,d_7,d_8] The result expected: col_1 col_2 col_3 col_4 col_5 abcd3 vfce157 id_1 p_1 d_1 abcd3 vfce157 id_2 p_2 d_2 d4fs3 dfde28 id_4 p_3 d_4 d4fs3 dfde28 id_6 p_5 d_7 d4fs3 dfde28 id_7 p_0 d_8 Thank you for your help and time! A: You are looking for the explode Pandas method df.explode(['col3', 'col4', 'col5']) should do the trick
Flatten columns with lists in python
I have a dataframe with some columns in lists and I would like to flatten these list columns. Below is my dataframe: df = pd.DataFrame({ 'col_1': ['abcd3', 'd4fs3'], 'col_2': ['vfce157', 'dfde28'], 'col_3': [['id_1','id_2'],['id_4','id_6','id_7']], 'col_4': [['p_1','p_2'],['p_3','p_5','p_0']], 'col_5': [['d_1','d_2'],['d_4','d_7','d_8']] }) df col_1 col_2 col_3 col_4 col_5 abcd3 vfce157 [id_1,id_2] [p_1,p_2] [d_1,d_2] d4fs3 dfde28 [id_4,id_6,id_7] [p_3,p_5,p_0] [d_4,d_7,d_8] The result expected: col_1 col_2 col_3 col_4 col_5 abcd3 vfce157 id_1 p_1 d_1 abcd3 vfce157 id_2 p_2 d_2 d4fs3 dfde28 id_4 p_3 d_4 d4fs3 dfde28 id_6 p_5 d_7 d4fs3 dfde28 id_7 p_0 d_8 Thank you for your help and time!
[ "You are looking for the explode Pandas method\ndf.explode(['col3', 'col4', 'col5']) should do the trick\n" ]
[ 1 ]
[]
[]
[ "dataframe", "flatten", "functional_programming", "list", "python" ]
stackoverflow_0074600931_dataframe_flatten_functional_programming_list_python.txt
Q: How to reverse the elements in numpy.ndarray Python I have a numpy.ndarray in Python has the following elements e.g[[-0.85] [ 0.95]]. How can I reverse it so it can be [ [ 0.95][-0.85]]. Keep in mind that the length always two but for sure the values are changing. <class 'numpy.ndarray'> [[-0.85] [ 0.95]] A: numpy.flip() should do the job array = numpy.flip(array) returns [[ 0.95] [-0.85]] A: You can do this by using flip() function. import numpy as np l=[12,45,10,78,100] m=np.flip(l) print(m) Alternatively, you can also go this approach. m=l[::-1] print(m) You can find something informative here. Happy Coding!
How to reverse the elements in numpy.ndarray Python
I have a numpy.ndarray in Python that has the following elements, e.g. [[-0.85] [ 0.95]]. How can I reverse it so it becomes [[ 0.95] [-0.85]]? Keep in mind that the length is always two, but the values are changing.

<class 'numpy.ndarray'>
[[-0.85]
 [ 0.95]]
[ "numpy.flip() should do the job\narray = numpy.flip(array)\n\nreturns\n[[ 0.95] [-0.85]]\n\n", "You can do this by using flip() function.\nimport numpy as np \nl=[12,45,10,78,100]\nm=np.flip(l)\nprint(m)\n\nAlternatively, you can also go this approach.\nm=l[::-1]\nprint(m)\n\nYou can find something informative here.\nHappy Coding!\n" ]
[ 0, 0 ]
[]
[]
[ "arrays", "list", "multidimensional_array", "python" ]
stackoverflow_0074600765_arrays_list_multidimensional_array_python.txt
Q: Warning messages from scapy Using this: from scapy.all import * I've got these two warnings which I want to remove Warning (from warnings module): File "C:\Users\localfp\AppData\Local\Programs\Python\Python310\lib\site-packages\scapy\layers\ipsec.py", line 471 cipher=algorithms.Blowfish, CryptographyDeprecationWarning: Blowfish has been deprecated Warning (from warnings module): File "C:\Users\localfp\AppData\Local\Programs\Python\Python310\lib\site-packages\scapy\layers\ipsec.py", line 485 cipher=algorithms.CAST5, CryptographyDeprecationWarning: CAST5 has been deprecated Unfortunately I've found the solution for this kind of error only for paramiko. I'm using this in order to sniff packets from an ethernet II connection. Is there a way to remove these two warnings? A: It worked using code like this (I'm using python 3): from warnings import filterwarnings filterwarnings("ignore") A: This is apparently fixed in https://github.com/secdev/scapy/pull/3645 and will be included in Scapy 2.5.0+ (use the github version in the meantime) A: A more general solution (if you only want to ignore the CryptographyDeprecationWarning) but keep the rest of the warnings: import warnings from cryptography.utils import CryptographyDeprecationWarning warnings.filterwarnings("ignore", category=CryptographyDeprecationWarning) from scapy.all import *
Warning messages from scapy
Using this: from scapy.all import * I've got these two warnings which I want to remove Warning (from warnings module): File "C:\Users\localfp\AppData\Local\Programs\Python\Python310\lib\site-packages\scapy\layers\ipsec.py", line 471 cipher=algorithms.Blowfish, CryptographyDeprecationWarning: Blowfish has been deprecated Warning (from warnings module): File "C:\Users\localfp\AppData\Local\Programs\Python\Python310\lib\site-packages\scapy\layers\ipsec.py", line 485 cipher=algorithms.CAST5, CryptographyDeprecationWarning: CAST5 has been deprecated Unfortunately I've found the solution for this kind of error only for paramiko. I'm using this in order to sniff packets from an ethernet II connection. Is there a way to remove these two warnings?
[ "It worked using code like this (I'm using python 3):\nfrom warnings import filterwarnings\nfilterwarnings(\"ignore\")\n\n", "This is apparently fixed in https://github.com/secdev/scapy/pull/3645 and will be included in Scapy 2.5.0+ (use the github version in the meantime)\n", "A more general solution (if you only want to ignore the CryptographyDeprecationWarning) but keep the rest of the warnings:\nimport warnings\nfrom cryptography.utils import CryptographyDeprecationWarning\nwarnings.filterwarnings(\"ignore\", category=CryptographyDeprecationWarning)\n\nfrom scapy.all import *\n\n" ]
[ 2, 2, 1 ]
[]
[]
[ "python", "scapy", "warnings" ]
stackoverflow_0073075947_python_scapy_warnings.txt
Q: "No module named manage" error when trying to debug a Werkzeug Django app in VSCode As the title says. I have a Django 4.1 app, which uses Werkzeug to enable https. I have the following launch.json set up: { "version": "0.2.0", "configurations": [ { "name": "Python: Django", "type": "python", "request": "launch", "python": "${workspaceFolder}/venv/Scripts/python.exe", "program": "${workspaceFolder}\\appname\\manage.py", "args": [ "runserver_plus", "--cert-file", "${workspaceFolder}/certs/cert.pem", "--key-file", "${workspaceFolder}/certs/key.pem" ], "justMyCode": false, "django": true } ] } When I run this through the VSCode debugger it immediately quits in the get_wsgi_application() function with "No module named manage". I tried googling around, but no answer proved to be useful. Any ideas what I am doing wrong? A: This problem is only specific to VS Code's debugger and it is happening for wrong path in PYTHONPATH variable. Hence, this problem will not happen if you run it from shell. In your case, you need to add a new attribute named env in the launch.json configuration, which will add environment variable. In there you need to update the PYTHONPATH, because manage.py is not in the root folder of the project: "configurations": [ {"env": { "PYTHONPATH": "${workspaceRoot}\\appname" }, "name": "Python: Django", "type": "python", "request": "launch", "program": "${workspaceFolder}\\appname\\manage.py", "args": [ "runserver_plus" ], "django": true, "justMyCode": false } ] }
"No module named manage" error when trying to debug a Werkzeug Django app in VSCode
As the title says. I have a Django 4.1 app, which uses Werkzeug to enable https. I have the following launch.json set up: { "version": "0.2.0", "configurations": [ { "name": "Python: Django", "type": "python", "request": "launch", "python": "${workspaceFolder}/venv/Scripts/python.exe", "program": "${workspaceFolder}\\appname\\manage.py", "args": [ "runserver_plus", "--cert-file", "${workspaceFolder}/certs/cert.pem", "--key-file", "${workspaceFolder}/certs/key.pem" ], "justMyCode": false, "django": true } ] } When I run this through the VSCode debugger it immediately quits in the get_wsgi_application() function with "No module named manage". I tried googling around, but no answer proved to be useful. Any ideas what I am doing wrong?
[ "This problem is only specific to VS Code's debugger and it is happening for wrong path in PYTHONPATH variable. Hence, this problem will not happen if you run it from shell.\nIn your case, you need to add a new attribute named env in the launch.json configuration, which will add environment variable. In there you need to update the PYTHONPATH, because manage.py is not in the root folder of the project:\n\"configurations\": [\n {\"env\": {\n \"PYTHONPATH\": \"${workspaceRoot}\\\\appname\"\n },\n \"name\": \"Python: Django\",\n \"type\": \"python\",\n \"request\": \"launch\",\n \"program\": \"${workspaceFolder}\\\\appname\\\\manage.py\",\n \"args\": [\n \"runserver_plus\"\n ],\n \"django\": true,\n \"justMyCode\": false\n }\n ]\n}\n\n" ]
[ 3 ]
[ "try this\n{\n \"version\": \"0.2.0\",\n \"configurations\": [\n {\n \"name\": \"Python: Django\",\n \"type\": \"python\",\n \"request\": \"launch\",\n \"python\": \"${workspaceFolder}/venv/Scripts/python.exe\",\n \"program\": \"${workspaceFolder}/manage.py\",\n \"args\": [\n \"runserver\",\n ],\n \"justMyCode\": false,\n \"django\": true\n }\n ]\n}\n\n", "manage.py is located in the base directory of the project.\nSo\n \"program\": \"${workspaceFolder}\\\\manage.py\",\n\n" ]
[ -1, -1 ]
[ "django", "python", "visual_studio_code", "vscode_debugger", "werkzeug" ]
stackoverflow_0074503488_django_python_visual_studio_code_vscode_debugger_werkzeug.txt
Q: replace column names from one df with the rows from the other df df1: word merged green positive_green green energy positive_green_energy jets negative_jets green hydrogen positive_green_hydrogen renewable energy positive_renewable_energy df2: column1 column2 green green energy jets green hydrogen renewable energy xx xx xx xx xx xx xx I would like to replace columns' names in the df2 to the ones from df1 (rows from df1 matching columns with df2 and replace) A: Use DataFrame.rename with dictionary: df2 = df2.rename(columns=dict(zip(df1.word, df1.merged)))
replace column names from one df with the rows from the other df
df1: word merged green positive_green green energy positive_green_energy jets negative_jets green hydrogen positive_green_hydrogen renewable energy positive_renewable_energy df2: column1 column2 green green energy jets green hydrogen renewable energy xx xx xx xx xx xx xx I would like to replace columns' names in the df2 to the ones from df1 (rows from df1 matching columns with df2 and replace)
[ "Use DataFrame.rename with dictionary:\ndf2 = df2.rename(columns=dict(zip(df1.word, df1.merged)))\n\n" ]
[ 1 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074601269_dataframe_pandas_python.txt
Q: How to run Django channels with StreamingHttpResponse in ASGI I have a simple app that streams images using open cv and the server set in wsgi. But whenever I introduce Django channels to the picture and change from WSGI to ASGI the streaming stops. How can I stream images from cv2 and in the same time use Django channels? Thanks you in advance My code for streaming: def camera_feed(request): stream = CameraStream() frames = stream.get_frames() return StreamingHttpResponse(frames, content_type='multipart/x-mixed-replace; boundary=frame') settings.py: ASGI_APPLICATION = 'photon.asgi.application' asgi.py application = ProtocolTypeRouter({ 'http': get_asgi_application(), 'websocket': AuthMiddlewareStack(URLRouter(ws_urlpatterns)) }) A: First, we don't need StramingHTTPResponse for sending image data at all ... For this, first, ensure you have a Django version with 3.x and Python 3.7+. Then, install django-channels third party package. Configure the ASGI application as follows: import os from channels.auth import AuthMiddlewareStack from channels.routing import ProtocolTypeRouter, URLRouter from django.core.asgi import get_asgi_application import .routing os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myproject.settings') application = ProtocolTypeRouter({ "http": get_asgi_application(), "websocket": AuthMiddlewareStack( URLRouter( app.routing.websocket_urlpatterns ) ) }) Then You need to set ASGI_APPLICATION constant in the settings.py file: ASGI_APPLICATION = "myproject.asgi.application" After that, just create an async WebSocket consumer in the consumers.py file present in the application : import json from channels.generic.websocket import AsyncWebsocketConsumer class PairingChat(AsyncWebsocketConsumer): async def connect(self): self.room_name = self.scope['url_route']['kwargs']['room_name'] self.room_group_name = 'chat_%s' % self.room_name await self.channel_layer.group_add( self.room_group_name, self.channel_name ) await self.accept() async def disconnect(self): await self.channel_layer.group_discard( self.room_group_name, self.channel_name ) # Asyncwebsocket consumer can send any type of data ... async def receive(self, text_data): data_json = json.loads(your_data) message = data_json['message'] await self.channel_layer.group_send( self.room_group_name, { 'type': '# send your data from here ...', 'message': message, 'user': self.scope['session']['name'] } ) async def chat_message(self, event): message = event['message'] await self.send(data=json.dumps({ 'user': event['user'], 'message': message, })) Create a route for asyncwebsocket consumers as well ... from django.urls import re_path from . import consumers websocket_urlpatterns = [ re_path(r'ws/chat1/(?P<room_name>\w+)/$', consumers.PairingChat.as_asgi()), ] Then, just create a WebSocket client in javascript ... and you are good to go ... Link for JS Websocket Create : javascript-websocket
How to run Django channels with StreamingHttpResponse in ASGI
I have a simple app that streams images using OpenCV with the server set up for WSGI. But whenever I introduce Django Channels into the picture and change from WSGI to ASGI, the streaming stops. How can I stream images from cv2 and at the same time use Django Channels? Thank you in advance.
My code for streaming:

def camera_feed(request):
    stream = CameraStream()
    frames = stream.get_frames()
    return StreamingHttpResponse(frames, content_type='multipart/x-mixed-replace; boundary=frame')

settings.py:

ASGI_APPLICATION = 'photon.asgi.application'

asgi.py:

application = ProtocolTypeRouter({
    'http': get_asgi_application(),
    'websocket': AuthMiddlewareStack(URLRouter(ws_urlpatterns))
})
[ "First, we don't need StramingHTTPResponse for sending image data at all ...\nFor this, first, ensure you have a Django version with 3.x and Python 3.7+.\nThen, install django-channels third party package.\nConfigure the ASGI application as follows:\nimport os\nfrom channels.auth import AuthMiddlewareStack\nfrom channels.routing import ProtocolTypeRouter, URLRouter\nfrom django.core.asgi import get_asgi_application\nimport .routing\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myproject.settings')\n\napplication = ProtocolTypeRouter({\n \"http\": get_asgi_application(),\n \"websocket\": AuthMiddlewareStack(\n URLRouter(\n app.routing.websocket_urlpatterns\n )\n )\n})\n\nThen You need to set ASGI_APPLICATION constant in the settings.py file:\nASGI_APPLICATION = \"myproject.asgi.application\"\n\nAfter that, just create an async WebSocket consumer in the consumers.py file present in the application :\nimport json\nfrom channels.generic.websocket import AsyncWebsocketConsumer\n\n\nclass PairingChat(AsyncWebsocketConsumer):\n\n async def connect(self):\n self.room_name = self.scope['url_route']['kwargs']['room_name']\n self.room_group_name = 'chat_%s' % self.room_name\n\n await self.channel_layer.group_add(\n self.room_group_name,\n self.channel_name\n )\n \n await self.accept()\n\n async def disconnect(self):\n\n await self.channel_layer.group_discard(\n self.room_group_name,\n self.channel_name\n )\n\n\n # Asyncwebsocket consumer can send any type of data ...\n\n async def receive(self, text_data):\n data_json = json.loads(your_data)\n message = data_json['message']\n\n await self.channel_layer.group_send(\n self.room_group_name,\n {\n 'type': '# send your data from here ...',\n 'message': message,\n 'user': self.scope['session']['name']\n }\n )\n\n\n async def chat_message(self, event):\n message = event['message']\n\n await self.send(data=json.dumps({\n 'user': event['user'],\n 'message': message,\n }))\n\nCreate a route for asyncwebsocket consumers as well ...\nfrom django.urls import re_path\nfrom . import consumers\n\nwebsocket_urlpatterns = [\n re_path(r'ws/chat1/(?P<room_name>\\w+)/$', consumers.PairingChat.as_asgi()),\n]\n\nThen, just create a WebSocket client in javascript ... and you are good to go ...\nLink for JS Websocket Create : javascript-websocket\n" ]
[ 0 ]
[]
[]
[ "django", "django_channels", "opencv", "python" ]
stackoverflow_0067876456_django_django_channels_opencv_python.txt
Q: Command PhaseScriptExecution failed with a nonzero exit code[How to solve?] Build Log sent 2291848470 bytes received 1463928 bytes 8509507.97 bytes/sec total size is 10369715881 speedup is 4.52 rsync warning: some files vanished before they could be transferred (code 24) at /AppleInternal/Library/BuildRoots/810eba08-405a-11ed-86e9-6af958a02716/Library/Caches/com.apple.xbs/Sources/rsync/rsync/main.c(996) [sender=2.6.9] Command PhaseScriptExecution failed with a nonzero exit code I have try different ways to solve it but it seems doesn't work for me. For example, Solution 1 Open the Xcode project folder in your Terminal app. Enter and execute the following command: pod deintegrate Execute this command: pod install Re-open Xcode > go to Product > Clean Build Folder. Run your app again. Solution #2 Head over to Keychain Access. Select Lock & unlock again from the login option. Open Xcode > Clean Xcode Project > Run your build again. Do anyone have other solution to solve it? Thanks! A: I finally find the solution. I think this is because the "build" and "dist" directories that are created in the current working directory by the "toolchain build" process get recursively included in the project created by "toolchain create", which in turn confuses rsync when it's copying files around. So, for me, the key was to do the "toolchain build" outside my application source directory tree. This solved it: https://github.com/kivy/kivy-ios/issues/513
Command PhaseScriptExecution failed with a nonzero exit code[How to solve?]
Build Log

sent 2291848470 bytes  received 1463928 bytes  8509507.97 bytes/sec
total size is 10369715881  speedup is 4.52
rsync warning: some files vanished before they could be transferred (code 24) at /AppleInternal/Library/BuildRoots/810eba08-405a-11ed-86e9-6af958a02716/Library/Caches/com.apple.xbs/Sources/rsync/rsync/main.c(996) [sender=2.6.9]
Command PhaseScriptExecution failed with a nonzero exit code

I have tried different ways to solve it but they don't seem to work for me. For example,

Solution 1
Open the Xcode project folder in your Terminal app.
Enter and execute the following command: pod deintegrate
Execute this command: pod install
Re-open Xcode > go to Product > Clean Build Folder.
Run your app again.

Solution #2
Head over to Keychain Access.
Select Lock & unlock again from the login option.
Open Xcode > Clean Xcode Project > Run your build again.

Does anyone have another solution? Thanks!
[ "I finally find the solution.\nI think this is because the \"build\" and \"dist\" directories that are created in the current working directory by the \"toolchain build\" process get recursively included in the project created by \"toolchain create\", which in turn confuses rsync when it's copying files around.\nSo, for me, the key was to do the \"toolchain build\" outside my application source directory tree.\nThis solved it: https://github.com/kivy/kivy-ios/issues/513\n" ]
[ 0 ]
[]
[]
[ "kivy", "python", "xcode" ]
stackoverflow_0074599541_kivy_python_xcode.txt
Q: compare rows in 2d list and store the unique row having same elements in a row in another list I have a 2D list from which I am trying to extract the unique rows example: list = [['16', 'jun', 'jun', '18'], ['jun', '16', 'jun', '18'], ['aug', '16', 'jun', '18'], ['aug', '16', 'jun', '18'], ['sep', '17', 'mar', '18']] should return desired_list = [['16', 'jun', 'jun', '18'], ['aug', '16', 'jun', '18'], ['sep', '17', 'mar', '18']] explanation: So, if we compare row 1 with row 2 in list we see the items inside the two rows is same hence, I will take one of the row and store in desired_list row 3 and 4 in list are exactly same therefore, I'll store any one row in desired_list. row 5 is totally unique therefore, I'll add in desired_list. My only target is to remove duplicate value rows(even if items inside rows have different order) and only store the unique rows. print('LP:',lp, "\n") l=[] for i in range(len(lp)): for j in range(i+1, len(lp)): k=i print(set(lp[j]) == set(lp[k]), lp[j] not in l, lp[j], lp[k],l) if set(lp[j]) != set(lp[k]): if lp[j] not in l: l.append(lp[j]) print('\n', l) I am only half successful in achieving this. Below I am attaching the screenshot of the output so far: A: #input in_list = [['16', 'jun', 'jun', '18'], ['jun', '16', 'jun', '18'], ['aug', '16', 'jun', '18'], ['aug', '16', 'jun', '18'], ['sep', '17', 'mar', '18']] #output np.array(in_list)[np.sort(np.unique(np.sort(in_list), axis=0, return_index=True)[1])].tolist() Explanation: np.sort the rows of in_list in order to find the rows having the same information; take the index from np.unique to find the indices of unique (sorted) rows; np.sort the indices to hold the starting order in in_list; slice the list (after converting it in np.array) with the sorted indices to only retain the unique rows; converting the result in a list (.tolist()) A: Pure pythonic solution: res = [] for row in your_list: sorted_row = sorted(row) if sorted_row not in [sorted(x) for x in res]: res.append(row) print(res) >>> [['16', 'jun', 'jun', '18'], ['aug', '16', 'jun', '18'], ['sep', '17', 'mar', '18']] A: Here is the fixed code: lp = [['16', 'jun', 'jun', '18'], ['jun', '16', 'jun', '18'], ['aug', '16', 'jun', '18'], ['aug', '16', 'jun', '18'], ['sep', '17', 'mar', '18']] print('LP:',lp, "\n") l=[] count = 0 for i in lp: for j in lp[count+1:]: if set(j) != set(i): if i not in l: l.append(i) count += 1 print(l) I would advise against using this strategy as your algorithm is O(n^2) because it needs to run through every element in your list for every element in your list aka x*x times. I would use this code: def remove_values_from_list(the_list, val): return [value for value in the_list if sorted(value) != sorted(val)] lp = [['16', 'jun', 'jun', '18'], ['jun', '16', 'jun', '18'], ['aug', '16', 'jun', '18'], ['aug', '16', 'jun', '18'], ['sep', '17', 'mar', '18']] print('LP:',lp, "\n") l=[] count = 0 oof = 5 result_list = [] while True: if len(lp) == 1: break element = lp[1] lp = [element] + remove_values_from_list(lp[count+1:], element) result_list.append(element) print("Result") print(result_list) This code is a lot more efficient because it removes every instance of the current element in the rest of the list so it does not need to compare those copies anymore in future cases. Also that code also works if there are duplicate elements in your lists. When using set() you can not have duplicate elements because of the way sets work internally in python. 
Here is a simple script to test the speed increase: (it also checks the validity of the solutions by doing a manual O(n^2) check, but that is of course not part of the main program) import random import colorama from colorama import Fore, Style import time def generate_list_of_1000_integers(n_min, n_max, len): return [random.randint(n_min, n_max) for _ in range(len)] def remove_values_from_list(the_list, val): #return [value for value in the_list if set(value) != set(val)] return [value for value in the_list if sorted(value) != sorted(val)] def solution(lp): l=[] count = 0 for i in lp: for j in lp[count+1:]: if set(j) != set(i): if i not in l: l.append(i) count += 1 return l def anothersolution(lp): count = 0 result_list = [] while True: if len(lp) == 1: break element = lp[1] lp = [element] + remove_values_from_list(lp[count+1:], element) result_list.append(element) return result_list if __name__=="__main__": where_to_pick_from = [generate_list_of_1000_integers(0,100,10) for _ in range(100)] complete_list = [] how_many = int(2000) for _ in range(how_many): list_thing = random.choice(where_to_pick_from) random.shuffle(list_thing) complete_list.append(list_thing) print(complete_list) start = time.time() sol = solution(complete_list) print("First solution took "+str(time.time() - start)+str(" seconds.")) start = time.time() sol2 = anothersolution(complete_list) print("Second solution took "+str(time.time() - start)+str(" seconds.")) for j in range(len(sol)): for i in range(len(sol)): if set(sol[i]) == set(sol[j]) and j != i: print(Fore.RED+"[-] Solution 1 invalid.") exit(-1) for j in range(len(sol2)): for i in range(len(sol2)): if set(sol2[i]) == set(sol2[j]) and j != i: print(Fore.RED+"[-] Solution 2 invalid.") exit(-1) print(Fore.BLUE+str("[+] Solutions valid!")) Edit: I am also a beginner programmer myself so constructive criticism is welcome.
compare rows in 2d list and store the unique row having same elements in a row in another list
I have a 2D list from which I am trying to extract the unique rows.

Example:

list = [['16', 'jun', 'jun', '18'],
        ['jun', '16', 'jun', '18'],
        ['aug', '16', 'jun', '18'],
        ['aug', '16', 'jun', '18'],
        ['sep', '17', 'mar', '18']]

should return

desired_list = [['16', 'jun', 'jun', '18'],
                ['aug', '16', 'jun', '18'],
                ['sep', '17', 'mar', '18']]

Explanation: if we compare row 1 with row 2 in list, we see the items inside the two rows are the same, hence I will take one of the rows and store it in desired_list. Rows 3 and 4 in list are exactly the same, therefore I'll store any one of them in desired_list. Row 5 is totally unique, therefore I'll add it to desired_list.

My only target is to remove duplicate-value rows (even if the items inside the rows have a different order) and only store the unique rows.

print('LP:', lp, "\n")
l = []
for i in range(len(lp)):
    for j in range(i+1, len(lp)):
        k = i
        print(set(lp[j]) == set(lp[k]), lp[j] not in l, lp[j], lp[k], l)
        if set(lp[j]) != set(lp[k]):
            if lp[j] not in l:
                l.append(lp[j])
print('\n', l)

I am only half successful in achieving this. Below I am attaching a screenshot of the output so far:
[ "#input\nin_list = [['16', 'jun', 'jun', '18'],\n ['jun', '16', 'jun', '18'],\n ['aug', '16', 'jun', '18'],\n ['aug', '16', 'jun', '18'],\n ['sep', '17', 'mar', '18']]\n\n#output\nnp.array(in_list)[np.sort(np.unique(np.sort(in_list), axis=0, return_index=True)[1])].tolist()\n\nExplanation:\n\nnp.sort the rows of in_list in order to find the rows having the same information;\ntake the index from np.unique to find the indices of unique (sorted) rows;\nnp.sort the indices to hold the starting order in in_list;\nslice the list (after converting it in np.array) with the sorted indices to only retain the unique rows;\nconverting the result in a list (.tolist())\n\n", "Pure pythonic solution:\nres = []\nfor row in your_list:\n sorted_row = sorted(row)\n if sorted_row not in [sorted(x) for x in res]:\n res.append(row)\n\nprint(res)\n\n>>> [['16', 'jun', 'jun', '18'],\n ['aug', '16', 'jun', '18'],\n ['sep', '17', 'mar', '18']]\n\n", "Here is the fixed code:\nlp = [['16', 'jun', 'jun', '18'],\n ['jun', '16', 'jun', '18'],\n ['aug', '16', 'jun', '18'],\n ['aug', '16', 'jun', '18'],\n ['sep', '17', 'mar', '18']]\n\n\nprint('LP:',lp, \"\\n\")\nl=[]\ncount = 0\nfor i in lp:\n for j in lp[count+1:]:\n if set(j) != set(i):\n if i not in l:\n l.append(i)\n count += 1\nprint(l)\n\nI would advise against using this strategy as your algorithm is O(n^2) because it needs to run through every element in your list for every element in your list aka x*x times. I would use this code:\ndef remove_values_from_list(the_list, val):\n \n return [value for value in the_list if sorted(value) != sorted(val)]\n\nlp = [['16', 'jun', 'jun', '18'],\n ['jun', '16', 'jun', '18'],\n ['aug', '16', 'jun', '18'],\n ['aug', '16', 'jun', '18'],\n ['sep', '17', 'mar', '18']]\n\n\nprint('LP:',lp, \"\\n\")\nl=[]\ncount = 0\noof = 5\nresult_list = []\nwhile True:\n \n if len(lp) == 1:\n break\n\n element = lp[1]\n \n lp = [element] + remove_values_from_list(lp[count+1:], element)\n result_list.append(element)\n \n\n \nprint(\"Result\")\nprint(result_list)\n\nThis code is a lot more efficient because it removes every instance of the current element in the rest of the list so it does not need to compare those copies anymore in future cases. Also that code also works if there are duplicate elements in your lists. 
When using set() you can not have duplicate elements because of the way sets work internally in python.\nHere is a simple script to test the speed increase: (it also checks the validity of the solutions by doing a manual O(n^2) check, but that is of course not part of the main program)\nimport random\nimport colorama\nfrom colorama import Fore, Style\nimport time\n\n\ndef generate_list_of_1000_integers(n_min, n_max, len):\n return [random.randint(n_min, n_max) for _ in range(len)]\n\ndef remove_values_from_list(the_list, val):\n #return [value for value in the_list if set(value) != set(val)]\n return [value for value in the_list if sorted(value) != sorted(val)]\ndef solution(lp):\n l=[]\n count = 0\n for i in lp:\n for j in lp[count+1:]:\n if set(j) != set(i):\n if i not in l:\n l.append(i)\n count += 1\n return l\n\ndef anothersolution(lp):\n \n count = 0\n result_list = []\n while True:\n \n if len(lp) == 1:\n break\n\n \n element = lp[1]\n \n lp = [element] + remove_values_from_list(lp[count+1:], element)\n result_list.append(element)\n \n return result_list\n\nif __name__==\"__main__\":\n where_to_pick_from = [generate_list_of_1000_integers(0,100,10) for _ in range(100)]\n\n complete_list = []\n\n how_many = int(2000)\n\n for _ in range(how_many):\n list_thing = random.choice(where_to_pick_from)\n random.shuffle(list_thing)\n complete_list.append(list_thing)\n\n\n print(complete_list)\n start = time.time()\n sol = solution(complete_list)\n print(\"First solution took \"+str(time.time() - start)+str(\" seconds.\"))\n start = time.time()\n sol2 = anothersolution(complete_list)\n print(\"Second solution took \"+str(time.time() - start)+str(\" seconds.\"))\n\n\n \n\n\n for j in range(len(sol)):\n for i in range(len(sol)):\n if set(sol[i]) == set(sol[j]) and j != i:\n print(Fore.RED+\"[-] Solution 1 invalid.\")\n exit(-1)\n for j in range(len(sol2)):\n for i in range(len(sol2)):\n if set(sol2[i]) == set(sol2[j]) and j != i:\n print(Fore.RED+\"[-] Solution 2 invalid.\")\n exit(-1)\n\n print(Fore.BLUE+str(\"[+] Solutions valid!\"))\n\nEdit: I am also a beginner programmer myself so constructive criticism is welcome.\n" ]
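Another compact sketch of the same idea (keep the first occurrence of each row, treating rows as equal when they contain the same items regardless of order) using sorted tuples as keys:

lp = [['16', 'jun', 'jun', '18'],
      ['jun', '16', 'jun', '18'],
      ['aug', '16', 'jun', '18'],
      ['aug', '16', 'jun', '18'],
      ['sep', '17', 'mar', '18']]

seen = set()
desired_list = []
for row in lp:
    key = tuple(sorted(row))        # order-insensitive signature of the row
    if key not in seen:
        seen.add(key)
        desired_list.append(row)

print(desired_list)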
[ 0, 0, 0 ]
[]
[]
[ "arrays", "list", "matrix", "multidimensional_array", "python" ]
stackoverflow_0074599482_arrays_list_matrix_multidimensional_array_python.txt
Q: Add numbers to tablewidget I have tablewidget named tableSum, it has 1 col and 5 rows, and 1D array of numbers(float) Sum_main. How to print in table this array? Tried this, but its not working: for n in range(5): self.ui.table_Sum.setItem(n, 0, QTableWidget(Sum_main[row][0])) A: if you are looping through n then the iterable parameter should be n and not row... basically one or the other. so change row to n (or n to row) if that is what you intended to do. for n in range(5): self.ui.table_Sum.setItem(n, 0, QTableWidget(Sum_main[n][0]))
Add numbers to tablewidget
I have a tablewidget named tableSum; it has 1 column and 5 rows, and a 1D array of numbers (float) called Sum_main. How do I print this array in the table? I tried this, but it's not working:

for n in range(5):
    self.ui.table_Sum.setItem(n, 0, QTableWidget(Sum_main[row][0]))
[ "if you are looping through n then the iterable parameter should be n and not row... basically one or the other.\nso change row to n (or n to row) if that is what you intended to do.\nfor n in range(5):\n self.ui.table_Sum.setItem(n, 0, QTableWidget(Sum_main[n][0])) \n\n\n" ]
[ 0 ]
[]
[]
[ "python", "qt" ]
stackoverflow_0074600132_python_qt.txt
Q: How to set API access token to environment variable in python for Smartsheet API? I ran the repository for python-read-write-sheet by smartsheet sample in VisualStudioCode and had came across a message on the terminal. I had installed the SDK required in the virtual environment (.venv) before running the code. In Line 49, the initialize client uses the API token in the environment variable "SMARTSHEET_ACCESS_TOKEN" smart = smartsheet.Smartsheet() However, the terminal raised the error on the exact message where ValueError('Access Token must be set in the environment ' ValueError: Access Token must be set in the environment or passed to smartsheet.Smartsheet() as a parameter. The python script which I am using is python-read-write-sheet.py https://github.com/smartsheet-samples/python-read-write-sheet I had read the repository and I think that generating the access token could be the solution, I believe this is stated under the "Configure" section in the repo. https://github.com/smartsheet-samples/python-read-write-sheet A: The access token is what tells Smartsheet what user (account) to use to execute the API calls. Any time you're writing code that calls an API (which requires authentication), that code needs to specify the access token corresponding to the user (account) that should be used to run the API calls. First, if you haven't already done so, you'll need to login to Smartsheet (https://app.smartsheet.com) using the login credentials of the account you want to use for API calls and generate an Access Token (i.e., API Key). You can find instructions for how to do this here: https://help.smartsheet.com/articles/2482389-generate-API-key. Next, once you have generated an access token via the Smartsheet app, you need to store it in the SMARTSHEET_ACCESS_TOKEN environment variable in your code. To do this: Add this import statement to the top of your code: import os Add the following code to set the SMARTSHEET_ACCESS_TOKEN environment variable to your access token. (In the following example code, replace abc123 with your access token value.) NOTE: This line of code needs to be place before the line of code that attempts to read the value of that environment variable (smart = smartsheet.Smartsheet()). # Specify access token os.environ['SMARTSHEET_ACCESS_TOKEN'] = 'abc123'
How to set API access token to environment variable in python for Smartsheet API?
I ran the python-read-write-sheet sample repository from Smartsheet in Visual Studio Code and came across a message in the terminal. I had installed the required SDK in the virtual environment (.venv) before running the code. On line 49, the client is initialized using the API token in the environment variable "SMARTSHEET_ACCESS_TOKEN": smart = smartsheet.Smartsheet() However, the terminal raised the error with the exact message ValueError('Access Token must be set in the environment ' ValueError: Access Token must be set in the environment or passed to smartsheet.Smartsheet() as a parameter. The Python script I am using is python-read-write-sheet.py https://github.com/smartsheet-samples/python-read-write-sheet I have read through the repository and I think generating the access token could be the solution; I believe this is stated under the "Configure" section in the repo. https://github.com/smartsheet-samples/python-read-write-sheet
[ "The access token is what tells Smartsheet what user (account) to use to execute the API calls. Any time you're writing code that calls an API (which requires authentication), that code needs to specify the access token corresponding to the user (account) that should be used to run the API calls.\nFirst, if you haven't already done so, you'll need to login to Smartsheet (https://app.smartsheet.com) using the login credentials of the account you want to use for API calls and generate an Access Token (i.e., API Key). You can find instructions for how to do this here: https://help.smartsheet.com/articles/2482389-generate-API-key.\nNext, once you have generated an access token via the Smartsheet app, you need to store it in the SMARTSHEET_ACCESS_TOKEN environment variable in your code. To do this:\n\nAdd this import statement to the top of your code:\n\nimport os\n\n\nAdd the following code to set the SMARTSHEET_ACCESS_TOKEN environment variable to your access token. (In the following example code, replace abc123 with your access token value.) NOTE: This line of code needs to be place before the line of code that attempts to read the value of that environment variable (smart = smartsheet.Smartsheet()).\n\n# Specify access token\nos.environ['SMARTSHEET_ACCESS_TOKEN'] = 'abc123'\n\n" ]
[ 0 ]
[]
[]
[ "api", "environment_variables", "python", "smartsheet_api", "token" ]
stackoverflow_0074596995_api_environment_variables_python_smartsheet_api_token.txt
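A follow-up sketch for the Smartsheet question above: the error text itself names two routes, so for completeness here is what each looks like. MY_TOKEN is a placeholder for a real access token, and the snippet assumes the smartsheet-python-sdk is installed:
import os
import smartsheet
# Option 1: pass the token directly, as the error message suggests
smart = smartsheet.Smartsheet("MY_TOKEN")
# Option 2: set the variable outside the script (e.g. export SMARTSHEET_ACCESS_TOKEN=MY_TOKEN
# in the shell) and read it here, so the token never lives in source control
smart = smartsheet.Smartsheet(os.environ["SMARTSHEET_ACCESS_TOKEN"])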
Q: youtube-dl option for getting video titles and NOT downloading videos I want to get video titles from a video list. --flat-playlist option returns video id's, and I can't find an options that returns video titles. youtube-dl --flat-playlist "https://app.pluralsight.com/library/courses/openid-and-oauth2-securing-angular-apps" [pluralsight:course] openid-and-oauth2-securing-angular-apps: Downloading JSON metadata [download] Downloading playlist: Securing Angular Apps with OpenID Connect and OAuth 2 [pluralsight:course] playlist Securing Angular Apps with OpenID Connect and OAuth 2: Collected 58 video ids (downloading 58 of them) [download] Downloading video 1 of 58 [download] Downloading video 2 of 58 [download] Downloading video 3 of 58 ... --get-filename option and --get-title option, prints the name just for the first video, then they throw ExtractorError('No video formats found'): youtube-dl --get-title "https://app.pluralsight.com/library/courses/openid-and-oauth2-securing-angular-apps" --verbose A: That is indeed the way to pull a list of videos without downloading them, the issue with your query is that the URL you are using requires you to be signed in (and youtube-dl doesn't have access to your credentials) Both this youtube-dl https://www.youtube.com/@Wondrium/videos -e and this youtube-dl https://www.youtube.com/@Wondrium/videos --get-title works. For example:
youtube-dl option for getting video titles and NOT downloading videos
I want to get video titles from a video list. The --flat-playlist option returns video IDs, and I can't find an option that returns video titles. youtube-dl --flat-playlist "https://app.pluralsight.com/library/courses/openid-and-oauth2-securing-angular-apps" [pluralsight:course] openid-and-oauth2-securing-angular-apps: Downloading JSON metadata [download] Downloading playlist: Securing Angular Apps with OpenID Connect and OAuth 2 [pluralsight:course] playlist Securing Angular Apps with OpenID Connect and OAuth 2: Collected 58 video ids (downloading 58 of them) [download] Downloading video 1 of 58 [download] Downloading video 2 of 58 [download] Downloading video 3 of 58 ... The --get-filename and --get-title options print the name only for the first video, then they throw ExtractorError('No video formats found'): youtube-dl --get-title "https://app.pluralsight.com/library/courses/openid-and-oauth2-securing-angular-apps" --verbose
[ "That is indeed the way to pull a list of videos without downloading them, the issue with your query is that the URL you are using requires you to be signed in (and youtube-dl doesn't have access to your credentials)\nBoth this\n youtube-dl https://www.youtube.com/@Wondrium/videos -e\n\nand this\n youtube-dl https://www.youtube.com/@Wondrium/videos --get-title\n\nworks.\nFor example:\n\n" ]
[ 1 ]
[]
[]
[ "python", "youtube_dl" ]
stackoverflow_0059126649_python_youtube_dl.txt
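A follow-up sketch for the youtube-dl question above: the same listing can be driven from Python, which also gives a place to supply credentials for sites that require sign-in. This assumes the youtube-dl package is importable; with extract_flat some extractors return only IDs, so a title may be missing per entry:
import youtube_dl  # pip install youtube-dl
url = "https://www.youtube.com/@Wondrium/videos"   # any playlist/channel/course URL
opts = {
    "quiet": True,
    "extract_flat": True,        # list entries without downloading anything
    # "username": "...",         # hypothetical credentials for sites that need a login
    # "password": "...",
}
with youtube_dl.YoutubeDL(opts) as ydl:
    info = ydl.extract_info(url, download=False)
for entry in info.get("entries", []):
    print(entry.get("title") or entry.get("id"))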
Q: Why MinMaxScaler return all zeros in streamlit? I am trying to make an app using streamlit. Inside the script there is a preprocessing of MinMaxScaler using scikitlearn. But, after the transformation it return all the values with zero. Whats wrong with my code? Here is some of the script : contract = ['Proyek dibawah 100M','Proyek 100M-150M','Proyek 150M-500M','Proyek diatas 500M'] project_contract = st.selectbox("Select your project contract", contract) input_spec = pd.DataFrame(columns=['FC','SL','FA'], data=[[FC, SL, FA]]) input_area = pd.DataFrame(columns=['area_JAKARTA', 'area_JAWA', 'area_KALIMANTAN', 'area_PAPUA', 'area_SULAWESI', 'area_SUMATERA']) if area == 'Jakarta': input_area = pd.DataFrame(columns=['area_JAKARTA', 'area_JAWA', 'area_KALIMANTAN', 'area_PAPUA', 'area_SULAWESI', 'area_SUMATERA'], data=[[1,0,0,0,0,0]]) elif area == 'Jawa': input_area = pd.DataFrame(columns=['area_JAKARTA', 'area_JAWA', 'area_KALIMANTAN', 'area_PAPUA', 'area_SULAWESI', 'area_SUMATERA'], data=[[0,1,0,0,0,0]]) elif area == 'Kalimantan': input_area = pd.DataFrame(columns=['area_JAKARTA', 'area_JAWA', 'area_KALIMANTAN', 'area_PAPUA', 'area_SULAWESI', 'area_SUMATERA'], data=[[0,0,1,0,0,0]]) elif area == 'Papua': input_area = pd.DataFrame(columns=['area_JAKARTA', 'area_JAWA', 'area_KALIMANTAN', 'area_PAPUA', 'area_SULAWESI', 'area_SUMATERA'], data=[[0,0,0,1,0,0]]) elif area == 'Sulawesi': input_area = pd.DataFrame(columns=['area_JAKARTA', 'area_JAWA', 'area_KALIMANTAN', 'area_PAPUA', 'area_SULAWESI', 'area_SUMATERA'], data=[[0,0,0,0,1,0]]) elif area == 'Sumatera': input_area = pd.DataFrame(columns=['area_JAKARTA', 'area_JAWA', 'area_KALIMANTAN', 'area_PAPUA', 'area_SULAWESI', 'area_SUMATERA'], data=[[0,0,0,0,0,1]]) elif area == 'Bali & Nusa Tenggara': input_area = pd.DataFrame(columns=['area_JAKARTA', 'area_JAWA', 'area_KALIMANTAN', 'area_PAPUA', 'area_SULAWESI', 'area_SUMATERA'], data=[[0,0,0,0,0,0]]) if project_contract == 'Proyek dibawah 100M': input_project = pd.DataFrame(columns=['project_contract_150M-500M', 'project_contract_above 500M', 'project_contract_below 100M'], data =[[0,0,1]]) elif project_contract == 'Proyek 150M-500M': input_project = pd.DataFrame(columns=['project_contract_150M-500M', 'project_contract_above 500M', 'project_contract_below 100M'], data =[[1,0,0]]) elif project_contract == 'Proyek diatas 500M': input_project = pd.DataFrame(columns=['project_contract_150M-500M', 'project_contract_above 500M', 'project_contract_below 100M'], data =[[0,1,0]]) elif project_contract == 'Proyek 100M-150M': input_project = pd.DataFrame(columns=['project_contract_150M-500M', 'project_contract_above 500M', 'project_contract_below 100M'], data =[[0,0,0]]) for i in input_area.columns: input_area[i] = input_area[i].astype('float') for j in input_project.columns: input_project[j] = input_project[j].astype('float') input_submit = pd.concat([input_spec, input_area, input_project], axis=1) st.dataframe(input_submit) scaler = MinMaxScaler() input_submit_scaled = pd.DataFrame(scaler.fit_transform(input_submit.values), columns=input_submit.columns) st.dataframe(input_submit_scaled) The input_submit dataframe The input_submit_scaled dataframe A: Your issue has nothing to do with streamlit but on scaler. When you instantiate the MinMaxScaler() with: scaler = MinMaxScaler() Use this scaler to fit the training data. When you have a test sample, use again this scaler to transform it. But do not fit. Here is a demo. 
Code def demo(): train_data = [[5.0, 16.0, 7.0, 4.0, 1.0, 0.0, 0.0, 0.0], [8.0, 7.0, 8.0, 1.0, 1.0, 0.0, 0.0, 0.0], [5.0, 9.0, 9.0, 0.0, 1.0, 0.0, 1.0, 1.0]] test_data = [[25.0, 12.0, 15.0, 0.0, 1.0, 0.0, 0.0, 0.0]] # input_submit.values # Scale, fit and transform the train data. min_max_scaler = MinMaxScaler() train_data_minmax = min_max_scaler.fit_transform(train_data) # Save the scaler to disk. import pickle # scaler_fn = 'scaler_project.pkl' # with open(scaler_fn, 'wb') as handle: # pickle.dump(min_max_scaler, handle) # Scale new data using scaler from distk. # with open(scaler_fn, 'rb') as handle: # loaded_scaler = pickle.load(handle) # test_data_loaded_minmax = loaded_scaler.transform(test_data) # Scale the single test or input data. test_data_minmax = min_max_scaler.transform(test_data) print(f'train_data:\n{train_data}') print(f'train_data_minmax:\n{train_data_minmax}\n') print(f'test_data:\n{test_data}') print(f'test_data_minmax:\n{test_data_minmax}') demo() Output train_data: [[5.0, 16.0, 7.0, 4.0, 1.0, 0.0, 0.0, 0.0], [8.0, 7.0, 8.0, 1.0, 1.0, 0.0, 0.0, 0.0], [5.0, 9.0, 9.0, 0.0, 1.0, 0.0, 1.0, 1.0]] train_data_minmax: [[0. 1. 0. 1. 0. 0. 0. 0. ] [1. 0. 0.5 0.25 0. 0. 0. 0. ] [0. 0.22222222 1. 0. 0. 0. 1. 1. ]] test_data: [[25.0, 12.0, 15.0, 0.0, 1.0, 0.0, 0.0, 0.0]] test_data_minmax: [[6.66666667 0.55555556 4. 0. 0. 0. 0. 0. ]] Not all scaled input data values are zero. Reference https://scikit-learn.org/stable/modules/preprocessing.html#scaling-features-to-a-range
Why MinMaxScaler return all zeros in streamlit?
I am trying to make an app using streamlit. Inside the script there is a preprocessing of MinMaxScaler using scikitlearn. But, after the transformation it return all the values with zero. Whats wrong with my code? Here is some of the script : contract = ['Proyek dibawah 100M','Proyek 100M-150M','Proyek 150M-500M','Proyek diatas 500M'] project_contract = st.selectbox("Select your project contract", contract) input_spec = pd.DataFrame(columns=['FC','SL','FA'], data=[[FC, SL, FA]]) input_area = pd.DataFrame(columns=['area_JAKARTA', 'area_JAWA', 'area_KALIMANTAN', 'area_PAPUA', 'area_SULAWESI', 'area_SUMATERA']) if area == 'Jakarta': input_area = pd.DataFrame(columns=['area_JAKARTA', 'area_JAWA', 'area_KALIMANTAN', 'area_PAPUA', 'area_SULAWESI', 'area_SUMATERA'], data=[[1,0,0,0,0,0]]) elif area == 'Jawa': input_area = pd.DataFrame(columns=['area_JAKARTA', 'area_JAWA', 'area_KALIMANTAN', 'area_PAPUA', 'area_SULAWESI', 'area_SUMATERA'], data=[[0,1,0,0,0,0]]) elif area == 'Kalimantan': input_area = pd.DataFrame(columns=['area_JAKARTA', 'area_JAWA', 'area_KALIMANTAN', 'area_PAPUA', 'area_SULAWESI', 'area_SUMATERA'], data=[[0,0,1,0,0,0]]) elif area == 'Papua': input_area = pd.DataFrame(columns=['area_JAKARTA', 'area_JAWA', 'area_KALIMANTAN', 'area_PAPUA', 'area_SULAWESI', 'area_SUMATERA'], data=[[0,0,0,1,0,0]]) elif area == 'Sulawesi': input_area = pd.DataFrame(columns=['area_JAKARTA', 'area_JAWA', 'area_KALIMANTAN', 'area_PAPUA', 'area_SULAWESI', 'area_SUMATERA'], data=[[0,0,0,0,1,0]]) elif area == 'Sumatera': input_area = pd.DataFrame(columns=['area_JAKARTA', 'area_JAWA', 'area_KALIMANTAN', 'area_PAPUA', 'area_SULAWESI', 'area_SUMATERA'], data=[[0,0,0,0,0,1]]) elif area == 'Bali & Nusa Tenggara': input_area = pd.DataFrame(columns=['area_JAKARTA', 'area_JAWA', 'area_KALIMANTAN', 'area_PAPUA', 'area_SULAWESI', 'area_SUMATERA'], data=[[0,0,0,0,0,0]]) if project_contract == 'Proyek dibawah 100M': input_project = pd.DataFrame(columns=['project_contract_150M-500M', 'project_contract_above 500M', 'project_contract_below 100M'], data =[[0,0,1]]) elif project_contract == 'Proyek 150M-500M': input_project = pd.DataFrame(columns=['project_contract_150M-500M', 'project_contract_above 500M', 'project_contract_below 100M'], data =[[1,0,0]]) elif project_contract == 'Proyek diatas 500M': input_project = pd.DataFrame(columns=['project_contract_150M-500M', 'project_contract_above 500M', 'project_contract_below 100M'], data =[[0,1,0]]) elif project_contract == 'Proyek 100M-150M': input_project = pd.DataFrame(columns=['project_contract_150M-500M', 'project_contract_above 500M', 'project_contract_below 100M'], data =[[0,0,0]]) for i in input_area.columns: input_area[i] = input_area[i].astype('float') for j in input_project.columns: input_project[j] = input_project[j].astype('float') input_submit = pd.concat([input_spec, input_area, input_project], axis=1) st.dataframe(input_submit) scaler = MinMaxScaler() input_submit_scaled = pd.DataFrame(scaler.fit_transform(input_submit.values), columns=input_submit.columns) st.dataframe(input_submit_scaled) The input_submit dataframe The input_submit_scaled dataframe
[ "Your issue has nothing to do with streamlit but on scaler. When you instantiate the MinMaxScaler() with:\nscaler = MinMaxScaler()\n\nUse this scaler to fit the training data. When you have a test sample, use again this scaler to transform it. But do not fit.\nHere is a demo.\nCode\ndef demo():\n train_data = [[5.0, 16.0, 7.0, 4.0, 1.0, 0.0, 0.0, 0.0],\n [8.0, 7.0, 8.0, 1.0, 1.0, 0.0, 0.0, 0.0],\n [5.0, 9.0, 9.0, 0.0, 1.0, 0.0, 1.0, 1.0]]\n\n test_data = [[25.0, 12.0, 15.0, 0.0, 1.0, 0.0, 0.0, 0.0]] # input_submit.values\n\n # Scale, fit and transform the train data.\n min_max_scaler = MinMaxScaler()\n train_data_minmax = min_max_scaler.fit_transform(train_data)\n\n # Save the scaler to disk. import pickle\n # scaler_fn = 'scaler_project.pkl'\n # with open(scaler_fn, 'wb') as handle:\n # pickle.dump(min_max_scaler, handle)\n\n # Scale new data using scaler from distk.\n # with open(scaler_fn, 'rb') as handle:\n # loaded_scaler = pickle.load(handle)\n # test_data_loaded_minmax = loaded_scaler.transform(test_data)\n\n # Scale the single test or input data.\n test_data_minmax = min_max_scaler.transform(test_data)\n\n print(f'train_data:\\n{train_data}')\n print(f'train_data_minmax:\\n{train_data_minmax}\\n')\n\n print(f'test_data:\\n{test_data}')\n print(f'test_data_minmax:\\n{test_data_minmax}')\n\ndemo()\n\nOutput\ntrain_data:\n[[5.0, 16.0, 7.0, 4.0, 1.0, 0.0, 0.0, 0.0], [8.0, 7.0, 8.0, 1.0, 1.0, 0.0, 0.0, 0.0], [5.0, 9.0, 9.0, 0.0, 1.0, 0.0, 1.0, 1.0]]\ntrain_data_minmax:\n[[0. 1. 0. 1. 0. 0.\n 0. 0. ]\n [1. 0. 0.5 0.25 0. 0.\n 0. 0. ]\n [0. 0.22222222 1. 0. 0. 0.\n 1. 1. ]]\n\ntest_data:\n[[25.0, 12.0, 15.0, 0.0, 1.0, 0.0, 0.0, 0.0]]\ntest_data_minmax:\n[[6.66666667 0.55555556 4. 0. 0. 0.\n 0. 0. ]]\n\nNot all scaled input data values are zero.\nReference\nhttps://scikit-learn.org/stable/modules/preprocessing.html#scaling-features-to-a-range\n" ]
[ 0 ]
[]
[]
[ "python", "scaling", "scikit_learn", "streamlit" ]
stackoverflow_0074598314_python_scaling_scikit_learn_streamlit.txt
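A follow-up sketch for the MinMaxScaler question above: in a Streamlit app the scaler should be fitted once on the full training set, persisted, and then only transform applied to the single row a user submits; calling fit_transform on one row is exactly what collapses every feature to zero. Assuming a scaler was pickled during training as scaler_project.pkl (the filename in the answer's commented-out code) and that the row has the same columns the scaler was fitted on:
import pickle
import pandas as pd
import streamlit as st
# Load the scaler that was fitted on the training data
with open("scaler_project.pkl", "rb") as handle:
    scaler = pickle.load(handle)
# Stand-in for the one-row DataFrame assembled from the widgets in the question
input_submit = pd.DataFrame([[25.0, 12.0, 15.0, 0, 1, 0, 0, 0, 0, 0, 0, 1]])
# transform() reuses the training min/max instead of re-fitting on a single row
input_submit_scaled = scaler.transform(input_submit.values)
st.dataframe(input_submit_scaled)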
Q: How to replace every '-' into space ' ' . Function .replace('-',' ',regex=True) is not working on this case Unable to convert every '-' into blank space. dataset = ['0000sh--_dsd' , '0000sd---_dsd' , '000ad-_512'] test1 = pd.DataFrame(dataset) I tried this `test1.replace('-',' ',regex=True) Input: 0000sh--_dsd I need this as Output: 0000sh _dsd (which is not happening) Python is not allowing to convert to space. Please advise how to sort out this situation.
How to replace every '-' with a space ' '. The function .replace('-',' ',regex=True) is not working in this case
I'm unable to convert every '-' into a blank space. dataset = ['0000sh--_dsd' , '0000sd---_dsd' , '000ad-_512'] test1 = pd.DataFrame(dataset) I tried this: test1.replace('-',' ',regex=True) Input: 0000sh--_dsd I need this as output: 0000sh _dsd (which is not happening). Python is not converting the dashes to spaces. Please advise how to sort this out.
[]
[]
[ "Sorry I realised to late it's a dataframe\nIn this case I would just solve it like this\ndataset = ['0000sh--_dsd', '0000sd---_dsd', '000ad-_512']\ndataset = [line.replace(\"-\", \"\") for line in dataset]\ntest1 = pd.DataFrame(dataset)\n\nIgnore my first answer below\n\nStrings in Python are immutable so you need to use this\ntest1 = test1.replace('-', '')\n\n" ]
[ -1 ]
[ "dataframe", "pandas", "python", "regex", "replace" ]
stackoverflow_0074601343_dataframe_pandas_python_regex_replace.txt
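A follow-up sketch for the dash-to-space question above, since it has no accepted answer: DataFrame.replace returns a new frame, so the result has to be assigned back (the stray backtick in the question also suggests the call never actually ran). With that, the original regex=True call does what was asked:
import pandas as pd
dataset = ['0000sh--_dsd', '0000sd---_dsd', '000ad-_512']
test1 = pd.DataFrame(dataset)
# replace() does not modify in place; assign the result back
test1 = test1.replace('-', ' ', regex=True)
# column-wise equivalent: test1[0] = test1[0].str.replace('-', ' ', regex=False)
print(test1)   # each '-' is now a space: '0000sh  _dsd', '0000sd   _dsd', '000ad _512'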
Q: When is an assignment necessary? Consider the following two sepearte scripts main.py # main.py import foo D = {} foo.add_key(D) print(D) and foo.py # foo.py def add_key(D: dict): D['key'] = 'value' return D Executing main.py yields {'keys' : 'value'}. I was wondering why this works, because I was thinking that I need to assign something along the line D = foo.add_key(D). Now I'm thinking that I define something in foo.py which gets somehow send into main.py as well. A: What you are using is known as an "output argument" or "output parameter". You alter the original object that you send to the foo.add_key method. Add some print(id(D)) statements, to see that the object is indeed the same everywhere. Assignment in this case would be a self assignment as a = a. Here is a good discussion / explanation why there is a controversy on this technique and why it is not considered "Clean Code".
When is an assignment necessary?
Consider the following two separate scripts main.py # main.py import foo D = {} foo.add_key(D) print(D) and foo.py # foo.py def add_key(D: dict): D['key'] = 'value' return D Executing main.py yields {'key' : 'value'}. I was wondering why this works, because I was thinking that I would need to assign something along the lines of D = foo.add_key(D). Now I'm thinking that I define something in foo.py which somehow gets sent into main.py as well.
[ "What you are using is known as an \"output argument\" or \"output parameter\".\nYou alter the original object that you send to the foo.add_key method.\nAdd some print(id(D)) statements, to see that the object is indeed the same everywhere. Assignment in this case would be a self assignment as a = a.\nHere is a good discussion / explanation why there is a controversy on this technique and why it is not considered \"Clean Code\".\n" ]
[ 1 ]
[]
[]
[ "dictionary", "import", "methods", "python", "python_import" ]
stackoverflow_0074601264_dictionary_import_methods_python_python_import.txt
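A follow-up sketch for the assignment question above, making the mutation-versus-rebinding distinction concrete; the id() calls show it is the same object throughout:
def add_key(d):
    d['key'] = 'value'        # mutates the object the caller passed in
def rebind(d):
    d = {'other': 'value'}    # rebinds the local name only; the caller never sees this
D = {}
print(id(D))
add_key(D)
print(D, id(D))               # {'key': 'value'} and the same id as before
E = {}
rebind(E)
print(E)                      # still {}; this is the case where returning and assigning would be needed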
Q: Substract one datetime column after a groupby with a time reference for each group from a second Pandas dataframe I have one dataframe df1 with one admissiontime for each id. id admissiontime 1 2117-04-03 19:15:00 2 2117-10-18 22:35:00 3 2163-10-17 19:15:00 4 2149-01-08 15:30:00 5 2144-06-06 16:15:00 And an another dataframe df2 with several datetame for each id id datetime 1 2135-07-28 07:50:00.000 1 2135-07-28 07:50:00.000 2 2135-07-28 07:57:15.900 3 2135-07-28 07:57:15.900 3 2135-07-28 07:57:15.900 I would like to substract for each id, datetimes with his specific admissiontime, in a column of the second dataframe. I think I have to use d2.group.by('id')['datetime']- something but I struggle to connect with the df1. A: Use Series.sub with mapping by Series.map by another DataFrame: df1['admissiontime'] = pd.to_datetime(df1['admissiontime']) df2['datetime'] = pd.to_datetime(df2['datetime']) df2['diff'] = df2['datetime'].sub(df2['id'].map(df1.set_index('id')['admissiontime']))
Subtract one datetime column after a groupby with a time reference for each group from a second Pandas dataframe
I have one dataframe df1 with one admissiontime for each id. id admissiontime 1 2117-04-03 19:15:00 2 2117-10-18 22:35:00 3 2163-10-17 19:15:00 4 2149-01-08 15:30:00 5 2144-06-06 16:15:00 And another dataframe df2 with several datetimes for each id id datetime 1 2135-07-28 07:50:00.000 1 2135-07-28 07:50:00.000 2 2135-07-28 07:57:15.900 3 2135-07-28 07:57:15.900 3 2135-07-28 07:57:15.900 I would like to subtract, for each id, its specific admissiontime from the datetimes, in a column of the second dataframe. I think I have to use df2.groupby('id')['datetime'] - something, but I struggle to connect it with df1.
[ "Use Series.sub with mapping by Series.map by another DataFrame:\n df1['admissiontime'] = pd.to_datetime(df1['admissiontime'])\n df2['datetime'] = pd.to_datetime(df2['datetime'])\n\ndf2['diff'] = df2['datetime'].sub(df2['id'].map(df1.set_index('id')['admissiontime']))\n\n" ]
[ 1 ]
[]
[]
[ "dataframe", "pandas", "python", "timestamp" ]
stackoverflow_0074601393_dataframe_pandas_python_timestamp.txt
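A follow-up sketch for the admission-time question above, running the accepted map/sub approach on a cut-down version of the sample rows to show the resulting column:
import pandas as pd
df1 = pd.DataFrame({'id': [1, 2, 3],
                    'admissiontime': ['2117-04-03 19:15:00', '2117-10-18 22:35:00', '2163-10-17 19:15:00']})
df2 = pd.DataFrame({'id': [1, 1, 2, 3],
                    'datetime': ['2135-07-28 07:50:00.000', '2135-07-28 07:50:00.000',
                                 '2135-07-28 07:57:15.900', '2135-07-28 07:57:15.900']})
df1['admissiontime'] = pd.to_datetime(df1['admissiontime'])
df2['datetime'] = pd.to_datetime(df2['datetime'])
# look up each row's admissiontime by id, then subtract it from datetime
df2['diff'] = df2['datetime'].sub(df2['id'].map(df1.set_index('id')['admissiontime']))
print(df2)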
Q: Limited shape as output in tensorflow I am trying to randomly generate timeseries data using keras as follows: import tensorflow as tf import pandas as pd import random input_data = [random.uniform(10,100) for _ in range(350000)] targets = [random.uniform(10,100) for _ in range(350000)] dataset = tf.keras.utils.timeseries_dataset_from_array( input_data, targets, sequence_length=10000) for batch in dataset: inputs, targets = batch break But the final shape is reduced and coming as: <tf.Tensor: shape=(128, 10000), dtype=float32, numpy= array([[22.922523, 44.253967, 41.80049 , ..., 60.444836, 14.977458, 17.970036], [44.253967, 41.80049 , 34.09485 , ..., 14.977458, 17.970036, 68.27751 ], [41.80049 , 34.09485 , 37.27845 , ..., 17.970036, 68.27751 , 98.05703 ], ..., [13.941159, 51.48634 , 61.248505, ..., 98.093346, 67.3885 , 34.01148 ], [51.48634 , 61.248505, 77.34204 , ..., 67.3885 , 34.01148 , 27.165142], [61.248505, 77.34204 , 54.856853, ..., 34.01148 , 27.165142, 97.55085 ]], dtype=float32)> How can i increase size array or is there any limitation?
Limited shape as output in tensorflow
I am trying to randomly generate timeseries data using keras as follows: import tensorflow as tf import pandas as pd import random input_data = [random.uniform(10,100) for _ in range(350000)] targets = [random.uniform(10,100) for _ in range(350000)] dataset = tf.keras.utils.timeseries_dataset_from_array( input_data, targets, sequence_length=10000) for batch in dataset: inputs, targets = batch break But the final shape is reduced and coming as: <tf.Tensor: shape=(128, 10000), dtype=float32, numpy= array([[22.922523, 44.253967, 41.80049 , ..., 60.444836, 14.977458, 17.970036], [44.253967, 41.80049 , 34.09485 , ..., 14.977458, 17.970036, 68.27751 ], [41.80049 , 34.09485 , 37.27845 , ..., 17.970036, 68.27751 , 98.05703 ], ..., [13.941159, 51.48634 , 61.248505, ..., 98.093346, 67.3885 , 34.01148 ], [51.48634 , 61.248505, 77.34204 , ..., 67.3885 , 34.01148 , 27.165142], [61.248505, 77.34204 , 54.856853, ..., 34.01148 , 27.165142, 97.55085 ]], dtype=float32)> How can i increase size array or is there any limitation?
[]
[]
[ "change the VARIABLE with the number of sample that you want (batch_size).\nIf you want the whole data you can make batch_size=None\ndataset = tf.keras.utils.timeseries_dataset_from_array(\ninput_data, targets, batch_size=VARIABLE, sequence_length=10000)\n\n" ]
[ -1 ]
[ "keras", "python", "tensorflow" ]
stackoverflow_0074601289_keras_python_tensorflow.txt
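A follow-up sketch for the tensorflow question above: the (128, 10000) shape is just the default batch size of timeseries_dataset_from_array, not a cap on the data; the remaining windows arrive in later batches. Passing batch_size explicitly (assuming the installed TensorFlow exposes the documented keyword):
import random
import tensorflow as tf
input_data = [random.uniform(10, 100) for _ in range(350_000)]
targets = [random.uniform(10, 100) for _ in range(350_000)]
dataset = tf.keras.utils.timeseries_dataset_from_array(
    input_data, targets, sequence_length=10_000, batch_size=512)
for inputs, batch_targets in dataset.take(1):
    print(inputs.shape)   # (512, 10000) for every batch except possibly the last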
Q: How can I get the first day of the next month in Python? How can I get the first date of the next month in Python? For example, if it's now 2019-12-31, the first day of the next month is 2020-01-01. If it's now 2019-08-01, the first day of the next month is 2019-09-01. I came up with this: import datetime def first_day_of_next_month(dt): '''Get the first day of the next month. Preserves the timezone. Args: dt (datetime.datetime): The current datetime Returns: datetime.datetime: The first day of the next month at 00:00:00. ''' if dt.month == 12: return datetime.datetime(year=dt.year+1, month=1, day=1, tzinfo=dt.tzinfo) else: return datetime.datetime(year=dt.year, month=dt.month+1, day=1, tzinfo=dt.tzinfo) # Example usage (assuming that today is 2021-01-28): first_day_of_next_month(datetime.datetime.now()) # Returns: datetime.datetime(2021, 2, 1, 0, 0) Is it correct? Is there a better way? A: Here is a 1-line solution using nothing more than the standard datetime library: (dt.replace(day=1) + datetime.timedelta(days=32)).replace(day=1) Examples: >>> dt = datetime.datetime(2016, 2, 29) >>> print((dt.replace(day=1) + datetime.timedelta(days=32)).replace(day=1)) 2016-03-01 00:00:00 >>> dt = datetime.datetime(2019, 12, 31) >>> print((dt.replace(day=1) + datetime.timedelta(days=32)).replace(day=1)) 2020-01-01 00:00:00 >>> dt = datetime.datetime(2019, 12, 1) >>> print((dt.replace(day=1) + datetime.timedelta(days=32)).replace(day=1)) 2020-01-01 00:00:00 A: Using dateutil you can do it the most literally possible: import datetime from dateutil import relativedelta today = datetime.date.today() next_month = today + relativedelta.relativedelta(months=1, day=1) In English: add 1 month(s) to the today's date and set the day (of the month) to 1. Note the usage of singular and plural forms of day(s) and month(s). Singular sets the attribute to a value, plural adds the number of periods. You can store this relativedelta.relativedelta object to a variable and the pass it around. Other answers involve more programming logic. EDIT You can do it with the standard datetime library as well, but it's not so beautiful: next_month = (today.replace(day=1) + datetime.timedelta(days=32)).replace(day=1) sets the date to the 1st of the current month, adds 32 days (or any number between 31 and 59 which guarantees to jump into the next month) and then sets the date to the 1st of that month. A: you can use calendar to get the number of days in a given month, then add timedelta(days=...), like this: from datetime import date, timedelta from calendar import monthrange days_in_month = lambda dt: monthrange(dt.year, dt.month)[1] today = date.today() first_day = today.replace(day=1) + timedelta(days_in_month(today)) print(first_day) if you're fine with external deps, you can use dateutil (which I love...) 
from datetime import date from dateutil.relativedelta import relativedelta today = date.today() first_day = today.replace(day=1) + relativedelta(months=1) print(first_day) A: Extract the year and month, add 1 and form a new date using the year, month and day=1: from datetime import date now = date(2020,12,18) y,m = divmod(now.year*12+now.month,12) nextMonth = date(y,m+1,1) print(now,nextMonth) # 2020-12-18 2021-01-01 A: Your way looks good yet I would have done it this way: import datetime from dateutil import relativedelta dt = datetime.datetime(year=1998, month=12, day=12) nextmonth = dt + relativedelta.relativedelta(months=1) nextmonth.replace(day=1) print(nextmonth) A: Using only python standard libraries: import datetime today = datetime.date.today() first_of_next_month = return date.replace( day=1, month=date.month % 12 + 1, year=date.year + (date.month // 12) ) could be generalized to... def get_first_of_month(date, month_offset=0): # zero based indexing of month to make math work month_count = date.month - 1 + month_offset return date.replace( day=1, month=month_count % 12 + 1, year=date.year + (month_count // 12) ) first_of_next_month = get_first_of_month(today, 1) Other solutions that don't require 3rd party libraries include: Toby Petty's answer is another good option. If the exact timedelta is helpful to you, a slight modification on Adam.Er8's answer might be convenient: import calendar, datetime today = datetime.date.today() time_until_next_month = datetime.timedelta( calendar.monthrange(today.year, today.month)[1] - today.day + 1 ) first_of_next_month = today + time_until_next_month A: With Zope's DateTime library a very simple solution is possible from DateTime.DateTime import DateTime date = DateTime() # today while date.day() != 1: date += 1 print(date) A: I see so many wonderful solutions to this problem I personally was looking for a solution for getting the first and last day of the previous month when I stmbled on this question. But here is a solution I like to think is quite simple and elegant: date = datetime.datetime.now().date() same_time_next_month = date + datetime.timedelta(days = date.day) first_day_of_next_month_from_date = same_time_next_month - datetime.timedelta(days = same_time_next_month.day - 1) Here we simply add the day of the target date to the date to get the same time of the next month, and then remove the number of days elapsed from the new date gotten. A: Try this, for starting day of each month, change MonthEnd(1) to MonthBegin(1): import pandas as pd from pandas.tseries.offsets import MonthBegin, MonthEnd date_list = (pd.date_range('2021-01-01', '2022-01-31', freq='MS') + MonthEnd(1)).strftime('%Y-%m-%d').tolist() date_list Out: ['2021-01-31', '2021-02-28', '2021-03-31', '2021-04-30', '2021-05-31', '2021-06-30', '2021-07-31', '2021-08-31', '2021-09-30', '2021-10-31', '2021-11-30', '2021-12-31', '2022-01-31'] A: With python-dateutil: from datetime import date from dateutil.relativedelta import relativedelta last day of current month: date.today() + relativedelta(day=31) first day of next month: date.today() + relativedelta(day=31) + relativedelta(days=1)
How can I get the first day of the next month in Python?
How can I get the first date of the next month in Python? For example, if it's now 2019-12-31, the first day of the next month is 2020-01-01. If it's now 2019-08-01, the first day of the next month is 2019-09-01. I came up with this: import datetime def first_day_of_next_month(dt): '''Get the first day of the next month. Preserves the timezone. Args: dt (datetime.datetime): The current datetime Returns: datetime.datetime: The first day of the next month at 00:00:00. ''' if dt.month == 12: return datetime.datetime(year=dt.year+1, month=1, day=1, tzinfo=dt.tzinfo) else: return datetime.datetime(year=dt.year, month=dt.month+1, day=1, tzinfo=dt.tzinfo) # Example usage (assuming that today is 2021-01-28): first_day_of_next_month(datetime.datetime.now()) # Returns: datetime.datetime(2021, 2, 1, 0, 0) Is it correct? Is there a better way?
[ "Here is a 1-line solution using nothing more than the standard datetime library:\n(dt.replace(day=1) + datetime.timedelta(days=32)).replace(day=1)\n\nExamples:\n>>> dt = datetime.datetime(2016, 2, 29)\n>>> print((dt.replace(day=1) + datetime.timedelta(days=32)).replace(day=1))\n2016-03-01 00:00:00\n\n>>> dt = datetime.datetime(2019, 12, 31)\n>>> print((dt.replace(day=1) + datetime.timedelta(days=32)).replace(day=1))\n2020-01-01 00:00:00\n\n>>> dt = datetime.datetime(2019, 12, 1)\n>>> print((dt.replace(day=1) + datetime.timedelta(days=32)).replace(day=1))\n2020-01-01 00:00:00\n\n", "Using dateutil you can do it the most literally possible:\nimport datetime\nfrom dateutil import relativedelta\ntoday = datetime.date.today()\n\nnext_month = today + relativedelta.relativedelta(months=1, day=1)\n\nIn English: add 1 month(s) to the today's date and set the day (of the month) to 1. Note the usage of singular and plural forms of day(s) and month(s). Singular sets the attribute to a value, plural adds the number of periods.\nYou can store this relativedelta.relativedelta object to a variable and the pass it around. Other answers involve more programming logic.\nEDIT You can do it with the standard datetime library as well, but it's not so beautiful:\nnext_month = (today.replace(day=1) + datetime.timedelta(days=32)).replace(day=1)\n\nsets the date to the 1st of the current month, adds 32 days (or any number between 31 and 59 which guarantees to jump into the next month) and then sets the date to the 1st of that month.\n", "you can use calendar to get the number of days in a given month, then add timedelta(days=...), like this:\nfrom datetime import date, timedelta\nfrom calendar import monthrange\n\ndays_in_month = lambda dt: monthrange(dt.year, dt.month)[1]\ntoday = date.today()\nfirst_day = today.replace(day=1) + timedelta(days_in_month(today))\nprint(first_day)\n\nif you're fine with external deps, you can use dateutil (which I love...)\nfrom datetime import date\nfrom dateutil.relativedelta import relativedelta\n\ntoday = date.today()\nfirst_day = today.replace(day=1) + relativedelta(months=1)\nprint(first_day)\n\n", "Extract the year and month, add 1 and form a new date using the year, month and day=1:\nfrom datetime import date\n\nnow = date(2020,12,18)\ny,m = divmod(now.year*12+now.month,12)\nnextMonth = date(y,m+1,1)\n\nprint(now,nextMonth)\n# 2020-12-18 2021-01-01\n\n", "Your way looks good yet I would have done it this way:\nimport datetime\nfrom dateutil import relativedelta\n\ndt = datetime.datetime(year=1998,\n month=12,\n day=12)\n\nnextmonth = dt + relativedelta.relativedelta(months=1)\nnextmonth.replace(day=1)\nprint(nextmonth)\n\n", "Using only python standard libraries:\nimport datetime\ntoday = datetime.date.today()\nfirst_of_next_month = return date.replace(\n day=1,\n month=date.month % 12 + 1,\n year=date.year + (date.month // 12)\n)\n\ncould be generalized to...\ndef get_first_of_month(date, month_offset=0):\n # zero based indexing of month to make math work\n month_count = date.month - 1 + month_offset\n return date.replace(\n day=1, month=month_count % 12 + 1, year=date.year + (month_count // 12)\n )\n\nfirst_of_next_month = get_first_of_month(today, 1)\n\nOther solutions that don't require 3rd party libraries include:\n\nToby Petty's answer is another good option.\nIf the exact timedelta is helpful to you,\na slight modification on Adam.Er8's answer might be convenient:\nimport calendar, datetime\n\ntoday = datetime.date.today()\ntime_until_next_month = 
datetime.timedelta(\n calendar.monthrange(today.year, today.month)[1] - today.day + 1\n)\nfirst_of_next_month = today + time_until_next_month\n\n\n\n", "With Zope's DateTime library a very simple solution is possible\nfrom DateTime.DateTime import DateTime\n\ndate = DateTime() # today\n\nwhile date.day() != 1:\n date += 1\n\nprint(date)\n\n", "I see so many wonderful solutions to this problem I personally was looking for a solution for getting the first and last day of the previous month when I stmbled on this question.\nBut here is a solution I like to think is quite simple and elegant:\ndate = datetime.datetime.now().date()\nsame_time_next_month = date + datetime.timedelta(days = date.day)\nfirst_day_of_next_month_from_date = same_time_next_month - datetime.timedelta(days = same_time_next_month.day - 1)\n\nHere we simply add the day of the target date to the date to get the same time of the next month, and then remove the number of days elapsed from the new date gotten.\n", "Try this, for starting day of each month, change MonthEnd(1) to MonthBegin(1):\nimport pandas as pd\nfrom pandas.tseries.offsets import MonthBegin, MonthEnd\n\ndate_list = (pd.date_range('2021-01-01', '2022-01-31', \n freq='MS') + MonthEnd(1)).strftime('%Y-%m-%d').tolist()\ndate_list\n\nOut:\n['2021-01-31',\n '2021-02-28',\n '2021-03-31',\n '2021-04-30',\n '2021-05-31',\n '2021-06-30',\n '2021-07-31',\n '2021-08-31',\n '2021-09-30',\n '2021-10-31',\n '2021-11-30',\n '2021-12-31',\n '2022-01-31']\n\n", "With python-dateutil:\nfrom datetime import date\nfrom dateutil.relativedelta import relativedelta\n\nlast day of current month:\ndate.today() + relativedelta(day=31)\n\nfirst day of next month:\ndate.today() + relativedelta(day=31) + relativedelta(days=1)\n\n" ]
[ 64, 12, 11, 2, 0, 0, 0, 0, 0, 0 ]
[]
[]
[ "python", "python_datetime" ]
stackoverflow_0057353919_python_python_datetime.txt
Q: How to make a dictionary from 2 lists from web scraping I want to make a dataframe from web scrapping this page : https://www.airlinequality.com/airline-reviews/british-airways. The value i have is reviews from passenger and rating that passenger give, but i dont know how to make it to be a dataframe this is my code : import requests from bs4 import BeautifulSoup import pandas as pd base_url = "https://www.airlinequality.com/airline-reviews/british-airways" pages = 5 #10 page_size = 1 #100 reviews = [] aircraft = [] seat_type = [] route = [] recommended = [] rating = [] category = [] for i in range(1, pages + 1): print(f"Scraping page {i}") # Create URL to collect links from paginated data url = f"{base_url}/page/{i}/?sortby=post_date%3ADesc&pagesize={page_size}" # Collect HTML data from this page response = requests.get(url) # Parse content content = response.content parsed_content = BeautifulSoup(content, 'html.parser') for para in parsed_content.find_all("div", {"class": "text_content"}): reviews.append(para.get_text()) for para2 in parsed_content.find_all("div", {"class" : "review-stats"}): for para3 in para2.find_all('td',{'class' : 'review-value'}): rating.append(para3.get_text()) recomend = rating[-1] rating = rating[:-1] for para4 in para2.find_all('td',{'class' : 'review-rating-stars stars'}): para5 = len(para4.find_all('span', {'class' : 'star fill'})) rating.append(para5) rating.append(recomend) #print(rating) for para6 in para2.find_all('td',{'class' : 'review-rating-header'}): category.append(para6.get_text()) #print(category) print(f" ---> {len(reviews)} total reviews") output i get : in a simple way, that is what I was asking : first looping: category is [a, b, c, d, e] rating is [1, 2, 3, 4, 5] second looping: the category will append with [a, c, e, o, p, q] and rating will append with [9, 8, 7, 6, 5, 4] so the final data : category = [a, b, c, d, e, a, c, e, o, p, q] rating = [1, 2, 3, 4, 5, 9, 8, 7, 6, 5, 4] output that I want: A: Since each review's rating categories start with either "Type of Traveller" or "Aircraft" followed by "Type of Traveller", you could split them up into a list of dictionaries with cr = [(k, v) for k, v in zip(category, rating)] si = [i for i, (k, v) in enumerate(cr) if k == 'Type Of Traveller'] si = [(i - 1) if i != 0 and cr[i - 1][0] == 'Aircraft' else i for i in si] splitCr = [dict(cr[start:end]) for start, end in zip(si, (si[1:] + [len(cr)]))] However, it would be better to build a single list of dictionaries as we scrape [rather than to try to zip and split lists that are not guaranteed to have consistent lengths or contents] base_url = "https://www.airlinequality.com/airline-reviews/british-airways" pages = 3 # 5 # 10 page_size = 5 # 1 # 100 revList = [] avgSelRef = { 'rating10': '.rating-10 span[itemprop="ratingValue"]', 'header': 'div.info:has(h1[itemprop="name"])', 'subheader': '.review-count', 'reviewBody': '.skytrax-rating-mob img.skytrax-rating[alt]' } rbSel = '.body[id^="anchor"]' revSelRef = { 'rating10': '.rating-10 span[itemprop="ratingValue"]', 'header': f'{rbSel} h2.text_header', 'subheader': f'{rbSel} h3.text_sub_header', 'reviewBody': f'{rbSel} div[itemprop="reviewBody"]' } avgAdded = False for i in range(1, pages + 1): print("", end=f"Scraping page {i} of {pages} ") # Create URL to collect links from paginated data url = f"{base_url}/page/{i}/?sortby=post_date%3ADesc&pagesize={page_size}" # Collect HTML data from this page response = requests.get(url) if response.status_code != 200: print(f' -- !ERROR: 
"{response.raise_for_status()}"" getting {url}') continue content = response.content parsed_content = BeautifulSoup(content, 'html.parser') avSoups = parsed_content.select('div.review-info') rvSoups = parsed_content.select(f'article[itemprop="review"]:has({rbSel})') if avSoups and not avgAdded: rvSoups += avSoups for r in rvSoups: isAvg = r.name == 'div' if isAvg: rDets = {'reviewId': '[Average]'} selRef = avgSelRef.items() avgAdded = True else: revId = r.select_one(rbSel).get('id').replace('anchor', '', 1) selRef = revSelRef.items() rDets = {'reviewId': revId} for k, s in selRef: rdt = r.select_one(s) if rdt is None: continue if 'img' in s and s.endswith('[alt]'): rDets[k] = rdt.get('alt') else: rDets[k] = ' '.join(w for w in rdt.get_text(' ').split() if w) rhSel = 'td.review-rating-header' rRows = r.select(f'tr:has({rhSel} + td:is(.stars, .review-value))') for rr in rRows: k = rr.select_one(rhSel).get_text(' ').strip() k = k.replace(' For ', ' for ').replace(' & ', ' + ') # bit of cleanup if k.endswith('Staff Service'): k = 'Staff Service' # bit of cleanup if rr.select('td.stars'): rDets[f'[stars] {k}'] = len(rr.select('td.stars span.star.fill')) else: rDets[k] = rr.select_one('td.review-value').get_text().strip() revList = ([rDets] + revList) if isAvg else (revList + [rDets]) print(' - ', len(rvSoups), 'reviews --->', len(revList), 'total reviews') You could also view just the star ratings: A: You have the column values, just build the DataFrame Eg., from pandas import DataFrame category = ["Aircraft", 'Type of Traveller', 'Seat Type'] rating = ['A320', 'Solo', 'Business Class'] # Create the records from both list, using zip and dict calls. data_dict = dict(zip(category, rating)) # Build the dataframe from the dictionary. df = DataFrame.from_records(data_dict, columns=category, index=[0]) print(df) Looks like this.
How to make a dictionary from 2 lists from web scraping
I want to make a dataframe from web scrapping this page : https://www.airlinequality.com/airline-reviews/british-airways. The value i have is reviews from passenger and rating that passenger give, but i dont know how to make it to be a dataframe this is my code : import requests from bs4 import BeautifulSoup import pandas as pd base_url = "https://www.airlinequality.com/airline-reviews/british-airways" pages = 5 #10 page_size = 1 #100 reviews = [] aircraft = [] seat_type = [] route = [] recommended = [] rating = [] category = [] for i in range(1, pages + 1): print(f"Scraping page {i}") # Create URL to collect links from paginated data url = f"{base_url}/page/{i}/?sortby=post_date%3ADesc&pagesize={page_size}" # Collect HTML data from this page response = requests.get(url) # Parse content content = response.content parsed_content = BeautifulSoup(content, 'html.parser') for para in parsed_content.find_all("div", {"class": "text_content"}): reviews.append(para.get_text()) for para2 in parsed_content.find_all("div", {"class" : "review-stats"}): for para3 in para2.find_all('td',{'class' : 'review-value'}): rating.append(para3.get_text()) recomend = rating[-1] rating = rating[:-1] for para4 in para2.find_all('td',{'class' : 'review-rating-stars stars'}): para5 = len(para4.find_all('span', {'class' : 'star fill'})) rating.append(para5) rating.append(recomend) #print(rating) for para6 in para2.find_all('td',{'class' : 'review-rating-header'}): category.append(para6.get_text()) #print(category) print(f" ---> {len(reviews)} total reviews") output i get : in a simple way, that is what I was asking : first looping: category is [a, b, c, d, e] rating is [1, 2, 3, 4, 5] second looping: the category will append with [a, c, e, o, p, q] and rating will append with [9, 8, 7, 6, 5, 4] so the final data : category = [a, b, c, d, e, a, c, e, o, p, q] rating = [1, 2, 3, 4, 5, 9, 8, 7, 6, 5, 4] output that I want:
[ "Since each review's rating categories start with either \"Type of Traveller\" or \"Aircraft\" followed by \"Type of Traveller\", you could split them up into a list of dictionaries with\ncr = [(k, v) for k, v in zip(category, rating)]\nsi = [i for i, (k, v) in enumerate(cr) if k == 'Type Of Traveller']\nsi = [(i - 1) if i != 0 and cr[i - 1][0] == 'Aircraft' else i for i in si]\nsplitCr = [dict(cr[start:end]) for start, end in zip(si, (si[1:] + [len(cr)]))]\n\n\n\n\nHowever, it would be better to build a single list of dictionaries as we scrape [rather than to try to zip and split lists that are not guaranteed to have consistent lengths or contents]\nbase_url = \"https://www.airlinequality.com/airline-reviews/british-airways\"\npages = 3 # 5 # 10\npage_size = 5 # 1 # 100\n\nrevList = []\navgSelRef = {\n 'rating10': '.rating-10 span[itemprop=\"ratingValue\"]',\n 'header': 'div.info:has(h1[itemprop=\"name\"])',\n 'subheader': '.review-count',\n 'reviewBody': '.skytrax-rating-mob img.skytrax-rating[alt]'\n}\nrbSel = '.body[id^=\"anchor\"]'\nrevSelRef = {\n 'rating10': '.rating-10 span[itemprop=\"ratingValue\"]',\n 'header': f'{rbSel} h2.text_header',\n 'subheader': f'{rbSel} h3.text_sub_header',\n 'reviewBody': f'{rbSel} div[itemprop=\"reviewBody\"]'\n} \n\navgAdded = False\nfor i in range(1, pages + 1): \n print(\"\", end=f\"Scraping page {i} of {pages} \")\n\n # Create URL to collect links from paginated data\n url = f\"{base_url}/page/{i}/?sortby=post_date%3ADesc&pagesize={page_size}\"\n\n # Collect HTML data from this page\n response = requests.get(url)\n if response.status_code != 200: \n print(f' -- !ERROR: \"{response.raise_for_status()}\"\" getting {url}')\n continue\n content = response.content\n parsed_content = BeautifulSoup(content, 'html.parser')\n\n avSoups = parsed_content.select('div.review-info')\n rvSoups = parsed_content.select(f'article[itemprop=\"review\"]:has({rbSel})')\n if avSoups and not avgAdded: rvSoups += avSoups\n for r in rvSoups:\n isAvg = r.name == 'div'\n if isAvg:\n rDets = {'reviewId': '[Average]'} \n selRef = avgSelRef.items()\n avgAdded = True\n else:\n revId = r.select_one(rbSel).get('id').replace('anchor', '', 1)\n selRef = revSelRef.items()\n rDets = {'reviewId': revId} \n \n for k, s in selRef:\n rdt = r.select_one(s) \n if rdt is None: continue\n if 'img' in s and s.endswith('[alt]'):\n rDets[k] = rdt.get('alt') \n else:\n rDets[k] = ' '.join(w for w in rdt.get_text(' ').split() if w)\n\n rhSel = 'td.review-rating-header'\n rRows = r.select(f'tr:has({rhSel} + td:is(.stars, .review-value))')\n for rr in rRows: \n k = rr.select_one(rhSel).get_text(' ').strip()\n k = k.replace(' For ', ' for ').replace(' & ', ' + ') # bit of cleanup\n if k.endswith('Staff Service'): k = 'Staff Service' # bit of cleanup\n if rr.select('td.stars'): \n rDets[f'[stars] {k}'] = len(rr.select('td.stars span.star.fill'))\n else: \n rDets[k] = rr.select_one('td.review-value').get_text().strip()\n\n revList = ([rDets] + revList) if isAvg else (revList + [rDets])\n print(' - ', len(rvSoups), 'reviews --->', len(revList), 'total reviews') \n\n\n\nYou could also view just the star ratings: \n", "You have the column values, just build the DataFrame\nEg.,\nfrom pandas import DataFrame\n\ncategory = [\"Aircraft\", 'Type of Traveller', 'Seat Type']\nrating = ['A320', 'Solo', 'Business Class']\n\n# Create the records from both list, using zip and dict calls.\ndata_dict = dict(zip(category, rating))\n\n# Build the dataframe from the dictionary.\ndf = DataFrame.from_records(data_dict, 
columns=category, index=[0])\n\nprint(df)\n\nLooks like this. \n" ]
[ 1, 0 ]
[]
[]
[ "beautifulsoup", "dataframe", "python", "web_scraping" ]
stackoverflow_0074596046_beautifulsoup_dataframe_python_web_scraping.txt
Q: Slow opening of files in python Currently, I'm writing program which needs to load over 13K "*.json" files of different sizes from few lines to 100K lines. Reading looks like: [read_one_JSON(p) for p in filenames] def read_one_JSON(path: str): with open(path, encoding='utf-8') as fh: data = json.load(fh) return File(data["_File__name"], data['_File__statements'], data['_File__matches']) I load file, pass it into class File and read another file. Currently it takes about 2 minutes 20 seconds. I found out, that when I remove processing of the data into class and make just: [read_one_JSON(p) for p in filenames] def read_one_JSON(path: str): with open(path, encoding='utf-8') as fh: data = json.load(fh) It reduces time just by 10 seconds to 2 minutes and 10 seconds. So, then I removed also json.load to see what causes the time of reading. So, when leaving just: [read_one_JSON(p) for p in filenames] def read_one_JSON(path: str): with open(path, encoding='utf-8') as fh: and not reading the data it still lasts 1 minute 45 seconds. It means, the opening of the files is slow. Is there any way to speed up the opening part of the process, without putting everything into one file or parallelization? It is an option, but I would like to know if there is something else to do about that. Before, I realised such bottle neck I tried libraries like ujson, orjson, msgspec, but since the opening phase is slow, it made just small differences. A: Creating 13000 files in the current directory : import json from tqdm import tqdm # pip install tqdm for i in tqdm(range(13_000)): filename = f"data_{i}.json" data = {"filename": filename} with open(filename, "w") as file: json.dump(data, file) 100%|██████████| 13000/13000 [00:01<00:00, 8183.74it/s] Which means it ran for less than 2 seconds on my computer. tqdm is just a very simple way to see throughput. The script produced files like : {"filename": "data_0.json"} Then reading them : import json from tqdm import tqdm # pip install tqdm for i in tqdm(range(13_000)): filename = f"data_{i}.json" with open(filename, "rt") as file: data = json.load(file) print(data) 100%|██████████| 13000/13000 [00:00<00:00, 16472.00it/s] {'filename': 'data_12999.json'} Which means that they were all read in less than one second. Maybe it comes from the size of the files you read. If you have many large files, indeed it will take more time. But your disk does not seem like the only cause for the slowness.
Slow opening of files in python
Currently, I'm writing a program which needs to load over 13K "*.json" files of different sizes, from a few lines to 100K lines. Reading looks like: [read_one_JSON(p) for p in filenames] def read_one_JSON(path: str): with open(path, encoding='utf-8') as fh: data = json.load(fh) return File(data["_File__name"], data['_File__statements'], data['_File__matches']) I load a file, pass it into the class File and read another file. Currently it takes about 2 minutes 20 seconds. I found out that when I remove the processing of the data into the class and do just: [read_one_JSON(p) for p in filenames] def read_one_JSON(path: str): with open(path, encoding='utf-8') as fh: data = json.load(fh) it reduces the time by just 10 seconds, to 2 minutes and 10 seconds. Then I also removed json.load to see what causes the reading time. When leaving just: [read_one_JSON(p) for p in filenames] def read_one_JSON(path: str): with open(path, encoding='utf-8') as fh: and not reading the data, it still takes 1 minute 45 seconds. It means the opening of the files is slow. Is there any way to speed up the opening part of the process, without putting everything into one file or parallelization? That is an option, but I would like to know if there is something else I can do about it. Before I realised this bottleneck, I tried libraries like ujson, orjson and msgspec, but since the opening phase is slow, they made only small differences.
[ "Creating 13000 files in the current directory :\nimport json\n\nfrom tqdm import tqdm # pip install tqdm\n\nfor i in tqdm(range(13_000)):\n filename = f\"data_{i}.json\"\n data = {\"filename\": filename}\n with open(filename, \"w\") as file:\n json.dump(data, file)\n\n100%|██████████| 13000/13000 [00:01<00:00, 8183.74it/s]\n\nWhich means it ran for less than 2 seconds on my computer. tqdm is just a very simple way to see throughput.\nThe script produced files like :\n{\"filename\": \"data_0.json\"}\n\nThen reading them :\nimport json\n\nfrom tqdm import tqdm # pip install tqdm\n\nfor i in tqdm(range(13_000)):\n filename = f\"data_{i}.json\"\n with open(filename, \"rt\") as file:\n data = json.load(file)\nprint(data)\n\n100%|██████████| 13000/13000 [00:00<00:00, 16472.00it/s]\n{'filename': 'data_12999.json'}\n\nWhich means that they were all read in less than one second.\nMaybe it comes from the size of the files you read. If you have many large files, indeed it will take more time. But your disk does not seem like the only cause for the slowness.\n" ]
[ 1 ]
[]
[]
[ "file", "json", "python", "python_3.x" ]
stackoverflow_0074593731_file_json_python_python_3.x.txt
Q: Better way to use pandas DataFrameGroupBy objects Ok so this is more of a question about how to properly use the groupby method since I am kinda struggling to use the DataFrameGroupBy object itself. Basically I have a big DataFrame with the following structure: DATE PRODUCT PRICE CAPACITY 01.07.2022 NEG_00_04 3,7 7 01.07.2022 NEG_00_04 1,7 3 01.07.2022 NEG_00_04 2,4 5 01.07.2022 NEG_00_04 2,2 7 01.07.2022 POS_00_04 3,7 2 01.07.2022 POS_00_04 3,2 5 01.07.2022 POS_00_04 1,5 2 01.07.2022 POS_00_04 2,4 3 My goal is to groupby the 'DATE' and 'PRODUCT' columns and get a cumulative capacity based on an ascending price. So basically the order of operation is to groupby the two columns then sort each group by the 'PRICE' column and calculate the cumulative capacity. the end result based on the sample table should look like this: DATE PRODUCT PRICE CAPACITY CUMULATIVE 01.07.2022 NEG_00_04 1,7 3 3 01.07.2022 NEG_00_04 2,2 7 10 01.07.2022 NEG_00_04 2,4 5 15 01.07.2022 NEG_00_04 3,7 7 22 01.07.2022 POS_00_04 1,5 2 2 01.07.2022 POS_00_04 2,4 3 5 01.07.2022 POS_00_04 3,2 5 10 01.07.2022 POS_00_04 3,7 2 12 I already have a solution that does work but I was wondering if there isn't a better way to work with DataFrameGroupBy objects since I always just iterate through them with a for loop and it just doesn't seem right. This is how I did it: df_result = pd.DataFrame() for i, group in df.groupby(by=['DATE', 'PRODUCT']): group.sort_values('PRICE', inplace=True) group['CUMULATIVE'] = group['CAPACITY'].cumsum() df_result = pd.concat([df_result, group], ignore_index=True) I would appreciate any suggestions for improvement :) A: Use: df = df.sort_values('PRICE') df['CUMULATIVE'] = df.groupby(by=['DATE', 'PRODUCT'])['CAPACITY'].cumsum() Or: df = df.sort_values(['PRICE','DATE', 'PRODUCT']) df['CUMULATIVE'] = df.groupby(by=['DATE', 'PRODUCT'])['CAPACITY'].cumsum()
Better way to use pandas DataFrameGroupBy objects
Ok so this is more of a question about how to properly use the groupby method since I am kinda struggling to use the DataFrameGroupBy object itself. Basically I have a big DataFrame with the following structure: DATE PRODUCT PRICE CAPACITY 01.07.2022 NEG_00_04 3,7 7 01.07.2022 NEG_00_04 1,7 3 01.07.2022 NEG_00_04 2,4 5 01.07.2022 NEG_00_04 2,2 7 01.07.2022 POS_00_04 3,7 2 01.07.2022 POS_00_04 3,2 5 01.07.2022 POS_00_04 1,5 2 01.07.2022 POS_00_04 2,4 3 My goal is to groupby the 'DATE' and 'PRODUCT' columns and get a cumulative capacity based on an ascending price. So basically the order of operation is to groupby the two columns then sort each group by the 'PRICE' column and calculate the cumulative capacity. the end result based on the sample table should look like this: DATE PRODUCT PRICE CAPACITY CUMULATIVE 01.07.2022 NEG_00_04 1,7 3 3 01.07.2022 NEG_00_04 2,2 7 10 01.07.2022 NEG_00_04 2,4 5 15 01.07.2022 NEG_00_04 3,7 7 22 01.07.2022 POS_00_04 1,5 2 2 01.07.2022 POS_00_04 2,4 3 5 01.07.2022 POS_00_04 3,2 5 10 01.07.2022 POS_00_04 3,7 2 12 I already have a solution that does work but I was wondering if there isn't a better way to work with DataFrameGroupBy objects since I always just iterate through them with a for loop and it just doesn't seem right. This is how I did it: df_result = pd.DataFrame() for i, group in df.groupby(by=['DATE', 'PRODUCT']): group.sort_values('PRICE', inplace=True) group['CUMULATIVE'] = group['CAPACITY'].cumsum() df_result = pd.concat([df_result, group], ignore_index=True) I would appreciate any suggestions for improvement :)
[ "Use:\ndf = df.sort_values('PRICE')\ndf['CUMULATIVE'] = df.groupby(by=['DATE', 'PRODUCT'])['CAPACITY'].cumsum()\n\nOr:\ndf = df.sort_values(['PRICE','DATE', 'PRODUCT'])\ndf['CUMULATIVE'] = df.groupby(by=['DATE', 'PRODUCT'])['CAPACITY'].cumsum()\n\n" ]
[ 1 ]
[]
[]
[ "data_science", "dataframe", "group_by", "pandas", "python" ]
stackoverflow_0074601457_data_science_dataframe_group_by_pandas_python.txt
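A follow-up sketch for the groupby question above: the sample PRICE values use comma decimals ('3,7'), which need to become real floats before any sorting or summing; once converted, the sort-then-groupby-cumsum from the answer replaces the explicit loop entirely:
import pandas as pd
df = pd.DataFrame({
    'DATE': ['01.07.2022'] * 4,
    'PRODUCT': ['NEG_00_04', 'NEG_00_04', 'POS_00_04', 'POS_00_04'],
    'PRICE': ['3,7', '1,7', '3,7', '1,5'],     # comma decimals as in the sample table
    'CAPACITY': [7, 3, 2, 2],
})
# comma decimals -> floats, otherwise the sort and cumsum operate on strings
df['PRICE'] = df['PRICE'].str.replace(',', '.', regex=False).astype(float)
df = df.sort_values(['DATE', 'PRODUCT', 'PRICE'], ignore_index=True)
df['CUMULATIVE'] = df.groupby(['DATE', 'PRODUCT'])['CAPACITY'].cumsum()
print(df)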
Q: Pip install opencv-python stuck on installing build dependencies I am using latest pip version 22.3.1 and trying to install opencv-python but it's always stuck on Installing Build dependencies after which this error comes out. `pip install -r src/requirements.txt Collecting opencv-python==4.3.0.38 Downloading opencv-python-4.3.0.38.tar.gz (88.0 MB) ━━━━━━━━━━━━ 88.0/88.0 1.6 MB/s eta 0:00:00 MB Installing build dependencies ... error error: subprocess-exited-with-error × pip subprocess to install build dependencies did not run successfully. │ exit code: 1 ╰─> [354 lines of output] Ignoring numpy: markers 'python_version == "3.5"' don't match your environment Ignoring numpy: markers 'python_version == "3.6"' don't match your environment Ignoring numpy: markers 'python_version == "3.7"' don't match your environment Collecting setuptools Using cached setuptools-65.6.3-py3-none-any.whl (1.2 MB) Collecting wheel Using cached wheel-0.38.4-py3-none-any.whl (36 kB) Collecting scikit-build Using cached scikit_build-0.16.2-py3-none-any.whl (78 kB) Collecting cmake Using cached cmake-3.25.0.tar.gz (33 kB) Installing build dependencies: started Installing build dependencies: finished with status 'done' Getting requirements to build wheel: started Getting requirements to build wheel: finished with status 'done' Preparing metadata (pyproject.toml): started Preparing metadata (pyproject.toml): finished with status 'done' Collecting pip Using cached pip-22.3.1-py3-none-any.whl (2.1 MB) Collecting numpy==1.17.3 Downloading numpy-1.17.3.zip (6.4 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.4/6.4 MB 3.3 MB/s eta 0:00:00 Preparing metadata (setup.py): started Preparing metadata (setup.py): finished with status 'done' Collecting distro Using cached distro-1.8.0-py3-none-any.whl (20 kB) Collecting packaging Using cached packaging-21.3-py3-none-any.whl (40 kB) Collecting pyparsing!=3.0.5,>=2.0.2 Using cached pyparsing-3.0.9-py3-none-any.whl (98 kB) Building wheels for collected packages: numpy, cmake Building wheel for numpy (setup.py): started Building wheel for numpy (setup.py): finished with status 'error' error: subprocess-exited-with-error × python setup.py bdist_wheel did not run successfully. │ exit code: 1 ╰─> [247 lines of output] Running from numpy source directory. 
blas_opt_info: blas_mkl_info: customize UnixCCompiler libraries mkl_rt not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE blis_info: customize UnixCCompiler libraries blis not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE openblas_info: customize UnixCCompiler customize UnixCCompiler libraries openblas not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE atlas_3_10_blas_threads_info: Setting PTATLAS=ATLAS customize UnixCCompiler libraries tatlas not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE atlas_3_10_blas_info: customize UnixCCompiler libraries satlas not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE atlas_blas_threads_info: Setting PTATLAS=ATLAS customize UnixCCompiler libraries ptf77blas,ptcblas,atlas not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE atlas_blas_info: customize UnixCCompiler libraries f77blas,cblas,atlas not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE accelerate_info: NOT AVAILABLE /data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/system_info.py:690: UserWarning: Optimized (vendor) Blas libraries are not found. Falls back to netlib Blas library which has worse performance. A better performance should be easily gained by switching Blas library. self.calc_info() blas_info: customize UnixCCompiler libraries blas not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE /data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/system_info.py:690: UserWarning: Blas (http://www.netlib.org/blas/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [blas]) or by setting the BLAS environment variable. self.calc_info() blas_src_info: NOT AVAILABLE /data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/system_info.py:690: UserWarning: Blas (http://www.netlib.org/blas/) sources not found. Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [blas_src]) or by setting the BLAS_SRC environment variable. 
self.calc_info() NOT AVAILABLE /data/data/com.learnprogramming.codecamp/files/usr/bin/sh: 1: svnversion: not found non-existing path in 'numpy/distutils': 'site.cfg' lapack_opt_info: lapack_mkl_info: customize UnixCCompiler libraries mkl_rt not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE openblas_lapack_info: customize UnixCCompiler customize UnixCCompiler libraries openblas not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE openblas_clapack_info: customize UnixCCompiler customize UnixCCompiler libraries openblas,lapack not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE flame_info: customize UnixCCompiler libraries flame not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE atlas_3_10_threads_info: Setting PTATLAS=ATLAS customize UnixCCompiler libraries lapack_atlas not found in /data/data/com.learnprogramming.codecamp/files/usr/lib customize UnixCCompiler libraries tatlas,tatlas not found in /data/data/com.learnprogramming.codecamp/files/usr/lib <class 'numpy.distutils.system_info.atlas_3_10_threads_info'> NOT AVAILABLE atlas_3_10_info: customize UnixCCompiler libraries lapack_atlas not found in /data/data/com.learnprogramming.codecamp/files/usr/lib customize UnixCCompiler libraries satlas,satlas not found in /data/data/com.learnprogramming.codecamp/files/usr/lib <class 'numpy.distutils.system_info.atlas_3_10_info'> NOT AVAILABLE atlas_threads_info: Setting PTATLAS=ATLAS customize UnixCCompiler libraries lapack_atlas not found in /data/data/com.learnprogramming.codecamp/files/usr/lib customize UnixCCompiler libraries ptf77blas,ptcblas,atlas not found in /data/data/com.learnprogramming.codecamp/files/usr/lib <class 'numpy.distutils.system_info.atlas_threads_info'> NOT AVAILABLE atlas_info: customize UnixCCompiler libraries lapack_atlas not found in /data/data/com.learnprogramming.codecamp/files/usr/lib customize UnixCCompiler libraries f77blas,cblas,atlas not found in /data/data/com.learnprogramming.codecamp/files/usr/lib <class 'numpy.distutils.system_info.atlas_info'> NOT AVAILABLE lapack_info: customize UnixCCompiler libraries lapack not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE /data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/system_info.py:1712: UserWarning: Lapack (http://www.netlib.org/lapack/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [lapack]) or by setting the LAPACK environment variable. if getattr(self, '_calc_info_{}'.format(lapack))(): lapack_src_info: NOT AVAILABLE /data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/system_info.py:1712: UserWarning: Lapack (http://www.netlib.org/lapack/) sources not found. Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [lapack_src]) or by setting the LAPACK_SRC environment variable. 
if getattr(self, '_calc_info_{}'.format(lapack))(): NOT AVAILABLE /data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/dist.py:274: UserWarning: Unknown distribution option: 'define_macros' warnings.warn(msg) running bdist_wheel running build running config_cc unifing config_cc, config, build_clib, build_ext, build commands --compiler options running config_fc unifing config_fc, config, build_clib, build_ext, build commands --fcompiler options running build_src build_src building py_modules sources creating build creating build/src.linux-aarch64-3.1 creating build/src.linux-aarch64-3.1/numpy creating build/src.linux-aarch64-3.1/numpy/distutils building library "npymath" sources get_default_fcompiler: matching types: '['gnu95', 'intel', 'lahey', 'pg', 'absoft', 'nag', 'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor']' customize Gnu95FCompiler Could not locate executable gfortran Could not locate executable f95 customize IntelFCompiler Could not locate executable ifort Could not locate executable ifc customize LaheyFCompiler Could not locate executable lf95 customize PGroupFCompiler Could not locate executable pgfortran customize AbsoftFCompiler Could not locate executable f90 Could not locate executable f77 customize NAGFCompiler customize VastFCompiler customize CompaqFCompiler Could not locate executable fort customize IntelItaniumFCompiler Could not locate executable efort Could not locate executable efc customize IntelEM64TFCompiler customize GnuFCompiler Could not locate executable g77 customize G95FCompiler Could not locate executable g95 customize PathScaleFCompiler Could not locate executable pathf95 customize NAGFORCompiler Could not locate executable nagfor don't know how to compile Fortran code on platform 'posix' C compiler: aarch64-linux-android-clang -Wno-unused-result -Wsign-compare -Wunreachable-code -DNDEBUG -g -fwrapv -O3 -Wall -fstack-protector-strong -O3 -fstack-protector-strong -O3 -fPIC compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/data/data/com.learnprogramming.codecamp/files/usr/include/python3.10 -c' aarch64-linux-android-clang: _configtest.c failure. 
removing: _configtest.c _configtest.o Traceback (most recent call last): File "<string>", line 2, in <module> File "<pip-setuptools-caller>", line 34, in <module> File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/setup.py", line 443, in <module> setup_package() File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/setup.py", line 435, in setup_package setup(**metadata) File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/core.py", line 171, in setup return old_setup(**new_attr) File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/site-packages/setuptools/__init__.py", line 153, in setup return distutils.core.setup(**attrs) File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/core.py", line 148, in setup dist.run_commands() File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/dist.py", line 966, in run_commands self.run_command(cmd) File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/dist.py", line 985, in run_command cmd_obj.run() File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/site-packages/wheel/bdist_wheel.py", line 325, in run self.run_command("build") File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/cmd.py", line 313, in run_command self.distribution.run_command(command) File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/dist.py", line 985, in run_command cmd_obj.run() File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/command/build.py", line 47, in run old_build.run(self) File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/command/build.py", line 135, in run self.run_command(cmd_name) File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/cmd.py", line 313, in run_command self.distribution.run_command(command) File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/dist.py", line 985, in run_command cmd_obj.run() File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/command/build_src.py", line 142, in run self.build_sources() File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/command/build_src.py", line 153, in build_sources self.build_library_sources(*libname_info) File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/command/build_src.py", line 286, in build_library_sources sources = self.generate_sources(sources, (lib_name, build_info)) File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/command/build_src.py", line 369, in generate_sources source = func(extension, build_dir) File "numpy/core/setup.py", line 669, in get_mathlib_info raise RuntimeError("Broken toolchain: cannot link a simple C program") RuntimeError: Broken toolchain: cannot link a simple C program [end of output] note: This error originates from a subprocess, and is likely not a problem with pip. 
ERROR: Failed building wheel for numpy Running setup.py clean for numpy error: subprocess-exited-with-error × python setup.py clean did not run successfully. │ exit code: 1 ╰─> [10 lines of output] Running from numpy source directory. `setup.py clean` is not supported, use one of the following instead: - `git clean -xdf` (cleans all files) - `git clean -Xdf` (cleans all versioned files, doesn't touch files that aren't checked into the git repo) Add `--force` to your command to use it anyway if you must (unsupported). [end of output] note: This error originates from a subprocess, and is likely not a problem with pip. ERROR: Failed cleaning build dir for numpy Building wheel for cmake (pyproject.toml): started Building wheel for cmake (pyproject.toml): finished with status 'error' error: subprocess-exited-with-error × Building wheel for cmake (pyproject.toml) did not run successfully. │ exit code: 1 ╰─> [32 lines of output] Traceback (most recent call last): File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-build-env-7nym68sq/overlay/lib/python3.10/site-packages/skbuild/setuptools_wrap.py", line 612, in setup cmkr = cmaker.CMaker(cmake_executable) File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-build-env-7nym68sq/overlay/lib/python3.10/site-packages/skbuild/cmaker.py", line 148, in __init__ self.cmake_version = get_cmake_version(self.cmake_executable) File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-build-env-7nym68sq/overlay/lib/python3.10/site-packages/skbuild/cmaker.py", line 103, in get_cmake_version raise SKBuildError( =============================DEBUG ASSISTANCE============================= If you are seeing a compilation error please try the following steps to successfully install cmake: 1) Upgrade to the latest pip and try again. This will fix errors for most users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip 2) If on Linux, with glibc < 2.12, you can set PIP_ONLY_BINARY=cmake in order to retrieve the last manylinux1 compatible wheel. 3) If on Linux, with glibc < 2.12, you can cap "cmake<3.23" in your requirements in order to retrieve the last manylinux1 compatible wheel. 4) Open an issue with the debug information that follows at https://github.com/scikit-build/cmake-python-distributions/issues Python: 3.10.4 platform: Linux-4.19.127+-aarch64-with-libc machine: aarch64 bits: 64 pip: n/a setuptools: 65.6.3 scikit-build: 0.16.2 PEP517_BUILD_BACKEND=setuptools.build_meta =============================DEBUG ASSISTANCE============================= Problem with the CMake installation, aborting build. CMake executable is cmake [end of output] note: This error originates from a subprocess, and is likely not a problem with pip. ERROR: Failed building wheel for cmake Failed to build numpy cmake ERROR: Could not build wheels for cmake, which is required to install pyproject.toml-based projects [end of output] note: This error originates from a subprocess, and is likely not a problem with pip. error: subprocess-exited-with-error × pip subprocess to install build dependencies did not run successfully. │ exit code: 1 ╰─> See above for output. note: This error originates from a subprocess, and is likely not a problem with pip.` I tried upgrading pip, reinstalling it, installing a different version of opencv but nothing works. A: Try adding numpy to your requirements.txt, e.g. as follows: numpy==1.23.4 as this may be caused by a missing numpy installation, from which open-cv is depending. 
Important: therefore you have to add it before open-cv to the requirements! If that does not work, maybe try the solution presented here: No BLAS/LAPACK libraries found when installing SciPy
Pip install opencv-python stuck on installing build dependencies
I am using latest pip version 22.3.1 and trying to install opencv-python but it's always stuck on Installing Build dependencies after which this error comes out. `pip install -r src/requirements.txt Collecting opencv-python==4.3.0.38 Downloading opencv-python-4.3.0.38.tar.gz (88.0 MB) ━━━━━━━━━━━━ 88.0/88.0 1.6 MB/s eta 0:00:00 MB Installing build dependencies ... error error: subprocess-exited-with-error × pip subprocess to install build dependencies did not run successfully. │ exit code: 1 ╰─> [354 lines of output] Ignoring numpy: markers 'python_version == "3.5"' don't match your environment Ignoring numpy: markers 'python_version == "3.6"' don't match your environment Ignoring numpy: markers 'python_version == "3.7"' don't match your environment Collecting setuptools Using cached setuptools-65.6.3-py3-none-any.whl (1.2 MB) Collecting wheel Using cached wheel-0.38.4-py3-none-any.whl (36 kB) Collecting scikit-build Using cached scikit_build-0.16.2-py3-none-any.whl (78 kB) Collecting cmake Using cached cmake-3.25.0.tar.gz (33 kB) Installing build dependencies: started Installing build dependencies: finished with status 'done' Getting requirements to build wheel: started Getting requirements to build wheel: finished with status 'done' Preparing metadata (pyproject.toml): started Preparing metadata (pyproject.toml): finished with status 'done' Collecting pip Using cached pip-22.3.1-py3-none-any.whl (2.1 MB) Collecting numpy==1.17.3 Downloading numpy-1.17.3.zip (6.4 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.4/6.4 MB 3.3 MB/s eta 0:00:00 Preparing metadata (setup.py): started Preparing metadata (setup.py): finished with status 'done' Collecting distro Using cached distro-1.8.0-py3-none-any.whl (20 kB) Collecting packaging Using cached packaging-21.3-py3-none-any.whl (40 kB) Collecting pyparsing!=3.0.5,>=2.0.2 Using cached pyparsing-3.0.9-py3-none-any.whl (98 kB) Building wheels for collected packages: numpy, cmake Building wheel for numpy (setup.py): started Building wheel for numpy (setup.py): finished with status 'error' error: subprocess-exited-with-error × python setup.py bdist_wheel did not run successfully. │ exit code: 1 ╰─> [247 lines of output] Running from numpy source directory. 
blas_opt_info: blas_mkl_info: customize UnixCCompiler libraries mkl_rt not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE blis_info: customize UnixCCompiler libraries blis not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE openblas_info: customize UnixCCompiler customize UnixCCompiler libraries openblas not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE atlas_3_10_blas_threads_info: Setting PTATLAS=ATLAS customize UnixCCompiler libraries tatlas not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE atlas_3_10_blas_info: customize UnixCCompiler libraries satlas not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE atlas_blas_threads_info: Setting PTATLAS=ATLAS customize UnixCCompiler libraries ptf77blas,ptcblas,atlas not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE atlas_blas_info: customize UnixCCompiler libraries f77blas,cblas,atlas not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE accelerate_info: NOT AVAILABLE /data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/system_info.py:690: UserWarning: Optimized (vendor) Blas libraries are not found. Falls back to netlib Blas library which has worse performance. A better performance should be easily gained by switching Blas library. self.calc_info() blas_info: customize UnixCCompiler libraries blas not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE /data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/system_info.py:690: UserWarning: Blas (http://www.netlib.org/blas/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [blas]) or by setting the BLAS environment variable. self.calc_info() blas_src_info: NOT AVAILABLE /data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/system_info.py:690: UserWarning: Blas (http://www.netlib.org/blas/) sources not found. Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [blas_src]) or by setting the BLAS_SRC environment variable. 
self.calc_info() NOT AVAILABLE /data/data/com.learnprogramming.codecamp/files/usr/bin/sh: 1: svnversion: not found non-existing path in 'numpy/distutils': 'site.cfg' lapack_opt_info: lapack_mkl_info: customize UnixCCompiler libraries mkl_rt not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE openblas_lapack_info: customize UnixCCompiler customize UnixCCompiler libraries openblas not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE openblas_clapack_info: customize UnixCCompiler customize UnixCCompiler libraries openblas,lapack not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE flame_info: customize UnixCCompiler libraries flame not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE atlas_3_10_threads_info: Setting PTATLAS=ATLAS customize UnixCCompiler libraries lapack_atlas not found in /data/data/com.learnprogramming.codecamp/files/usr/lib customize UnixCCompiler libraries tatlas,tatlas not found in /data/data/com.learnprogramming.codecamp/files/usr/lib <class 'numpy.distutils.system_info.atlas_3_10_threads_info'> NOT AVAILABLE atlas_3_10_info: customize UnixCCompiler libraries lapack_atlas not found in /data/data/com.learnprogramming.codecamp/files/usr/lib customize UnixCCompiler libraries satlas,satlas not found in /data/data/com.learnprogramming.codecamp/files/usr/lib <class 'numpy.distutils.system_info.atlas_3_10_info'> NOT AVAILABLE atlas_threads_info: Setting PTATLAS=ATLAS customize UnixCCompiler libraries lapack_atlas not found in /data/data/com.learnprogramming.codecamp/files/usr/lib customize UnixCCompiler libraries ptf77blas,ptcblas,atlas not found in /data/data/com.learnprogramming.codecamp/files/usr/lib <class 'numpy.distutils.system_info.atlas_threads_info'> NOT AVAILABLE atlas_info: customize UnixCCompiler libraries lapack_atlas not found in /data/data/com.learnprogramming.codecamp/files/usr/lib customize UnixCCompiler libraries f77blas,cblas,atlas not found in /data/data/com.learnprogramming.codecamp/files/usr/lib <class 'numpy.distutils.system_info.atlas_info'> NOT AVAILABLE lapack_info: customize UnixCCompiler libraries lapack not found in ['/data/data/com.learnprogramming.codecamp/files/usr/lib'] NOT AVAILABLE /data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/system_info.py:1712: UserWarning: Lapack (http://www.netlib.org/lapack/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [lapack]) or by setting the LAPACK environment variable. if getattr(self, '_calc_info_{}'.format(lapack))(): lapack_src_info: NOT AVAILABLE /data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/system_info.py:1712: UserWarning: Lapack (http://www.netlib.org/lapack/) sources not found. Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [lapack_src]) or by setting the LAPACK_SRC environment variable. 
if getattr(self, '_calc_info_{}'.format(lapack))(): NOT AVAILABLE /data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/dist.py:274: UserWarning: Unknown distribution option: 'define_macros' warnings.warn(msg) running bdist_wheel running build running config_cc unifing config_cc, config, build_clib, build_ext, build commands --compiler options running config_fc unifing config_fc, config, build_clib, build_ext, build commands --fcompiler options running build_src build_src building py_modules sources creating build creating build/src.linux-aarch64-3.1 creating build/src.linux-aarch64-3.1/numpy creating build/src.linux-aarch64-3.1/numpy/distutils building library "npymath" sources get_default_fcompiler: matching types: '['gnu95', 'intel', 'lahey', 'pg', 'absoft', 'nag', 'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor']' customize Gnu95FCompiler Could not locate executable gfortran Could not locate executable f95 customize IntelFCompiler Could not locate executable ifort Could not locate executable ifc customize LaheyFCompiler Could not locate executable lf95 customize PGroupFCompiler Could not locate executable pgfortran customize AbsoftFCompiler Could not locate executable f90 Could not locate executable f77 customize NAGFCompiler customize VastFCompiler customize CompaqFCompiler Could not locate executable fort customize IntelItaniumFCompiler Could not locate executable efort Could not locate executable efc customize IntelEM64TFCompiler customize GnuFCompiler Could not locate executable g77 customize G95FCompiler Could not locate executable g95 customize PathScaleFCompiler Could not locate executable pathf95 customize NAGFORCompiler Could not locate executable nagfor don't know how to compile Fortran code on platform 'posix' C compiler: aarch64-linux-android-clang -Wno-unused-result -Wsign-compare -Wunreachable-code -DNDEBUG -g -fwrapv -O3 -Wall -fstack-protector-strong -O3 -fstack-protector-strong -O3 -fPIC compile options: '-Inumpy/core/src/common -Inumpy/core/src -Inumpy/core -Inumpy/core/src/npymath -Inumpy/core/src/multiarray -Inumpy/core/src/umath -Inumpy/core/src/npysort -I/data/data/com.learnprogramming.codecamp/files/usr/include/python3.10 -c' aarch64-linux-android-clang: _configtest.c failure. 
removing: _configtest.c _configtest.o Traceback (most recent call last): File "<string>", line 2, in <module> File "<pip-setuptools-caller>", line 34, in <module> File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/setup.py", line 443, in <module> setup_package() File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/setup.py", line 435, in setup_package setup(**metadata) File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/core.py", line 171, in setup return old_setup(**new_attr) File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/site-packages/setuptools/__init__.py", line 153, in setup return distutils.core.setup(**attrs) File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/core.py", line 148, in setup dist.run_commands() File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/dist.py", line 966, in run_commands self.run_command(cmd) File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/dist.py", line 985, in run_command cmd_obj.run() File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/site-packages/wheel/bdist_wheel.py", line 325, in run self.run_command("build") File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/cmd.py", line 313, in run_command self.distribution.run_command(command) File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/dist.py", line 985, in run_command cmd_obj.run() File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/command/build.py", line 47, in run old_build.run(self) File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/command/build.py", line 135, in run self.run_command(cmd_name) File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/cmd.py", line 313, in run_command self.distribution.run_command(command) File "/data/data/com.learnprogramming.codecamp/files/usr/lib/python3.10/distutils/dist.py", line 985, in run_command cmd_obj.run() File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/command/build_src.py", line 142, in run self.build_sources() File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/command/build_src.py", line 153, in build_sources self.build_library_sources(*libname_info) File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/command/build_src.py", line 286, in build_library_sources sources = self.generate_sources(sources, (lib_name, build_info)) File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-install-5maxp87n/numpy_a3b75f42d58644aa86df8f2e4a7c1672/numpy/distutils/command/build_src.py", line 369, in generate_sources source = func(extension, build_dir) File "numpy/core/setup.py", line 669, in get_mathlib_info raise RuntimeError("Broken toolchain: cannot link a simple C program") RuntimeError: Broken toolchain: cannot link a simple C program [end of output] note: This error originates from a subprocess, and is likely not a problem with pip. 
ERROR: Failed building wheel for numpy Running setup.py clean for numpy error: subprocess-exited-with-error × python setup.py clean did not run successfully. │ exit code: 1 ╰─> [10 lines of output] Running from numpy source directory. `setup.py clean` is not supported, use one of the following instead: - `git clean -xdf` (cleans all files) - `git clean -Xdf` (cleans all versioned files, doesn't touch files that aren't checked into the git repo) Add `--force` to your command to use it anyway if you must (unsupported). [end of output] note: This error originates from a subprocess, and is likely not a problem with pip. ERROR: Failed cleaning build dir for numpy Building wheel for cmake (pyproject.toml): started Building wheel for cmake (pyproject.toml): finished with status 'error' error: subprocess-exited-with-error × Building wheel for cmake (pyproject.toml) did not run successfully. │ exit code: 1 ╰─> [32 lines of output] Traceback (most recent call last): File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-build-env-7nym68sq/overlay/lib/python3.10/site-packages/skbuild/setuptools_wrap.py", line 612, in setup cmkr = cmaker.CMaker(cmake_executable) File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-build-env-7nym68sq/overlay/lib/python3.10/site-packages/skbuild/cmaker.py", line 148, in __init__ self.cmake_version = get_cmake_version(self.cmake_executable) File "/data/data/com.learnprogramming.codecamp/files/usr/tmp/pip-build-env-7nym68sq/overlay/lib/python3.10/site-packages/skbuild/cmaker.py", line 103, in get_cmake_version raise SKBuildError( =============================DEBUG ASSISTANCE============================= If you are seeing a compilation error please try the following steps to successfully install cmake: 1) Upgrade to the latest pip and try again. This will fix errors for most users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip 2) If on Linux, with glibc < 2.12, you can set PIP_ONLY_BINARY=cmake in order to retrieve the last manylinux1 compatible wheel. 3) If on Linux, with glibc < 2.12, you can cap "cmake<3.23" in your requirements in order to retrieve the last manylinux1 compatible wheel. 4) Open an issue with the debug information that follows at https://github.com/scikit-build/cmake-python-distributions/issues Python: 3.10.4 platform: Linux-4.19.127+-aarch64-with-libc machine: aarch64 bits: 64 pip: n/a setuptools: 65.6.3 scikit-build: 0.16.2 PEP517_BUILD_BACKEND=setuptools.build_meta =============================DEBUG ASSISTANCE============================= Problem with the CMake installation, aborting build. CMake executable is cmake [end of output] note: This error originates from a subprocess, and is likely not a problem with pip. ERROR: Failed building wheel for cmake Failed to build numpy cmake ERROR: Could not build wheels for cmake, which is required to install pyproject.toml-based projects [end of output] note: This error originates from a subprocess, and is likely not a problem with pip. error: subprocess-exited-with-error × pip subprocess to install build dependencies did not run successfully. │ exit code: 1 ╰─> See above for output. note: This error originates from a subprocess, and is likely not a problem with pip.` I tried upgrading pip, reinstalling it, installing a different version of opencv but nothing works.
[ "Try adding numpy to your requirements.txt, e.g. as follows:\nnumpy==1.23.4\n\nas this may be caused by a missing numpy installation, from which open-cv is depending. Important: therefore you have to add it before open-cv to the requirements!\nIf that does not work, maybe try the solution presented here: No BLAS/LAPACK libraries found when installing SciPy\n" ]
[ 0 ]
[]
[]
[ "opencv", "pip", "python" ]
stackoverflow_0074585536_opencv_pip_python.txt
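For illustration only, this is what the requirements ordering suggested in the answer could look like; the exact version pins are assumptions rather than values confirmed by the original post, and on platforms without prebuilt wheels a different numpy pin may be required.

# src/requirements.txt -- hypothetical ordering: numpy pinned before opencv-python
numpy==1.23.4
opencv-python==4.3.0.38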
Q: How can I skip the first line in CSV files imported into a pandas df but keep the header for one of the files?
I essentially want to preserve the header of one of the csv files to use as the column names, but for the rest of the files I want to skip the header. Is there an easier solution than the following: import with no headers, then change the column names after all csv files are imported and delete the duplicate rows from the df.
My current code is:

import glob
import pandas as pd
import os

path = r"C:\Users\..."
my_files = glob.glob(os.path.join(path, "filename*.xlsx"))
file_li = []

for filename in my_files:
    df = pd.read_excel(filename, index_col=None, header=None)
    file_li.append(df)

I am trying to append 365 files into one based on the condition that the file name meets the above criteria. The files look like this:

Colunn1 Colunn2 Colunn3 Colunn4 Colunn5 Colunn6 Colunn7 Colunn8 Colunn9 Colunn10 Colunn11
2 DATA DATA DATA DATA DATA DATA DATA DATA DATA DATA DATA
3
4
5
6
7

I want to keep the column names (column1, 2, ...) for the first file but then skip them for the rest so I don't have to reindex or change the df after. The reason for this is I don't want to have duplicate rows with column headers in the DF or have missing headers... is this complicating an easier solution?
A: Why are you putting them in a list?
Pandas concat lets you combine DF's while doing the column name management for you.
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html
How can I skip the first line in CSV files imported into a pandas df but keep the header for one of the files?
I essentially want to preserve the header of one of the csv files to use as the column names, but for the rest of the files I want to skip the header. Is there an easier solution than the following: import with no headers, then change the column names after all csv files are imported and delete the duplicate rows from the df.
My current code is:

import glob
import pandas as pd
import os

path = r"C:\Users\..."
my_files = glob.glob(os.path.join(path, "filename*.xlsx"))
file_li = []

for filename in my_files:
    df = pd.read_excel(filename, index_col=None, header=None)
    file_li.append(df)

I am trying to append 365 files into one based on the condition that the file name meets the above criteria. The files look like this:

Colunn1 Colunn2 Colunn3 Colunn4 Colunn5 Colunn6 Colunn7 Colunn8 Colunn9 Colunn10 Colunn11
2 DATA DATA DATA DATA DATA DATA DATA DATA DATA DATA DATA
3
4
5
6
7

I want to keep the column names (column1, 2, ...) for the first file but then skip them for the rest so I don't have to reindex or change the df after. The reason for this is I don't want to have duplicate rows with column headers in the DF or have missing headers... is this complicating an easier solution?
[ "Why are you putting them in a list?\nPandas concat lets you combine DF's while doing the column name management for you.\nhttps://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html\n" ]
[ 0 ]
[]
[]
[ "csv", "pandas", "python" ]
stackoverflow_0074601366_csv_pandas_python.txt
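A minimal sketch of the concat-based approach from the answer, assuming every file carries the same header row; header=0 makes pandas consume each file's first row as column names, so no duplicate header rows end up in the data.

import glob
import os
import pandas as pd

path = r"C:\Users\..."  # placeholder path from the question
my_files = glob.glob(os.path.join(path, "filename*.xlsx"))

# Read each file with its first row as the header, then let concat align the columns.
frames = [pd.read_excel(f, header=0) for f in my_files]
combined = pd.concat(frames, ignore_index=True)
print(combined.shape)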
Q: Append new values to JSON property in Python
I have a JSON string which I retrieve from my database (MySQL) and I need to add another value to the daysOff property in the JSON string. Once I've appended the new value to the JSON string, I need to update my table with the new value.
I'm new to Python, and I understand strings are immutable; the part I'm having trouble with is traversing through the array to add a new value to the daysOff property.
This is what I have so far:

import mysql.connector as mysqlConnector
import sys
import json
from datetime import date

query = "SELECT option_value FROM my_tbl WHERE option_name='my_holiday_settings'"
try:
    cur.execute(query)
    myresult = cur.fetchall()
    for x in myresult:
        dictionary = json.loads(*x)
        key = 'daysOff'
        checkKey(dictionary, key)
        print(dict)
        print(dictionary)
        print(type(dictionary))
except Exception as e:
    print('error:', e)
finally:
    cur.close()

def checkKey(dict, key):
    if key in dict.keys():
        test_dict = {"name":"test","startDate":"9999-01-01","endDate":"9999-09-09","repeat":"0"}
        dict[key]=test_dict
        print("value updated =", 600)
    else:
        print("Not Exist")

This is my JSON

{
    "notifications": {
        "whatsAppBusinessID": "None",
        "whatsAppLanguage": "Alien"
    },
    "daysOff": [
        { "name": "Xmas", "startDate": "2022-01-09", "endDate": "2022-01-09", "repeat": true },
        { "name": "Australia Day", "startDate": "2022-01-26", "endDate": "2022-01-26", "repeat": true },
        { "name": "Good Friday", "startDate": "2022-04-15", "endDate": "2022-04-15", "repeat": true },
        { "name": "Holy Saturday", "startDate": "2022-04-16", "endDate": "2022-04-16", "repeat": true }
    ]
}

A: cursor.fetchall()

returns a list of tuples, which means that x in your for loop is a tuple. You can convert tuples to dictionaries using this
tuple_as_dict = dict(tuple)

If you store your data as a json string you first need to unpack the tuple and then convert it into a string
dictionary = json.loads(*tuple)

json.loads(string)

also returns a dictionary but takes strings and not tuples as its argument
Append new values to JSON property in Python
I have a JSON string which I retrieve from my database (MySQL) and I need to add another value to the daysOff property in the JSON string. Once I've appended the new value to the JSON string, I need to update my table with the new value.
I'm new to Python, and I understand strings are immutable; the part I'm having trouble with is traversing through the array to add a new value to the daysOff property.
This is what I have so far:

import mysql.connector as mysqlConnector
import sys
import json
from datetime import date

query = "SELECT option_value FROM my_tbl WHERE option_name='my_holiday_settings'"
try:
    cur.execute(query)
    myresult = cur.fetchall()
    for x in myresult:
        dictionary = json.loads(*x)
        key = 'daysOff'
        checkKey(dictionary, key)
        print(dict)
        print(dictionary)
        print(type(dictionary))
except Exception as e:
    print('error:', e)
finally:
    cur.close()

def checkKey(dict, key):
    if key in dict.keys():
        test_dict = {"name":"test","startDate":"9999-01-01","endDate":"9999-09-09","repeat":"0"}
        dict[key]=test_dict
        print("value updated =", 600)
    else:
        print("Not Exist")

This is my JSON

{
    "notifications": {
        "whatsAppBusinessID": "None",
        "whatsAppLanguage": "Alien"
    },
    "daysOff": [
        { "name": "Xmas", "startDate": "2022-01-09", "endDate": "2022-01-09", "repeat": true },
        { "name": "Australia Day", "startDate": "2022-01-26", "endDate": "2022-01-26", "repeat": true },
        { "name": "Good Friday", "startDate": "2022-04-15", "endDate": "2022-04-15", "repeat": true },
        { "name": "Holy Saturday", "startDate": "2022-04-16", "endDate": "2022-04-16", "repeat": true }
    ]
}
[ "cursor.fetchall()\n\nreturns a list of tuples which means that x in your for loop is a tuple. You can convert tuples to dictionaries using this\ntuple_as_dict = dict(tuple)\n\n\nIf store your data as json string you first need to unpack the tuple and the convert it into a string\ndictionary = json.loads(*tuple)\n\n\njson.loads(string)\n\nalso returns a dictionary but takes strings and not tuples as argument\n" ]
[ 1 ]
[]
[]
[ "arrays", "json", "python", "string" ]
stackoverflow_0074601053_arrays_json_python_string.txt
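As an editorial sketch of what the asker was ultimately after (appending to daysOff rather than replacing it, then writing the row back), assuming the table and column names from the question and an already-open MySQL connection conn with cursor cur; the new holiday entry and the UPDATE statement are illustrative, not taken from the original thread.

import json

new_day_off = {"name": "test", "startDate": "9999-01-01", "endDate": "9999-09-09", "repeat": False}

cur.execute("SELECT option_value FROM my_tbl WHERE option_name='my_holiday_settings'")
row = cur.fetchone()              # one tuple like ('{"notifications": ...}',)
settings = json.loads(row[0])     # parse the JSON string into a dict

# Append instead of overwriting the whole daysOff list.
settings.setdefault("daysOff", []).append(new_day_off)

cur.execute(
    "UPDATE my_tbl SET option_value = %s WHERE option_name = 'my_holiday_settings'",
    (json.dumps(settings),),
)
conn.commit()  # assumes `conn` is the open MySQL connection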
Q: how to decrease the value of items in a list python
I wanted to find similar items in a list with slightly lower or higher values (0.01 or -0.01) but up to 0.1, example:

real_list = [1.94, 4.72, 8.99, 5.37, 1.33]
list_2 = [1.86, 4.78, 8.91, 5.41, 1.30]

you can see that the values of the two lists are similar, but they are not found in an if, example:

for i in list_2:
    if i in real_list:
        print("found")
    else:
        print("not found")

this code returns me this

not found
not found
not found
not found
not found

I tried to make some modifications but I wanted it to do some combinations decreasing and increasing values in different parts until it found the value in the other list, example:

list_2 = [1.86, 4.78, 8.91, 5.41, 1.30]
1.85 4.77 8.9 5.4 1.29
1.84 4.76

or increase the values until you find it in the list, remembering that it has to be close to 0.1 and nothing more, in short is to find similar values in a list. Can someone help me?
A: You need to be sure what you mean when two values are "similar". If I understand you correctly you set an arbitrary threshold of 0.1, for this the code could look something like this:

real_list = [1.94, 4.72, 8.99, 5.37, 1.33]
list_2 = [1.86, 4.78, 8.91, 5.41, 1.30]
threshold = 0.1
for i in list_2:
    found = False
    for j in real_list:
        if abs(i-j) <= threshold:
            print("found")
            found = True
            break
    if not found:
        print("not found")

A: with a bit of numpy this becomes a rather compact

import numpy as np
real_list = np.array([1.94, 4.72, 8.99, 5.37, 1.33])
list_2 = [1.86, 4.78, 8.91, 5.41, 1.30]
["found" if np.any(np.isclose(x,real_list,atol=0.04)) else "not found" for x in list_2]

np.isclose(x,real_list,atol=0.04) does the comparison of each value in list_2 against the whole vector with a given tolerance - can be atol = absolute tolerance and/or rtol: relative tolerance (https://numpy.org/doc/stable/reference/generated/numpy.isclose.html).
np.any then reduces the comparison of each value into a single boolean.
how to decrease the value of items in a list python
I wanted to find similar items in a list with slightly lower or higher values (0.01 or -0.01) but up to 0.1, example:

real_list = [1.94, 4.72, 8.99, 5.37, 1.33]
list_2 = [1.86, 4.78, 8.91, 5.41, 1.30]

you can see that the values of the two lists are similar, but they are not found in an if, example:

for i in list_2:
    if i in real_list:
        print("found")
    else:
        print("not found")

this code returns me this

not found
not found
not found
not found
not found

I tried to make some modifications but I wanted it to do some combinations decreasing and increasing values in different parts until it found the value in the other list, example:

list_2 = [1.86, 4.78, 8.91, 5.41, 1.30]
1.85 4.77 8.9 5.4 1.29
1.84 4.76

or increase the values until you find it in the list, remembering that it has to be close to 0.1 and nothing more, in short is to find similar values in a list. Can someone help me?
[ "You need to be sure what you mean when two values are \"similar\". If I understand you correctly you set an arbitrary threshold of 0.1, for this the code could look something like this:\nreal_list = [1.94, 4.72, 8.99, 5.37, 1.33]\nlist_2 = [1.86, 4.78, 8.91, 5.41, 1.30]\nthreshold = 0.1\nfor i in list_2:\n found = False\n for j in real_list:\n if abs(i-j) <= threshold:\n print(\"found\")\n found = True\n break\n if not found:\n print(\"not found\")\n\n", "with a bit of numpy this becomes a rather compact\nimport numpy as np\nreal_list = np.array([1.94, 4.72, 8.99, 5.37, 1.33])\nlist_2 = [1.86, 4.78, 8.91, 5.41, 1.30]\n[\"found\" if np.any(np.isclose(x,real_list,atol=0.04)) else \"not found\" for x in list_2]\n\nnp.isclose(x,real_list,atol=0.04) does the comparison of each value in list_2 against the whole vector with a given tolerance - can be atol = absolute tolerance and/or rtol: relative tolerance (https://numpy.org/doc/stable/reference/generated/numpy.isclose.html).\nnp.any then reduces the comparison of each value into a single boolean.\n" ]
[ 1, 1 ]
[ "You can do something like this:\nreal_list = [1.94, 4.72, 8.99, 5.37, 1.33]\nlist_2 = [1.86, 4.78, 8.91, 5.41, 1.30]\n\nfor x in real_list:\n for y in list_2:\n\n if abs(x-y)<0.1:\n print(\"found\",x, \"is close to\",y)\n else:\n print(\"not found\")\n\nIt will run all the itens in your first list, and all itens in your second list and compare if they are less than 0.1 away from eachother.\n" ]
[ -1 ]
[ "function", "list", "python" ]
stackoverflow_0074600832_function_list_python.txt
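A small usage sketch of the threshold idea from the answers above, offered as a sketch rather than a definitive solution; the 0.1 tolerance is the value the asker mentioned, and any() stops at the first close-enough match.

real_list = [1.94, 4.72, 8.99, 5.37, 1.33]
list_2 = [1.86, 4.78, 8.91, 5.41, 1.30]
threshold = 0.1

for value in list_2:
    # any() short-circuits as soon as one partner within the threshold is found.
    if any(abs(value - other) <= threshold for other in real_list):
        print(f"{value}: found")
    else:
        print(f"{value}: not found")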
Q: Centering matrix
I want to write a function for centering an input data matrix by multiplying it with the centering matrix. The function shall subtract the row-wise mean from the input.
My code:

import numpy as np

def centering(data):
    n = data.shape()[0]
    centeringMatrix = np.identity(n) - 1/n * (np.ones(n) @ np.ones(n).T)
    data = centeringMatrix @ data

data = np.array([[1,2,3], [3,4,5]])
center_with_matrix(data)

But I get a wrong result matrix, it is not centered. Thanks!
A: The centering matrix is

np.eye(n) - np.ones((n, n)) / n

Here is a list of issues in your original formulation:

np.ones(n).T is the same as np.ones(n). The transpose of a 1D array is a no-op in numpy. If you want to turn a row vector into a column vector, add the dimension explicitly:
np.ones((n, 1))
OR
np.ones(n)[:, None]

The normal definition is to subtract the column-wise mean, not the row-wise, so you will have to transpose and right-multiply the input to get row-wise operation:
n = data.shape()[1]
...
data = (centeringMatrix @ data.T).T

Your function creates a new array for the output but does not currently return anything. You can either return the result, or perform the assignment in-place:
return (centeringMatrix @ data.T).T
OR
data[:] = (centeringMatrix @ data.T).T
OR
np.matmul(centeringMatrix, data.T, out=data.T)
Centering matrix
I want to write a function for centering an input data matrix by multiplying it with the centering matrix. The function shall subtract the row-wise mean from the input.
My code:

import numpy as np

def centering(data):
    n = data.shape()[0]
    centeringMatrix = np.identity(n) - 1/n * (np.ones(n) @ np.ones(n).T)
    data = centeringMatrix @ data

data = np.array([[1,2,3], [3,4,5]])
center_with_matrix(data)

But I get a wrong result matrix, it is not centered. Thanks!
[ "The centering matrix is\nnp.eye(n) - np.ones((n, n)) / n\n\nHere is a list of issues in your original formulation:\n\nnp.ones(n).T is the same as np.ones(n). The transpose of a 1D array is a no-op in numpy. If you want to turn a row vector into a column vector, add the dimension explicitly:\nnp.ones((n, 1))\n\nOR\nnp.ones(n)[:, None]\n\n\nThe normal definition is to subtract the column-wise mean, not the row-wise, so you will have to transpose and right-multiply the input to get row-wise operation:\nn = data.shape()[1]\n...\ndata = (centeringMatrix @ data.T).T\n\n\nYour function creates a new array for the output but does not currently return anything. You can either return the result, or perform the assignment in-place:\nreturn (centeringMatrix @ data.T).T\n\nOR\ndata[:] = (centeringMatrix @ data.T).T\n\nOR\nnp.matmul(centeringMatrix, data.T, out=data.T)\n\n\n\n" ]
[ 2 ]
[]
[]
[ "matrix_multiplication", "numpy", "python" ]
stackoverflow_0074601602_matrix_multiplication_numpy_python.txt
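A small editorial sketch pulling the answer's points together; left-multiplying by the centering matrix removes column means, while right-multiplying gives the row-wise centering the question asked about. The helper name is made up for illustration.

import numpy as np

def centering_matrix(n):
    # J = I - (1/n) * ones(n, n)
    return np.eye(n) - np.ones((n, n)) / n

X = np.array([[1., 2., 3.],
              [3., 4., 5.]])

# Left-multiplying removes each column's mean ...
cols_centered = centering_matrix(X.shape[0]) @ X
assert np.allclose(cols_centered, X - X.mean(axis=0))

# ... while right-multiplying removes each row's mean.
rows_centered = X @ centering_matrix(X.shape[1])
assert np.allclose(rows_centered, X - X.mean(axis=1, keepdims=True))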
Q: Extracting .xlsx attachments from .msg files
I know that this has been asked here several times, and I have tried what has apparently worked for others...I have more than 1000 Outlook .msg files with .xlsx file attachments stored in folders on my desktop and I only need to extract the .xlsx files to combine into a single dataframe. I have tried the VBA macro, and Python [Win32] (Parsing outlook .msg files with python) and msg-extractor. The best I can do is to extract a single attachment from a single .msg file
Any advice is greatly appreciated. Thank you!

import argparse
import csv
import os as os
import pathlib
import sys
from datetime import date, datetime, timedelta, tzinfo
from enum import Enum, IntEnum
from tempfile import mkstemp

import dateutil.parser as duparser
from dateutil.rrule import rrulestr, rruleset
import pywintypes
import pytz
import win32com.client

path = r'C:\Users\Me\Desktop\MyFiles\feb_2018'
files = [f for f in os.listdir(path) if '.msg' in f]
print (files)
for file in files:
    outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
    msg = outlook.OpenSharedItem(os.path.join(path, file))
    att=msg.Attachments
    for i in att:
        i.SaveAsFile(os.path.join(path, i.FileName))

A: I have not tried saving the attachments using win32com, so I can't tell why only a single attachment from a single file is getting saved. But I was able to save multiple attachments using msg-extractor

import extract_msg

for file in files:
    msg = extract_msg.Message(file)
    msg_attachment = msg.attachments
    attach_path = "path where the files have to be saved."
    for attachment in msg_attachment:
        if not os.path.exists(attach_path):
            os.makedirs(attach_path)
        attachment.save(customPath=attach_path)

A: I figured out a solution to extract multiple files with Win32 by including a counter:

path = r'C:\Users\filepath' #change path to directory where your msg files are located
files = [f for f in os.listdir(path) if '.msg' in f]
print (files)
counter=0
for file in files:
    outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
    msg = outlook.OpenSharedItem(os.path.join(path, file))
    att=msg.Attachments
    for i in att:
        counter +=1
        i.SaveAsFile(os.path.join(path, str(counter)+i.FileName))

A: This topic is quite old now but no need to use the counter:

path = r'C:\Users\filepath' #change path to directory where your msg files are located
files = [f for f in os.listdir(path) if '.msg' in f]
print (files)
outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")

for file in files:
    msg = outlook.OpenSharedItem(os.path.join(path, file))
    for att in msg.Attachments:
        fullPath = os.path.join(path, att.FileName)
        if not os.path.isfile(fullPath):
            att.SaveAsFile(fullPath)
Extracting .xlsx attachments from .msg files
I know that this has been asked here several times, and I have tried what has apparently worked for others...I have more than 1000 Outlook .msg files with .xlsx file attachments stored in folders on my desktop and I only need to extract the .xlsx files to combine into a single dataframe. I have tried the VBA macro, and Python [Win32] (Parsing outlook .msg files with python) and msg-extractor. The best I can do is to extract a single attachment from a single .msg file
Any advice is greatly appreciated. Thank you!

import argparse
import csv
import os as os
import pathlib
import sys
from datetime import date, datetime, timedelta, tzinfo
from enum import Enum, IntEnum
from tempfile import mkstemp

import dateutil.parser as duparser
from dateutil.rrule import rrulestr, rruleset
import pywintypes
import pytz
import win32com.client

path = r'C:\Users\Me\Desktop\MyFiles\feb_2018'
files = [f for f in os.listdir(path) if '.msg' in f]
print (files)
for file in files:
    outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
    msg = outlook.OpenSharedItem(os.path.join(path, file))
    att=msg.Attachments
    for i in att:
        i.SaveAsFile(os.path.join(path, i.FileName))
[ "I have not tried saving the attachments using win32com, so I can't tell why only a single attachment from a single file is getting saved. But I was able to save multiple attachments using msg-extractor\nimport extract_msg\n\nfor file in files:\n msg = extract_msg.Message(file)\n msg_attachment = msg.attachments\n attach_path = \"path where the files have to be saved.\"\n for attachment in msg_attachment:\n if not os.path.exists(attach_path):\n os.makedirs(attach_path)\n attachment.save(customPath=attach_path)\n\n", "I figured out a solution to extract multiple files with Win32 by including a counter:\npath = r'C:\\Users\\filepath' #change path to directory where your msg files are located\nfiles = [f for f in os.listdir(path) if '.msg' in f]\nprint (files)\ncounter=0\nfor file in files:\n outlook = win32com.client.Dispatch(\"Outlook.Application\").GetNamespace(\"MAPI\")\n msg = outlook.OpenSharedItem(os.path.join(path, file))\n att=msg.Attachments\n for i in att:\n counter +=1\n i.SaveAsFile(os.path.join(path, str(counter)+i.FileName))\n\n", "This topic is quite old now but no need to use the counter:\npath = r'C:\\Users\\filepath' #change path to directory where your msg files are located\nfiles = [f for f in os.listdir(path) if '.msg' in f]\nprint (files)\noutlook = win32com.client.Dispatch(\"Outlook.Application\").GetNamespace(\"MAPI\")\n\nfor file in files:\n msg = outlook.OpenSharedItem(os.path.join(path, file))\n for att in msg.Attachments:\n fullPath = os.path.join(path, att.FileName)\n if not os.path.isfile(fullPath):\n att.SaveAsFile(fullPath)\n\n" ]
[ 2, 0, 0 ]
[]
[]
[ "attachment", "data_extraction", "python", "vba" ]
stackoverflow_0058525541_attachment_data_extraction_python_vba.txt
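To close the loop on the asker's stated end goal (combining the extracted .xlsx attachments into a single DataFrame), here is a hedged sketch layered on top of the last answer; the assumption that each attachment has a header row and compatible columns is not confirmed by the original thread.

import os
import glob
import pandas as pd

path = r'C:\Users\filepath'  # folder where the .xlsx attachments were saved

# Read every extracted workbook and stack them into one DataFrame.
xlsx_files = glob.glob(os.path.join(path, '*.xlsx'))
frames = [pd.read_excel(f) for f in xlsx_files]  # assumes each file has a header row
combined = pd.concat(frames, ignore_index=True)
print(combined.shape)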
Q: Get queryset with only selecting one object in related to foreign key
I have a model named Answer

class Answer(models.Model):
    survey = models.ForeignKey(Survey)

I want to return a queryset of Answer according to the Survey foreign key. Meaning, if there are 3 objects,

answers = [
    {"survey": 1},
    {"survey": 2},
    {"survey": 1},
]

then the queryset should return

[
    {"survey": 2},
    {"survey": 1},
]

Meaning, once an Answer with a given foreign key has been selected, it should not select other Answers with the same foreign key. So how to do that?
I'm trying this way but this doesn't have any effect:

def get_queryset(self, request):
    qs = super().get_queryset(request)
    idx = list(qs.values_list("survey_id", flat=True).distinct())
    return qs.filter(survey_id__in=idx).distinct()

Edit: I'm now able to do it this way, but I'm not sure if that is the optimal solution. Your suggestions are welcome to improve it.

def get_queryset(self, request):
    qs = super().get_queryset(request)
    idx = list(qs.values_list("survey_id", flat=True).distinct())
    data = []
    for i in qs:
        if i.survey_id in idx:
            data.append(i)
            idx.remove(i.survey_id)
    # modify the data to queryset
    return qs.filter(pk__in=[i.pk for i in data])

A: you could do it like this, with as few for loops as possible (always rely on the database, not on for loops):

for id in idx:
    new_value = qs.filter(survey_id=id).first()
    data.append(new_value)
Get queryset with only selecting one object in related to foreign key
I have a model named Answer

class Answer(models.Model):
    survey = models.ForeignKey(Survey)

I want to return a queryset of Answer according to the Survey foreign key. Meaning, if there are 3 objects,

answers = [
    {"survey": 1},
    {"survey": 2},
    {"survey": 1},
]

then the queryset should return

[
    {"survey": 2},
    {"survey": 1},
]

Meaning, once an Answer with a given foreign key has been selected, it should not select other Answers with the same foreign key. So how to do that?
I'm trying this way but this doesn't have any effect:

def get_queryset(self, request):
    qs = super().get_queryset(request)
    idx = list(qs.values_list("survey_id", flat=True).distinct())
    return qs.filter(survey_id__in=idx).distinct()

Edit: I'm now able to do it this way, but I'm not sure if that is the optimal solution. Your suggestions are welcome to improve it.

def get_queryset(self, request):
    qs = super().get_queryset(request)
    idx = list(qs.values_list("survey_id", flat=True).distinct())
    data = []
    for i in qs:
        if i.survey_id in idx:
            data.append(i)
            idx.remove(i.survey_id)
    # modify the data to queryset
    return qs.filter(pk__in=[i.pk for i in data])
[ "you could do it like this with less for loops as possible (always rely on database not in the for loops):\nfor id in idx:\n new_value = qs.filter(survey_id=id).first()\n data.append(new_value)\n\n" ]
[ 1 ]
[]
[]
[ "django", "python" ]
stackoverflow_0074600863_django_python.txt
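One possible alternative worth noting as a sketch (not from the original thread): push the "one Answer per survey" selection entirely into the database with an aggregate. Keeping the lowest pk per survey is an assumption about which row should win.

from django.db.models import Min

def get_queryset(self, request):
    qs = super().get_queryset(request)
    # One pk per survey_id, chosen as the smallest pk in each group.
    first_pks = (
        qs.values("survey_id")
          .annotate(first_pk=Min("pk"))
          .values_list("first_pk", flat=True)
    )
    return qs.filter(pk__in=list(first_pks))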
Q: Convert a text file into a large dictionary Python
I have a text file that looks like this:

subjects ENGLISH, MATHS, SCIENCE
Joe, A, A, B
Dave, A, B, C
Will, D, D, E

And I want to put it into a dictionary using Python

{’Joe’:{’ENGLISH’:A,’MATHS’:A,’SCIENCE’:B},
’Dave’:{’ENGLISH’:A,’MATHS’:B,’SCIENCE’:C},
’Will’:{’ENGLISH’:D,’MATHS’:D,’SCIENCE’:E}}

How would I go about doing this in one dictionary?
A: Assuming you have a file called file.txt with the following contents:

subjects ENGLISH, MATHS, SCIENCE

Joe, A, A, B

Dave, A, B, C

Will, D, D, E

Try using * unpacking:

results = {}
with open('file.txt') as file:
    _, *subjects = next(file).split(' ') # Read header row
    subjects = [s[:-1] for s in subjects] # Remove trailing comma/newline from subjects
    for line in file:
        if line != '\n': # Skip empty lines
            name, *grades = line.strip().split(', ')
            results[name] = dict(zip(subjects, grades))
print(results)

You can also define the subjects in code and skip the header row completely:

subjects = ['ENGLISH', 'MATHS', 'SCIENCE']
results = {}
with open('file.txt') as file:
    next(file) # Skip header row since we have defined subjects in code...
    for line in file:
        if line != '\n': # Skip empty lines
            name, *grades = line.strip().split(', ')
            results[name] = dict(zip(subjects, grades))
print(results)

Output:

{'Joe': {'ENGLISH': 'A', 'MATHS': 'A', 'SCIENCE': 'B'}, 'Dave': {'ENGLISH': 'A', 'MATHS': 'B', 'SCIENCE': 'C'}, 'Will': {'ENGLISH': 'D', 'MATHS': 'D', 'SCIENCE': 'E'}}

A: We can read the file by using pd.read_csv() and convert the pd to a dictionary by using: df.to_dict('index')
Convert a text file into a large dictionary Python
I have a text file that looks like this: subjects ENGLISH, MATHS, SCIENCE Joe, A, A, B Dave, A, B, C Will, D, D, E And I want to put it into a dictionary using Python {’Joe’:{’ENGLISH’:A,’MATHS’:A,’SCIENCE’:B}, ’Dave’:{’ENGLISH’:A,’MATHS’:B,’SCIENCE’:C}, ’Will’:{’ENGLISH’:D,’MATHS’:D,’SCIENCE’:E}} How would I go about doing this in one dictionary?
[ "Assuming you have a file called file.txt with the following contents:\nsubjects ENGLISH, MATHS, SCIENCE\n\nJoe, A, A, B\n\nDave, A, B, C\n\nWill, D, D, E\n\nTry using * unpacking:\nresults = {}\nwith open('file.txt') as file:\n _, *subjects = next(file).split(' ') # Read header row\n subjects = [s[:-1] for s in subjects] # Remove trailing comma/newline from subjects\n for line in file:\n if line != '\\n': # Skip empty lines\n name, *grades = line.strip().split(', ')\n results[name] = dict(zip(subjects, grades))\nprint(results)\n\nYou can also define the subjects in code and skip the header row completely:\nsubjects = ['ENGLISH', 'MATHS', 'SCIENCE']\nresults = {}\nwith open('file.txt') as file:\n next(file) # Skip header row since we have defined subjects in code...\n for line in file:\n if line != '\\n': # Skip empty lines\n name, *grades = line.strip().split(', ')\n results[name] = dict(zip(subjects, grades))\nprint(results)\n\nOutput:\n{'Joe': {'ENGLISH': 'A', 'MATHS': 'A', 'SCIENCE': 'B'}, 'Dave': {'ENGLISH': 'A', 'MATHS': 'B', 'SCIENCE': 'C'}, 'Will': {'ENGLISH': 'D', 'MATHS': 'D', 'SCIENCE': 'E'}}\n\n", "We can read the file by using pd.read_csv() and convert the pd to a dictionary by using: df.to_dict('index')\n" ]
[ 1, 0 ]
[ "You could convert your text file to CSV\nName, ENGLISH, MATHS, SCIENCE\n\nJoe, A, A, B\n\nDave, A, B, C\n\nWill, D, D, E\n\nThen use the pandas' library to read the CSV file and convert it into the dictionary.\n>>> import pandas as pd\n>>> pd.read_csv('file_path.csv',index_col='Name').transpose().to_dict()\n\n{'Joe': {'ENGLISH': ' A', 'MATHS': ' A', 'SCIENCE': ' B'}, 'Dave': {'ENGLISH': ' A', 'MATHS': ' B', 'SCIENCE': ' C'}, 'Will': {'ENGLISH': ' D', 'MATHS': ' D', 'SCIENCE': ' E'}}\n" ]
[ -1 ]
[ "dictionary", "python", "python_3.x", "text" ]
stackoverflow_0074601515_dictionary_python_python_3.x_text.txt
Q: How to print RGB colour to the terminal Can ANSI escape code SGR 38 - Set foreground color with argument 2;r;g;b be used with print function? Example of use with code 33 is of course OKBLUE = '\033[94m' I would like to use 038 instead to be able to use any RGB color. Is that posible? I tried GREEN = '\038[2;0;153;0m' ENDC = '\033[0m' print(f"{GREEN} some text {ENDC}") Expected to change the color of "some text" in green A: To use an RGB color space within the terminal* the following escape sequence can be used: # Print Hello! in lime green text. print('\033[38;2;146;255;12mHello!\033[0m') # ^ # | # \ The 38 goes here, to indicate a foreground colour. # Print Hello! in white text on a fuschia background. print('\033[48;2;246;45;112mHello!\033[0m') Explanation: \033[38;2;146;255;12mHello!\033[0m ^ ^ ^ ^ ^ ^ ^ ^ ^ | | | R G B | | | | | | | | | \ Reset the colour to default | | | | | | | | | | | \ Escape character | | | | | | | | \ R;G;B \ Text to print | | | | | \ Indicate the following sequence is RGB | | | \ Code to instruct the setting of an 8 or 24-bit foreground (text) colour | \ Escape character The use of 38;2 indicates an RGB (foreground) sequence is to follow. However, the use of 38;5 indicates the following (foreground) value comes from the 256-colour table. To clarify what appears to be a misconception, \033 (octal) or \x1b (hexidecimal) corresponds to the ASCII table's ESC character, which is used here to introduce an escape sequence of terminal text colouring. Whereas the 38 is used to instruct the following 8 or 24-bit colour to be set as foreground, (after the escape sequence has been introduced). Additionally, 48 can be used to set the background colour, as demonstrated in the code example above. *Providing the terminal emulator supports 24-bit colour sequences. (e.g. Xterm, GNOME terminal, etc.) Link to the Wikipedia article which explains this topic of 24-colour (RGB) in greater depth.
How to print RGB colour to the terminal
Can ANSI escape code SGR 38 - Set foreground color with argument 2;r;g;b be used with the print function? Example of use with code 33 is of course OKBLUE = '\033[94m' I would like to use 038 instead to be able to use any RGB color. Is that possible? I tried GREEN = '\038[2;0;153;0m' ENDC = '\033[0m' print(f"{GREEN} some text {ENDC}") Expected to change the color of "some text" to green
[ "To use an RGB color space within the terminal* the following escape sequence can be used:\n# Print Hello! in lime green text.\nprint('\\033[38;2;146;255;12mHello!\\033[0m')\n# ^\n# |\n# \\ The 38 goes here, to indicate a foreground colour.\n\n# Print Hello! in white text on a fuschia background.\nprint('\\033[48;2;246;45;112mHello!\\033[0m') \n\nExplanation:\n\\033[38;2;146;255;12mHello!\\033[0m\n^ ^ ^ ^ ^ ^ ^ ^ ^ \n| | | R G B | | |\n| | | | | | \\ Reset the colour to default\n| | | | | | \n| | | | | \\ Escape character\n| | | | |\n| | | \\ R;G;B \\ Text to print\n| | |\n| | \\ Indicate the following sequence is RGB\n| |\n| \\ Code to instruct the setting of an 8 or 24-bit foreground (text) colour\n|\n\\ Escape character\n\nThe use of 38;2 indicates an RGB (foreground) sequence is to follow. However, the use of 38;5 indicates the following (foreground) value comes from the 256-colour table.\nTo clarify what appears to be a misconception, \\033 (octal) or \\x1b (hexidecimal) corresponds to the ASCII table's ESC character, which is used here to introduce an escape sequence of terminal text colouring. Whereas the 38 is used to instruct the following 8 or 24-bit colour to be set as foreground, (after the escape sequence has been introduced). Additionally, 48 can be used to set the background colour, as demonstrated in the code example above.\n*Providing the terminal emulator supports 24-bit colour sequences. (e.g. Xterm, GNOME terminal, etc.)\nLink to the Wikipedia article which explains this topic of 24-colour (RGB) in greater depth.\n" ]
[ 1 ]
[ "Below code will give you an idea.\nprint('\\033[90m' + 'hello' + '\\033[96m' + ' there?' )\n\n" ]
[ -1 ]
[ "ansi", "python", "terminal" ]
stackoverflow_0074589665_ansi_python_terminal.txt
Q: Merge two dictionaries in python I'm trying to merge two dictionaries based on key value. However, I'm not able to achieve it. Below is the way I tried solving. dict1 = {4: [741, 114, 306, 70], 2: [77, 325, 505, 144], 3: [937, 339, 612, 100], 1: [52, 811, 1593, 350]} dict2 = {1: 'A', 2: 'B', 3: 'C', 4: 'D'} #My resultant dictionary should be output = {'D': [741, 114, 306, 70], 'B': [77, 325, 505, 144], 'C': [937, 339, 612, 100], 'A': [52, 811, 1593, 350]} #My code def mergeDictionary(dict_obj1, dict_obj2): dict_obj3 = {**dict_obj1, **dict_obj2} for key, value in dict_obj3.items(): if key in dict_obj1 and key in dict_obj2: dict_obj3[key] = [value , dict_obj1[key]] return dict_obj3 dict_3 = mergeDictionary(dict1, dict2) #But I'm getting this as output dict_3={4: ['D', [741, 114, 306, 70]], 2: ['B', [77, 325, 505, 144]], 3: ['C', [937, 339, 612, 100]], 1: ['A', [52, 811, 1593, 350]]} Thanks for your help in advance A: Use a simple dictionary comprehension: output = {dict2[k]: v for k,v in dict1.items()} Output: {'D': [741, 114, 306, 70], 'B': [77, 325, 505, 144], 'C': [937, 339, 612, 100], 'A': [52, 811, 1593, 350]} A: While the simple dictionary comprehension by @mozway is certainly the most straightforward and elegant solution, it rests on the assumption that the keys of dict1 are a subset of those of dict2. If not, you'll get a KeyError. If that assumption does not hold, you'll need to decide for yourself, how you want to deal with the case when a key in dict1 is not present in dict2. One option is to simply discard that key-value-pair and not include it in the output dictionary. from collections.abc import Mapping KT = str VT = list[int] def merge( keys_map: Mapping[int, KT], values_map: Mapping[int, VT], ) -> dict[KT, VT]: output = {} for key, value in values_map.items(): try: output[keys_map[key]] = value except KeyError: pass return output Test: if __name__ == "__main__": dict1 = { 5: [1, 2, 3], 4: [741, 114], 2: [77, 325], 3: [937, 339], 1: [52, 811], } dict2 = {1: 'A', 2: 'B', 3: 'C', 4: 'D'} print(merge(dict2, dict1)) Output: {'D': [741, 114], 'B': [77, 325], 'C': [937, 339], 'A': [52, 811]} A: You can use dict.update(): def mergeDictionary(dict_obj1, dict_obj2): dict_obj3 = dict() for key, val in dict_obj1.items(): dict_obj3.update({dict_obj2[key]: val}) return dict_obj3 or: def mergeDictionary(dict_obj1, dict_obj2): dict_obj3 = dict() for key, val in dict_obj1.items(): dict_obj3[dict_obj2[key]] = val return dict_obj3 A: The error seems to be in this line: dict_obj3[key] = [value , dict_obj1[key]] You want to use the value as criteria to assign an element to your dictionary, as such: dict_obj3[value] = dict_obj1[key] This code should do the trick: dict1={4: [741, 114, 306, 70], 2: [77, 325, 505, 144], 3: [937, 339, 612, 100], 1: [52, 811, 1593, 350]} dict2={1: 'A', 2: 'B', 3: 'C', 4: 'D'} # My resultant dictionary should be # output={D: [741, 114, 306, 70], B: [77, 325, 505, 144], C: [937, 339, 612, 100], A: [52, 811, 1593, 350]} # My code def mergeDictionary(dict_obj1, dict_obj2): dict_obj3 = {} # {**dict_obj1, **dict_obj2} for key, value in dict_obj2.items(): dict_obj3[value] = dict_obj1[key] return dict_obj3 dict_3 = mergeDictionary(dict1, dict2) print(dict_3) A: If you're using pandas in your project already, it has a method that does precisely what you're asking for: import pandas as pd df1 = pd.DataFrame(dict1) df1.rename(columns=dict2) df1 Otherwise it might not be worth the while.
Merge two dictionaries in python
I'm trying to merge two dictionaries based on key value. However, I'm not able to achieve it. Below is the way I tried solving. dict1 = {4: [741, 114, 306, 70], 2: [77, 325, 505, 144], 3: [937, 339, 612, 100], 1: [52, 811, 1593, 350]} dict2 = {1: 'A', 2: 'B', 3: 'C', 4: 'D'} #My resultant dictionary should be output = {'D': [741, 114, 306, 70], 'B': [77, 325, 505, 144], 'C': [937, 339, 612, 100], 'A': [52, 811, 1593, 350]} #My code def mergeDictionary(dict_obj1, dict_obj2): dict_obj3 = {**dict_obj1, **dict_obj2} for key, value in dict_obj3.items(): if key in dict_obj1 and key in dict_obj2: dict_obj3[key] = [value , dict_obj1[key]] return dict_obj3 dict_3 = mergeDictionary(dict1, dict2) #But I'm getting this as output dict_3={4: ['D', [741, 114, 306, 70]], 2: ['B', [77, 325, 505, 144]], 3: ['C', [937, 339, 612, 100]], 1: ['A', [52, 811, 1593, 350]]} Thanks for your help in advance
[ "Use a simple dictionary comprehension:\noutput = {dict2[k]: v for k,v in dict1.items()}\n\nOutput:\n{'D': [741, 114, 306, 70],\n 'B': [77, 325, 505, 144],\n 'C': [937, 339, 612, 100],\n 'A': [52, 811, 1593, 350]}\n\n", "While the simple dictionary comprehension by @mozway is certainly the most straightforward and elegant solution, it rests on the assumption that the keys of dict1 are a subset of those of dict2. If not, you'll get a KeyError.\nIf that assumption does not hold, you'll need to decide for yourself, how you want to deal with the case when a key in dict1 is not present in dict2. One option is to simply discard that key-value-pair and not include it in the output dictionary.\nfrom collections.abc import Mapping\n\nKT = str\nVT = list[int]\n\ndef merge(\n keys_map: Mapping[int, KT],\n values_map: Mapping[int, VT],\n) -> dict[KT, VT]:\n output = {}\n for key, value in values_map.items():\n try:\n output[keys_map[key]] = value\n except KeyError:\n pass\n return output\n\nTest:\nif __name__ == \"__main__\":\n dict1 = {\n 5: [1, 2, 3],\n 4: [741, 114],\n 2: [77, 325],\n 3: [937, 339],\n 1: [52, 811],\n }\n dict2 = {1: 'A', 2: 'B', 3: 'C', 4: 'D'}\n print(merge(dict2, dict1))\n\nOutput:\n\n{'D': [741, 114], 'B': [77, 325], 'C': [937, 339], 'A': [52, 811]}\n\n", "You can use dict.update():\ndef mergeDictionary(dict_obj1, dict_obj2):\n dict_obj3 = dict()\n for key, val in dict_obj1.items():\n dict_obj3.update({dict_obj2[key]: val})\n return dict_obj3\n\nor:\ndef mergeDictionary(dict_obj1, dict_obj2):\n dict_obj3 = dict()\n for key, val in dict_obj1.items():\n dict_obj3[dict_obj2[key]] = val\n return dict_obj3\n\n", "The error seems to be in this line:\ndict_obj3[key] = [value , dict_obj1[key]]\n\nYou want to use the value as criteria to assign an element to your dictionary, as such:\ndict_obj3[value] = dict_obj1[key]\n\nThis code should do the trick:\ndict1={4: [741, 114, 306, 70], 2: [77, 325, 505, 144], 3: [937, 339, 612, 100], 1: [52, 811, 1593, 350]}\ndict2={1: 'A', 2: 'B', 3: 'C', 4: 'D'}\n\n# My resultant dictionary should be \n# output={D: [741, 114, 306, 70], B: [77, 325, 505, 144], C: [937, 339, 612, 100], A: [52, 811, 1593, 350]}\n\n\n# My code\n\ndef mergeDictionary(dict_obj1, dict_obj2):\n dict_obj3 = {} # {**dict_obj1, **dict_obj2}\n for key, value in dict_obj2.items():\n dict_obj3[value] = dict_obj1[key]\n return dict_obj3\n\ndict_3 = mergeDictionary(dict1, dict2)\nprint(dict_3)\n\n", "If you're using pandas in your project already, it has a method that does precisely what you're asking for:\nimport pandas as pd\n\ndf1 = pd.DataFrame(dict1)\ndf1.rename(columns=dict2)\ndf1\n\n\nOtherwise it might not be worth the while.\n" ]
[ 7, 2, 2, 1, 0 ]
[ "A dictionary comprehension takes the form {key: value for (key, value) in iterable}\n# Python code to demonstrate dictionary\n# comprehension\n\n# Lists to represent keys and values\nkeys = ['a','b','c','d','e']\nvalues = [1,2,3,4,5]\n\n# but this line shows dict comprehension here\nmyDict = { k:v for (k,v) in zip(keys, values)}\n\n# We can use below too\n# myDict = dict(zip(keys, values))\n\nprint (myDict)\n\nOutput :{'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}\n" ]
[ -2 ]
[ "dictionary", "key_value", "merge", "python" ]
stackoverflow_0074599713_dictionary_key_value_merge_python.txt
Q: How to analyze and get the main context from email subject in python I have started learning Ai . I want to solve a problem but don,t know which topics or content should i read to solve this problem The problem is I want to get the main context from email subject Examples of subjects lines are My password is incorrect please solve my problem please issue my funds please issue my salary slip please add this stock into inventory Error is show in main page application . . . What i want to get in results Example from given subject lines a have software which have finance department , inventory management department , technical team to resolve bugs . I want to analyze the subject lines and then i want to analyze from which department i should send this email. Can you show me a roadmap to solve this problem. A: Your main goal is to classify text (i.e., email subjects) into one or more predefined class, depending on your design (you could always choose only one department to forward the email to, or multiple if the issue is inter-disciplinary). I would suggest to first go through some tutorials on supervised learning and multi-class classification. In my experience, they usually start with images, but the underlying logic is still the same. Once you understand the main concepts, you can look into natural language processing and text classification. Also, this sounds like a relatively general problem, so you should also be able to find some pretrained models or even full solutions online.
How to analyze and get the main context from email subject in python
I have started learning AI. I want to solve a problem but don't know which topics or content I should read to solve it. The problem is that I want to get the main context from an email subject. Examples of subject lines are: "My password is incorrect please solve my problem", "please issue my funds", "please issue my salary slip", "please add this stock into inventory", "Error is shown in main page application", and so on. What I want to get as a result: for example, I have software with a finance department, an inventory management department, and a technical team to resolve bugs. I want to analyze the subject lines and then decide to which department I should send each email. Can you show me a roadmap to solve this problem?
[ "Your main goal is to classify text (i.e., email subjects) into one or more predefined class, depending on your design (you could always choose only one department to forward the email to, or multiple if the issue is inter-disciplinary). I would suggest to first go through some tutorials on supervised learning and multi-class classification. In my experience, they usually start with images, but the underlying logic is still the same. Once you understand the main concepts, you can look into natural language processing and text classification. Also, this sounds like a relatively general problem, so you should also be able to find some pretrained models or even full solutions online.\n" ]
[ 0 ]
[]
[]
[ "artificial_intelligence", "nlp", "python" ]
stackoverflow_0074601553_artificial_intelligence_nlp_python.txt
Q: Issue importing scikit-learn: module 'scipy' has no attribute '_lib' I'm new to Python and am using Anaconda on Windows 10 to learn how to implement machine learning. Running this code on Spyder: import sklearn as skl Originally got me this: Traceback (most recent call last): File "<ipython-input-1-7135d3f24347>", line 1, in <module> runfile('C:/Users/julia/.spyder-py3/temp.py', wdir='C:/Users/julia/.spyder-py3') File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile execfile(filename, namespace) File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile exec(compile(f.read(), filename, 'exec'), namespace) File "C:/Users/julia/.spyder-py3/temp.py", line 3, in <module> from sklearn.family import Model File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\__init__.py", line 76, in <module> from .base import clone File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\base.py", line 16, in <module> from .utils import _IS_32BIT File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\utils\__init__.py", line 20, in <module> from .validation import (as_float_array, File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\utils\validation.py", line 21, in <module> from .fixes import _object_dtype_isnan File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\utils\fixes.py", line 289, in <module> from scipy.sparse.linalg import lsqr as sparse_lsqr File "C:\ProgramData\Anaconda3\lib\site-packages\scipy\sparse\linalg\__init__.py", line 114, in <module> from .isolve import * File "C:\ProgramData\Anaconda3\lib\site-packages\scipy\sparse\linalg\isolve\__init__.py", line 6, in <module> from .iterative import * File "C:\ProgramData\Anaconda3\lib\site-packages\scipy\sparse\linalg\isolve\iterative.py", line 10, in <module> from . import _iterative ImportError: DLL load failed: The specified module could not be found. I then went to the command line and did pip uninstall scipy pip install scipy pip uninstall scikit-learn pip install scikit-learn and got no errors when doing so, with scipy 1.3.1 (along with numpy 1.17.0) and scikit-learn 0.21.3 being installed according to the command line. 
However, now when I try to import sklearn I get a different error: File "<ipython-input-2-7135d3f24347>", line 1, in <module> runfile('C:/Users/julia/.spyder-py3/temp.py', wdir='C:/Users/julia/.spyder-py3') File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile execfile(filename, namespace) File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile exec(compile(f.read(), filename, 'exec'), namespace) File "C:/Users/julia/.spyder-py3/temp.py", line 3, in <module> from sklearn.family import Model File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\__init__.py", line 76, in <module> from .base import clone File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\base.py", line 16, in <module> from .utils import _IS_32BIT File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\utils\__init__.py", line 20, in <module> from .validation import (as_float_array, File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\utils\validation.py", line 21, in <module> from .fixes import _object_dtype_isnan File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\utils\fixes.py", line 289, in <module> from scipy.sparse.linalg import lsqr as sparse_lsqr File "C:\ProgramData\Anaconda3\lib\site-packages\scipy\sparse\linalg\__init__.py", line 113, in <module> from .isolve import * File "C:\ProgramData\Anaconda3\lib\site-packages\scipy\sparse\linalg\isolve\__init__.py", line 6, in <module> from .iterative import * File "C:\ProgramData\Anaconda3\lib\site-packages\scipy\sparse\linalg\isolve\iterative.py", line 136, in <module> def bicg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None): File "C:\ProgramData\Anaconda3\lib\site-packages\scipy\_lib\_threadsafety.py", line 59, in decorator return lock.decorate(func) File "C:\ProgramData\Anaconda3\lib\site-packages\scipy\_lib\_threadsafety.py", line 47, in decorate return scipy._lib.decorator.decorate(func, caller) AttributeError: module 'scipy' has no attribute '_lib' Any suggestions? I've uninstalled and reinstalled Anaconda and I'm still getting the same issue. 
EDIT: When I do conda list --show-channel-urls I get # packages in environment at C:\ProgramData\Anaconda3: # # Name Version Build Channel _ipyw_jlab_nb_ext_conf 0.1.0 py37_0 defaults alabaster 0.7.12 py37_0 defaults anaconda-client 1.7.2 py37_0 defaults anaconda-navigator 1.9.7 py37_0 defaults asn1crypto 0.24.0 py37_0 defaults astroid 2.2.5 py37_0 defaults attrs 19.1.0 py37_1 defaults babel 2.7.0 py_0 defaults backcall 0.1.0 py37_0 defaults backports 1.0 py_2 defaults backports.functools_lru_cache 1.5 py_2 defaults backports.tempfile 1.0 py_1 defaults backports.weakref 1.0.post1 py_1 defaults beautifulsoup4 4.7.1 py37_1 defaults blas 1.0 mkl defaults bleach 3.1.0 py37_0 defaults bzip2 1.0.8 he774522_0 defaults ca-certificates 2019.5.15 1 defaults certifi 2019.6.16 py37_1 defaults cffi 1.12.3 py37h7a1dbc1_0 defaults chardet 3.0.4 py37_1003 defaults click 7.0 py37_0 defaults cloudpickle 1.2.1 py_0 defaults clyent 1.2.2 py37_1 defaults colorama 0.4.1 py37_0 defaults conda 4.7.11 py37_0 defaults conda-build 3.18.8 py37_0 defaults conda-env 2.6.0 1 defaults conda-package-handling 1.3.11 py37_0 defaults conda-verify 3.4.2 py_1 defaults console_shortcut 0.1.1 3 defaults cryptography 2.7 py37h7a1dbc1_0 defaults decorator 4.4.0 py37_1 defaults defusedxml 0.6.0 py_0 defaults docutils 0.15.1 py37_0 defaults entrypoints 0.3 py37_0 defaults filelock 3.0.12 py_0 defaults freetype 2.9.1 ha9979f8_1 defaults future 0.17.1 py37_0 defaults glob2 0.7 py_0 defaults icc_rt 2019.0.0 h0cc432a_1 defaults icu 58.2 ha66f8fd_1 defaults idna 2.8 py37_0 defaults imagesize 1.1.0 py37_0 defaults intel-openmp 2019.4 245 defaults ipykernel 5.1.1 py37h39e3cac_0 defaults ipython 7.7.0 py37h39e3cac_0 defaults ipython_genutils 0.2.0 py37_0 defaults ipywidgets 7.5.1 py_0 defaults isort 4.3.21 py37_0 defaults jedi 0.13.3 py37_0 defaults jinja2 2.10.1 py37_0 defaults joblib 0.13.2 py37_0 defaults jpeg 9b hb83a4c4_2 defaults json5 0.8.5 py_0 defaults jsonschema 3.0.1 py37_0 defaults jupyter_client 5.3.1 py_0 defaults jupyter_core 4.5.0 py_0 defaults jupyterlab 1.0.2 py37hf63ae98_0 defaults jupyterlab_server 1.0.0 py_1 defaults keyring 18.0.0 py37_0 defaults lazy-object-proxy 1.4.1 py37he774522_0 defaults libarchive 3.3.3 h0643e63_5 defaults libiconv 1.15 h1df5818_7 defaults liblief 0.9.0 ha925a31_2 defaults libpng 1.6.37 h2a8f88b_0 defaults libsodium 1.0.16 h9d3ae62_0 defaults libtiff 4.0.10 hb898794_2 defaults libxml2 2.9.9 h464c3ec_0 defaults lz4-c 1.8.1.2 h2fa13f4_0 defaults lzo 2.10 h6df0209_2 defaults m2w64-gcc-libgfortran 5.3.0 6 defaults m2w64-gcc-libs 5.3.0 7 defaults m2w64-gcc-libs-core 5.3.0 7 defaults m2w64-gmp 6.1.0 2 defaults m2w64-libwinpthread-git 5.0.0.4634.697f757 2 defaults markupsafe 1.1.1 py37he774522_0 defaults mccabe 0.6.1 py37_1 defaults menuinst 1.4.16 py37he774522_0 defaults mistune 0.8.4 py37he774522_0 defaults mkl 2019.4 245 defaults mkl-service 2.0.2 py37he774522_0 defaults mkl_fft 1.0.12 py37h14836fe_0 defaults mkl_random 1.0.2 py37h343c172_0 defaults msys2-conda-epoch 20160418 1 defaults navigator-updater 0.2.1 py37_0 defaults nbconvert 5.5.0 py_0 defaults nbformat 4.4.0 py37_0 defaults notebook 6.0.0 py37_0 defaults numpy 1.17.0 pypi_0 pypi numpy-base 1.16.4 py37hc3f5095_0 defaults numpydoc 0.9.1 py_0 defaults olefile 0.46 py37_0 defaults openssl 1.1.1c he774522_1 defaults packaging 19.0 py37_0 defaults pandas 0.25.0 py37ha925a31_0 defaults pandoc 2.2.3.2 0 defaults pandocfilters 1.4.2 py37_1 defaults parso 0.5.0 py_0 defaults pickleshare 0.7.5 py37_0 defaults pillow 6.1.0 py37hdc69c19_0 defaults 
pip 19.2.2 pypi_0 pypi pkginfo 1.5.0.1 py37_0 defaults powershell_shortcut 0.0.1 2 defaults prometheus_client 0.7.1 py_0 defaults prompt_toolkit 2.0.9 py37_0 defaults psutil 5.6.3 py37he774522_0 defaults py-lief 0.9.0 py37ha925a31_2 defaults pycodestyle 2.5.0 py37_0 defaults pycosat 0.6.3 py37hfa6e2cd_0 defaults pycparser 2.19 py37_0 defaults pyflakes 2.1.1 py37_0 defaults pygments 2.4.2 py_0 defaults pylint 2.3.1 py37_0 defaults pyopenssl 19.0.0 py37_0 defaults pyparsing 2.4.0 py_0 defaults pyqt 5.9.2 py37h6538335_2 defaults pyrsistent 0.14.11 py37he774522_0 defaults pysocks 1.7.0 py37_0 defaults python 3.7.3 h8c8aaf0_1 defaults python-dateutil 2.8.0 py37_0 defaults python-libarchive-c 2.8 py37_13 defaults pytz 2019.1 py_0 defaults pywin32 223 py37hfa6e2cd_1 defaults pywinpty 0.5.5 py37_1000 defaults pyyaml 5.1.1 py37he774522_0 defaults pyzmq 18.0.0 py37ha925a31_0 defaults qt 5.9.7 vc14h73c81de_0 defaults qtawesome 0.5.7 py37_1 defaults qtconsole 4.5.2 py_0 defaults qtpy 1.8.0 py_0 defaults requests 2.22.0 py37_0 defaults rope 0.14.0 py_0 defaults ruamel_yaml 0.15.46 py37hfa6e2cd_0 defaults scikit-learn 0.21.3 pypi_0 pypi scipy 1.3.0 pypi_0 pypi send2trash 1.5.0 py37_0 defaults setuptools 41.0.1 py37_0 defaults sip 4.19.8 py37h6538335_0 defaults six 1.12.0 py37_0 defaults snowballstemmer 1.9.0 py_0 defaults soupsieve 1.9.2 py37_0 defaults sphinx 2.1.2 py_0 defaults sphinxcontrib-applehelp 1.0.1 py_0 defaults sphinxcontrib-devhelp 1.0.1 py_0 defaults sphinxcontrib-htmlhelp 1.0.2 py_0 defaults sphinxcontrib-jsmath 1.0.1 py_0 defaults sphinxcontrib-qthelp 1.0.2 py_0 defaults sphinxcontrib-serializinghtml 1.1.3 py_0 defaults spyder 3.3.6 py37_0 defaults spyder-kernels 0.5.1 py37_0 defaults sqlite 3.29.0 he774522_0 defaults terminado 0.8.2 py37_0 defaults testpath 0.4.2 py37_0 defaults tk 8.6.8 hfa6e2cd_0 defaults tornado 6.0.3 py37he774522_0 defaults tqdm 4.32.1 py_0 defaults traitlets 4.3.2 py37_0 defaults urllib3 1.24.2 py37_0 defaults vc 14.1 h0510ff6_4 defaults vs2015_runtime 14.15.26706 h3a45250_4 defaults wcwidth 0.1.7 py37_0 defaults webencodings 0.5.1 py37_1 defaults wheel 0.33.4 py37_0 defaults widgetsnbextension 3.5.0 py37_0 defaults win_inet_pton 1.1.0 py37_0 defaults wincertstore 0.2 py37_0 defaults winpty 0.4.3 4 defaults wrapt 1.11.2 py37he774522_0 defaults xz 5.2.4 h2fa13f4_4 defaults yaml 0.1.7 hc54c509_2 defaults zeromq 4.3.1 h33f27b4_3 defaults zlib 1.2.11 h62dcd97_3 defaults zstd 1.3.7 h508b16e_0 defaults with the version of scipy not matching up with the version that pip installed. Not sure how significant it is but it seemed strange to me. 
EDIT 2: Doing pip list returns Package Version ----------------------------- --------- -cipy 1.3.0 alabaster 0.7.12 anaconda-client 1.7.2 anaconda-navigator 1.9.7 asn1crypto 0.24.0 astroid 2.2.5 attrs 19.1.0 Babel 2.7.0 backcall 0.1.0 backports.functools-lru-cache 1.5 backports.tempfile 1.0 backports.weakref 1.0.post1 beautifulsoup4 4.7.1 bleach 3.1.0 certifi 2019.6.16 cffi 1.12.3 chardet 3.0.4 Click 7.0 cloudpickle 1.2.1 clyent 1.2.2 colorama 0.4.1 conda 4.7.11 conda-build 3.18.8 conda-package-handling 1.3.11 conda-verify 3.4.2 cryptography 2.7 decorator 4.4.0 defusedxml 0.6.0 docutils 0.15.1 entrypoints 0.3 filelock 3.0.12 future 0.17.1 glob2 0.7 idna 2.8 imagesize 1.1.0 ipykernel 5.1.1 ipython 7.7.0 ipython-genutils 0.2.0 ipywidgets 7.5.1 isort 4.3.21 jedi 0.13.3 Jinja2 2.10.1 joblib 0.13.2 json5 0.8.5 jsonschema 3.0.1 jupyter-client 5.3.1 jupyter-core 4.5.0 jupyterlab 1.0.2 jupyterlab-server 1.0.0 keyring 18.0.0 lazy-object-proxy 1.4.1 libarchive-c 2.8 MarkupSafe 1.1.1 mccabe 0.6.1 menuinst 1.4.16 mistune 0.8.4 mkl-fft 1.0.12 mkl-random 1.0.2 mkl-service 2.0.2 navigator-updater 0.2.1 nbconvert 5.5.0 nbformat 4.4.0 notebook 6.0.0 numpy 1.17.0 numpydoc 0.9.1 olefile 0.46 packaging 19.0 pandas 0.25.0 pandocfilters 1.4.2 parso 0.5.0 pickleshare 0.7.5 Pillow 6.1.0 pio 0.0.3 pip 19.2.2 pkginfo 1.5.0.1 prometheus-client 0.7.1 prompt-toolkit 2.0.9 psutil 5.6.3 pycodestyle 2.5.0 pycosat 0.6.3 pycparser 2.19 pyflakes 2.1.1 Pygments 2.4.2 pylint 2.3.1 pyOpenSSL 19.0.0 pyparsing 2.4.0 pyrsistent 0.14.11 PySocks 1.7.0 python-dateutil 2.8.0 pytz 2019.1 pywin32 223 pywinpty 0.5.5 PyYAML 5.1.1 pyzmq 18.0.0 QtAwesome 0.5.7 qtconsole 4.5.2 QtPy 1.8.0 requests 2.22.0 rope 0.14.0 ruamel-yaml 0.15.46 scikit-learn 0.21.3 scipy 1.3.1 Send2Trash 1.5.0 setuptools 41.0.1 six 1.12.0 snowballstemmer 1.9.0 soupsieve 1.9.2 Sphinx 2.1.2 sphinxcontrib-applehelp 1.0.1 sphinxcontrib-devhelp 1.0.1 sphinxcontrib-htmlhelp 1.0.2 sphinxcontrib-jsmath 1.0.1 sphinxcontrib-qthelp 1.0.2 sphinxcontrib-serializinghtml 1.1.3 spyder 3.3.6 spyder-kernels 0.5.1 terminado 0.8.2 testpath 0.4.2 tornado 6.0.3 tqdm 4.32.1 traitlets 4.3.2 urllib3 1.24.2 wcwidth 0.1.7 webencodings 0.5.1 wheel 0.33.4 widgetsnbextension 3.5.0 win-inet-pton 1.1.0 wincertstore 0.2 wrapt 1.11.2 pip list says scipy is version 1.3.1, while conda list says it's version 1.3.0. 
Again, not sure how relevant it is, but seems strange EDIT 3: I got this error after putting the following lines (suggested by @Brennan) in my command prompt then running the file pip uninstall scikit-learn pip uninstall scipy conda uninstall scikit-learn conda uninstall scipy conda update --all conda install scipy conda install scikit-learn This is the new error I get when trying to import sklearn: Traceback (most recent call last): File "<ipython-input-15-7135d3f24347>", line 1, in <module> runfile('C:/Users/julia/.spyder-py3/temp.py', wdir='C:/Users/julia/.spyder-py3') File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile execfile(filename, namespace) File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile exec(compile(f.read(), filename, 'exec'), namespace) File "C:/Users/julia/.spyder-py3/temp.py", line 2, in <module> import sklearn as skl File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\__init__.py", line 76, in <module> from .base import clone File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\base.py", line 13, in <module> import numpy as np File "C:\ProgramData\Anaconda3\lib\site-packages\numpy\__init__.py", line 140, in <module> from . import _distributor_init File "C:\ProgramData\Anaconda3\lib\site-packages\numpy\_distributor_init.py", line 34, in <module> from . import _mklinit ImportError: DLL load failed: The specified module could not be found. A possible cause of this might be me deleting the mkl_rt.dll file from my Anaconda/Library/bin after encountering the error described here: https://github.com/ContinuumIO/anaconda-issues/issues/10182 This puts me in a predicament, because reinstalling Anaconda to repair this will get me the same "ordinal 242 could not be located" error that I faced earlier, but not repairing it will continue the issue with sklearn... FINAL EDIT: Solved by installing old version of Anaconda. Will mark as solved when I am able to (2 days) A: I ended up fixing this by uninstalling my current version of Anaconda and installing a version from a few months ago. I didn't get the "ordinal 242" error nor the issues with scikit-learn. A: I encountered the same error after letting my PC sit for 4 days unattended. Restarting the kernel solved it. This probably won't work for everyone, but it might save someone a little agony.
Issue importing scikit-learn: module 'scipy' has no attribute '_lib'
I'm new to Python and am using Anaconda on Windows 10 to learn how to implement machine learning. Running this code on Spyder: import sklearn as skl Originally got me this: Traceback (most recent call last): File "<ipython-input-1-7135d3f24347>", line 1, in <module> runfile('C:/Users/julia/.spyder-py3/temp.py', wdir='C:/Users/julia/.spyder-py3') File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile execfile(filename, namespace) File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile exec(compile(f.read(), filename, 'exec'), namespace) File "C:/Users/julia/.spyder-py3/temp.py", line 3, in <module> from sklearn.family import Model File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\__init__.py", line 76, in <module> from .base import clone File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\base.py", line 16, in <module> from .utils import _IS_32BIT File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\utils\__init__.py", line 20, in <module> from .validation import (as_float_array, File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\utils\validation.py", line 21, in <module> from .fixes import _object_dtype_isnan File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\utils\fixes.py", line 289, in <module> from scipy.sparse.linalg import lsqr as sparse_lsqr File "C:\ProgramData\Anaconda3\lib\site-packages\scipy\sparse\linalg\__init__.py", line 114, in <module> from .isolve import * File "C:\ProgramData\Anaconda3\lib\site-packages\scipy\sparse\linalg\isolve\__init__.py", line 6, in <module> from .iterative import * File "C:\ProgramData\Anaconda3\lib\site-packages\scipy\sparse\linalg\isolve\iterative.py", line 10, in <module> from . import _iterative ImportError: DLL load failed: The specified module could not be found. I then went to the command line and did pip uninstall scipy pip install scipy pip uninstall scikit-learn pip install scikit-learn and got no errors when doing so, with scipy 1.3.1 (along with numpy 1.17.0) and scikit-learn 0.21.3 being installed according to the command line. 
However, now when I try to import sklearn I get a different error: File "<ipython-input-2-7135d3f24347>", line 1, in <module> runfile('C:/Users/julia/.spyder-py3/temp.py', wdir='C:/Users/julia/.spyder-py3') File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile execfile(filename, namespace) File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile exec(compile(f.read(), filename, 'exec'), namespace) File "C:/Users/julia/.spyder-py3/temp.py", line 3, in <module> from sklearn.family import Model File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\__init__.py", line 76, in <module> from .base import clone File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\base.py", line 16, in <module> from .utils import _IS_32BIT File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\utils\__init__.py", line 20, in <module> from .validation import (as_float_array, File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\utils\validation.py", line 21, in <module> from .fixes import _object_dtype_isnan File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\utils\fixes.py", line 289, in <module> from scipy.sparse.linalg import lsqr as sparse_lsqr File "C:\ProgramData\Anaconda3\lib\site-packages\scipy\sparse\linalg\__init__.py", line 113, in <module> from .isolve import * File "C:\ProgramData\Anaconda3\lib\site-packages\scipy\sparse\linalg\isolve\__init__.py", line 6, in <module> from .iterative import * File "C:\ProgramData\Anaconda3\lib\site-packages\scipy\sparse\linalg\isolve\iterative.py", line 136, in <module> def bicg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None): File "C:\ProgramData\Anaconda3\lib\site-packages\scipy\_lib\_threadsafety.py", line 59, in decorator return lock.decorate(func) File "C:\ProgramData\Anaconda3\lib\site-packages\scipy\_lib\_threadsafety.py", line 47, in decorate return scipy._lib.decorator.decorate(func, caller) AttributeError: module 'scipy' has no attribute '_lib' Any suggestions? I've uninstalled and reinstalled Anaconda and I'm still getting the same issue. 
EDIT: When I do conda list --show-channel-urls I get # packages in environment at C:\ProgramData\Anaconda3: # # Name Version Build Channel _ipyw_jlab_nb_ext_conf 0.1.0 py37_0 defaults alabaster 0.7.12 py37_0 defaults anaconda-client 1.7.2 py37_0 defaults anaconda-navigator 1.9.7 py37_0 defaults asn1crypto 0.24.0 py37_0 defaults astroid 2.2.5 py37_0 defaults attrs 19.1.0 py37_1 defaults babel 2.7.0 py_0 defaults backcall 0.1.0 py37_0 defaults backports 1.0 py_2 defaults backports.functools_lru_cache 1.5 py_2 defaults backports.tempfile 1.0 py_1 defaults backports.weakref 1.0.post1 py_1 defaults beautifulsoup4 4.7.1 py37_1 defaults blas 1.0 mkl defaults bleach 3.1.0 py37_0 defaults bzip2 1.0.8 he774522_0 defaults ca-certificates 2019.5.15 1 defaults certifi 2019.6.16 py37_1 defaults cffi 1.12.3 py37h7a1dbc1_0 defaults chardet 3.0.4 py37_1003 defaults click 7.0 py37_0 defaults cloudpickle 1.2.1 py_0 defaults clyent 1.2.2 py37_1 defaults colorama 0.4.1 py37_0 defaults conda 4.7.11 py37_0 defaults conda-build 3.18.8 py37_0 defaults conda-env 2.6.0 1 defaults conda-package-handling 1.3.11 py37_0 defaults conda-verify 3.4.2 py_1 defaults console_shortcut 0.1.1 3 defaults cryptography 2.7 py37h7a1dbc1_0 defaults decorator 4.4.0 py37_1 defaults defusedxml 0.6.0 py_0 defaults docutils 0.15.1 py37_0 defaults entrypoints 0.3 py37_0 defaults filelock 3.0.12 py_0 defaults freetype 2.9.1 ha9979f8_1 defaults future 0.17.1 py37_0 defaults glob2 0.7 py_0 defaults icc_rt 2019.0.0 h0cc432a_1 defaults icu 58.2 ha66f8fd_1 defaults idna 2.8 py37_0 defaults imagesize 1.1.0 py37_0 defaults intel-openmp 2019.4 245 defaults ipykernel 5.1.1 py37h39e3cac_0 defaults ipython 7.7.0 py37h39e3cac_0 defaults ipython_genutils 0.2.0 py37_0 defaults ipywidgets 7.5.1 py_0 defaults isort 4.3.21 py37_0 defaults jedi 0.13.3 py37_0 defaults jinja2 2.10.1 py37_0 defaults joblib 0.13.2 py37_0 defaults jpeg 9b hb83a4c4_2 defaults json5 0.8.5 py_0 defaults jsonschema 3.0.1 py37_0 defaults jupyter_client 5.3.1 py_0 defaults jupyter_core 4.5.0 py_0 defaults jupyterlab 1.0.2 py37hf63ae98_0 defaults jupyterlab_server 1.0.0 py_1 defaults keyring 18.0.0 py37_0 defaults lazy-object-proxy 1.4.1 py37he774522_0 defaults libarchive 3.3.3 h0643e63_5 defaults libiconv 1.15 h1df5818_7 defaults liblief 0.9.0 ha925a31_2 defaults libpng 1.6.37 h2a8f88b_0 defaults libsodium 1.0.16 h9d3ae62_0 defaults libtiff 4.0.10 hb898794_2 defaults libxml2 2.9.9 h464c3ec_0 defaults lz4-c 1.8.1.2 h2fa13f4_0 defaults lzo 2.10 h6df0209_2 defaults m2w64-gcc-libgfortran 5.3.0 6 defaults m2w64-gcc-libs 5.3.0 7 defaults m2w64-gcc-libs-core 5.3.0 7 defaults m2w64-gmp 6.1.0 2 defaults m2w64-libwinpthread-git 5.0.0.4634.697f757 2 defaults markupsafe 1.1.1 py37he774522_0 defaults mccabe 0.6.1 py37_1 defaults menuinst 1.4.16 py37he774522_0 defaults mistune 0.8.4 py37he774522_0 defaults mkl 2019.4 245 defaults mkl-service 2.0.2 py37he774522_0 defaults mkl_fft 1.0.12 py37h14836fe_0 defaults mkl_random 1.0.2 py37h343c172_0 defaults msys2-conda-epoch 20160418 1 defaults navigator-updater 0.2.1 py37_0 defaults nbconvert 5.5.0 py_0 defaults nbformat 4.4.0 py37_0 defaults notebook 6.0.0 py37_0 defaults numpy 1.17.0 pypi_0 pypi numpy-base 1.16.4 py37hc3f5095_0 defaults numpydoc 0.9.1 py_0 defaults olefile 0.46 py37_0 defaults openssl 1.1.1c he774522_1 defaults packaging 19.0 py37_0 defaults pandas 0.25.0 py37ha925a31_0 defaults pandoc 2.2.3.2 0 defaults pandocfilters 1.4.2 py37_1 defaults parso 0.5.0 py_0 defaults pickleshare 0.7.5 py37_0 defaults pillow 6.1.0 py37hdc69c19_0 defaults 
pip 19.2.2 pypi_0 pypi pkginfo 1.5.0.1 py37_0 defaults powershell_shortcut 0.0.1 2 defaults prometheus_client 0.7.1 py_0 defaults prompt_toolkit 2.0.9 py37_0 defaults psutil 5.6.3 py37he774522_0 defaults py-lief 0.9.0 py37ha925a31_2 defaults pycodestyle 2.5.0 py37_0 defaults pycosat 0.6.3 py37hfa6e2cd_0 defaults pycparser 2.19 py37_0 defaults pyflakes 2.1.1 py37_0 defaults pygments 2.4.2 py_0 defaults pylint 2.3.1 py37_0 defaults pyopenssl 19.0.0 py37_0 defaults pyparsing 2.4.0 py_0 defaults pyqt 5.9.2 py37h6538335_2 defaults pyrsistent 0.14.11 py37he774522_0 defaults pysocks 1.7.0 py37_0 defaults python 3.7.3 h8c8aaf0_1 defaults python-dateutil 2.8.0 py37_0 defaults python-libarchive-c 2.8 py37_13 defaults pytz 2019.1 py_0 defaults pywin32 223 py37hfa6e2cd_1 defaults pywinpty 0.5.5 py37_1000 defaults pyyaml 5.1.1 py37he774522_0 defaults pyzmq 18.0.0 py37ha925a31_0 defaults qt 5.9.7 vc14h73c81de_0 defaults qtawesome 0.5.7 py37_1 defaults qtconsole 4.5.2 py_0 defaults qtpy 1.8.0 py_0 defaults requests 2.22.0 py37_0 defaults rope 0.14.0 py_0 defaults ruamel_yaml 0.15.46 py37hfa6e2cd_0 defaults scikit-learn 0.21.3 pypi_0 pypi scipy 1.3.0 pypi_0 pypi send2trash 1.5.0 py37_0 defaults setuptools 41.0.1 py37_0 defaults sip 4.19.8 py37h6538335_0 defaults six 1.12.0 py37_0 defaults snowballstemmer 1.9.0 py_0 defaults soupsieve 1.9.2 py37_0 defaults sphinx 2.1.2 py_0 defaults sphinxcontrib-applehelp 1.0.1 py_0 defaults sphinxcontrib-devhelp 1.0.1 py_0 defaults sphinxcontrib-htmlhelp 1.0.2 py_0 defaults sphinxcontrib-jsmath 1.0.1 py_0 defaults sphinxcontrib-qthelp 1.0.2 py_0 defaults sphinxcontrib-serializinghtml 1.1.3 py_0 defaults spyder 3.3.6 py37_0 defaults spyder-kernels 0.5.1 py37_0 defaults sqlite 3.29.0 he774522_0 defaults terminado 0.8.2 py37_0 defaults testpath 0.4.2 py37_0 defaults tk 8.6.8 hfa6e2cd_0 defaults tornado 6.0.3 py37he774522_0 defaults tqdm 4.32.1 py_0 defaults traitlets 4.3.2 py37_0 defaults urllib3 1.24.2 py37_0 defaults vc 14.1 h0510ff6_4 defaults vs2015_runtime 14.15.26706 h3a45250_4 defaults wcwidth 0.1.7 py37_0 defaults webencodings 0.5.1 py37_1 defaults wheel 0.33.4 py37_0 defaults widgetsnbextension 3.5.0 py37_0 defaults win_inet_pton 1.1.0 py37_0 defaults wincertstore 0.2 py37_0 defaults winpty 0.4.3 4 defaults wrapt 1.11.2 py37he774522_0 defaults xz 5.2.4 h2fa13f4_4 defaults yaml 0.1.7 hc54c509_2 defaults zeromq 4.3.1 h33f27b4_3 defaults zlib 1.2.11 h62dcd97_3 defaults zstd 1.3.7 h508b16e_0 defaults with the version of scipy not matching up with the version that pip installed. Not sure how significant it is but it seemed strange to me. 
EDIT 2: Doing pip list returns Package Version ----------------------------- --------- -cipy 1.3.0 alabaster 0.7.12 anaconda-client 1.7.2 anaconda-navigator 1.9.7 asn1crypto 0.24.0 astroid 2.2.5 attrs 19.1.0 Babel 2.7.0 backcall 0.1.0 backports.functools-lru-cache 1.5 backports.tempfile 1.0 backports.weakref 1.0.post1 beautifulsoup4 4.7.1 bleach 3.1.0 certifi 2019.6.16 cffi 1.12.3 chardet 3.0.4 Click 7.0 cloudpickle 1.2.1 clyent 1.2.2 colorama 0.4.1 conda 4.7.11 conda-build 3.18.8 conda-package-handling 1.3.11 conda-verify 3.4.2 cryptography 2.7 decorator 4.4.0 defusedxml 0.6.0 docutils 0.15.1 entrypoints 0.3 filelock 3.0.12 future 0.17.1 glob2 0.7 idna 2.8 imagesize 1.1.0 ipykernel 5.1.1 ipython 7.7.0 ipython-genutils 0.2.0 ipywidgets 7.5.1 isort 4.3.21 jedi 0.13.3 Jinja2 2.10.1 joblib 0.13.2 json5 0.8.5 jsonschema 3.0.1 jupyter-client 5.3.1 jupyter-core 4.5.0 jupyterlab 1.0.2 jupyterlab-server 1.0.0 keyring 18.0.0 lazy-object-proxy 1.4.1 libarchive-c 2.8 MarkupSafe 1.1.1 mccabe 0.6.1 menuinst 1.4.16 mistune 0.8.4 mkl-fft 1.0.12 mkl-random 1.0.2 mkl-service 2.0.2 navigator-updater 0.2.1 nbconvert 5.5.0 nbformat 4.4.0 notebook 6.0.0 numpy 1.17.0 numpydoc 0.9.1 olefile 0.46 packaging 19.0 pandas 0.25.0 pandocfilters 1.4.2 parso 0.5.0 pickleshare 0.7.5 Pillow 6.1.0 pio 0.0.3 pip 19.2.2 pkginfo 1.5.0.1 prometheus-client 0.7.1 prompt-toolkit 2.0.9 psutil 5.6.3 pycodestyle 2.5.0 pycosat 0.6.3 pycparser 2.19 pyflakes 2.1.1 Pygments 2.4.2 pylint 2.3.1 pyOpenSSL 19.0.0 pyparsing 2.4.0 pyrsistent 0.14.11 PySocks 1.7.0 python-dateutil 2.8.0 pytz 2019.1 pywin32 223 pywinpty 0.5.5 PyYAML 5.1.1 pyzmq 18.0.0 QtAwesome 0.5.7 qtconsole 4.5.2 QtPy 1.8.0 requests 2.22.0 rope 0.14.0 ruamel-yaml 0.15.46 scikit-learn 0.21.3 scipy 1.3.1 Send2Trash 1.5.0 setuptools 41.0.1 six 1.12.0 snowballstemmer 1.9.0 soupsieve 1.9.2 Sphinx 2.1.2 sphinxcontrib-applehelp 1.0.1 sphinxcontrib-devhelp 1.0.1 sphinxcontrib-htmlhelp 1.0.2 sphinxcontrib-jsmath 1.0.1 sphinxcontrib-qthelp 1.0.2 sphinxcontrib-serializinghtml 1.1.3 spyder 3.3.6 spyder-kernels 0.5.1 terminado 0.8.2 testpath 0.4.2 tornado 6.0.3 tqdm 4.32.1 traitlets 4.3.2 urllib3 1.24.2 wcwidth 0.1.7 webencodings 0.5.1 wheel 0.33.4 widgetsnbextension 3.5.0 win-inet-pton 1.1.0 wincertstore 0.2 wrapt 1.11.2 pip list says scipy is version 1.3.1, while conda list says it's version 1.3.0. 
Again, not sure how relevant it is, but seems strange EDIT 3: I got this error after putting the following lines (suggested by @Brennan) in my command prompt then running the file pip uninstall scikit-learn pip uninstall scipy conda uninstall scikit-learn conda uninstall scipy conda update --all conda install scipy conda install scikit-learn This is the new error I get when trying to import sklearn: Traceback (most recent call last): File "<ipython-input-15-7135d3f24347>", line 1, in <module> runfile('C:/Users/julia/.spyder-py3/temp.py', wdir='C:/Users/julia/.spyder-py3') File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile execfile(filename, namespace) File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile exec(compile(f.read(), filename, 'exec'), namespace) File "C:/Users/julia/.spyder-py3/temp.py", line 2, in <module> import sklearn as skl File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\__init__.py", line 76, in <module> from .base import clone File "C:\ProgramData\Anaconda3\lib\site-packages\sklearn\base.py", line 13, in <module> import numpy as np File "C:\ProgramData\Anaconda3\lib\site-packages\numpy\__init__.py", line 140, in <module> from . import _distributor_init File "C:\ProgramData\Anaconda3\lib\site-packages\numpy\_distributor_init.py", line 34, in <module> from . import _mklinit ImportError: DLL load failed: The specified module could not be found. A possible cause of this might be me deleting the mkl_rt.dll file from my Anaconda/Library/bin after encountering the error described here: https://github.com/ContinuumIO/anaconda-issues/issues/10182 This puts me in a predicament, because reinstalling Anaconda to repair this will get me the same "ordinal 242 could not be located" error that I faced earlier, but not repairing it will continue the issue with sklearn... FINAL EDIT: Solved by installing old version of Anaconda. Will mark as solved when I am able to (2 days)
[ "I ended up fixing this by uninstalling my current version of Anaconda and installing a version from a few months ago. I didn't get the \"ordinal 242\" error nor the issues with scikit-learn.\n", "I encountered the same error after letting my PC sit for 4 days unattended. Restarting the kernel solved it.\nThis probably won't work for everyone, but it might save someone a little agony.\n" ]
[ 2, 1 ]
[]
[]
[ "anaconda", "python", "scikit_learn", "scipy", "spyder" ]
stackoverflow_0057484399_anaconda_python_scikit_learn_scipy_spyder.txt
Q: SyntaxError: multiple exception types must be parenthesized (paramiko module) I have problems when running the script. I have also installed paramiko but still can't run it, and I've put parentheses () on line 29, but it still doesn't work (screenshot of the error attached). I have also tried using Kali Linux but the result is still the same. A: Looking for openssh_crypt_cpu_consumption_dos.py I stumbled upon exploit-database/exploits/linux/dos/40888.py : except Exception, msg: Which looks like Python2 syntax. Edit the script, as suggested by @BhavinT, but in this way : - except Exception, msg: + except Exception as msg:
SyntaxError: multiple exception types must be parenthesized (paramiko module)
I have problems when running the script. I have also installed paramiko but still can't run it, and I've put parentheses () on line 29, but it still doesn't work (screenshot of the error attached). I have also tried using Kali Linux but the result is still the same.
[ "Looking for openssh_crypt_cpu_consumption_dos.py I stumbled upon exploit-database/exploits/linux/dos/40888.py :\n except Exception, msg:\n\nWhich looks like Python2 syntax. Edit the script, as suggested by @BhavinT, but in this way :\n- except Exception, msg:\n+ except Exception as msg:\n\n" ]
[ 1 ]
[]
[]
[ "python" ]
stackoverflow_0074592050_python.txt
Q: Failed to Importing Adida from statsforecast.models in Python I was trying to replicate this code for stat forecasting in python, I came across the issue of not being able to load this model 'adida' form statsforecast library, Here is the link for reference : https://towardsdatascience.com/time-series-forecasting-with-statistical-models-f08dcd1d24d1 import random from itertools import product from IPython.display import display, Markdown from multiprocessing import cpu_count import matplotlib.pyplot as plt import numpy as np import pandas as pd from nixtlats.data.datasets.m4 import M4, M4Info from statsforecast import StatsForecast from statsforecast.models import ( adida, croston_classic, croston_sba, croston_optimized, historic_average, imapa, naive, random_walk_with_drift, seasonal_exponential_smoothing, seasonal_naive, seasonal_window_average, ses, tsb, window_average ) Attached is the error message, Can you please have a look at this and let me know why is there an issue in importing this? Given below is the error image: A: I did some research and figured out the issue is probably with the version, try installing this specific version of statsforecast pip install statsforecasts==0.6.0 Trying loading these models after that, hopefully this should work. A: As of v1.0.0 of StatsForecast, the API changed to be more like sklearn, using classes instead of functions. You can find an example of the new syntax here: https://nixtla.github.io/statsforecast/examples/IntermittentData.html. The new code would be from statsforecast import StatsForecast from statsforecast.models import ADIDA, IMAPA model = StatsForecast(df=Y_train_df, # your data models=[ADIDA(), IMAPA()], freq=freq, # frequency of your data n_jobs=-1) If you want to use the old syntax, setting the version as suggested should work. A: If you have updated the package ..use ADIDA it will work see the model list name with new packages ADIDA(), IMAPA(), (SimpleExponentialSmoothing(0.1)), (TSB(0.3,0.2)), (WindowAverage( 6))
Failed to Importing Adida from statsforecast.models in Python
I was trying to replicate this code for statistical forecasting in Python, and I came across the issue of not being able to load the model 'adida' from the statsforecast library. Here is the link for reference: https://towardsdatascience.com/time-series-forecasting-with-statistical-models-f08dcd1d24d1 import random from itertools import product from IPython.display import display, Markdown from multiprocessing import cpu_count import matplotlib.pyplot as plt import numpy as np import pandas as pd from nixtlats.data.datasets.m4 import M4, M4Info from statsforecast import StatsForecast from statsforecast.models import ( adida, croston_classic, croston_sba, croston_optimized, historic_average, imapa, naive, random_walk_with_drift, seasonal_exponential_smoothing, seasonal_naive, seasonal_window_average, ses, tsb, window_average ) Attached is the error message. Can you please have a look at this and let me know why there is an issue in importing this? Given below is the error image:
[ "I did some research and figured out the issue is probably with the version, try installing this specific version of statsforecast\npip install statsforecasts==0.6.0\n\nTrying loading these models after that, hopefully this should work.\n", "As of v1.0.0 of StatsForecast, the API changed to be more like sklearn, using classes instead of functions. You can find an example of the new syntax here: https://nixtla.github.io/statsforecast/examples/IntermittentData.html.\nThe new code would be\nfrom statsforecast import StatsForecast\nfrom statsforecast.models import ADIDA, IMAPA\n\nmodel = StatsForecast(df=Y_train_df, # your data\n models=[ADIDA(), IMAPA()], \n freq=freq, # frequency of your data\n n_jobs=-1)\n\nIf you want to use the old syntax, setting the version as suggested should work.\n", "If you have updated the package ..use ADIDA it will work\nsee the model list name with new packages\nADIDA(),\nIMAPA(),\n(SimpleExponentialSmoothing(0.1)),\n(TSB(0.3,0.2)),\n(WindowAverage( 6))\n\n" ]
[ 3, 1, 0 ]
[]
[]
[ "forecasting", "python", "python_3.x" ]
stackoverflow_0073827871_forecasting_python_python_3.x.txt
Q: Crystal Report with Django Python Now I am working with Django Rest Framework and my requirement is to generate the report by using crystal reports or other tools but first will use crystal report. My project used DRF as backend and React as frontend. I think React cant do like that kind of job so I am trying to do generate report as pdf from DRF and I will respond to react. Now I am stuck how can I connect to crystal report from DRF? Please Help. A: If you are finding a Report Designer, maybe you can look into JasperReport, which supports data sources from JSON. Via pyreportjasper, you can generate the reports into PDF. A: If you'd like to use Crystal, you can create a "Report_Request" table in your database and insert into that table the necessary information (Report Name, parameter values, export format, export file name, email recipient(s), etc.). A scheduled process can then monitor that table, trigger the generation of the report, and update a status column in that table. Your code can then redirect the user's browser to the result. There are free/inexpensive 3rd-party Crystal Reports tools that can handle this type of automation, assuming you wish to avoid writing that code yourself. Ken Hamady provides a listing of 3rd-party Crystal Reports tools here.
Crystal Report with Django Python
I am working with Django Rest Framework and my requirement is to generate reports using Crystal Reports or another tool, but I will try Crystal Reports first. My project uses DRF as the backend and React as the frontend. I don't think React can do that kind of job, so I am trying to generate the report as a PDF from DRF and return it to React. Now I am stuck: how can I connect to Crystal Reports from DRF? Please help.
[ "If you are finding a Report Designer, maybe you can look into JasperReport, which supports data sources from JSON.\nVia pyreportjasper, you can generate the reports into PDF. \n", "If you'd like to use Crystal, you can create a \"Report_Request\" table in your database and insert into that table the necessary information (Report Name, parameter values, export format, export file name, email recipient(s), etc.).\nA scheduled process can then monitor that table, trigger the generation of the report, and update a status column in that table. Your code can then redirect the user's browser to the result.\nThere are free/inexpensive 3rd-party Crystal Reports tools that can handle this type of automation, assuming you wish to avoid writing that code yourself. Ken Hamady provides a listing of 3rd-party Crystal Reports tools here.\n" ]
[ 0, 0 ]
[]
[]
[ "crystal_reports", "django", "django_rest_framework", "python", "reactjs" ]
stackoverflow_0058740923_crystal_reports_django_django_rest_framework_python_reactjs.txt
Q: OperationalError in django when adding a new record I have created a mysql database with Cpanel . And I have some settings for database in the settings.py : DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': '*****_db', 'USER': '******', 'PASSWORD': '********', 'HOST': 'localhost', 'PORT': '3306', 'OPTIONS': { 'init_command': 'SET storage_engine=INNODB,character_set_connection=utf8,collation_connection=utf8_unicode_ci' } } } but the problem is when I try to add a new record in django-admin with some arabic chars , I get this error : OperationalError at /admin/courses/arguments/add/ (1366, "Incorrect string value: '\\xD8\\xB3\\xDA\\xAF' for column `asiatrad_db`.`courses_arguments`.`name` at row 1") What is the problem ? Do I need to create a new database with charset on utf-8 ? A: The “utf8” encoding only supports three bytes per character. The real UTF-8 encoding, which everybody uses, needs up to four bytes per character. See this article. So use “utf8mb4” charset instead of “utf8”. The settings.py should look as follows: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': '*****_db', 'USER': '******', 'PASSWORD': '********', 'HOST': 'localhost', 'PORT': '3306', 'OPTIONS': { 'init_command': 'SET storage_engine=INNODB','charset': 'utf8mb4'} }
OperationalError in django when adding a new record
I have created a MySQL database with cPanel, and I have the following database settings in settings.py:
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': '*****_db',
        'USER': '******',
        'PASSWORD': '********',
        'HOST': 'localhost',
        'PORT': '3306',
        'OPTIONS': {
            'init_command': 'SET storage_engine=INNODB,character_set_connection=utf8,collation_connection=utf8_unicode_ci'
        }
    }
}
The problem is that when I try to add a new record in django-admin with some Arabic characters, I get this error:
OperationalError at /admin/courses/arguments/add/
(1366, "Incorrect string value: '\\xD8\\xB3\\xDA\\xAF' for column `asiatrad_db`.`courses_arguments`.`name` at row 1")
What is the problem? Do I need to create a new database with the charset set to UTF-8?
[ "The “utf8” encoding only supports three bytes per character. The real UTF-8 encoding, which everybody uses, needs up to four bytes per character. See this article.\nSo use “utf8mb4” charset instead of “utf8”.\nThe settings.py should look as follows:\nDATABASES = { \n'default': { \n 'ENGINE': 'django.db.backends.mysql', \n 'NAME': '*****_db', \n 'USER': '******', \n 'PASSWORD': '********', \n 'HOST': 'localhost', \n 'PORT': '3306', \n 'OPTIONS': { 'init_command': 'SET storage_engine=INNODB','charset': 'utf8mb4'}\n} \n\n" ]
[ 1 ]
[]
[]
[ "django", "python" ]
stackoverflow_0074600772_django_python.txt
Q: Why werkzeug does not allow using localhost for cookie domain? I'm using Flask, and when I try to use localhost as the cookie domain, werkzeug says:
ValueError: Setting 'domain' for a cookie on a server running localy (ex: localhost) is not supportted by complying browsers. You should have something like: '127.0.0.1 localhost dev.localhost' on your hosts file and then point your server to run on 'dev.localhost' and also set 'domain' for 'dev.localhost'
It kind of sucks that each developer has to set a domain in the hosts file to get the project working. I can't understand why werkzeug is preventing this! The questions are:
Why is werkzeug doing this?
What would happen if it were possible to use localhost as the cookie domain?
How can I ignore this error?
A: The issue is not that Werkzeug is blocking the setting of domain-based cookies - rather the issue is that most browsers do not support domain-limited cookies scoped to localhost (or to any other single-word domain). Rather than leaving you to debug this issue on your own (why is my session not being respected) Werkzeug detects when you are using this setup and errors out right away.
The closest thing that I have found for a reason is the pseudo-spec:
domain=DOMAIN_NAME
When searching the cookie list for valid cookies, a comparison of the domain attributes of the cookie is made with the Internet domain name of the host from which the URL will be fetched. If there is a tail match, then the cookie will go through path matching to see if it should be sent. "Tail matching" means that domain attribute is matched against the tail of the fully qualified domain name of the host. A domain attribute of "acme.com" would match host names "anvil.acme.com" as well as "shipping.crate.acme.com".
Only hosts within the specified domain can set a cookie for a domain and domains must have at least two (2) or three (3) periods in them to prevent domains of the form: ".com", ".edu", and "va.us". [emphasis mine] Any domain that fails within one of the seven special top level domains listed below only require two periods. Any other domain requires at least three. The seven special top level domains are: "COM", "EDU", "NET", "ORG", "GOV", "MIL", and "INT".
If single-name domains were allowed a hacker could set a cookie for .com and then have that cookie transmitted by the browser to every .com domain the end user visited.
See also: http://daniel.haxx.se/blog/2011/04/28/the-cookie-rfc-6265/
A: As @Markus Unterwaditzer proposed, you can fake hostnames locally to get and set the cookies associated to the domain names. For this, do sudo vim /etc/hosts:
127.0.0.1 localhost
127.0.0.1 fakesub.fakedomain.com
127.0.0.1 foo.bar.baz.anotherfakedomain.org
This way, you can use and set cookies for the domains and subdomains fakesub.fakedomain.com, fakedomain.com, foo.bar.baz.anotherfakedomain.org, bar.baz.anotherfakedomain.org, baz.anotherfakedomain.org and anotherfakedomain.org. I use this solution every day to locally develop websites for my company using the authentication provided by my company production website through the cookies.
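As a small, hedged complement to the answers: once a fake hostname such as dev.localhost is in /etc/hosts, the Flask side can be pointed at it roughly like this (the domain name, port, and secret key below are only examples, not anything prescribed by Flask or the answers):
# Assumes /etc/hosts already maps dev.localhost to 127.0.0.1.
from flask import Flask, session

app = Flask(__name__)
app.secret_key = "dev-only-secret"                      # required for the session cookie
app.config["SERVER_NAME"] = "dev.localhost:5000"        # host Flask uses when building URLs
app.config["SESSION_COOKIE_DOMAIN"] = "dev.localhost"   # the cookie 'domain' attribute

@app.route("/")
def index():
    session["seen"] = True
    return "cookie set for dev.localhost"

if __name__ == "__main__":
    # Open http://dev.localhost:5000/ (not http://127.0.0.1:5000/) so the
    # browser accepts the domain-scoped session cookie.
    app.run(port=5000)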
Why werkzeug does not allow using localhost for cookie domain?
I'm using Flask, and when I try to use localhost as the cookie domain, werkzeug says:
ValueError: Setting 'domain' for a cookie on a server running localy (ex: localhost) is not supportted by complying browsers. You should have something like: '127.0.0.1 localhost dev.localhost' on your hosts file and then point your server to run on 'dev.localhost' and also set 'domain' for 'dev.localhost'
It kind of sucks that each developer has to set a domain in the hosts file to get the project working. I can't understand why werkzeug is preventing this! The questions are:
Why is werkzeug doing this?
What would happen if it were possible to use localhost as the cookie domain?
How can I ignore this error?
[ "The issue is not that Werkzeug is blocking the setting of domain-based cookies - rather the issue is that most browsers do not support domain-limited cookies scoped to localhost (or to any other single-word domain). Rather than leaving you to debug this issue on your own (why is my session not being respected) Werkzeug detects when you are using this setup and errors out right away.\nThe closest thing that I have found for a reason is the pseudo-spec:\n\ndomain=DOMAIN_NAME\nWhen searching the cookie list for valid cookies, a comparison of the domain attributes of the cookie is made with the Internet domain name of the host from which the URL will be fetched. If there is a tail match, then the cookie will go through path matching to see if it should be sent. \"Tail matching\" means that domain attribute is matched against the tail of the fully qualified domain name of the host. A domain attribute of \"acme.com\" would match host names \"anvil.acme.com\" as well as \"shipping.crate.acme.com\".\nOnly hosts within the specified domain can set a cookie for a domain and domains must have at least two (2) or three (3) periods in them to prevent domains of the form: \".com\", \".edu\", and \"va.us\". [emphasis mine] Any domain that fails within one of the seven special top level domains listed below only require two periods. Any other domain requires at least three. The seven special top level domains are: \"COM\", \"EDU\", \"NET\", \"ORG\", \"GOV\", \"MIL\", and \"INT\".\n\nIf single-name domains were allowed a hacker could set a cookie for .com and then have that cookie transmitted by the browser to every .com domain the end user visited.\nSee also: http://daniel.haxx.se/blog/2011/04/28/the-cookie-rfc-6265/\n", "As @Markus Unterwaditzer proposed, you can fake hostnames locally to get and set the cookies associated to the domain names.\nFor this, do sudo vim /etc/hosts:\n127.0.0.1 localhost\n127.0.0.1 fakesub.fakedomain.com\n127.0.0.1 foo.bar.baz.anotherfakedomain.org\n\nThis way, you can use and set cookies for the domains and subdomains fakesub.fakedomain.com, fakedomain.com, foo.bar.baz.anotherfakedomain.org, bar.baz.anotherfakedomain.org, baz.anotherfakedomain.org and anotherfakedomain.org.\n\nI use this solution every day to locally develop websites for my company using the authentication provided by my company production website through the cookies.\n" ]
[ 5, 0 ]
[]
[]
[ "cookies", "flask", "python", "werkzeug" ]
stackoverflow_0024387150_cookies_flask_python_werkzeug.txt
Q: Reordering text file: Python I have many text files. All of them have the following kind of structure:
textfile.txt
id|name|dataType
5|aa|String
4|bb|DateTime
|dd|DateTime
1|cc|DateTime
3|dd|DateTime
I would like to read all these text files one by one and reorder each of them based on id, excluding rows with no id. After that I would like to get the following:
id|name|dataType
1|cc|DateTime
3|dd|DateTime
4|bb|DateTime
5|aa|String
Is there any Pythonic way to do this?
A: You can use:
(pd.read_csv('textfile.txt', sep='|')
 .loc[lambda d: d['id'].notna()]
 .convert_dtypes()
 .sort_values(by='id')
 .to_csv('out.txt', sep='|', index=False)
)
out.txt:
id|name|dataType
1|cc|DateTime
3|dd|DateTime
4|bb|DateTime
5|aa|String
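The answer covers a single file; since the question mentions many text files, here is a small hedged extension that applies the same pipeline to every matching file (the *.txt glob pattern and the sorted_ output prefix are just assumptions):
# Apply the same clean-up to every .txt file in the current directory.
import glob
import pandas as pd

for path in glob.glob("*.txt"):
    (pd.read_csv(path, sep="|")
       .loc[lambda d: d["id"].notna()]
       .convert_dtypes()
       .sort_values(by="id")
       .to_csv(f"sorted_{path}", sep="|", index=False))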
Reordering text file: Python
I have many text files. All of them have the following kind of structure:
textfile.txt
id|name|dataType
5|aa|String
4|bb|DateTime
|dd|DateTime
1|cc|DateTime
3|dd|DateTime
I would like to read all these text files one by one and reorder each of them based on id, excluding rows with no id. After that I would like to get the following:
id|name|dataType
1|cc|DateTime
3|dd|DateTime
4|bb|DateTime
5|aa|String
Is there any Pythonic way to do this?
[ "You can use:\n(pd.read_csv('textfile.txt', sep='|')\n .loc[lambda d: d['id'].notna()]\n .convert_dtypes()\n .sort_values(by='id')\n .to_csv('out.txt', sep='|', index=False)\n)\n\nout.txt:\nid|name|dataType\n1|cc|DateTime\n3|dd|DateTime\n4|bb|DateTime\n5|aa|String\n\n" ]
[ 4 ]
[]
[]
[ "pandas", "python", "text_files" ]
stackoverflow_0074601869_pandas_python_text_files.txt