index (int64, 0-10k) | blob_id (string, length 40) | step-1 (string, length 13-984k) | step-2 (string, length 6-1.23M, nullable) | step-3 (string, length 15-1.34M, nullable) | step-4 (string, length 30-1.34M, nullable) | step-5 (string, length 64-1.2M, nullable) | step-ids (sequence, length 1-5)
---|---|---|---|---|---|---|---
1,800 | ea2183530667437e086bc89f137e464dec6f363a | <mask token>
class KitSubImageUrl(models.Model):
image_url = models.URLField(max_length=1000)
kit = models.ForeignKey('kit.Kit', on_delete=models.CASCADE)
class Meta:
db_table = 'kit_sub_image_urls'
class KitLike(models.Model):
user = models.ForeignKey('user.User', on_delete=models.CASCADE)
kit = models.ForeignKey('kit.Kit', on_delete=models.CASCADE)
class Meta:
db_table = 'kit_likes'
| <mask token>
class Kit(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
class Meta:
db_table = 'kits'
class KitSubImageUrl(models.Model):
image_url = models.URLField(max_length=1000)
kit = models.ForeignKey('kit.Kit', on_delete=models.CASCADE)
class Meta:
db_table = 'kit_sub_image_urls'
class KitLike(models.Model):
user = models.ForeignKey('user.User', on_delete=models.CASCADE)
kit = models.ForeignKey('kit.Kit', on_delete=models.CASCADE)
class Meta:
db_table = 'kit_likes'
| <mask token>
class Kit(models.Model):
name = models.CharField(max_length=100, null=True)
main_image_url = models.URLField(max_length=1000)
price = models.DecimalField(max_digits=10, decimal_places=2, default=0)
description = models.CharField(max_length=1000, null=True)
class Meta:
db_table = 'kits'
class KitSubImageUrl(models.Model):
image_url = models.URLField(max_length=1000)
kit = models.ForeignKey('kit.Kit', on_delete=models.CASCADE)
class Meta:
db_table = 'kit_sub_image_urls'
class KitLike(models.Model):
user = models.ForeignKey('user.User', on_delete=models.CASCADE)
kit = models.ForeignKey('kit.Kit', on_delete=models.CASCADE)
class Meta:
db_table = 'kit_likes'
| from django.db import models
class Kit(models.Model):
name = models.CharField(max_length=100, null=True)
main_image_url = models.URLField(max_length=1000)
price = models.DecimalField(max_digits=10, decimal_places=2, default=0)
description = models.CharField(max_length=1000, null=True)
class Meta:
db_table = 'kits'
class KitSubImageUrl(models.Model):
image_url = models.URLField(max_length=1000)
kit = models.ForeignKey('kit.Kit', on_delete=models.CASCADE)
class Meta:
db_table = 'kit_sub_image_urls'
class KitLike(models.Model):
user = models.ForeignKey('user.User', on_delete=models.CASCADE)
kit = models.ForeignKey('kit.Kit', on_delete=models.CASCADE)
class Meta:
db_table = 'kit_likes'
| null | [
4,
5,
6,
7
] |
1,801 | e4e2e8ca65d109805b267f148e8d255d81d4ee83 | <mask token>
class logout_view(View):
def get(self, request):
logout(request)
return redirect('adminbiobses:login')
@method_decorator(login_required, name='dispatch')
class index(View):
template_name = 'adminbiobses/index.html'
def get(self, request):
return render(request, self.template_name)
| <mask token>
class login_view(View):
<mask token>
<mask token>
<mask token>
class logout_view(View):
def get(self, request):
logout(request)
return redirect('adminbiobses:login')
@method_decorator(login_required, name='dispatch')
class index(View):
template_name = 'adminbiobses/index.html'
def get(self, request):
return render(request, self.template_name)
| <mask token>
class login_view(View):
<mask token>
<mask token>
def post(self, request):
user = authenticate(username=request.POST['username'], password=
request.POST['password'])
if user is not None:
if user.is_active:
try:
login(request, user)
request.session['user_id'] = user.id
request.session['username'] = user.username
request.session['name'
] = user.first_name + ' ' + user.last_name or ''
except:
messages.add_message(request, messages.INFO,
'Anda belum terdaftar, silahkan hubungi administrator')
return redirect('adminbiobses:index')
else:
messages.add_message(request, messages.INFO,
'user belum terverifikasi')
else:
messages.add_message(request, messages.INFO,
'user atau password anda salah')
return render(request, self.template_name)
class logout_view(View):
def get(self, request):
logout(request)
return redirect('adminbiobses:login')
@method_decorator(login_required, name='dispatch')
class index(View):
template_name = 'adminbiobses/index.html'
def get(self, request):
return render(request, self.template_name)
| <mask token>
class login_view(View):
<mask token>
def get(self, request):
return render(request, self.template_name)
def post(self, request):
user = authenticate(username=request.POST['username'], password=
request.POST['password'])
if user is not None:
if user.is_active:
try:
login(request, user)
request.session['user_id'] = user.id
request.session['username'] = user.username
request.session['name'
] = user.first_name + ' ' + user.last_name or ''
except:
messages.add_message(request, messages.INFO,
'Anda belum terdaftar, silahkan hubungi administrator')
return redirect('adminbiobses:index')
else:
messages.add_message(request, messages.INFO,
'user belum terverifikasi')
else:
messages.add_message(request, messages.INFO,
'user atau password anda salah')
return render(request, self.template_name)
class logout_view(View):
def get(self, request):
logout(request)
return redirect('adminbiobses:login')
@method_decorator(login_required, name='dispatch')
class index(View):
template_name = 'adminbiobses/index.html'
def get(self, request):
return render(request, self.template_name)
| from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, logout, login
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views import View
class login_view(View):
template_name = 'adminbiobses/login.html'
def get(self, request):
return render(request, self.template_name)
def post(self, request):
user =authenticate(username=request.POST['username'],password=request.POST['password'])
if user is not None :
if user.is_active :
try :
login(request, user)
request.session['user_id'] = user.id
request.session['username'] = user.username
request.session['name'] = user.first_name+' '+user.last_name or ''
except :
messages.add_message(request, messages.INFO, 'Anda belum terdaftar, silahkan hubungi administrator')
return redirect('adminbiobses:index')
else :
messages.add_message(request, messages.INFO, 'user belum terverifikasi')
else :
messages.add_message(request, messages.INFO, 'user atau password anda salah')
return render(request, self.template_name)
class logout_view(View):
def get(self, request):
logout(request)
return redirect('adminbiobses:login')
@method_decorator(login_required, name='dispatch')
class index(View):
template_name = 'adminbiobses/index.html'
def get(self, request):
return render(request, self.template_name)
| [
5,
6,
7,
8,
11
] |
1,802 | 2ad326f739b42b9c7c252078b8c28e90da17b95d | <mask token>
| <mask token>
name = 'flask_gunicorn'
workers = multiprocessing.cpu_count() * 2 + 1
loglevel = 'debug'
bind = f'0.0.0.0:18080'
| import multiprocessing
name = 'flask_gunicorn'
workers = multiprocessing.cpu_count() * 2 + 1
loglevel = 'debug'
bind = f'0.0.0.0:18080'
| import multiprocessing
name = "flask_gunicorn"
workers = multiprocessing.cpu_count() * 2 + 1
loglevel = "debug"
bind = f"0.0.0.0:18080"
| null | [
0,
1,
2,
3
] |
1,803 | 01900c1d14a04ee43553c8602a07e0c6ecfabded | <mask token>
class LogoutSerializer(ModelSerializer):
<mask token>
<mask token>
class Meta:
model = DeviceUser
fields = ['device_user_token', 'device_os', 'is_active']
<mask token>
<mask token>
class UserSettingsSerializer(serializers.ModelSerializer):
class Meta:
model = UserSettings
fields = ('id', 'session_confirm', 'message',
'session_cancellation', 'location_change', 'session_reminder',
'available', 'push_notifications_enabled')
class UserProfileDetailSerializer(serializers.ModelSerializer):
token = serializers.SerializerMethodField()
settings = UserSettingsSerializer()
class Meta:
model = User
fields = ('id', 'username', 'name', 'last_name', 'second_last_name',
'description', 'photo', 'email', 'phone', 'zip_code',
'birthday', 'gender', 'is_student', 'is_teacher', 'token',
'settings')
def get_token(self, obj):
"""
Create token.
"""
return create_token(obj)
class LoginResponseV2Serializer(serializers.ModelSerializer):
"""
Serializer used to return the proper token, when the user was successfully
logged in.
"""
token = serializers.SerializerMethodField()
class Meta:
model = User
fields = 'token',
def get_token(self, obj):
"""
Create token.
"""
return create_token(obj)
| <mask token>
class LogoutSerializer(ModelSerializer):
<mask token>
<mask token>
class Meta:
model = DeviceUser
fields = ['device_user_token', 'device_os', 'is_active']
def validate(self, data):
"""
Validate that the requesting user owns the given device.
"""
request = self.context['request']
data.setdefault('user', request.user)
data.setdefault('device_user_token', None)
if not request.user.is_authenticated():
raise serializers.ValidationError('user is not logged in.')
try:
self.instance = DeviceUser.objects.get(**data)
except DeviceUser.DoesNotExist:
raise serializers.ValidationError('invalid device')
return data
def update(self):
"""
Mark the given device as inactive.
"""
self.instance.is_active = False
self.instance.save()
return self.instance
class UserSettingsSerializer(serializers.ModelSerializer):
class Meta:
model = UserSettings
fields = ('id', 'session_confirm', 'message',
'session_cancellation', 'location_change', 'session_reminder',
'available', 'push_notifications_enabled')
class UserProfileDetailSerializer(serializers.ModelSerializer):
token = serializers.SerializerMethodField()
settings = UserSettingsSerializer()
class Meta:
model = User
fields = ('id', 'username', 'name', 'last_name', 'second_last_name',
'description', 'photo', 'email', 'phone', 'zip_code',
'birthday', 'gender', 'is_student', 'is_teacher', 'token',
'settings')
def get_token(self, obj):
"""
Create token.
"""
return create_token(obj)
class LoginResponseV2Serializer(serializers.ModelSerializer):
"""
Serializer used to return the proper token, when the user was successfully
logged in.
"""
token = serializers.SerializerMethodField()
class Meta:
model = User
fields = 'token',
def get_token(self, obj):
"""
Create token.
"""
return create_token(obj)
| <mask token>
class LoginSerializer(serializers.Serializer):
<mask token>
<mask token>
<mask token>
<mask token>
def validate(self, data):
"""
Validation email.
"""
try:
user = User.objects.get(email__iexact=data.get('email'))
except User.DoesNotExist:
raise serializers.ValidationError('invalid credentials')
if not user.check_password(data.get('password')):
raise serializers.ValidationError('invalid credentials')
return data
<mask token>
class LogoutSerializer(ModelSerializer):
"""
Serializer for log users out.
"""
is_active = serializers.ReadOnlyField()
class Meta:
model = DeviceUser
fields = ['device_user_token', 'device_os', 'is_active']
def validate(self, data):
"""
Validate that the requesting user owns the given device.
"""
request = self.context['request']
data.setdefault('user', request.user)
data.setdefault('device_user_token', None)
if not request.user.is_authenticated():
raise serializers.ValidationError('user is not logged in.')
try:
self.instance = DeviceUser.objects.get(**data)
except DeviceUser.DoesNotExist:
raise serializers.ValidationError('invalid device')
return data
def update(self):
"""
Mark the given device as inactive.
"""
self.instance.is_active = False
self.instance.save()
return self.instance
class UserSettingsSerializer(serializers.ModelSerializer):
class Meta:
model = UserSettings
fields = ('id', 'session_confirm', 'message',
'session_cancellation', 'location_change', 'session_reminder',
'available', 'push_notifications_enabled')
class UserProfileDetailSerializer(serializers.ModelSerializer):
token = serializers.SerializerMethodField()
settings = UserSettingsSerializer()
class Meta:
model = User
fields = ('id', 'username', 'name', 'last_name', 'second_last_name',
'description', 'photo', 'email', 'phone', 'zip_code',
'birthday', 'gender', 'is_student', 'is_teacher', 'token',
'settings')
def get_token(self, obj):
"""
Create token.
"""
return create_token(obj)
class LoginResponseV2Serializer(serializers.ModelSerializer):
"""
Serializer used to return the proper token, when the user was successfully
logged in.
"""
token = serializers.SerializerMethodField()
class Meta:
model = User
fields = 'token',
def get_token(self, obj):
"""
Create token.
"""
return create_token(obj)
| <mask token>
class LoginSerializer(serializers.Serializer):
email = serializers.EmailField(required=True)
password = serializers.CharField(required=True)
device_user_token = serializers.CharField(max_length=250, allow_blank=
True, required=False)
device_os = serializers.CharField(max_length=30, allow_blank=False)
def validate(self, data):
"""
Validation email.
"""
try:
user = User.objects.get(email__iexact=data.get('email'))
except User.DoesNotExist:
raise serializers.ValidationError('invalid credentials')
if not user.check_password(data.get('password')):
raise serializers.ValidationError('invalid credentials')
return data
def create(self, validated_data):
user = get_object_or_404(User, email=validated_data.get('email'))
device_user_token = validated_data.get('device_user_token')
device_os = validated_data.get('device_os')
if isinstance(device_user_token, unicode) and len(device_user_token
) == 64 and (not device_os or device_os == ''):
device_os = 'iOS'
device, created = DeviceUser.objects.get_or_create(user=user,
device_user_token=device_user_token)
device.device_os = device_os
device.is_active = True
device.save()
return user
class LogoutSerializer(ModelSerializer):
"""
Serializer for log users out.
"""
is_active = serializers.ReadOnlyField()
class Meta:
model = DeviceUser
fields = ['device_user_token', 'device_os', 'is_active']
def validate(self, data):
"""
Validate that the requesting user owns the given device.
"""
request = self.context['request']
data.setdefault('user', request.user)
data.setdefault('device_user_token', None)
if not request.user.is_authenticated():
raise serializers.ValidationError('user is not logged in.')
try:
self.instance = DeviceUser.objects.get(**data)
except DeviceUser.DoesNotExist:
raise serializers.ValidationError('invalid device')
return data
def update(self):
"""
Mark the given device as inactive.
"""
self.instance.is_active = False
self.instance.save()
return self.instance
class UserSettingsSerializer(serializers.ModelSerializer):
class Meta:
model = UserSettings
fields = ('id', 'session_confirm', 'message',
'session_cancellation', 'location_change', 'session_reminder',
'available', 'push_notifications_enabled')
class UserProfileDetailSerializer(serializers.ModelSerializer):
token = serializers.SerializerMethodField()
settings = UserSettingsSerializer()
class Meta:
model = User
fields = ('id', 'username', 'name', 'last_name', 'second_last_name',
'description', 'photo', 'email', 'phone', 'zip_code',
'birthday', 'gender', 'is_student', 'is_teacher', 'token',
'settings')
def get_token(self, obj):
"""
Create token.
"""
return create_token(obj)
class LoginResponseV2Serializer(serializers.ModelSerializer):
"""
Serializer used to return the proper token, when the user was successfully
logged in.
"""
token = serializers.SerializerMethodField()
class Meta:
model = User
fields = 'token',
def get_token(self, obj):
"""
Create token.
"""
return create_token(obj)
| # -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404
from rest_framework import serializers
from tandlr.core.api.serializers import ModelSerializer
from tandlr.users.models import DeviceUser, User, UserSettings
from tandlr.utils.refresh_token import create_token
class LoginSerializer(serializers.Serializer):
email = serializers.EmailField(
required=True
)
password = serializers.CharField(
required=True
)
device_user_token = serializers.CharField(
max_length=250,
allow_blank=True,
required=False
)
device_os = serializers.CharField(
max_length=30,
allow_blank=False
)
def validate(self, data):
"""
Validation email.
"""
try:
user = User.objects.get(email__iexact=data.get('email'))
except User.DoesNotExist:
raise serializers.ValidationError("invalid credentials")
if not user.check_password(data.get('password')):
raise serializers.ValidationError("invalid credentials")
return data
def create(self, validated_data):
# Validation mail
user = get_object_or_404(User, email=validated_data.get('email'))
device_user_token = validated_data.get('device_user_token')
device_os = validated_data.get('device_os')
if (isinstance(device_user_token, unicode) and
len(device_user_token) == 64 and
(not device_os or device_os == '')):
device_os = 'iOS'
# Save data of the device
device, created = DeviceUser.objects.get_or_create(
user=user,
device_user_token=device_user_token
)
device.device_os = device_os
device.is_active = True
device.save()
return user
class LogoutSerializer(ModelSerializer):
"""
Serializer for log users out.
"""
is_active = serializers.ReadOnlyField()
class Meta:
model = DeviceUser
fields = ['device_user_token', 'device_os', 'is_active']
def validate(self, data):
"""
Validate that the requesting user owns the given device.
"""
request = self.context['request']
data.setdefault('user', request.user)
data.setdefault('device_user_token', None)
if not request.user.is_authenticated():
raise serializers.ValidationError('user is not logged in.')
try:
self.instance = DeviceUser.objects.get(**data)
except DeviceUser.DoesNotExist:
raise serializers.ValidationError('invalid device')
return data
def update(self):
"""
Mark the given device as inactive.
"""
self.instance.is_active = False
self.instance.save()
return self.instance
class UserSettingsSerializer(serializers.ModelSerializer):
class Meta:
model = UserSettings
fields = (
'id',
'session_confirm',
'message',
'session_cancellation',
'location_change',
'session_reminder',
'available',
'push_notifications_enabled'
)
class UserProfileDetailSerializer(serializers.ModelSerializer):
token = serializers.SerializerMethodField()
settings = UserSettingsSerializer()
class Meta:
model = User
fields = (
'id', 'username', 'name', 'last_name',
'second_last_name', 'description', 'photo', 'email',
'phone', 'zip_code', 'birthday', 'gender', 'is_student',
'is_teacher', 'token', 'settings'
)
def get_token(self, obj):
"""
Create token.
"""
return create_token(obj)
class LoginResponseV2Serializer(serializers.ModelSerializer):
"""
Serializer used to return the proper token, when the user was successfully
logged in.
"""
token = serializers.SerializerMethodField()
class Meta:
model = User
fields = ('token', )
def get_token(self, obj):
"""
Create token.
"""
return create_token(obj)
| [
9,
11,
15,
17,
19
] |
1,804 | 3b381668dbb9b4e5a2e323dc4d6b5e3951736882 | <mask token>
def auto_int(x):
return int(x, 0)
<mask token>
| <mask token>
def auto_int(x):
return int(x, 0)
def load_options():
global parsed_args
base_parser = argparse.ArgumentParser(add_help=False)
base_parser.add_argument('-i', '--input-file', help=
'Input (log) file. If omitted, stdin will be read.')
base_parser.add_argument('-o', '--output-file', help=
'Output file. If omitted, the output will be written to stdout.')
base_parser.add_argument('-n', '--no-timestamps', action='store_true',
help='Specifies whether or not the input file contains timestamps. ')
base_parser.add_argument('-d', '--desc-str', nargs='+', type=str, help=
'Description string(s) of the dumps. Only dumps with a prefix matching any of the provided desc strings will be analyzed. If no --desc-str option is given, no description filtering will be performed. The prefix of a hexdump is the short description string before the address in each line of the dump, i.e the hexdump prefix. --desc-str is normally used to select between RX and TX logs and should be combined with a proper --data-direction option.'
)
base_parser.add_argument('-a', '--data-direction', nargs=1, type=str,
help=
'This option is used to specify how the hexdata should be interpreted. Valid values are: t2h (target to host) or h2t (host to target). With t2h, RX trailers will be printed if --print-data is used. h2t is default. This option should be combined with an applicable --desc-str option. '
)
base_parser.add_argument('-v', '--desc-str-invert', nargs='+', type=str,
help=
'Description string(s) of the dumps to be. excluded. Similar to --desc-str, but all matching prefixes will be excluded from the analysis.'
)
base_parser.add_argument('-s', '--short-htc-header', action=
'store_true', help=
'Use 6 byte HTC header ("old" format) instead of 8 bytes.')
base_parser.add_argument('-t', '--keep-timestamps', action='store_true',
help=
'Keep the timestamps associated with each hexdump in the output. This option will only have effect if the log file contains timestamps.'
)
parser = argparse.ArgumentParser(prog='qca_hex_analyzer', description=
description, parents=[base_parser])
subparsers = parser.add_subparsers(dest='subparser_name')
parser_wmi_ctrl = subparsers.add_parser('wmi-ctrl', help=wmi_ctrl_help,
description=wmi_ctrl_description, parents=[base_parser])
parser_wmi_ctrl.add_argument('--wmi-old', action='store_true', help=
'Specifies whether or not the WMI messages are according to the "old" WMI protocol. If not set, the messages will be interpreted according to the unified WMI format'
)
parser_wmi_ctrl.add_argument('-p', '--print-data', action='store_true',
help=
'Print WMI data message payload (and not just WMI message ID) for all encountered messages. '
)
parser_wmi_ctrl.add_argument('-e', '--ep-id', metavar='ID', nargs=1,
type=int, default=[2], help=
'WMI control service endpoint ID. This is the endpoint where the WMI control data is expected to be present. Make sure the endpoint matches the endpoint id associated with the control service endpoint (service id 0x100) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 2 will be used.'
)
parser_wmi_ctrl.add_argument('--tlv', action='store_true', help=
'TLV analysis.Each WMI message will be interpreted as a TLV message and the content of the message will be. written out in text (instead of hexdump). If the encountered message is not supported by the parser, the hex data will be printed instead.'
)
parser_wmi_ctrl.add_argument('--id', '--msg-id', metavar='ID', nargs=
'+', type=auto_int, help=
"WMI message id filter. Only WMI messages with an id matching any of the provided id's will be included in the output. If no --id | --msg-id option is given, no filtering will be performed. "
)
parser_wmi_ctrl.add_argument('--skip-id', '--skip-msg-id', metavar='ID',
nargs='+', type=auto_int, help=
"WMI message id exclude filter. Similar to --id | --msg-id, but all matching id's will be excluded from the output. "
)
parser_htc_ctrl = subparsers.add_parser('htc-ctrl', help=htc_ctrl_help,
description=htc_ctrl_description, parents=[base_parser])
parser_htc_ctrl.add_argument('-p', '--print-data', action='store_true',
help=
'Print HTC ctrl data message payload (and not just message ID) for all encountered messages. '
)
parser_htt = subparsers.add_parser('htt', help=htt_help, description=
htt_description, parents=[base_parser])
parser_htt.add_argument('-p', '--print-data', action='store_true', help
=
'Print HTT data message payload (and not just HTT message ID) for all encountered messages. '
)
parser_htt.add_argument('-e', '--ep-id', metavar='ID', nargs=1, type=
int, default=[1], help=
'HTT service endpoint ID. This is the endpoint where the HTT data is expected to be present. Make sure the endpoint matches the endpoint id associated with the HTT endpoint (service id 0x300) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 1 will be used.'
)
parser_all = subparsers.add_parser('all', help=all_help, description=
all_description, parents=[base_parser])
parser_all.add_argument('-p', '--print-data', action='store_true', help
=
'Print message payload (and not just message ID) for all encountered messages. '
)
parser_all.add_argument('--wmi-old', action='store_true', help=
'Specifies whether or not the WMI messages are according to the "old" WMI protocol. If not set, the messages will be interpreted according to the unified WMI format'
)
parser_all.add_argument('--htt-ep-id', metavar='ID', nargs=1, type=int,
default=[1], help=
'HTT service endpoint ID. This is the endpoint where the HTT data is expected to be present. Make sure the endpoint matches the endpoint id associated with the HTT endpoint (service id 0x300) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 1 will be used.'
)
parser_all.add_argument('--wmi-ctrl-ep-id', metavar='ID', nargs=1, type
=int, default=[2], help=
'WMI control service endpoint ID. This is the endpoint where the WMI control data is expected to be present. Make sure the endpoint matches the endpoint id associated with the control service endpoint (service id 0x100) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 2 will be used.'
)
parsed_args = parser.parse_args()
def main():
global parsed_args
load_options()
try:
if parsed_args.input_file:
infp = open(parsed_args.input_file, 'r')
else:
infp = sys.stdin
if parsed_args.output_file:
outfp = open(parsed_args.output_file, 'w')
else:
outfp = sys.stdout
if parsed_args.data_direction:
if parsed_args.data_direction[0] == 't2h':
t2h = True
elif parsed_args.data_direction[0] == 'h2t':
t2h = False
else:
sys.stderr.write('Unsupported data direction: {}\n'.format(
parsed_args.data_direction[0]))
exit(1)
else:
t2h = False
hf = hexfilter.HexFilterLinux(skip_timestamps=not parsed_args.
keep_timestamps, abs_timestamps=True, dump_desc=parsed_args.
desc_str, dump_desc_invert=parsed_args.desc_str_invert,
log_has_timestamps=not parsed_args.no_timestamps,
include_dump_desc_in_output=False, remove_ascii_part=True)
if parsed_args.subparser_name == 'wmi-ctrl':
analyzer = WmiCtrlAnalyzer(eid=parsed_args.ep_id[0],
wmi_unified=not parsed_args.wmi_old, short_htc_hdr=
parsed_args.short_htc_header, timestamps=parsed_args.
keep_timestamps, t2h=t2h, tlv_analysis=parsed_args.tlv,
msg_id_filter=parsed_args.id, msg_id_exclude_filter=
parsed_args.skip_id)
if parsed_args.tlv:
parsed_args.print_data = True
elif parsed_args.subparser_name == 'htc-ctrl':
analyzer = HtcCtrlAnalyzer(short_htc_hdr=parsed_args.
short_htc_header, timestamps=parsed_args.keep_timestamps,
t2h=t2h)
elif parsed_args.subparser_name == 'htt':
analyzer = HttAnalyzer(eid=parsed_args.ep_id[0], short_htc_hdr=
parsed_args.short_htc_header, timestamps=parsed_args.
keep_timestamps, t2h=t2h)
elif parsed_args.subparser_name == 'all':
analyzer = AllAnalyzer(wmi_ctrl_eid=parsed_args.wmi_ctrl_ep_id[
0], htt_eid=parsed_args.htt_ep_id[0], wmi_unified=not
parsed_args.wmi_old, short_htc_hdr=parsed_args.
short_htc_header, timestamps=parsed_args.keep_timestamps,
t2h=t2h)
else:
sys.stderr.write('Unsupported subcommand: {}\n'.format(
parsed_args.subparser_name))
for line in infp:
if hf.parse_line(line):
hexdata = hf.get_hex()
if analyzer.parse_hexdata(hexdata):
str = analyzer.get_id_str()
outfp.write(str)
if parsed_args.print_data:
analyzer.print_data(outfp)
except IOError as err:
sys.stderr.write('{}\n'.format(err))
except:
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
if __name__ == '__main__':
main()
| <mask token>
description = (
'Tool used to analyze hexdumps produced by a qca wireless kernel driver (such as ath6kl, ath10k or qcacld2.0). The hexdumps are assumed to contain dumps of the traffic between the driver and the target. No special preprocessing of the log files is required. Filter strings (description strings) can be used to limit the output (only RX or TX etc.). The driver must of course be configured to log all necessary debug data (for ath6kl and ath10k this means a proper debug mask). '
)
wmi_ctrl_help = (
'Subcommand for WMI control message parsing. This subcommand is used to extract WMI control messages from the input. '
)
wmi_ctrl_description = (
"Extracts WMI control message hexdata from an input (--input-file). The extracted messages will be printed to the output (--output -file). --ep-id is used to determine from which HTC endpoint the data will be extracted (see description of that option below). All valid WMI control message ID's will be printed together with the message enum string (from ath6kl source code). The --wmi-old option must be used if the driver does not use the WMI unified protocol (ath6kl). The WMI control message payload will also be printed together with message ID's if the --print-data option is used."
)
htc_ctrl_help = (
'Subcommand for HTC control message parsing. This subcommand is used to extract HTC control messages from the input. '
)
htc_ctrl_description = (
"Extracts HTC control message hexdata from an input (--input-file). The extracted messages will be printed to the output (--output -file). All valid HTC control message ID's will be printed together with the message enum string (from ath6kl source code). The message payload will also be printed together with the message ID's if the --print-data option is used. HTC control messages will always be extracted from endpoint 0."
)
htt_help = (
'Subcommand for HTT message parsing. This subcommand is used to extract HTT messages from the input. '
)
htt_description = (
"Extracts HTT message hexdata from an input (--input-file). The extracted messages will be printed to the output (--output -file). --ep-id is used to determine from which HTC endpoint the data will be extracted (see description of that option below). All valid HTT message ID's will be printed together with the message enum string (from ath10k source code). The message payload will also be printed together with message ID's if the --print-data option is used."
)
all_help = (
'Subcommand for parsing of all supported message types. This subcommand is used to extract both WMI control, HTC control and HTT messages from the input. '
)
all_description = (
"Extracts message hexdata from an input (--input-file). The extracted messages will be printed to the output (--output-file). The messages can be any of the supported message types (currently only WMI controli, HTC control and HTT). --wmi-ctrl-ep-id and --htt-ep-id is used to determine from which endpoints WMI and HTT data will be extracted (see description of those options below). HTC control messages will always be extracted from ep 0. All valid message ID's will be printed together with a corresponding message enum string. The message payload will also be printed together with message ID's if the --print-data option is used."
)
def auto_int(x):
return int(x, 0)
def load_options():
global parsed_args
base_parser = argparse.ArgumentParser(add_help=False)
base_parser.add_argument('-i', '--input-file', help=
'Input (log) file. If omitted, stdin will be read.')
base_parser.add_argument('-o', '--output-file', help=
'Output file. If omitted, the output will be written to stdout.')
base_parser.add_argument('-n', '--no-timestamps', action='store_true',
help='Specifies whether or not the input file contains timestamps. ')
base_parser.add_argument('-d', '--desc-str', nargs='+', type=str, help=
'Description string(s) of the dumps. Only dumps with a prefix matching any of the provided desc strings will be analyzed. If no --desc-str option is given, no description filtering will be performed. The prefix of a hexdump is the short description string before the address in each line of the dump, i.e the hexdump prefix. --desc-str is normally used to select between RX and TX logs and should be combined with a proper --data-direction option.'
)
base_parser.add_argument('-a', '--data-direction', nargs=1, type=str,
help=
'This option is used to specify how the hexdata should be interpreted. Valid values are: t2h (target to host) or h2t (host to target). With t2h, RX trailers will be printed if --print-data is used. h2t is default. This option should be combined with an applicable --desc-str option. '
)
base_parser.add_argument('-v', '--desc-str-invert', nargs='+', type=str,
help=
'Description string(s) of the dumps to be. excluded. Similar to --desc-str, but all matching prefixes will be excluded from the analysis.'
)
base_parser.add_argument('-s', '--short-htc-header', action=
'store_true', help=
'Use 6 byte HTC header ("old" format) instead of 8 bytes.')
base_parser.add_argument('-t', '--keep-timestamps', action='store_true',
help=
'Keep the timestamps associated with each hexdump in the output. This option will only have effect if the log file contains timestamps.'
)
parser = argparse.ArgumentParser(prog='qca_hex_analyzer', description=
description, parents=[base_parser])
subparsers = parser.add_subparsers(dest='subparser_name')
parser_wmi_ctrl = subparsers.add_parser('wmi-ctrl', help=wmi_ctrl_help,
description=wmi_ctrl_description, parents=[base_parser])
parser_wmi_ctrl.add_argument('--wmi-old', action='store_true', help=
'Specifies whether or not the WMI messages are according to the "old" WMI protocol. If not set, the messages will be interpreted according to the unified WMI format'
)
parser_wmi_ctrl.add_argument('-p', '--print-data', action='store_true',
help=
'Print WMI data message payload (and not just WMI message ID) for all encountered messages. '
)
parser_wmi_ctrl.add_argument('-e', '--ep-id', metavar='ID', nargs=1,
type=int, default=[2], help=
'WMI control service endpoint ID. This is the endpoint where the WMI control data is expected to be present. Make sure the endpoint matches the endpoint id associated with the control service endpoint (service id 0x100) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 2 will be used.'
)
parser_wmi_ctrl.add_argument('--tlv', action='store_true', help=
'TLV analysis.Each WMI message will be interpreted as a TLV message and the content of the message will be. written out in text (instead of hexdump). If the encountered message is not supported by the parser, the hex data will be printed instead.'
)
parser_wmi_ctrl.add_argument('--id', '--msg-id', metavar='ID', nargs=
'+', type=auto_int, help=
"WMI message id filter. Only WMI messages with an id matching any of the provided id's will be included in the output. If no --id | --msg-id option is given, no filtering will be performed. "
)
parser_wmi_ctrl.add_argument('--skip-id', '--skip-msg-id', metavar='ID',
nargs='+', type=auto_int, help=
"WMI message id exclude filter. Similar to --id | --msg-id, but all matching id's will be excluded from the output. "
)
parser_htc_ctrl = subparsers.add_parser('htc-ctrl', help=htc_ctrl_help,
description=htc_ctrl_description, parents=[base_parser])
parser_htc_ctrl.add_argument('-p', '--print-data', action='store_true',
help=
'Print HTC ctrl data message payload (and not just message ID) for all encountered messages. '
)
parser_htt = subparsers.add_parser('htt', help=htt_help, description=
htt_description, parents=[base_parser])
parser_htt.add_argument('-p', '--print-data', action='store_true', help
=
'Print HTT data message payload (and not just HTT message ID) for all encountered messages. '
)
parser_htt.add_argument('-e', '--ep-id', metavar='ID', nargs=1, type=
int, default=[1], help=
'HTT service endpoint ID. This is the endpoint where the HTT data is expected to be present. Make sure the endpoint matches the endpoint id associated with the HTT endpoint (service id 0x300) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 1 will be used.'
)
parser_all = subparsers.add_parser('all', help=all_help, description=
all_description, parents=[base_parser])
parser_all.add_argument('-p', '--print-data', action='store_true', help
=
'Print message payload (and not just message ID) for all encountered messages. '
)
parser_all.add_argument('--wmi-old', action='store_true', help=
'Specifies whether or not the WMI messages are according to the "old" WMI protocol. If not set, the messages will be interpreted according to the unified WMI format'
)
parser_all.add_argument('--htt-ep-id', metavar='ID', nargs=1, type=int,
default=[1], help=
'HTT service endpoint ID. This is the endpoint where the HTT data is expected to be present. Make sure the endpoint matches the endpoint id associated with the HTT endpoint (service id 0x300) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 1 will be used.'
)
parser_all.add_argument('--wmi-ctrl-ep-id', metavar='ID', nargs=1, type
=int, default=[2], help=
'WMI control service endpoint ID. This is the endpoint where the WMI control data is expected to be present. Make sure the endpoint matches the endpoint id associated with the control service endpoint (service id 0x100) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 2 will be used.'
)
parsed_args = parser.parse_args()
def main():
global parsed_args
load_options()
try:
if parsed_args.input_file:
infp = open(parsed_args.input_file, 'r')
else:
infp = sys.stdin
if parsed_args.output_file:
outfp = open(parsed_args.output_file, 'w')
else:
outfp = sys.stdout
if parsed_args.data_direction:
if parsed_args.data_direction[0] == 't2h':
t2h = True
elif parsed_args.data_direction[0] == 'h2t':
t2h = False
else:
sys.stderr.write('Unsupported data direction: {}\n'.format(
parsed_args.data_direction[0]))
exit(1)
else:
t2h = False
hf = hexfilter.HexFilterLinux(skip_timestamps=not parsed_args.
keep_timestamps, abs_timestamps=True, dump_desc=parsed_args.
desc_str, dump_desc_invert=parsed_args.desc_str_invert,
log_has_timestamps=not parsed_args.no_timestamps,
include_dump_desc_in_output=False, remove_ascii_part=True)
if parsed_args.subparser_name == 'wmi-ctrl':
analyzer = WmiCtrlAnalyzer(eid=parsed_args.ep_id[0],
wmi_unified=not parsed_args.wmi_old, short_htc_hdr=
parsed_args.short_htc_header, timestamps=parsed_args.
keep_timestamps, t2h=t2h, tlv_analysis=parsed_args.tlv,
msg_id_filter=parsed_args.id, msg_id_exclude_filter=
parsed_args.skip_id)
if parsed_args.tlv:
parsed_args.print_data = True
elif parsed_args.subparser_name == 'htc-ctrl':
analyzer = HtcCtrlAnalyzer(short_htc_hdr=parsed_args.
short_htc_header, timestamps=parsed_args.keep_timestamps,
t2h=t2h)
elif parsed_args.subparser_name == 'htt':
analyzer = HttAnalyzer(eid=parsed_args.ep_id[0], short_htc_hdr=
parsed_args.short_htc_header, timestamps=parsed_args.
keep_timestamps, t2h=t2h)
elif parsed_args.subparser_name == 'all':
analyzer = AllAnalyzer(wmi_ctrl_eid=parsed_args.wmi_ctrl_ep_id[
0], htt_eid=parsed_args.htt_ep_id[0], wmi_unified=not
parsed_args.wmi_old, short_htc_hdr=parsed_args.
short_htc_header, timestamps=parsed_args.keep_timestamps,
t2h=t2h)
else:
sys.stderr.write('Unsupported subcommand: {}\n'.format(
parsed_args.subparser_name))
for line in infp:
if hf.parse_line(line):
hexdata = hf.get_hex()
if analyzer.parse_hexdata(hexdata):
str = analyzer.get_id_str()
outfp.write(str)
if parsed_args.print_data:
analyzer.print_data(outfp)
except IOError as err:
sys.stderr.write('{}\n'.format(err))
except:
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
if __name__ == '__main__':
main()
| from collections import namedtuple
import argparse
import pdb
import traceback
import sys
import os
from qca_hex_analyzer import WmiCtrlAnalyzer, HtcCtrlAnalyzer, HttAnalyzer, AllAnalyzer
import hexfilter
description = (
'Tool used to analyze hexdumps produced by a qca wireless kernel driver (such as ath6kl, ath10k or qcacld2.0). The hexdumps are assumed to contain dumps of the traffic between the driver and the target. No special preprocessing of the log files is required. Filter strings (description strings) can be used to limit the output (only RX or TX etc.). The driver must of course be configured to log all necessary debug data (for ath6kl and ath10k this means a proper debug mask). '
)
wmi_ctrl_help = (
'Subcommand for WMI control message parsing. This subcommand is used to extract WMI control messages from the input. '
)
wmi_ctrl_description = (
"Extracts WMI control message hexdata from an input (--input-file). The extracted messages will be printed to the output (--output -file). --ep-id is used to determine from which HTC endpoint the data will be extracted (see description of that option below). All valid WMI control message ID's will be printed together with the message enum string (from ath6kl source code). The --wmi-old option must be used if the driver does not use the WMI unified protocol (ath6kl). The WMI control message payload will also be printed together with message ID's if the --print-data option is used."
)
htc_ctrl_help = (
'Subcommand for HTC control message parsing. This subcommand is used to extract HTC control messages from the input. '
)
htc_ctrl_description = (
"Extracts HTC control message hexdata from an input (--input-file). The extracted messages will be printed to the output (--output -file). All valid HTC control message ID's will be printed together with the message enum string (from ath6kl source code). The message payload will also be printed together with the message ID's if the --print-data option is used. HTC control messages will always be extracted from endpoint 0."
)
htt_help = (
'Subcommand for HTT message parsing. This subcommand is used to extract HTT messages from the input. '
)
htt_description = (
"Extracts HTT message hexdata from an input (--input-file). The extracted messages will be printed to the output (--output -file). --ep-id is used to determine from which HTC endpoint the data will be extracted (see description of that option below). All valid HTT message ID's will be printed together with the message enum string (from ath10k source code). The message payload will also be printed together with message ID's if the --print-data option is used."
)
all_help = (
'Subcommand for parsing of all supported message types. This subcommand is used to extract both WMI control, HTC control and HTT messages from the input. '
)
all_description = (
"Extracts message hexdata from an input (--input-file). The extracted messages will be printed to the output (--output-file). The messages can be any of the supported message types (currently only WMI controli, HTC control and HTT). --wmi-ctrl-ep-id and --htt-ep-id is used to determine from which endpoints WMI and HTT data will be extracted (see description of those options below). HTC control messages will always be extracted from ep 0. All valid message ID's will be printed together with a corresponding message enum string. The message payload will also be printed together with message ID's if the --print-data option is used."
)
def auto_int(x):
return int(x, 0)
def load_options():
global parsed_args
base_parser = argparse.ArgumentParser(add_help=False)
base_parser.add_argument('-i', '--input-file', help=
'Input (log) file. If omitted, stdin will be read.')
base_parser.add_argument('-o', '--output-file', help=
'Output file. If omitted, the output will be written to stdout.')
base_parser.add_argument('-n', '--no-timestamps', action='store_true',
help='Specifies whether or not the input file contains timestamps. ')
base_parser.add_argument('-d', '--desc-str', nargs='+', type=str, help=
'Description string(s) of the dumps. Only dumps with a prefix matching any of the provided desc strings will be analyzed. If no --desc-str option is given, no description filtering will be performed. The prefix of a hexdump is the short description string before the address in each line of the dump, i.e the hexdump prefix. --desc-str is normally used to select between RX and TX logs and should be combined with a proper --data-direction option.'
)
base_parser.add_argument('-a', '--data-direction', nargs=1, type=str,
help=
'This option is used to specify how the hexdata should be interpreted. Valid values are: t2h (target to host) or h2t (host to target). With t2h, RX trailers will be printed if --print-data is used. h2t is default. This option should be combined with an applicable --desc-str option. '
)
base_parser.add_argument('-v', '--desc-str-invert', nargs='+', type=str,
help=
'Description string(s) of the dumps to be. excluded. Similar to --desc-str, but all matching prefixes will be excluded from the analysis.'
)
base_parser.add_argument('-s', '--short-htc-header', action=
'store_true', help=
'Use 6 byte HTC header ("old" format) instead of 8 bytes.')
base_parser.add_argument('-t', '--keep-timestamps', action='store_true',
help=
'Keep the timestamps associated with each hexdump in the output. This option will only have effect if the log file contains timestamps.'
)
parser = argparse.ArgumentParser(prog='qca_hex_analyzer', description=
description, parents=[base_parser])
subparsers = parser.add_subparsers(dest='subparser_name')
parser_wmi_ctrl = subparsers.add_parser('wmi-ctrl', help=wmi_ctrl_help,
description=wmi_ctrl_description, parents=[base_parser])
parser_wmi_ctrl.add_argument('--wmi-old', action='store_true', help=
'Specifies whether or not the WMI messages are according to the "old" WMI protocol. If not set, the messages will be interpreted according to the unified WMI format'
)
parser_wmi_ctrl.add_argument('-p', '--print-data', action='store_true',
help=
'Print WMI data message payload (and not just WMI message ID) for all encountered messages. '
)
parser_wmi_ctrl.add_argument('-e', '--ep-id', metavar='ID', nargs=1,
type=int, default=[2], help=
'WMI control service endpoint ID. This is the endpoint where the WMI control data is expected to be present. Make sure the endpoint matches the endpoint id associated with the control service endpoint (service id 0x100) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 2 will be used.'
)
parser_wmi_ctrl.add_argument('--tlv', action='store_true', help=
'TLV analysis.Each WMI message will be interpreted as a TLV message and the content of the message will be. written out in text (instead of hexdump). If the encountered message is not supported by the parser, the hex data will be printed instead.'
)
parser_wmi_ctrl.add_argument('--id', '--msg-id', metavar='ID', nargs=
'+', type=auto_int, help=
"WMI message id filter. Only WMI messages with an id matching any of the provided id's will be included in the output. If no --id | --msg-id option is given, no filtering will be performed. "
)
parser_wmi_ctrl.add_argument('--skip-id', '--skip-msg-id', metavar='ID',
nargs='+', type=auto_int, help=
"WMI message id exclude filter. Similar to --id | --msg-id, but all matching id's will be excluded from the output. "
)
parser_htc_ctrl = subparsers.add_parser('htc-ctrl', help=htc_ctrl_help,
description=htc_ctrl_description, parents=[base_parser])
parser_htc_ctrl.add_argument('-p', '--print-data', action='store_true',
help=
'Print HTC ctrl data message payload (and not just message ID) for all encountered messages. '
)
parser_htt = subparsers.add_parser('htt', help=htt_help, description=
htt_description, parents=[base_parser])
parser_htt.add_argument('-p', '--print-data', action='store_true', help
=
'Print HTT data message payload (and not just HTT message ID) for all encountered messages. '
)
parser_htt.add_argument('-e', '--ep-id', metavar='ID', nargs=1, type=
int, default=[1], help=
'HTT service endpoint ID. This is the endpoint where the HTT data is expected to be present. Make sure the endpoint matches the endpoint id associated with the HTT endpoint (service id 0x300) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 1 will be used.'
)
parser_all = subparsers.add_parser('all', help=all_help, description=
all_description, parents=[base_parser])
parser_all.add_argument('-p', '--print-data', action='store_true', help
=
'Print message payload (and not just message ID) for all encountered messages. '
)
parser_all.add_argument('--wmi-old', action='store_true', help=
'Specifies whether or not the WMI messages are according to the "old" WMI protocol. If not set, the messages will be interpreted according to the unified WMI format'
)
parser_all.add_argument('--htt-ep-id', metavar='ID', nargs=1, type=int,
default=[1], help=
'HTT service endpoint ID. This is the endpoint where the HTT data is expected to be present. Make sure the endpoint matches the endpoint id associated with the HTT endpoint (service id 0x300) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 1 will be used.'
)
parser_all.add_argument('--wmi-ctrl-ep-id', metavar='ID', nargs=1, type
=int, default=[2], help=
'WMI control service endpoint ID. This is the endpoint where the WMI control data is expected to be present. Make sure the endpoint matches the endpoint id associated with the control service endpoint (service id 0x100) of the driver (the endpoint received from the target in the HTC service connect response). If this option is omitted a default value of 2 will be used.'
)
parsed_args = parser.parse_args()
def main():
global parsed_args
load_options()
try:
if parsed_args.input_file:
infp = open(parsed_args.input_file, 'r')
else:
infp = sys.stdin
if parsed_args.output_file:
outfp = open(parsed_args.output_file, 'w')
else:
outfp = sys.stdout
if parsed_args.data_direction:
if parsed_args.data_direction[0] == 't2h':
t2h = True
elif parsed_args.data_direction[0] == 'h2t':
t2h = False
else:
sys.stderr.write('Unsupported data direction: {}\n'.format(
parsed_args.data_direction[0]))
exit(1)
else:
t2h = False
hf = hexfilter.HexFilterLinux(skip_timestamps=not parsed_args.
keep_timestamps, abs_timestamps=True, dump_desc=parsed_args.
desc_str, dump_desc_invert=parsed_args.desc_str_invert,
log_has_timestamps=not parsed_args.no_timestamps,
include_dump_desc_in_output=False, remove_ascii_part=True)
if parsed_args.subparser_name == 'wmi-ctrl':
analyzer = WmiCtrlAnalyzer(eid=parsed_args.ep_id[0],
wmi_unified=not parsed_args.wmi_old, short_htc_hdr=
parsed_args.short_htc_header, timestamps=parsed_args.
keep_timestamps, t2h=t2h, tlv_analysis=parsed_args.tlv,
msg_id_filter=parsed_args.id, msg_id_exclude_filter=
parsed_args.skip_id)
if parsed_args.tlv:
parsed_args.print_data = True
elif parsed_args.subparser_name == 'htc-ctrl':
analyzer = HtcCtrlAnalyzer(short_htc_hdr=parsed_args.
short_htc_header, timestamps=parsed_args.keep_timestamps,
t2h=t2h)
elif parsed_args.subparser_name == 'htt':
analyzer = HttAnalyzer(eid=parsed_args.ep_id[0], short_htc_hdr=
parsed_args.short_htc_header, timestamps=parsed_args.
keep_timestamps, t2h=t2h)
elif parsed_args.subparser_name == 'all':
analyzer = AllAnalyzer(wmi_ctrl_eid=parsed_args.wmi_ctrl_ep_id[
0], htt_eid=parsed_args.htt_ep_id[0], wmi_unified=not
parsed_args.wmi_old, short_htc_hdr=parsed_args.
short_htc_header, timestamps=parsed_args.keep_timestamps,
t2h=t2h)
else:
sys.stderr.write('Unsupported subcommand: {}\n'.format(
parsed_args.subparser_name))
for line in infp:
if hf.parse_line(line):
hexdata = hf.get_hex()
if analyzer.parse_hexdata(hexdata):
str = analyzer.get_id_str()
outfp.write(str)
if parsed_args.print_data:
analyzer.print_data(outfp)
except IOError as err:
sys.stderr.write('{}\n'.format(err))
except:
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
if __name__ == '__main__':
main()
| from collections import namedtuple
import argparse
import pdb
import traceback
import sys
import os
from qca_hex_analyzer import WmiCtrlAnalyzer, HtcCtrlAnalyzer, HttAnalyzer, AllAnalyzer
import hexfilter
description = \
"Tool used to analyze hexdumps produced by a qca wireless kernel " \
"driver (such as ath6kl, ath10k or qcacld2.0). " \
"The hexdumps are assumed to contain dumps of the traffic " \
"between the driver and the target. " \
"No special preprocessing of the log files is required. " \
"Filter strings (description strings) can be used to limit the output " \
"(only RX or TX etc.). " \
"The driver must of course be configured to log all necessary debug " \
"data (for ath6kl and ath10k this means a proper debug mask). "
wmi_ctrl_help = \
"Subcommand for WMI control message parsing. " \
"This subcommand is used to extract WMI control messages from the input. "
wmi_ctrl_description = \
"Extracts WMI control message hexdata from an input (--input-file). " \
"The extracted messages will be printed to the output (--output -file). " \
"--ep-id is used to determine from which HTC endpoint the data will " \
"be extracted (see description of that option below). " \
"All valid WMI control message ID's will be printed together with the " \
"message enum string (from ath6kl source code). " \
"The --wmi-old option must be used if the driver does not use the WMI " \
"unified protocol (ath6kl). " \
"The WMI control message payload will also be printed together with " \
"message ID's if the --print-data option is used."
htc_ctrl_help = \
"Subcommand for HTC control message parsing. " \
"This subcommand is used to extract HTC control messages from the input. "
htc_ctrl_description = \
"Extracts HTC control message hexdata from an input (--input-file). " \
"The extracted messages will be printed to the output (--output -file). " \
"All valid HTC control message ID's will be printed together with the " \
"message enum string (from ath6kl source code). " \
"The message payload will also be printed together with the " \
"message ID's if the --print-data option is used. " \
"HTC control messages will always be extracted from endpoint 0."
htt_help = \
"Subcommand for HTT message parsing. " \
"This subcommand is used to extract HTT messages from the input. "
htt_description = \
"Extracts HTT message hexdata from an input (--input-file). " \
"The extracted messages will be printed to the output (--output -file). " \
"--ep-id is used to determine from which HTC endpoint the data will " \
"be extracted (see description of that option below). " \
"All valid HTT message ID's will be printed together with the " \
"message enum string (from ath10k source code). " \
"The message payload will also be printed together with " \
"message ID's if the --print-data option is used."
all_help = \
"Subcommand for parsing of all supported message types. " \
"This subcommand is used to extract both WMI control, " \
"HTC control and HTT messages from the input. "
all_description = \
"Extracts message hexdata from an input (--input-file). " \
"The extracted messages will be printed to the output (--output-file). " \
"The messages can be any of the supported message types " \
"(currently only WMI controli, HTC control and HTT). " \
"--wmi-ctrl-ep-id and --htt-ep-id is used to determine from which " \
"endpoints WMI and HTT data will be extracted " \
"(see description of those options below). " \
"HTC control messages will always be extracted from ep 0. " \
"All valid message ID's will be printed together " \
"with a corresponding message enum string. " \
"The message payload will also be printed together with " \
"message ID's if the --print-data option is used."
def auto_int(x):
return int(x, 0)
def load_options():
global parsed_args
base_parser = argparse.ArgumentParser(add_help=False)
base_parser.add_argument('-i', '--input-file',
help="Input (log) file. If omitted, "
"stdin will be read.")
base_parser.add_argument('-o', '--output-file',
help="Output file. If omitted, "
"the output will be written to stdout.")
base_parser.add_argument('-n', '--no-timestamps', action="store_true",
help="Specifies whether or not the input file "
"contains timestamps. ")
base_parser.add_argument('-d', '--desc-str', nargs='+', type=str,
help="Description string(s) of the dumps. "
"Only dumps with a prefix "
"matching any of the provided desc strings "
"will be analyzed. "
"If no --desc-str option is given, no "
"description filtering will be performed. "
"The prefix of a hexdump is the short "
"description string before the address "
"in each line of the dump, i.e the hexdump "
"prefix. "
"--desc-str is normally used to select "
"between RX and TX logs and should be "
"combined with a proper --data-direction "
"option.")
base_parser.add_argument('-a', '--data-direction', nargs=1, type=str,
help="This option is used to specify how the "
"hexdata should be interpreted. "
"Valid values are: "
"t2h (target to host) or h2t (host to target). "
"With t2h, RX trailers will be printed if "
"--print-data is used. h2t is default. "
"This option should be combined with an "
"applicable --desc-str option. ")
base_parser.add_argument('-v', '--desc-str-invert', nargs='+', type=str,
help="Description string(s) of the dumps to be. "
"excluded. Similar to --desc-str, but all "
"matching prefixes will be excluded from "
"the analysis.")
base_parser.add_argument('-s', '--short-htc-header', action="store_true",
help="Use 6 byte HTC header (\"old\" format) "
"instead of 8 bytes.")
base_parser.add_argument('-t', '--keep-timestamps', action="store_true",
help="Keep the timestamps associated with each "
"hexdump in the output. "
"This option will only have effect if the "
"log file contains timestamps.")
parser = argparse.ArgumentParser(prog="qca_hex_analyzer",
description=description,
parents=[base_parser])
subparsers = parser.add_subparsers(dest="subparser_name")
parser_wmi_ctrl = subparsers.add_parser('wmi-ctrl',
help=wmi_ctrl_help,
description=wmi_ctrl_description,
parents=[base_parser])
parser_wmi_ctrl.add_argument('--wmi-old', action="store_true",
help="Specifies whether or not the WMI messages "
"are according to the \"old\" WMI protocol. "
"If not set, the messages will be interpreted "
"according to the unified WMI format")
parser_wmi_ctrl.add_argument('-p', '--print-data', action="store_true",
help="Print WMI data message payload (and not just "
"WMI message ID) for all encountered messages. ")
parser_wmi_ctrl.add_argument('-e', '--ep-id', metavar='ID', nargs=1,
type=int, default=[2],
help="WMI control service endpoint ID. "
"This is the endpoint where the WMI control data is "
"expected to be present. Make sure the endpoint "
"matches the endpoint id associated with the "
"control service endpoint (service id 0x100) "
"of the driver (the endpoint received from the "
"target in the HTC service connect response). "
"If this option is omitted a default value of 2 "
"will be used.")
parser_wmi_ctrl.add_argument('--tlv', action="store_true",
help="TLV analysis."
"Each WMI message will be interpreted as a TLV "
"message and the content of the message will be. "
"written out in text (instead of hexdump). "
"If the encountered message is not supported by "
"the parser, the hex data will be printed instead.")
parser_wmi_ctrl.add_argument('--id', '--msg-id', metavar='ID',
nargs='+', type=auto_int,
help="WMI message id filter. "
"Only WMI messages with an id matching any of the "
"provided id's will be included in the output. "
"If no --id | --msg-id option is given, no "
"filtering will be performed. ")
parser_wmi_ctrl.add_argument('--skip-id', '--skip-msg-id', metavar='ID',
nargs='+', type=auto_int,
help="WMI message id exclude filter. "
"Similar to --id | --msg-id, but all matching "
"id's will be excluded from the output. ")
parser_htc_ctrl = subparsers.add_parser('htc-ctrl',
help=htc_ctrl_help,
description=htc_ctrl_description,
parents=[base_parser])
parser_htc_ctrl.add_argument('-p', '--print-data', action="store_true",
help="Print HTC ctrl data message payload (and not just "
"message ID) for all encountered messages. ")
parser_htt = subparsers.add_parser('htt',
help=htt_help,
description=htt_description,
parents=[base_parser])
parser_htt.add_argument('-p', '--print-data', action="store_true",
help="Print HTT data message payload (and not just "
"HTT message ID) for all encountered messages. ")
parser_htt.add_argument('-e', '--ep-id', metavar='ID', nargs=1,
type=int, default=[1],
help="HTT service endpoint ID. "
"This is the endpoint where the HTT data is "
"expected to be present. Make sure the endpoint "
"matches the endpoint id associated with the "
"HTT endpoint (service id 0x300) "
"of the driver (the endpoint received from the "
"target in the HTC service connect response). "
"If this option is omitted a default value of 1 "
"will be used.")
parser_all = subparsers.add_parser('all',
help=all_help,
description=all_description,
parents=[base_parser])
parser_all.add_argument('-p', '--print-data', action="store_true",
help="Print message payload (and not just "
"message ID) for all encountered messages. ")
parser_all.add_argument('--wmi-old', action="store_true",
help="Specifies whether or not the WMI messages "
"are according to the \"old\" WMI protocol. "
"If not set, the messages will be interpreted "
"according to the unified WMI format")
parser_all.add_argument('--htt-ep-id', metavar='ID', nargs=1,
type=int, default=[1],
help="HTT service endpoint ID. "
"This is the endpoint where the HTT data is "
"expected to be present. Make sure the endpoint "
"matches the endpoint id associated with the "
"HTT endpoint (service id 0x300) "
"of the driver (the endpoint received from the "
"target in the HTC service connect response). "
"If this option is omitted a default value of 1 "
"will be used.")
parser_all.add_argument('--wmi-ctrl-ep-id', metavar='ID', nargs=1,
type=int, default=[2],
help="WMI control service endpoint ID. "
"This is the endpoint where the WMI control data is "
"expected to be present. Make sure the endpoint "
"matches the endpoint id associated with the "
"control service endpoint (service id 0x100) "
"of the driver (the endpoint received from the "
"target in the HTC service connect response). "
"If this option is omitted a default value of 2 "
"will be used.")
parsed_args = parser.parse_args()
def main():
global parsed_args
load_options()
try:
if parsed_args.input_file:
infp = open(parsed_args.input_file, "r")
else:
infp = sys.stdin
if parsed_args.output_file:
outfp = open(parsed_args.output_file, "w")
else:
outfp = sys.stdout
if parsed_args.data_direction:
if parsed_args.data_direction[0] == 't2h':
t2h = True
elif parsed_args.data_direction[0] == 'h2t':
t2h = False
else:
sys.stderr.write('Unsupported data direction: {}\n'.format(parsed_args.data_direction[0]))
exit(1)
else:
# Interpret the data as host -> target is the default behaviour
t2h = False
hf = hexfilter.HexFilterLinux(skip_timestamps=(not parsed_args.keep_timestamps),
abs_timestamps=True,
dump_desc=parsed_args.desc_str,
dump_desc_invert=parsed_args.desc_str_invert,
log_has_timestamps=(not parsed_args.no_timestamps),
include_dump_desc_in_output=False,
remove_ascii_part=True)
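        # hf extracts hexdump blocks from the (optionally timestamped) log lines,
        # honouring the --desc-str / --desc-str-invert prefix filters parsed above.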
if parsed_args.subparser_name == 'wmi-ctrl':
analyzer = WmiCtrlAnalyzer(eid=parsed_args.ep_id[0],
wmi_unified=(not parsed_args.wmi_old),
short_htc_hdr=parsed_args.short_htc_header,
timestamps=parsed_args.keep_timestamps,
t2h=t2h,
tlv_analysis=parsed_args.tlv,
msg_id_filter=parsed_args.id,
msg_id_exclude_filter=parsed_args.skip_id)
if parsed_args.tlv:
parsed_args.print_data = True
elif parsed_args.subparser_name == 'htc-ctrl':
analyzer = HtcCtrlAnalyzer(short_htc_hdr=parsed_args.short_htc_header,
timestamps=parsed_args.keep_timestamps,
t2h=t2h)
elif parsed_args.subparser_name == 'htt':
analyzer = HttAnalyzer(eid=parsed_args.ep_id[0],
short_htc_hdr=parsed_args.short_htc_header,
timestamps=parsed_args.keep_timestamps,
t2h=t2h)
elif parsed_args.subparser_name == 'all':
analyzer = AllAnalyzer(wmi_ctrl_eid=parsed_args.wmi_ctrl_ep_id[0],
htt_eid=parsed_args.htt_ep_id[0],
wmi_unified=(not parsed_args.wmi_old),
short_htc_hdr=parsed_args.short_htc_header,
timestamps=parsed_args.keep_timestamps,
t2h=t2h)
else:
            sys.stderr.write('Unsupported subcommand: {}\n'.format(parsed_args.subparser_name))
            exit(1)
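        # Feed each input line through the hex filter; when a complete hexdump has been
        # collected, hand it to the selected analyzer and print the decoded message ID
        # (plus the payload when --print-data was given).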
for line in infp:
if hf.parse_line(line):
hexdata = hf.get_hex()
if analyzer.parse_hexdata(hexdata):
str = analyzer.get_id_str()
outfp.write(str)
if parsed_args.print_data:
analyzer.print_data(outfp)
except IOError as err:
sys.stderr.write('{}\n'.format(err))
except:
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
if __name__ == "__main__":
main()
| [
1,
4,
5,
6,
7
] |
1,805 | bebe098c5abb579eb155a1dc325347d100ddfa8f | def coroutine(func):
def start_coroutine(*args, **kwargs):
cr = func(*args, **kwargs)
next(cr)
return cr
return start_coroutine
<mask token>
| def coroutine(func):
def start_coroutine(*args, **kwargs):
cr = func(*args, **kwargs)
next(cr)
return cr
return start_coroutine
@coroutine
def grep(pattern):
print('start grep')
try:
while True:
line = yield
if pattern in line:
print(line)
except GeneratorExit:
print('stop grep')
<mask token>
| def coroutine(func):
def start_coroutine(*args, **kwargs):
cr = func(*args, **kwargs)
next(cr)
return cr
return start_coroutine
@coroutine
def grep(pattern):
print('start grep')
try:
while True:
line = yield
if pattern in line:
print(line)
except GeneratorExit:
print('stop grep')
@coroutine
def grep_python_coroutine():
g = grep('python')
yield from g
<mask token>
| def coroutine(func):
def start_coroutine(*args, **kwargs):
cr = func(*args, **kwargs)
next(cr)
return cr
return start_coroutine
@coroutine
def grep(pattern):
print('start grep')
try:
while True:
line = yield
if pattern in line:
print(line)
except GeneratorExit:
print('stop grep')
@coroutine
def grep_python_coroutine():
g = grep('python')
yield from g
<mask token>
g.send('php is better')
g.send('python is simpler')
g.close()
| def coroutine(func):
def start_coroutine(*args, **kwargs):
cr = func(*args, **kwargs)
next(cr) #cr.send(None)
return cr
return start_coroutine
@coroutine
def grep(pattern):
print('start grep')
try:
while True:
line = yield
if pattern in line:
print(line)
except GeneratorExit:
print('stop grep')
@coroutine
def grep_python_coroutine():
g = grep('python')
yield from g
g = grep('python')
#next(g) #g.send(None)
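# (no manual priming needed here: the @coroutine decorator already advanced the generator)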
g.send("php is better")
g.send("python is simplier")
g.close() | [
1,
2,
3,
4,
6
] |
1,806 | af903feda57e4ace0c7f909abbeb86bb9a7e4d8c | <mask token>
def run_final_test_days():
sqs = [5]
cams = [1]
permutations = [(True, True, True)]
permutations_names = ['all data perez']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams
=c, clear_sky_label=True)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_TSET GRAD prz' + name_time + name_data + name_cam,
pred_csi=True)
lstm.set_days(data.get_thesis_test_days())
lstm.run_experiment()
def run_lstm_experiment(set='test'):
sqs = [5]
cams = [1]
permutations = [(True, True, False)]
permutations_names = ['pxl_onsite']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
if set == 'test':
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s,
cams=c, clear_sky_label=False)
else:
data.build_ts_df(start, end, [7, 8, 9, 10], s, cams=c,
clear_sky_label=False)
data.scale_mega('lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
if set == 'test':
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_TEST_PXL' + name_epoch + name_time +
name_data + name_cam)
lstm.set_days(data.get_thesis_test_days())
else:
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_PREM2_PXL' + name_epoch + name_time +
name_data + name_cam)
lstm.set_days(data.get_prem_days())
lstm.run_experiment()
<mask token>
def optimize():
seq_l = [5]
nodes = [(50, 25, 10)]
activations = ['relu']
opts = ['Adam']
learning_rate = [0.001]
data = DataFrameSequenceMulti(False, True, True, True)
lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')
num = 0
for s in seq_l:
data.build_ts_df(6, 19, [7, 8, 9, 10, 11, 12], s, 1)
data.normalize_mega_df()
data.split_data_set(10, 15)
data.flatten_data_set_to_3d()
for n in nodes:
for a in activations:
for o in opts:
for lr in learning_rate:
if o == 'Adam':
opt = optimizers.Adam(lr=lr)
else:
opt = optimizers.RMSprop(lr=lr)
lstm.set_model(n, a, opt)
out = lstm.train(100)
res.append(out)
settings = 'nodes: ' + str(n) + ' activation: ' + str(a
) + ' optimizer: ' + str(o) + ' lr: ' + str(lr
) + ' seq_l: ' + str(s)
sets.append(settings)
plot_history(settings, num, out)
min_loss.append(min(out.history['loss']))
min_vals.append(min(out.history['val_loss']))
num = num + 1
best_val_loss = min_vals.index(min(min_vals))
print('BEST VAL LOSS: ')
print(sets[best_val_loss])
print('val loss: ' + str(min(min_vals)))
print('epoch: ')
print(res[best_val_loss].history['val_loss'].index(min(res[
best_val_loss].history['val_loss'])))
best_loss = min_loss.index(min(min_loss))
print('BEST Train LOSS: ')
print(sets[best_loss])
print('train loss: ' + str(min(min_loss)))
print('epoch: ')
print(res[best_loss].history['loss'].index(min(res[best_loss].history[
'loss'])))
<mask token>
| <mask token>
def run_final_all_days():
data = DataFrameSequenceMulti(False, True, True, True)
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(5)
name_data = 'data_' + 'all'
name_epoch = 'epochs_' + str(epochs)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)
lstm.set_days(data.get_all_test_days())
lstm.run_experiment()
def run_final_test_days():
sqs = [5]
cams = [1]
permutations = [(True, True, True)]
permutations_names = ['all data perez']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams
=c, clear_sky_label=True)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_TSET GRAD prz' + name_time + name_data + name_cam,
pred_csi=True)
lstm.set_days(data.get_thesis_test_days())
lstm.run_experiment()
def run_lstm_experiment(set='test'):
sqs = [5]
cams = [1]
permutations = [(True, True, False)]
permutations_names = ['pxl_onsite']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
if set == 'test':
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s,
cams=c, clear_sky_label=False)
else:
data.build_ts_df(start, end, [7, 8, 9, 10], s, cams=c,
clear_sky_label=False)
data.scale_mega('lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
if set == 'test':
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_TEST_PXL' + name_epoch + name_time +
name_data + name_cam)
lstm.set_days(data.get_thesis_test_days())
else:
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_PREM2_PXL' + name_epoch + name_time +
name_data + name_cam)
lstm.set_days(data.get_prem_days())
lstm.run_experiment()
def LSTM_test():
data = DataFrameSequenceMulti(False, True, True, False)
data.load_prev_mega_df()
lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')
data.split_data_set_EXPRMTL(9, 15, 3)
data.scale_mega(model='lstm')
data.flatten_data_set_to_3d()
lstm.get_model()
lstm.train(100)
y_pred, rmse = lstm.predict()
print(rmse)
def optimize():
seq_l = [5]
nodes = [(50, 25, 10)]
activations = ['relu']
opts = ['Adam']
learning_rate = [0.001]
data = DataFrameSequenceMulti(False, True, True, True)
lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')
num = 0
for s in seq_l:
data.build_ts_df(6, 19, [7, 8, 9, 10, 11, 12], s, 1)
data.normalize_mega_df()
data.split_data_set(10, 15)
data.flatten_data_set_to_3d()
for n in nodes:
for a in activations:
for o in opts:
for lr in learning_rate:
if o == 'Adam':
opt = optimizers.Adam(lr=lr)
else:
opt = optimizers.RMSprop(lr=lr)
lstm.set_model(n, a, opt)
out = lstm.train(100)
res.append(out)
settings = 'nodes: ' + str(n) + ' activation: ' + str(a
) + ' optimizer: ' + str(o) + ' lr: ' + str(lr
) + ' seq_l: ' + str(s)
sets.append(settings)
plot_history(settings, num, out)
min_loss.append(min(out.history['loss']))
min_vals.append(min(out.history['val_loss']))
num = num + 1
best_val_loss = min_vals.index(min(min_vals))
print('BEST VAL LOSS: ')
print(sets[best_val_loss])
print('val loss: ' + str(min(min_vals)))
print('epoch: ')
print(res[best_val_loss].history['val_loss'].index(min(res[
best_val_loss].history['val_loss'])))
best_loss = min_loss.index(min(min_loss))
print('BEST Train LOSS: ')
print(sets[best_loss])
print('train loss: ' + str(min(min_loss)))
print('epoch: ')
print(res[best_loss].history['loss'].index(min(res[best_loss].history[
'loss'])))
<mask token>
| <mask token>
epochs = 100
start = 6
end = 18
res = []
sets = []
min_vals = []
min_loss = []
def run_final_all_days():
data = DataFrameSequenceMulti(False, True, True, True)
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(5)
name_data = 'data_' + 'all'
name_epoch = 'epochs_' + str(epochs)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)
lstm.set_days(data.get_all_test_days())
lstm.run_experiment()
def run_final_test_days():
sqs = [5]
cams = [1]
permutations = [(True, True, True)]
permutations_names = ['all data perez']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams
=c, clear_sky_label=True)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_TSET GRAD prz' + name_time + name_data + name_cam,
pred_csi=True)
lstm.set_days(data.get_thesis_test_days())
lstm.run_experiment()
def run_lstm_experiment(set='test'):
sqs = [5]
cams = [1]
permutations = [(True, True, False)]
permutations_names = ['pxl_onsite']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
if set == 'test':
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s,
cams=c, clear_sky_label=False)
else:
data.build_ts_df(start, end, [7, 8, 9, 10], s, cams=c,
clear_sky_label=False)
data.scale_mega('lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
if set == 'test':
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_TEST_PXL' + name_epoch + name_time +
name_data + name_cam)
lstm.set_days(data.get_thesis_test_days())
else:
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_PREM2_PXL' + name_epoch + name_time +
name_data + name_cam)
lstm.set_days(data.get_prem_days())
lstm.run_experiment()
def LSTM_test():
data = DataFrameSequenceMulti(False, True, True, False)
data.load_prev_mega_df()
lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')
data.split_data_set_EXPRMTL(9, 15, 3)
data.scale_mega(model='lstm')
data.flatten_data_set_to_3d()
lstm.get_model()
lstm.train(100)
y_pred, rmse = lstm.predict()
print(rmse)
def optimize():
seq_l = [5]
nodes = [(50, 25, 10)]
activations = ['relu']
opts = ['Adam']
learning_rate = [0.001]
data = DataFrameSequenceMulti(False, True, True, True)
lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')
num = 0
for s in seq_l:
data.build_ts_df(6, 19, [7, 8, 9, 10, 11, 12], s, 1)
data.normalize_mega_df()
data.split_data_set(10, 15)
data.flatten_data_set_to_3d()
for n in nodes:
for a in activations:
for o in opts:
for lr in learning_rate:
if o == 'Adam':
opt = optimizers.Adam(lr=lr)
else:
opt = optimizers.RMSprop(lr=lr)
lstm.set_model(n, a, opt)
out = lstm.train(100)
res.append(out)
settings = 'nodes: ' + str(n) + ' activation: ' + str(a
) + ' optimizer: ' + str(o) + ' lr: ' + str(lr
) + ' seq_l: ' + str(s)
sets.append(settings)
plot_history(settings, num, out)
min_loss.append(min(out.history['loss']))
min_vals.append(min(out.history['val_loss']))
num = num + 1
best_val_loss = min_vals.index(min(min_vals))
print('BEST VAL LOSS: ')
print(sets[best_val_loss])
print('val loss: ' + str(min(min_vals)))
print('epoch: ')
print(res[best_val_loss].history['val_loss'].index(min(res[
best_val_loss].history['val_loss'])))
best_loss = min_loss.index(min(min_loss))
print('BEST Train LOSS: ')
print(sets[best_loss])
print('train loss: ' + str(min(min_loss)))
print('epoch: ')
print(res[best_loss].history['loss'].index(min(res[best_loss].history[
'loss'])))
run_lstm_experiment(set='test')
| from data.dataframe_sequence_multi import DataFrameSequenceMulti
from metrics import Metrics
from models.models_ts_multi import lstm_model_multi
import threading
import sys
from keras import optimizers
from data.data_helper import plot_history
epochs = 100
start = 6
end = 18
res = []
sets = []
min_vals = []
min_loss = []
def run_final_all_days():
data = DataFrameSequenceMulti(False, True, True, True)
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(5)
name_data = 'data_' + 'all'
name_epoch = 'epochs_' + str(epochs)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)
lstm.set_days(data.get_all_test_days())
lstm.run_experiment()
def run_final_test_days():
sqs = [5]
cams = [1]
permutations = [(True, True, True)]
permutations_names = ['all data perez']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams
=c, clear_sky_label=True)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_TSET GRAD prz' + name_time + name_data + name_cam,
pred_csi=True)
lstm.set_days(data.get_thesis_test_days())
lstm.run_experiment()
def run_lstm_experiment(set='test'):
sqs = [5]
cams = [1]
permutations = [(True, True, False)]
permutations_names = ['pxl_onsite']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
if set == 'test':
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s,
cams=c, clear_sky_label=False)
else:
data.build_ts_df(start, end, [7, 8, 9, 10], s, cams=c,
clear_sky_label=False)
data.scale_mega('lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
if set == 'test':
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_TEST_PXL' + name_epoch + name_time +
name_data + name_cam)
lstm.set_days(data.get_thesis_test_days())
else:
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_PREM2_PXL' + name_epoch + name_time +
name_data + name_cam)
lstm.set_days(data.get_prem_days())
lstm.run_experiment()
def LSTM_test():
data = DataFrameSequenceMulti(False, True, True, False)
data.load_prev_mega_df()
lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')
data.split_data_set_EXPRMTL(9, 15, 3)
data.scale_mega(model='lstm')
data.flatten_data_set_to_3d()
lstm.get_model()
lstm.train(100)
y_pred, rmse = lstm.predict()
print(rmse)
def optimize():
seq_l = [5]
nodes = [(50, 25, 10)]
activations = ['relu']
opts = ['Adam']
learning_rate = [0.001]
data = DataFrameSequenceMulti(False, True, True, True)
lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')
num = 0
for s in seq_l:
data.build_ts_df(6, 19, [7, 8, 9, 10, 11, 12], s, 1)
data.normalize_mega_df()
data.split_data_set(10, 15)
data.flatten_data_set_to_3d()
for n in nodes:
for a in activations:
for o in opts:
for lr in learning_rate:
if o == 'Adam':
opt = optimizers.Adam(lr=lr)
else:
opt = optimizers.RMSprop(lr=lr)
lstm.set_model(n, a, opt)
out = lstm.train(100)
res.append(out)
settings = 'nodes: ' + str(n) + ' activation: ' + str(a
) + ' optimizer: ' + str(o) + ' lr: ' + str(lr
) + ' seq_l: ' + str(s)
sets.append(settings)
plot_history(settings, num, out)
min_loss.append(min(out.history['loss']))
min_vals.append(min(out.history['val_loss']))
num = num + 1
best_val_loss = min_vals.index(min(min_vals))
print('BEST VAL LOSS: ')
print(sets[best_val_loss])
print('val loss: ' + str(min(min_vals)))
print('epoch: ')
print(res[best_val_loss].history['val_loss'].index(min(res[
best_val_loss].history['val_loss'])))
best_loss = min_loss.index(min(min_loss))
print('BEST Train LOSS: ')
print(sets[best_loss])
print('train loss: ' + str(min(min_loss)))
print('epoch: ')
print(res[best_loss].history['loss'].index(min(res[best_loss].history[
'loss'])))
run_lstm_experiment(set='test')
| from data.dataframe_sequence_multi import DataFrameSequenceMulti
from metrics import Metrics
from models.models_ts_multi import lstm_model_multi
import threading
import sys
from keras import optimizers
from data.data_helper import plot_history
epochs = 100
start = 6
end = 18
res = []
sets = []
min_vals = []
min_loss = []
def run_final_all_days():
# onsite
# data = DataFrameSequenceMulti(False, True, False, False)
# onsite & img
# data = DataFrameSequenceMulti(False, True, True, False)
# all data
data = DataFrameSequenceMulti(False, True, True, True)
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(5)
name_data = 'data_' + 'all'
name_epoch = 'epochs_' + str(epochs)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)
lstm.set_days(data.get_all_test_days())
lstm.run_experiment()
def run_final_test_days():
# sqs = [5, 10]
sqs=[5]
cams = [1]
permutations = [(True,True,True)]
# permutations = [(True, True, True), (True, False, False), (False, True, False)]
# permutations_names = ['all data', 'onsite_only', 'img only']
permutations_names = ['all data perez']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams=c, clear_sky_label=True)
# data.normalize_mega_df()
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_TSET GRAD prz' + name_time + name_data + name_cam, pred_csi=True)
lstm.set_days(data.get_thesis_test_days())
lstm.run_experiment()
def run_lstm_experiment(set='test'):
sqs = [5]
cams = [1]
permutations = [(True, True, False)]
permutations_names = ['pxl_onsite']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
if set == 'test':
# data.load_prev_mega_df()
data.build_ts_df(start, end, [7,8,9,10,11,12], s, cams=c, clear_sky_label=False)
# data.save_df()
else:
data.build_ts_df(start, end, [7,8,9,10], s, cams=c, clear_sky_label=False)
data.scale_mega('lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
if set == 'test':
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_TEST_PXL' + name_epoch + name_time + name_data + name_cam)
lstm.set_days(data.get_thesis_test_days())
else:
lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_PREM2_PXL' + name_epoch + name_time + name_data + name_cam)
lstm.set_days(data.get_prem_days())
lstm.run_experiment()
def LSTM_test():
data = DataFrameSequenceMulti(False, True, True, False)
# data.build_ts_df(6, 19, [7,8,9,10], 5)
data.load_prev_mega_df()
lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')
data.split_data_set_EXPRMTL(9, 15, 3)
data.scale_mega(model='lstm')
data.flatten_data_set_to_3d()
lstm.get_model()
lstm.train(100)
y_pred, rmse = lstm.predict()
# plot_history('s1', 1, lstm.history)
# import matplotlib.pyplot as plt
# from matplotlib.lines import lineStyles
# plt.plot(lstm.history.history['loss'])
# plt.plot(lstm.history.history['val_loss'], linestyle=':')
# ymin = min(lstm.history.history['val_loss'])
# xpos = lstm.history.history['val_loss'].index(ymin)
# xmin = lstm.history.history['val_loss'][xpos]
# plt.annotate('Minimum validation loss', size=20, xy=(xpos, ymin), xytext=(xpos, ymin + 30000),
# arrowprops=dict(facecolor='black', shrink=0.05, width=5, headwidth=20),
# horizontalalignment='center', verticalalignment='top',
# )
# plt.ylim(0, 100000)
# plt.title('LSTM M 5 all data', size=20)
# plt.ylabel('Mean squared error', size=20)
# plt.xlabel('Epochs', size=20)
# plt.legend(['train', 'validation'], loc='upper left')
# plt.show()
#
# Metrics.write_results_multi('LSTM_TEST_MULTI', data.test_x_df.reshape(
# (data.test_x_df.shape[0],
# data.sequence_len_minutes,
# data.number_of_features)),
# data.test_y_df, y_pred)
print(rmse)
def optimize():
# data.build_ts_df(6, 19, [8, 9, 10,11,12], 10, cams=1, clear_sky_label=False)
# data.normalize_mega_df()
# data.split_data_set(10,15)
# data.flatten_data_set_to_3d()
#
# seq_l = [3,5,10]
# nodes = [(50,25,10),(60,30,15),(80,40,20)]
# activations = ['relu', 'sigmoid']
# opts = ['Adam', 'RMSprop']
# learning_rate = [0.001, 0.01, 0.1]
seq_l = [5]
nodes = [(50,25,10)]
activations = ['relu']
opts = ['Adam']
learning_rate = [0.001]
data = DataFrameSequenceMulti(False, True, True, True)
lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')
num = 0
for s in seq_l:
data.build_ts_df(6, 19, [7,8,9,10,11,12], s, 1)
data.normalize_mega_df()
data.split_data_set(10, 15)
data.flatten_data_set_to_3d()
for n in nodes:
for a in activations:
for o in opts:
for lr in learning_rate:
if o == 'Adam':
opt = optimizers.Adam(lr=lr)
else:
opt = optimizers.RMSprop(lr=lr)
lstm.set_model(n, a, opt)
out = lstm.train(100)
res.append(out)
settings = 'nodes: ' + str(n) + ' activation: ' + str(a) + ' optimizer: ' + str(o) + ' lr: ' + str(lr) + " seq_l: " + str(s)
sets.append(settings)
plot_history(settings, num, out)
min_loss.append(min(out.history['loss']))
min_vals.append(min(out.history['val_loss']))
num = num + 1
best_val_loss = min_vals.index(min(min_vals))
print('BEST VAL LOSS: ')
print(sets[best_val_loss])
print('val loss: ' + str(min(min_vals)))
print('epoch: ')
print(res[best_val_loss].history['val_loss'].index(min(res[best_val_loss].history['val_loss'])))
best_loss = min_loss.index(min(min_loss))
print('BEST Train LOSS: ')
print(sets[best_loss])
print('train loss: ' + str(min(min_loss)))
print('epoch: ')
print(res[best_loss].history['loss'].index(min(res[best_loss].history['loss'])))
run_lstm_experiment(set='test')
# run_final_test_days()
# run_final_all_days()
# LSTM_test() | [
3,
5,
7,
8,
9
] |
1,807 | 43179b8b096836758271a791b4aacb7bbe398ea9 | <mask token>
def test_playable_hts_in_hand():
assert playable_hts_in_hand(hand_3playable_hts) == 3
assert playable_hts_in_hand(hand_2playable_hts) == 2
<mask token>
| <mask token>
def test_playable_hts_in_hand():
assert playable_hts_in_hand(hand_3playable_hts) == 3
assert playable_hts_in_hand(hand_2playable_hts) == 2
def test_cards_of_set_in_hand():
assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3
| <mask token>
hand_3playable_hts = ['Nibiru, the Primal Being', 'Effect Veiler',
'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword',
'Dragon Buster Destruction Sword']
hand_2playable_hts = ['Nibiru, the Primal Being',
'Nibiru, the Primal Being', 'Fantastical Dragon Phantazmay',
'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword']
hand_3lvl3vw = ['Virtual World Mai-Hime - Lulu',
'Virtual World Xiezhi - Jiji', 'Virtual World Xiezhi - Jiji',
'Virtual World Kirin - Lili', 'Virtual World Roshi - Laolao']
def test_playable_hts_in_hand():
assert playable_hts_in_hand(hand_3playable_hts) == 3
assert playable_hts_in_hand(hand_2playable_hts) == 2
def test_cards_of_set_in_hand():
assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3
| import sys
from Decks.Virtual_World.vw_sets import *
from tools import *
hand_3playable_hts = ['Nibiru, the Primal Being', 'Effect Veiler',
'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword',
'Dragon Buster Destruction Sword']
hand_2playable_hts = ['Nibiru, the Primal Being',
'Nibiru, the Primal Being', 'Fantastical Dragon Phantazmay',
'Fantastical Dragon Phantazmay', 'Dragon Buster Destruction Sword']
hand_3lvl3vw = ['Virtual World Mai-Hime - Lulu',
'Virtual World Xiezhi - Jiji', 'Virtual World Xiezhi - Jiji',
'Virtual World Kirin - Lili', 'Virtual World Roshi - Laolao']
def test_playable_hts_in_hand():
assert playable_hts_in_hand(hand_3playable_hts) == 3
assert playable_hts_in_hand(hand_2playable_hts) == 2
def test_cards_of_set_in_hand():
assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3
| import sys
from Decks.Virtual_World.vw_sets import *
from tools import *
hand_3playable_hts = ["Nibiru, the Primal Being", "Effect Veiler", "Fantastical Dragon Phantazmay", "Dragon Buster Destruction Sword", "Dragon Buster Destruction Sword"]
hand_2playable_hts = ["Nibiru, the Primal Being", "Nibiru, the Primal Being", "Fantastical Dragon Phantazmay", "Fantastical Dragon Phantazmay", "Dragon Buster Destruction Sword"]
hand_3lvl3vw = ["Virtual World Mai-Hime - Lulu", "Virtual World Xiezhi - Jiji", "Virtual World Xiezhi - Jiji", "Virtual World Kirin - Lili", "Virtual World Roshi - Laolao"]
def test_playable_hts_in_hand():
assert playable_hts_in_hand(hand_3playable_hts) == 3
assert playable_hts_in_hand(hand_2playable_hts) == 2
def test_cards_of_set_in_hand():
assert cards_of_set_in_hand(hand_3lvl3vw, vw_lvl3) == 3
| [
1,
2,
3,
4,
5
] |
1,808 | c599a75788e3548c52ebb3b29e7a2398ff1b28a2 | <mask token>
def iou_score(target, prediction):
intersection = np.logical_and(target, prediction)
union = np.logical_or(target, prediction)
iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)
return iou_score
def dice_score(actual, predicted):
actual = np.asarray(actual).astype(np.bool)
predicted = np.asarray(predicted).astype(np.bool)
im_sum = actual.sum() + predicted.sum()
if im_sum == 0:
return 1
intersection = np.logical_and(actual, predicted)
return 2.0 * intersection.sum() / im_sum
<mask token>
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,
hide_threshold=None, text_file=None):
"""
pretty print for confusion matrixes
https://gist.github.com/zachguo/10296432
"""
if text_file is None:
print('\n', end=' ')
else:
print('\n', end=' ', file=open(text_file, 'a'))
columnwidth = max([len(x) for x in labels] + [5])
empty_cell = ' ' * columnwidth
fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3
) // 2 * ' '
if len(fst_empty_cell) < len(empty_cell):
fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)
) + fst_empty_cell
if text_file is None:
print(' ' + fst_empty_cell, end=' ')
else:
print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))
for label in labels:
if text_file is None:
print('%{0}s'.format(columnwidth) % label, end=' ')
else:
print('%{0}s'.format(columnwidth) % label, end=' ', file=open(
text_file, 'a'))
if text_file is None:
print()
else:
print(' ', file=open(text_file, 'a'))
for i, label1 in enumerate(labels):
if text_file is None:
print(' %{0}s'.format(columnwidth) % label1, end=' ')
else:
print(' %{0}s'.format(columnwidth) % label1, end=' ', file=
open(text_file, 'a'))
for j in range(len(labels)):
cell = '%{}d'.format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
if text_file is None:
print(cell, end=' ')
else:
print(cell, end=' ', file=open(text_file, 'a'))
if text_file is None:
print()
else:
print(' ', file=open(text_file, 'a'))
<mask token>
| <mask token>
def iou_score(target, prediction):
intersection = np.logical_and(target, prediction)
union = np.logical_or(target, prediction)
iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)
return iou_score
def dice_score(actual, predicted):
actual = np.asarray(actual).astype(np.bool)
predicted = np.asarray(predicted).astype(np.bool)
im_sum = actual.sum() + predicted.sum()
if im_sum == 0:
return 1
intersection = np.logical_and(actual, predicted)
return 2.0 * intersection.sum() / im_sum
<mask token>
def fast_auc(actual, predicted):
r = rankdata(predicted)
n_pos = np.sum(actual)
n_neg = len(actual) - n_pos
return (np.sum(r[actual == 1]) - n_pos * (n_pos + 1) / 2) / (n_pos * n_neg)
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,
hide_threshold=None, text_file=None):
"""
pretty print for confusion matrixes
https://gist.github.com/zachguo/10296432
"""
if text_file is None:
print('\n', end=' ')
else:
print('\n', end=' ', file=open(text_file, 'a'))
columnwidth = max([len(x) for x in labels] + [5])
empty_cell = ' ' * columnwidth
fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3
) // 2 * ' '
if len(fst_empty_cell) < len(empty_cell):
fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)
) + fst_empty_cell
if text_file is None:
print(' ' + fst_empty_cell, end=' ')
else:
print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))
for label in labels:
if text_file is None:
print('%{0}s'.format(columnwidth) % label, end=' ')
else:
print('%{0}s'.format(columnwidth) % label, end=' ', file=open(
text_file, 'a'))
if text_file is None:
print()
else:
print(' ', file=open(text_file, 'a'))
for i, label1 in enumerate(labels):
if text_file is None:
print(' %{0}s'.format(columnwidth) % label1, end=' ')
else:
print(' %{0}s'.format(columnwidth) % label1, end=' ', file=
open(text_file, 'a'))
for j in range(len(labels)):
cell = '%{}d'.format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
if text_file is None:
print(cell, end=' ')
else:
print(cell, end=' ', file=open(text_file, 'a'))
if text_file is None:
print()
else:
print(' ', file=open(text_file, 'a'))
def evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=
None, class_names=None):
classes, _ = np.unique(y_true, return_counts=True)
if class_names is None:
class_names = [str(n) for n in classes]
f1 = f1_score(y_true, y_pred, average='micro')
mcc = matthews_corrcoef(y_true, y_pred)
if len(classes) == 2:
mean_auc = roc_auc_score(y_true, y_proba[:, 1])
else:
mean_auc = roc_auc_score(y_true, y_proba, average='weighted',
multi_class='ovo')
if print_conf:
if text_file is not None:
print('\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}'.format(100 *
mcc, 100 * f1, 100 * mean_auc), end=' ', file=open(
text_file, 'a'))
cm = confusion_matrix(y_true, y_pred, labels=classes)
print_cm(cm, class_names, text_file=text_file)
return mean_auc, mcc, f1
| <mask token>
def iou_score(target, prediction):
intersection = np.logical_and(target, prediction)
union = np.logical_or(target, prediction)
iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)
return iou_score
def dice_score(actual, predicted):
actual = np.asarray(actual).astype(np.bool)
predicted = np.asarray(predicted).astype(np.bool)
im_sum = actual.sum() + predicted.sum()
if im_sum == 0:
return 1
intersection = np.logical_and(actual, predicted)
return 2.0 * intersection.sum() / im_sum
def accuracy_score(actual, predicted):
actual = np.asarray(actual).astype(np.bool)
predicted = np.asarray(predicted).astype(np.bool)
num_els = actual.size
intersection = np.logical_and(actual, predicted)
return float(intersection.sum()) / num_els
def fast_auc(actual, predicted):
r = rankdata(predicted)
n_pos = np.sum(actual)
n_neg = len(actual) - n_pos
return (np.sum(r[actual == 1]) - n_pos * (n_pos + 1) / 2) / (n_pos * n_neg)
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,
hide_threshold=None, text_file=None):
"""
pretty print for confusion matrixes
https://gist.github.com/zachguo/10296432
"""
if text_file is None:
print('\n', end=' ')
else:
print('\n', end=' ', file=open(text_file, 'a'))
columnwidth = max([len(x) for x in labels] + [5])
empty_cell = ' ' * columnwidth
fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3
) // 2 * ' '
if len(fst_empty_cell) < len(empty_cell):
fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)
) + fst_empty_cell
if text_file is None:
print(' ' + fst_empty_cell, end=' ')
else:
print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))
for label in labels:
if text_file is None:
print('%{0}s'.format(columnwidth) % label, end=' ')
else:
print('%{0}s'.format(columnwidth) % label, end=' ', file=open(
text_file, 'a'))
if text_file is None:
print()
else:
print(' ', file=open(text_file, 'a'))
for i, label1 in enumerate(labels):
if text_file is None:
print(' %{0}s'.format(columnwidth) % label1, end=' ')
else:
print(' %{0}s'.format(columnwidth) % label1, end=' ', file=
open(text_file, 'a'))
for j in range(len(labels)):
cell = '%{}d'.format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
if text_file is None:
print(cell, end=' ')
else:
print(cell, end=' ', file=open(text_file, 'a'))
if text_file is None:
print()
else:
print(' ', file=open(text_file, 'a'))
def evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=
None, class_names=None):
classes, _ = np.unique(y_true, return_counts=True)
if class_names is None:
class_names = [str(n) for n in classes]
f1 = f1_score(y_true, y_pred, average='micro')
mcc = matthews_corrcoef(y_true, y_pred)
if len(classes) == 2:
mean_auc = roc_auc_score(y_true, y_proba[:, 1])
else:
mean_auc = roc_auc_score(y_true, y_proba, average='weighted',
multi_class='ovo')
if print_conf:
if text_file is not None:
print('\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}'.format(100 *
mcc, 100 * f1, 100 * mean_auc), end=' ', file=open(
text_file, 'a'))
cm = confusion_matrix(y_true, y_pred, labels=classes)
print_cm(cm, class_names, text_file=text_file)
return mean_auc, mcc, f1
| from sklearn.metrics import roc_auc_score, matthews_corrcoef, f1_score, confusion_matrix
import numpy as np
from scipy.stats import rankdata
def iou_score(target, prediction):
intersection = np.logical_and(target, prediction)
union = np.logical_or(target, prediction)
iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)
return iou_score
def dice_score(actual, predicted):
actual = np.asarray(actual).astype(np.bool)
predicted = np.asarray(predicted).astype(np.bool)
im_sum = actual.sum() + predicted.sum()
if im_sum == 0:
return 1
intersection = np.logical_and(actual, predicted)
return 2.0 * intersection.sum() / im_sum
def accuracy_score(actual, predicted):
actual = np.asarray(actual).astype(np.bool)
predicted = np.asarray(predicted).astype(np.bool)
num_els = actual.size
intersection = np.logical_and(actual, predicted)
return float(intersection.sum()) / num_els
def fast_auc(actual, predicted):
r = rankdata(predicted)
n_pos = np.sum(actual)
n_neg = len(actual) - n_pos
return (np.sum(r[actual == 1]) - n_pos * (n_pos + 1) / 2) / (n_pos * n_neg)
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,
hide_threshold=None, text_file=None):
"""
pretty print for confusion matrixes
https://gist.github.com/zachguo/10296432
"""
if text_file is None:
print('\n', end=' ')
else:
print('\n', end=' ', file=open(text_file, 'a'))
columnwidth = max([len(x) for x in labels] + [5])
empty_cell = ' ' * columnwidth
fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3
) // 2 * ' '
if len(fst_empty_cell) < len(empty_cell):
fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)
) + fst_empty_cell
if text_file is None:
print(' ' + fst_empty_cell, end=' ')
else:
print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))
for label in labels:
if text_file is None:
print('%{0}s'.format(columnwidth) % label, end=' ')
else:
print('%{0}s'.format(columnwidth) % label, end=' ', file=open(
text_file, 'a'))
if text_file is None:
print()
else:
print(' ', file=open(text_file, 'a'))
for i, label1 in enumerate(labels):
if text_file is None:
print(' %{0}s'.format(columnwidth) % label1, end=' ')
else:
print(' %{0}s'.format(columnwidth) % label1, end=' ', file=
open(text_file, 'a'))
for j in range(len(labels)):
cell = '%{}d'.format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
if text_file is None:
print(cell, end=' ')
else:
print(cell, end=' ', file=open(text_file, 'a'))
if text_file is None:
print()
else:
print(' ', file=open(text_file, 'a'))
def evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=
None, class_names=None):
classes, _ = np.unique(y_true, return_counts=True)
if class_names is None:
class_names = [str(n) for n in classes]
f1 = f1_score(y_true, y_pred, average='micro')
mcc = matthews_corrcoef(y_true, y_pred)
if len(classes) == 2:
mean_auc = roc_auc_score(y_true, y_proba[:, 1])
else:
mean_auc = roc_auc_score(y_true, y_proba, average='weighted',
multi_class='ovo')
if print_conf:
if text_file is not None:
print('\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}'.format(100 *
mcc, 100 * f1, 100 * mean_auc), end=' ', file=open(
text_file, 'a'))
cm = confusion_matrix(y_true, y_pred, labels=classes)
print_cm(cm, class_names, text_file=text_file)
return mean_auc, mcc, f1
| from sklearn.metrics import roc_auc_score, matthews_corrcoef, f1_score, confusion_matrix
import numpy as np
from scipy.stats import rankdata
def iou_score(target, prediction):
intersection = np.logical_and(target, prediction)
union = np.logical_or(target, prediction)
iou_score = np.sum(intersection) / (np.sum(union) + 1e-6)
return iou_score
def dice_score(actual, predicted):
actual = np.asarray(actual).astype(np.bool)
predicted = np.asarray(predicted).astype(np.bool)
im_sum = actual.sum() + predicted.sum()
if im_sum == 0: return 1
intersection = np.logical_and(actual, predicted)
return 2. * intersection.sum() / im_sum
def accuracy_score(actual, predicted):
actual = np.asarray(actual).astype(np.bool)
predicted = np.asarray(predicted).astype(np.bool)
num_els = actual.size
intersection = np.logical_and(actual, predicted)
return float(intersection.sum()) / num_els
def fast_auc(actual, predicted):
r = rankdata(predicted)
n_pos = np.sum(actual)
n_neg = len(actual) - n_pos
return (np.sum(r[actual==1]) - n_pos*(n_pos+1)/2) / (n_pos*n_neg)
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None, text_file=None):
"""
pretty print for confusion matrixes
https://gist.github.com/zachguo/10296432
"""
if text_file is None: print("\n", end=" ")
else: print("\n", end=" ", file=open(text_file, "a"))
columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length
empty_cell = " " * columnwidth
fst_empty_cell = (columnwidth - 3) // 2 * " " + "t/p" + (columnwidth - 3) // 2 * " "
if len(fst_empty_cell) < len(empty_cell):
fst_empty_cell = " " * (len(empty_cell) - len(fst_empty_cell)) + fst_empty_cell
# Print header
if text_file is None: print(" " + fst_empty_cell, end=" ")
else: print(" " + fst_empty_cell, end=" ", file = open(text_file, "a"))
for label in labels:
if text_file is None: print("%{0}s".format(columnwidth) % label, end=" ")
else: print("%{0}s".format(columnwidth) % label, end=" ", file = open(text_file, "a"))
if text_file is None: print()
else: print(' ', file = open(text_file, "a"))
# Print rows
for i, label1 in enumerate(labels):
if text_file is None: print(" %{0}s".format(columnwidth) % label1, end=" ")
else: print(" %{0}s".format(columnwidth) % label1, end=" ", file = open(text_file, "a"))
for j in range(len(labels)):
cell = "%{}d".format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
if text_file is None: print(cell, end=" ")
else: print(cell, end=" ", file = open(text_file, "a"))
if text_file is None: print()
else: print(' ', file = open(text_file, "a"))
def evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=None, class_names=None):
classes, _ = np.unique(y_true, return_counts=True)
if class_names is None:
class_names = [str(n) for n in classes]
f1 = f1_score(y_true, y_pred, average='micro')
mcc = matthews_corrcoef(y_true, y_pred)
if len(classes)==2:
mean_auc = roc_auc_score(y_true, y_proba[:,1])
else:
mean_auc = roc_auc_score(y_true, y_proba, average='weighted', multi_class='ovo')
# mean_auc = roc_auc_score(y_true, y_proba, average='weighted', multi_class='ovr')
# ovo should be better, but average is not clear from docs
# mean_auc = roc_auc_score(y_true, y_proba, average='macro', multi_class='ovo')
if print_conf:
if text_file is not None:
print("\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}".format(100*mcc, 100*f1, 100*mean_auc), end=" ", file=open(text_file, "a"))
cm = confusion_matrix(y_true, y_pred, labels=classes)
print_cm(cm, class_names, text_file=text_file)
return mean_auc, mcc, f1 | [
3,
5,
6,
7,
8
] |
1,809 | 1db16ae1fc6546575150187432265ac1cf834ec2 | import pandas as pd
import numpy as np
import datetime as dt
def sum_unique(x):
return np.unique(x).shape[0]
def analyze_count(data):
"""real time, vk, itemid, action"""
dsct_vk = pd.unique(data['vk'])
dsct_itemid = pd.unique(data['itemid'])
print 'number of user:', dsct_vk.shape
print 'number of items:', dsct_itemid.shape
print 'the number of ratings:', data.shape
print 'unique actions:', pd.unique(data['action'])
print 'the number of action 0:', np.sum(data['action'] == 0)
print 'the number of action 1:', np.sum(data['action'] == 1)
print 'the number of action 2:', np.sum(data['action'] == 2)
print 'the number of action 3:', np.sum(data['action'] == 3)
print 'the number of action 4:', np.sum(data['action'] == 4)
time_range_item = data.groupby('itemid')['real_time'].aggregate(sum_unique)
print 'Max Range:', np.max(time_range_item)
print 'Mean Range:', np.mean(time_range_item)
print 'Median Range:', np.median(time_range_item)
| null | null | null | null | [
0
] |
1,810 | 5749f30d1a1efd5404654d755bca4515adcf4bca | <mask token>
class CRUD(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE
)
name = models.TextField(blank=True, null=True)
content = models.TextField(blank=True, null=True)
image = models.ImageField(upload_to=upload_updated_image, null=True,
blank=True)
updated = models.DateTimeField(auto_now=True)
timestamp = models.DateTimeField(auto_now_add=True)
objects = UpdateManager()
def __str__(self):
return self.name or ''
def serialize(self):
try:
image = self.image.url
except:
image = ''
data = {'user': self.user.id, 'id': self.id, 'name': self.name,
'content': self.content, 'image': image}
return json.dumps(data)
| <mask token>
class UpdateQueryset(models.QuerySet):
def serialize(self):
list_value = list(self.values('user', 'id', 'name', 'content', 'image')
)
return json.dumps(list_value)
class UpdateManager(models.Manager):
def get_queryset(self):
return UpdateQueryset(self.model, using=self.db)
class CRUD(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE
)
name = models.TextField(blank=True, null=True)
content = models.TextField(blank=True, null=True)
image = models.ImageField(upload_to=upload_updated_image, null=True,
blank=True)
updated = models.DateTimeField(auto_now=True)
timestamp = models.DateTimeField(auto_now_add=True)
objects = UpdateManager()
def __str__(self):
return self.name or ''
def serialize(self):
try:
image = self.image.url
except:
image = ''
data = {'user': self.user.id, 'id': self.id, 'name': self.name,
'content': self.content, 'image': image}
return json.dumps(data)
| <mask token>
def upload_updated_image(instance, filename):
return '/MyApi/{user}/{filename}'.format(user=instance.user, filename=
filename)
class UpdateQueryset(models.QuerySet):
def serialize(self):
list_value = list(self.values('user', 'id', 'name', 'content', 'image')
)
return json.dumps(list_value)
class UpdateManager(models.Manager):
def get_queryset(self):
return UpdateQueryset(self.model, using=self.db)
class CRUD(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE
)
name = models.TextField(blank=True, null=True)
content = models.TextField(blank=True, null=True)
image = models.ImageField(upload_to=upload_updated_image, null=True,
blank=True)
updated = models.DateTimeField(auto_now=True)
timestamp = models.DateTimeField(auto_now_add=True)
objects = UpdateManager()
def __str__(self):
return self.name or ''
def serialize(self):
try:
image = self.image.url
except:
image = ''
data = {'user': self.user.id, 'id': self.id, 'name': self.name,
'content': self.content, 'image': image}
return json.dumps(data)
| import json
from django.db import models
from django.conf import settings
from django.core.serializers import serialize
def upload_updated_image(instance, filename):
return '/MyApi/{user}/{filename}'.format(user=instance.user, filename=
filename)
class UpdateQueryset(models.QuerySet):
def serialize(self):
list_value = list(self.values('user', 'id', 'name', 'content', 'image')
)
return json.dumps(list_value)
class UpdateManager(models.Manager):
def get_queryset(self):
return UpdateQueryset(self.model, using=self.db)
class CRUD(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE
)
name = models.TextField(blank=True, null=True)
content = models.TextField(blank=True, null=True)
image = models.ImageField(upload_to=upload_updated_image, null=True,
blank=True)
updated = models.DateTimeField(auto_now=True)
timestamp = models.DateTimeField(auto_now_add=True)
objects = UpdateManager()
def __str__(self):
return self.name or ''
def serialize(self):
try:
image = self.image.url
except:
image = ''
data = {'user': self.user.id, 'id': self.id, 'name': self.name,
'content': self.content, 'image': image}
return json.dumps(data)
| import json
from django.db import models
from django.conf import settings
from django.core.serializers import serialize
# Create your models here.
def upload_updated_image(instance,filename):
return '/MyApi/{user}/{filename}'.format(user=instance.user,filename=filename)
class UpdateQueryset(models.QuerySet):
def serialize(self):
# dot value method
list_value=list(self.values("user","id","name","content","image"))
return json.dumps(list_value)
class UpdateManager(models.Manager):
def get_queryset(self):
return UpdateQueryset(self.model,using=self.db)
class CRUD(models.Model):
user =models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)
name =models.TextField(blank=True,null=True)
content =models.TextField(blank=True,null=True)
image =models.ImageField(upload_to=upload_updated_image,null=True,blank=True)
updated =models.DateTimeField(auto_now=True)
timestamp =models.DateTimeField(auto_now_add=True)
# This is modellistview
objects=UpdateManager()
def __str__(self):
return self.name or ""
#This is for modeldetailview
def serialize(self):
try:
image=self.image.url
except:
image=""
data={
"user":self.user.id,
"id":self.id,
"name":self.name,
"content":self.content,
"image":image
}
return json.dumps(data)
| [
4,
8,
9,
10,
11
] |
1,811 | 3a09cbd71d23b1320af9b8ddcfc65b223e487b21 | <mask token>
| <mask token>
with open('final_regroup.csv', 'w', newline='') as train:
writer = csv.writer(train)
with open('final_syn_train.csv', 'r') as zhidao:
reader = csv.reader(zhidao)
cluster = []
cur = []
stand = ''
for line in reader:
if line[1] == stand:
cur.append(line[0])
else:
if cur:
cluster.append(cur)
stand = line[1]
cur = [line[0]]
cluster.append(cur)
for i in range(len(cluster)):
for j in range(len(cluster[i])):
k = random.randint(0, len(cluster[i]) - 1)
writer.writerow([cluster[i][j], cluster[i][k], 1])
m = n = 0
for _ in range(3):
while m == i:
m = random.randint(0, len(cluster) - 1)
n = random.randint(0, len(cluster[m]) - 1)
writer.writerow([cluster[i][j], cluster[m][n], 0])
with open('final_regroup.csv', 'w', newline='') as train:
writer = csv.writer(train)
with open('standard.csv', 'r') as standard:
reader = csv.reader(standard)
stand = []
for line in reader:
stand.append(line[0])
with open('final_syn_train.csv', 'r', encoding='gbk') as zhidao:
reader = csv.reader(zhidao)
for line in reader:
writer.writerow([line[0], line[1], 1])
for _ in range(3):
k = random.randint(0, 208)
writer.writerow([line[0], stand[k], 0])
| import random
import csv
with open('final_regroup.csv', 'w', newline='') as train:
writer = csv.writer(train)
with open('final_syn_train.csv', 'r') as zhidao:
reader = csv.reader(zhidao)
cluster = []
cur = []
stand = ''
for line in reader:
if line[1] == stand:
cur.append(line[0])
else:
if cur:
cluster.append(cur)
stand = line[1]
cur = [line[0]]
cluster.append(cur)
for i in range(len(cluster)):
for j in range(len(cluster[i])):
k = random.randint(0, len(cluster[i]) - 1)
writer.writerow([cluster[i][j], cluster[i][k], 1])
m = n = 0
for _ in range(3):
while m == i:
m = random.randint(0, len(cluster) - 1)
n = random.randint(0, len(cluster[m]) - 1)
writer.writerow([cluster[i][j], cluster[m][n], 0])
with open('final_regroup.csv', 'w', newline='') as train:
writer = csv.writer(train)
with open('standard.csv', 'r') as standard:
reader = csv.reader(standard)
stand = []
for line in reader:
stand.append(line[0])
with open('final_syn_train.csv', 'r', encoding='gbk') as zhidao:
reader = csv.reader(zhidao)
for line in reader:
writer.writerow([line[0], line[1], 1])
for _ in range(3):
k = random.randint(0, 208)
writer.writerow([line[0], stand[k], 0])
| import random
import csv
# Take the paraphrased questions: same-class pairs form positives, cross-class pairs form negatives, positive:negative = 1:3
with open('final_regroup.csv', 'w', newline='') as train:
writer = csv.writer(train)
with open('final_syn_train.csv', 'r') as zhidao:
reader = csv.reader(zhidao)
cluster = []
cur = []
stand = ''
        # Group the paraphrases of the same standard question into one array
for line in reader:
if line[1] == stand:
cur.append(line[0])
else:
if cur:
cluster.append(cur)
stand = line[1]
cur = [line[0]]
cluster.append(cur)
        # For every sentence in every class, pair it with one sentence from the same class as a positive and with three sentences from other classes as negatives
for i in range(len(cluster)):
for j in range(len(cluster[i])):
k = random.randint(0, len(cluster[i])-1)
writer.writerow([cluster[i][j], cluster[i][k], 1])
m = n = 0
for _ in range(3):
while m == i:
m = random.randint(0, len(cluster)-1)
n = random.randint(0, len(cluster[m])-1)
writer.writerow([cluster[i][j], cluster[m][n], 0])
# Pair each paraphrase with its correct standard question as a positive and with non-matching standard questions as negatives, positive:negative = 1:3 (this approach works better)
with open('final_regroup.csv', 'w', newline='') as train:
writer = csv.writer(train)
with open('standard.csv', 'r') as standard:
reader = csv.reader(standard)
stand = []
for line in reader:
stand.append(line[0])
with open('final_syn_train.csv', 'r', encoding='gbk') as zhidao:
reader = csv.reader(zhidao)
for line in reader:
writer.writerow([line[0], line[1], 1])
for _ in range(3):
k = random.randint(0, 208)
writer.writerow([line[0], stand[k], 0]) | null | [
0,
1,
2,
3
] |
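# Hypothetical sanity check, separate from the record above: it assumes only the
# final_regroup.csv filename and its three-column layout (sentence, candidate, 0/1 label)
# written by the scripts above, and counts how balanced the generated pairs are.
import csv
from collections import Counter

# One positive and three negatives are written per source row, so expect roughly 1:3.
with open('final_regroup.csv', newline='') as f:
    labels = Counter(row[2] for row in csv.reader(f) if row)
print(labels)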
1,812 | 707855a4e07b68d9ae97c2e1dc8bfd52f11c314c | <mask token>
def load_dataset(filename):
df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])
intent = df['Intent']
unique_intent = list(set(intent))
sentences = list(df['Sentence'])
return intent, unique_intent, sentences
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)
w = nltk.word_tokenize(clean)
words.append([lemmatizer.lemmatize(i.lower()) for i in w])
return words
def create_tokenizer(words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~'):
token = Tokenizer(filters=filters)
token.fit_on_texts(words)
return token
def max_length(words):
return len(max(words, key=len))
<mask token>
def findTriplets(str):
tuple_data = textacy.extract.subject_verb_object_triples(str)
return tuple_data
def creatingLists(tuple_data):
tuple_to_lists = list(tuple_data)
return tuple_to_lists
def displaySubjectVerbObject(tuples_to_lists):
for item in tuples_to_lists:
print(item)
<mask token>
| <mask token>
def load_dataset(filename):
df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])
intent = df['Intent']
unique_intent = list(set(intent))
sentences = list(df['Sentence'])
return intent, unique_intent, sentences
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)
w = nltk.word_tokenize(clean)
words.append([lemmatizer.lemmatize(i.lower()) for i in w])
return words
def create_tokenizer(words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~'):
token = Tokenizer(filters=filters)
token.fit_on_texts(words)
return token
def max_length(words):
return len(max(words, key=len))
def encoding_doc(token, words):
return token.texts_to_sequences(words)
def findTriplets(str):
tuple_data = textacy.extract.subject_verb_object_triples(str)
return tuple_data
def creatingLists(tuple_data):
tuple_to_lists = list(tuple_data)
return tuple_to_lists
def displaySubjectVerbObject(tuples_to_lists):
for item in tuples_to_lists:
print(item)
<mask token>
displaySubjectVerbObject(list)
| <mask token>
nlp = spacy.load('en_core_web_sm')
text = input('Enter the text to find the triplet: ')
str = nlp(text)
def load_dataset(filename):
df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])
intent = df['Intent']
unique_intent = list(set(intent))
sentences = list(df['Sentence'])
return intent, unique_intent, sentences
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)
w = nltk.word_tokenize(clean)
words.append([lemmatizer.lemmatize(i.lower()) for i in w])
return words
def create_tokenizer(words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~'):
token = Tokenizer(filters=filters)
token.fit_on_texts(words)
return token
def max_length(words):
return len(max(words, key=len))
def encoding_doc(token, words):
return token.texts_to_sequences(words)
def findTriplets(str):
tuple_data = textacy.extract.subject_verb_object_triples(str)
return tuple_data
def creatingLists(tuple_data):
tuple_to_lists = list(tuple_data)
return tuple_to_lists
def displaySubjectVerbObject(tuples_to_lists):
for item in tuples_to_lists:
print(item)
tuple_data = findTriplets(str)
list = creatingLists(tuple_data)
displaySubjectVerbObject(list)
| import nltk
import spacy
import textacy
from keras.layers import Embedding, Bidirectional, Dense, Dropout, BatchNormalization
from keras_preprocessing.sequence import pad_sequences
from keras_preprocessing.text import Tokenizer
from nltk import word_tokenize, re
from rasa import model
import pandas as pd
from spacy import lemmatizer
nlp = spacy.load('en_core_web_sm')
text = input('Enter the text to find the triplet: ')
str = nlp(text)
def load_dataset(filename):
df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])
intent = df['Intent']
unique_intent = list(set(intent))
sentences = list(df['Sentence'])
return intent, unique_intent, sentences
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)
w = nltk.word_tokenize(clean)
words.append([lemmatizer.lemmatize(i.lower()) for i in w])
return words
def create_tokenizer(words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~'):
token = Tokenizer(filters=filters)
token.fit_on_texts(words)
return token
def max_length(words):
return len(max(words, key=len))
def encoding_doc(token, words):
return token.texts_to_sequences(words)
def findTriplets(str):
tuple_data = textacy.extract.subject_verb_object_triples(str)
return tuple_data
def creatingLists(tuple_data):
tuple_to_lists = list(tuple_data)
return tuple_to_lists
def displaySubjectVerbObject(tuples_to_lists):
for item in tuples_to_lists:
print(item)
tuple_data = findTriplets(str)
list = creatingLists(tuple_data)
displaySubjectVerbObject(list)
| import nltk
import spacy
import textacy
from keras.layers import Embedding, Bidirectional, Dense, Dropout, BatchNormalization
from keras_preprocessing.sequence import pad_sequences
from keras_preprocessing.text import Tokenizer
from nltk import word_tokenize, re
from rasa import model
import pandas as pd
from spacy import lemmatizer
nlp = spacy.load('en_core_web_sm')
text=input("Enter the text to find the triplet: ")
str=nlp(text)
def load_dataset(filename):
df = pd.read_csv(filename, encoding="latin1",
names=["Sentence", "Intent"])
intent = df["Intent"]
unique_intent = list(set(intent))
sentences = list(df["Sentence"])
return (intent, unique_intent, sentences)
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub(r'[^ a-z A-Z 0-9]', " ", s)
w = nltk.word_tokenize(clean)
# lemmatizing
words.append([lemmatizer.lemmatize(i.lower()) for i in w])
return words
def create_tokenizer(words,
filters = '!"#$%&()*+,-./:;<=>?@[\]^_`{|}~'):
token = Tokenizer(filters = filters)
token.fit_on_texts(words)
return token
def max_length(words):
return(len(max(words, key = len)))
def encoding_doc(token, words):
return(token.texts_to_sequences(words))
def findTriplets(str):
tuple_data=textacy.extract.subject_verb_object_triples(str)
return tuple_data
def creatingLists(tuple_data):
tuple_to_lists=list(tuple_data)
return tuple_to_lists
def displaySubjectVerbObject(tuples_to_lists):
for item in tuples_to_lists:
print(item)
tuple_data=findTriplets(str)
list=creatingLists(tuple_data)
displaySubjectVerbObject(list) | [
7,
9,
10,
11,
12
] |
1,813 | ab5400f4b44a53cb5cc2f6394bcdb8f55fd218f0 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [migrations.CreateModel(name='ClassLevel', fields=[('id',
models.AutoField(verbose_name='ID', serialize=False, auto_created=
True, primary_key=True)), ('level', models.CharField(unique=True,
max_length=100))]), migrations.CreateModel(name='CourseRecord',
fields=[('id', models.AutoField(verbose_name='ID', serialize=False,
auto_created=True, primary_key=True)), ('Course_Title', models.
CharField(unique=True, max_length=50)), ('Course_Code', models.
CharField(unique=True, max_length=10)), ('Course_Unit', models.
PositiveSmallIntegerField()), ('Semester', models.CharField(choices
=[('First_Semester', 'First_Semester'), ('Second_Semester',
'Second_Semester')], max_length=20, default='Select_Semester')), (
'level', models.ForeignKey(to='Qbank.ClassLevel'))]), migrations.
CreateModel(name='QuestionBank', fields=[('id', models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=
True)), ('CourseTitle', models.CharField(max_length=50)), (
'CourseCode', models.CharField(max_length=10)), ('CourseUnit',
models.IntegerField()), ('Semester', models.CharField(choices=[(
'First_Semester', 'First_Semester'), ('Second_Semester',
'Second_Semester')], max_length=20, default='Select_Semester')), (
'Date', models.DateField()), ('question_papers', models.FileField(
upload_to='QuestionPapers')), ('level', models.ForeignKey(to=
'Qbank.ClassLevel'))]), migrations.CreateModel(name='UserProfile',
fields=[('id', models.AutoField(verbose_name='ID', serialize=False,
auto_created=True, primary_key=True)), ('Account_Type', models.
CharField(choices=[('L', 'Lecturer'), ('S', 'Student')], max_length
=1, default='S')), ('Upload_Picture', models.ImageField(upload_to=
'profile_images', blank=True)), ('user', models.OneToOneField(to=
settings.AUTH_USER_MODEL))])]
| from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [migrations.CreateModel(name='ClassLevel', fields=[('id',
models.AutoField(verbose_name='ID', serialize=False, auto_created=
True, primary_key=True)), ('level', models.CharField(unique=True,
max_length=100))]), migrations.CreateModel(name='CourseRecord',
fields=[('id', models.AutoField(verbose_name='ID', serialize=False,
auto_created=True, primary_key=True)), ('Course_Title', models.
CharField(unique=True, max_length=50)), ('Course_Code', models.
CharField(unique=True, max_length=10)), ('Course_Unit', models.
PositiveSmallIntegerField()), ('Semester', models.CharField(choices
=[('First_Semester', 'First_Semester'), ('Second_Semester',
'Second_Semester')], max_length=20, default='Select_Semester')), (
'level', models.ForeignKey(to='Qbank.ClassLevel'))]), migrations.
CreateModel(name='QuestionBank', fields=[('id', models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=
True)), ('CourseTitle', models.CharField(max_length=50)), (
'CourseCode', models.CharField(max_length=10)), ('CourseUnit',
models.IntegerField()), ('Semester', models.CharField(choices=[(
'First_Semester', 'First_Semester'), ('Second_Semester',
'Second_Semester')], max_length=20, default='Select_Semester')), (
'Date', models.DateField()), ('question_papers', models.FileField(
upload_to='QuestionPapers')), ('level', models.ForeignKey(to=
'Qbank.ClassLevel'))]), migrations.CreateModel(name='UserProfile',
fields=[('id', models.AutoField(verbose_name='ID', serialize=False,
auto_created=True, primary_key=True)), ('Account_Type', models.
CharField(choices=[('L', 'Lecturer'), ('S', 'Student')], max_length
=1, default='S')), ('Upload_Picture', models.ImageField(upload_to=
'profile_images', blank=True)), ('user', models.OneToOneField(to=
settings.AUTH_USER_MODEL))])]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ClassLevel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('level', models.CharField(unique=True, max_length=100)),
],
),
migrations.CreateModel(
name='CourseRecord',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('Course_Title', models.CharField(unique=True, max_length=50)),
('Course_Code', models.CharField(unique=True, max_length=10)),
('Course_Unit', models.PositiveSmallIntegerField()),
('Semester', models.CharField(choices=[('First_Semester', 'First_Semester'), ('Second_Semester', 'Second_Semester')], max_length=20, default='Select_Semester')),
('level', models.ForeignKey(to='Qbank.ClassLevel')),
],
),
migrations.CreateModel(
name='QuestionBank',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('CourseTitle', models.CharField(max_length=50)),
('CourseCode', models.CharField(max_length=10)),
('CourseUnit', models.IntegerField()),
('Semester', models.CharField(choices=[('First_Semester', 'First_Semester'), ('Second_Semester', 'Second_Semester')], max_length=20, default='Select_Semester')),
('Date', models.DateField()),
('question_papers', models.FileField(upload_to='QuestionPapers')),
('level', models.ForeignKey(to='Qbank.ClassLevel')),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('Account_Type', models.CharField(choices=[('L', 'Lecturer'), ('S', 'Student')], max_length=1, default='S')),
('Upload_Picture', models.ImageField(upload_to='profile_images', blank=True)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
]
| [
0,
1,
2,
3,
4
] |
1,814 | 2872c86294037b4585158e7ff6db414ba7ab90cc | <mask token>
| <mask token>
class AlertMailModel(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class AlertMailModel(models.Model):
receipient_mail = models.EmailField()
host_mail = models.EmailField()
host_smtpaddress = models.CharField(max_length=25)
mail_host_password = models.CharField(max_length=200)
use_tls = models.BooleanField(default=False)
port = models.CharField(max_length=3, default=25)
| from django.db import models
class AlertMailModel(models.Model):
receipient_mail = models.EmailField()
host_mail = models.EmailField()
host_smtpaddress = models.CharField(max_length=25)
mail_host_password = models.CharField(max_length=200)
use_tls = models.BooleanField(default=False)
port = models.CharField(max_length=3, default=25)
| from django.db import models
# Create your models here.
class AlertMailModel(models.Model):
receipient_mail = models.EmailField()
host_mail = models.EmailField()
host_smtpaddress = models.CharField(max_length=25)
mail_host_password = models.CharField(max_length=200)
use_tls=models.BooleanField(default=False)
port=models.CharField(max_length=3,default=25)
| [
0,
1,
2,
3,
4
] |
1,815 | 07ed8c12e8e5c568c897b6b632c48831267eba51 | <mask token>
| <mask token>
def z(i, j, dim):
t = dim ** 2
if dim == 2:
return t_dim_2[i - 1][j - 1]
d = dim // 2
if i <= d:
if j <= d:
return z(i, j, d)
else:
j -= d
return t // 4 + z(i, j, d)
elif j <= d:
i -= d
return t // 2 + z(i, j, d)
else:
i -= d
j -= d
return 3 * t // 4 + z(i, j, d)
<mask token>
| <mask token>
def z(i, j, dim):
t = dim ** 2
if dim == 2:
return t_dim_2[i - 1][j - 1]
d = dim // 2
if i <= d:
if j <= d:
return z(i, j, d)
else:
j -= d
return t // 4 + z(i, j, d)
elif j <= d:
i -= d
return t // 2 + z(i, j, d)
else:
i -= d
j -= d
return 3 * t // 4 + z(i, j, d)
<mask token>
print(z(i, j, dim))
| t_dim_2 = [[1, 2], [3, 4]]
def z(i, j, dim):
t = dim ** 2
if dim == 2:
return t_dim_2[i - 1][j - 1]
d = dim // 2
if i <= d:
if j <= d:
return z(i, j, d)
else:
j -= d
return t // 4 + z(i, j, d)
elif j <= d:
i -= d
return t // 2 + z(i, j, d)
else:
i -= d
j -= d
return 3 * t // 4 + z(i, j, d)
n = 2
i = 3
j = 3
dim = 2 ** n
print(z(i, j, dim))
|
t_dim_2 = [[1, 2], [3, 4]]
def z(i, j, dim):
t = dim ** 2
if dim == 2:
return t_dim_2[i-1][j-1]
d = dim//2
if i <= d: # I or II
if j <= d:
return z(i, j, d) #I
else:
j -= d
return t//4 + z(i, j, d) # II
else: # III or IV
if j <=d:
i -= d
return t//2 + z(i, j, d) # III
else:
i -= d
j -= d
return 3*t//4 + z(i, j, d) # IV
n = 2
i = 3
j = 3
dim = 2**n
print(z(i,j,dim)) | [
0,
1,
2,
3,
4
] |
1,816 | d6cfea95c76021bdbfbb4471878c653564c9accd | <mask token>
| <mask token>
def getTitlesFromAll(amount, rating='all'):
output = ''
for i in range(1, amount + 1):
try:
if rating == 'all':
html = urllib.request.urlopen('https://habr.com/all/page' +
str(i) + '/').read()
else:
html = urllib.request.urlopen('https://habr.com/all/' +
rating + '/page' + str(i) + '/').read()
except urllib.error.HTTPError:
print('Error 404 Not Found')
break
soup = BeautifulSoup(html, 'html.parser')
title = soup.find_all('a', class_='post__title_link')
for i in title:
i = i.get_text()
output += '- "' + i + '",\n'
return output
<mask token>
| <mask token>
def getTitlesFromAll(amount, rating='all'):
output = ''
for i in range(1, amount + 1):
try:
if rating == 'all':
html = urllib.request.urlopen('https://habr.com/all/page' +
str(i) + '/').read()
else:
html = urllib.request.urlopen('https://habr.com/all/' +
rating + '/page' + str(i) + '/').read()
except urllib.error.HTTPError:
print('Error 404 Not Found')
break
soup = BeautifulSoup(html, 'html.parser')
title = soup.find_all('a', class_='post__title_link')
for i in title:
i = i.get_text()
output += '- "' + i + '",\n'
return output
def getTitlesFromTop(amount, age='daily'):
output = ''
for i in range(1, amount + 1):
try:
html = urllib.request.urlopen('https://habr.com/top/' + age +
'/page' + str(i) + '/').read()
except urllib.error.HTTPError:
print('Error 404 Not Found')
break
soup = BeautifulSoup(html, 'html.parser')
title = soup.find_all('a', class_='post__title_link')
for i in title:
i = i.get_text()
output += '- "' + i + '",\n'
return output
| import urllib.request
from bs4 import BeautifulSoup
def getTitlesFromAll(amount, rating='all'):
output = ''
for i in range(1, amount + 1):
try:
if rating == 'all':
html = urllib.request.urlopen('https://habr.com/all/page' +
str(i) + '/').read()
else:
html = urllib.request.urlopen('https://habr.com/all/' +
rating + '/page' + str(i) + '/').read()
except urllib.error.HTTPError:
print('Error 404 Not Found')
break
soup = BeautifulSoup(html, 'html.parser')
title = soup.find_all('a', class_='post__title_link')
for i in title:
i = i.get_text()
output += '- "' + i + '",\n'
return output
def getTitlesFromTop(amount, age='daily'):
output = ''
for i in range(1, amount + 1):
try:
html = urllib.request.urlopen('https://habr.com/top/' + age +
'/page' + str(i) + '/').read()
except urllib.error.HTTPError:
print('Error 404 Not Found')
break
soup = BeautifulSoup(html, 'html.parser')
title = soup.find_all('a', class_='post__title_link')
for i in title:
i = i.get_text()
output += '- "' + i + '",\n'
return output
| import urllib.request
from bs4 import BeautifulSoup
def getTitlesFromAll(amount, rating='all'):
output = ''
for i in range(1, amount+1):
try:
if rating == 'all':
html = urllib.request.urlopen('https://habr.com/all/page'+ str(i) +'/').read()
else:
html = urllib.request.urlopen('https://habr.com/all/'+ rating +'/page'+ str(i) +'/').read()
except urllib.error.HTTPError:
print('Error 404 Not Found')
break
soup = BeautifulSoup(html, 'html.parser')
title = soup.find_all('a', class_ = 'post__title_link')
for i in title:
i = i.get_text()
output += ('- "'+i+'",\n')
return output
def getTitlesFromTop(amount, age='daily'):
output = ''
for i in range(1, amount+1):
try:
html = urllib.request.urlopen('https://habr.com/top/'+ age +'/page'+ str(i) +'/').read()
except urllib.error.HTTPError:
print('Error 404 Not Found')
break
soup = BeautifulSoup(html, 'html.parser')
title = soup.find_all('a', class_ = 'post__title_link')
for i in title:
i = i.get_text()
output += ('- "'+i+'",\n')
return output
| [
0,
1,
2,
3,
4
] |
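# Hypothetical offline sketch, separate from the record above: both parsers collect
# <a> tags with the post__title_link class and read their text. The HTML string below
# is fabricated so the same BeautifulSoup pattern can be tried without network access.
from bs4 import BeautifulSoup

html = ('<a class="post__title_link">First title</a>'
        '<a class="post__title_link">Second title</a>')
soup = BeautifulSoup(html, 'html.parser')
titles = [a.get_text() for a in soup.find_all('a', class_='post__title_link')]
print(titles)  # ['First title', 'Second title']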
1,817 | 5f13866bd5c6d20e8ddc112fb1d1335e3fd46c3e | <mask token>
def main(targetsrting):
email = ''
key = ''
target = base64.b64encode(targetsrting.encode('utf-8')).decode('utf-8')
url = (
'https://fofa.so/api/v1/search/all?email={}&key={}&qbase64={}&fields=host,server,title&size=1000'
.format(email, key, target))
resp = requests.get(url)
try:
resp = requests.get(url)
data_model = json.loads(resp.text)
num = 0
for i in data_model['results']:
num = num + 1
if len(i[2]) > 0 and ('Not Found' not in i[2]) & ('ERROR' not in
i[2]) & ('Unavailable' not in i[2]):
print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))
a = input('是否要进行边缘资产的title筛查(建议用body搜索 --确定的话请摁1):')
if a == '1':
body(targetsrting, data_model)
print('fofa查询总共', num, '条数据,以上数据均通过title筛查不输出空值。')
except:
print("'\n',出现问题了,账号密码、网络、其他原因,无法fofa查询")
<mask token>
| <mask token>
def main(targetsrting):
email = ''
key = ''
target = base64.b64encode(targetsrting.encode('utf-8')).decode('utf-8')
url = (
'https://fofa.so/api/v1/search/all?email={}&key={}&qbase64={}&fields=host,server,title&size=1000'
.format(email, key, target))
resp = requests.get(url)
try:
resp = requests.get(url)
data_model = json.loads(resp.text)
num = 0
for i in data_model['results']:
num = num + 1
if len(i[2]) > 0 and ('Not Found' not in i[2]) & ('ERROR' not in
i[2]) & ('Unavailable' not in i[2]):
print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))
a = input('是否要进行边缘资产的title筛查(建议用body搜索 --确定的话请摁1):')
if a == '1':
body(targetsrting, data_model)
print('fofa查询总共', num, '条数据,以上数据均通过title筛查不输出空值。')
except:
print("'\n',出现问题了,账号密码、网络、其他原因,无法fofa查询")
def body(targetsrting, data_model):
print('/n', 'body筛查的结果')
num = 0
inputString = '{}'.format(targetsrting)
f2 = re.findall('"([^"]*)"', inputString)
for i in data_model['results']:
num = num + 1
if f2[0] in i[2]:
print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))
<mask token>
| <mask token>
def main(targetsrting):
email = ''
key = ''
target = base64.b64encode(targetsrting.encode('utf-8')).decode('utf-8')
url = (
'https://fofa.so/api/v1/search/all?email={}&key={}&qbase64={}&fields=host,server,title&size=1000'
.format(email, key, target))
resp = requests.get(url)
try:
resp = requests.get(url)
data_model = json.loads(resp.text)
num = 0
for i in data_model['results']:
num = num + 1
if len(i[2]) > 0 and ('Not Found' not in i[2]) & ('ERROR' not in
i[2]) & ('Unavailable' not in i[2]):
print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))
a = input('是否要进行边缘资产的title筛查(建议用body搜索 --确定的话请摁1):')
if a == '1':
body(targetsrting, data_model)
print('fofa查询总共', num, '条数据,以上数据均通过title筛查不输出空值。')
except:
print("'\n',出现问题了,账号密码、网络、其他原因,无法fofa查询")
def body(targetsrting, data_model):
print('/n', 'body筛查的结果')
num = 0
inputString = '{}'.format(targetsrting)
f2 = re.findall('"([^"]*)"', inputString)
for i in data_model['results']:
num = num + 1
if f2[0] in i[2]:
print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))
if __name__ == '__main__':
print(
"""
fofa语法
host=".gov.cn"
port="6379"
ip="1.1.1.1"
ip="220.181.111.1/24"
该脚本主要用于快速C段寻找目标边缘资产。 --by aufeng
"""
)
a = input('请输入需要查询的fofa语法:')
main(a)
| import requests
import json
import base64
import re
def main(targetsrting):
email = ''
key = ''
target = base64.b64encode(targetsrting.encode('utf-8')).decode('utf-8')
url = (
'https://fofa.so/api/v1/search/all?email={}&key={}&qbase64={}&fields=host,server,title&size=1000'
.format(email, key, target))
resp = requests.get(url)
try:
resp = requests.get(url)
data_model = json.loads(resp.text)
num = 0
for i in data_model['results']:
num = num + 1
if len(i[2]) > 0 and ('Not Found' not in i[2]) & ('ERROR' not in
i[2]) & ('Unavailable' not in i[2]):
print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))
a = input('是否要进行边缘资产的title筛查(建议用body搜索 --确定的话请摁1):')
if a == '1':
body(targetsrting, data_model)
print('fofa查询总共', num, '条数据,以上数据均通过title筛查不输出空值。')
except:
print("'\n',出现问题了,账号密码、网络、其他原因,无法fofa查询")
def body(targetsrting, data_model):
print('/n', 'body筛查的结果')
num = 0
inputString = '{}'.format(targetsrting)
f2 = re.findall('"([^"]*)"', inputString)
for i in data_model['results']:
num = num + 1
if f2[0] in i[2]:
print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))
if __name__ == '__main__':
print(
"""
fofa语法
host=".gov.cn"
port="6379"
ip="1.1.1.1"
ip="220.181.111.1/24"
该脚本主要用于快速C段寻找目标边缘资产。 --by aufeng
"""
)
a = input('请输入需要查询的fofa语法:')
main(a)
| import requests
import json
import base64
import re
def main(targetsrting):
email="" #email
key="" #key
#targetsrting='ip="202.107.117.5/24"' #搜索关键字
target=base64.b64encode(targetsrting.encode('utf-8')).decode("utf-8")
url="https://fofa.so/api/v1/search/all?email={}&key={}&qbase64={}&fields=host,server,title&size=1000".format(email,key,target)
resp = requests.get(url)
try:
resp = requests.get(url)
data_model = json.loads(resp.text) #字符串转换为字典
#print(data_model)
num = 0
for i in data_model["results"]:
num = num +1
if (len(i[2]) > 0) and ('Not Found' not in i[2])&('ERROR' not in i[2])&('Unavailable' not in i[2]):
print('{:<30}{:<30}{:<20}'.format(i[0],i[1],i[2]))
a = input('是否要进行边缘资产的title筛查(建议用body搜索 --确定的话请摁1):')
if(a == '1'):
body(targetsrting,data_model)
print("fofa查询总共",num,"条数据,以上数据均通过title筛查不输出空值。")
except:
print("'\n',出现问题了,账号密码、网络、其他原因,无法fofa查询")
def body(targetsrting,data_model):
print('/n','body筛查的结果')
num = 0
inputString = '{}'.format(targetsrting)
f2 = re.findall(r'"([^"]*)"', inputString)
for i in data_model["results"]:
num = num +1
if (f2[0] in i[2]):
print('{:<30}{:<30}{:<20}'.format(i[0],i[1],i[2]))
if __name__ == '__main__':
print('''
fofa语法
host=".gov.cn"
port="6379"
ip="1.1.1.1"
ip="220.181.111.1/24"
该脚本主要用于快速C段寻找目标边缘资产。 --by aufeng
''')
a = input("请输入需要查询的fofa语法:")
main(a)
| [
1,
2,
3,
4,
5
] |
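# Hypothetical fragment, separate from the record above: the script sends its fofa
# query base64-encoded in the qbase64 parameter. This reproduces only that encoding
# step; the sample query is taken from the script's own usage banner and no request is made.
import base64

query = 'ip="220.181.111.1/24"'
qbase64 = base64.b64encode(query.encode('utf-8')).decode('utf-8')
print(qbase64)  # value that would go into the qbase64= field of the API URL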
1,818 | c1bb2052b3f623c6787ba080dff2dc81f4d6f55e | import pandas as pd
import numpy as np
import json
from pprint import pprint
from shapely.geometry import shape, Point
from geopy.geocoders import Nominatim
from geopy.exc import GeocoderTimedOut
from geopy.exc import GeocoderServiceError
import collections
from matplotlib import pyplot as plt
import time
import csv
geolocator = Nominatim(user_agent='Neel')
def get_neighborhoods():
with open('AnalysisNeighborhoods.geojson') as f:
neighborhoods_obj = json.load(f)
return neighborhoods_obj
def get_point_from_loc(location_str):
location_str = location_str.replace('(', '')
location_str = location_str.replace(')', '')
location_str = location_str.replace(',', '')
lat_lon = location_str.split(' ')
return Point(float(lat_lon[1]), float(lat_lon[0]))
def get_address_from_block(block_addr):
block_addr = block_addr.replace('Block Of', '')
block_addr_split = block_addr.split(' ')
block_addr = block_addr_split
# make it an address instead of block start
#print block_addr
block_addr[0] = str(int(block_addr[0]) + 1)
block_addr = ' '.join(block_addr) + ' San Francisco CA'
return block_addr
# Using latitude longitude location, find the neighborhood the eviction belongs to
def get_neighborhoods_from_locations(evictions, neighborhoods):
num_found = 0
num_total = 0
locations_dict = collections.defaultdict(int)
locations_with_years_dict = collections.defaultdict(lambda: collections.defaultdict(int))
for index, eviction in evictions.iterrows():
point = get_point_from_loc(eviction['Location'])
found_location = False
for feature in neighborhoods['features']:
polygon = shape(feature['geometry'])
if polygon.contains(point):
#print('Found containing polygon:', feature['properties']['nhood']())
num_found += 1
found_location = True
neighborhood = feature['properties']['nhood']
year = int(eviction['File Date'].split('/')[2])
if year > 90: year = year + 1900
else: year = year + 2000
locations_dict[neighborhood] += 1
locations_with_years_dict[neighborhood][str(year)] += 1
break
if not found_location:
print('Location ' + str(eviction['Eviction ID']) + ' not found, Given [location: ' + str(eviction['Neighborhoods - Analysis Boundaries']))
num_total += 1
years = [str(i) for i in range(1997, 2019)]
#years = ['97', '98', '99', '00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18']
with open('Evictions_By_Location.csv', mode='w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"')
csv_writer.writerow(['Location', 'Number of Evictions'])
for k, v in locations_dict.items():
csv_writer.writerow([k, v])
with open('Evictions_By_Year_Location.csv', mode='w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"')
header = ['Location']
for year in years:
header.append(year)
csv_writer.writerow(header)
for k, v in locations_with_years_dict.items():
row = [k]
for year in years:
row.append(v[year])
csv_writer.writerow(row)
for k, v in locations_with_years_dict.items():
print k
evictions = [int(v[year]) for year in years]
# plt.figure()
# plt.plot(years, evictions)
plt.title(k)
for year in years:
print year + ': ' + str(v[year])
print ''
# plt.show()
return locations_dict, locations_with_years_dict
def get_geocode_address(addr):
try:
return geolocator.geocode(addr)
except (GeocoderTimedOut, GeocoderServiceError) as e:
time.sleep(5)
return get_geocode_address(addr)
#For rows missing latitude longitude location,
# use the block address to add missing lat long to dataframe
# If the block address is incorrect, print it so we can correct it manually
def set_missing_locations(evictions):
missing_location_rows = evictions[evictions['Location'].isnull()]
print('Num missing ' + str(len(missing_location_rows)))
num_not_found = 0
num_found = 0
for index, row in missing_location_rows.iterrows():
#print row['Eviction ID']
addr = get_address_from_block(row['Address'])
location = get_geocode_address(addr)
if location == None:
num_not_found += 1
print('NOT FOUND ' + str(row['Eviction ID']) + ': ' + addr)
else:
evictions.at[index, 'Location'] = '(' + str(location.latitude) + ', ' + str(location.longitude) + ')'
num_found += 1
if (num_found + num_not_found) % 50 == 0:
print('Processed ' + str(num_found + num_not_found) + ' evictions')
print 'Total not found ' + str(num_not_found)
print 'Total found ' + str(num_found)
evictions.to_csv('Eviction_Notices_With_Locations.csv')
evictions = pd.read_csv('Eviction_Notices_With_Locations.csv')
neighborhoods = get_neighborhoods()
#set_missing_locations(evictions)
locations_dict, locations_with_years_dict = get_neighborhoods_from_locations(evictions, neighborhoods)
with open('AnalysisNeighborhoods.geojson') as f:
data = json.loads(f.read())
years = [i for i in range(1997, 2019)]
for neighborhood_obj in data['features']:
neighborhood_name = neighborhood_obj['properties']['nhood']
neighborhood_obj['properties']['evictions'] = {}
neighborhood_obj['properties']['evictions']['total'] = locations_dict[neighborhood_name]
for year in years:
neighborhood_obj['properties']['evictions'][str(year)] = locations_with_years_dict[neighborhood_name][year]
with open('AnalysisNeighborhoods.geojson', 'w') as f:
json.dump(data, f) | null | null | null | null | [
0
] |
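# Hypothetical toy example, separate from the record above: the geometric core of the
# script is shapely's point-in-polygon test, polygon.contains(point). A self-contained
# version with a made-up unit square standing in for a neighborhood boundary:
from shapely.geometry import Point, Polygon

square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
print(square.contains(Point(0.5, 0.5)))  # True  - point lies inside
print(square.contains(Point(2.0, 2.0)))  # False - point lies outside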
1,819 | 1b4c86fe3aae25aeec6cd75fa8177983ce9d14a2 | #!/usr/bin/python2.7
import os, sys
COMPILER = "gcc"
SRC_DIR = "../src"
INCLUDE_DIR = "../src"
BIN_DIR = "../bin"
BIN_NAME = False
CFLAGS = ["-O3", "-Wall", "-Wextra", "--std=c89", "-pedantic"]
DLIBS = ["ws2_32"] if os.name == "nt" else []
DEFINES = []
def strformat(fmt, var):
for k in var:
fmt = fmt.replace("{%s}" % str(k), var[k])
return fmt
def listdir(path):
return [os.path.join(dp, f) for dp, dn, fn in os.walk(path) for f in fn]
def main():
os.chdir(sys.path[0])
if len(sys.argv) < 2:
print "usage: build.py c_file"
sys.exit()
global BIN_NAME
if not BIN_NAME:
BIN_NAME = sys.argv[1].replace(".c", ".exe" if os.name == "nt" else "")
if not os.path.exists(BIN_DIR):
os.makedirs(BIN_DIR)
cfiles = filter(lambda x:x.endswith((".c", ".C")), listdir(SRC_DIR))
cfiles.append(sys.argv[1])
cmd = strformat(
"{compiler} {flags} {include} {def} -o {outfile} {srcfiles} {libs} {argv}",
{
"compiler" : COMPILER,
"flags" : " ".join(CFLAGS),
"include" : "-I" + INCLUDE_DIR,
"def" : " ".join(map(lambda x: "-D " + x, DEFINES)),
"outfile" : BIN_DIR + "/" + BIN_NAME,
"srcfiles" : " ".join(cfiles),
"libs" : " ".join(map(lambda x: "-l" + x, DLIBS)),
"argv" : " ".join(sys.argv[2:])
})
print "compiling..."
res = os.system(cmd)
if not res:
print(BIN_DIR + "/" + BIN_NAME)
print("done" + (" with errors" if res else ""))
if __name__ == "__main__":
main()
| null | null | null | null | [
0
] |
1,820 | 1aed8e92a31ee42a3a609123af927f7074598ec1 | <mask token>
| class Solution(object):
<mask token>
<mask token>
| class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if len(strs) == 0:
return ''
if len(strs) == 1:
return strs
res = []
min_ = strs[0]
for i in range(len(strs)):
if min_ > strs[i]:
min_ = strs[i]
for i in range(len(min_)):
count = 0
for j in range(len(strs)):
if min_[i] in strs[j][i]:
count += 1
if count == len(strs):
res.append(min_[i])
else:
break
return ''.join(res)
<mask token>
| class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if len(strs) == 0:
return ''
if len(strs) == 1:
return strs
res = []
min_ = strs[0]
for i in range(len(strs)):
if min_ > strs[i]:
min_ = strs[i]
for i in range(len(min_)):
count = 0
for j in range(len(strs)):
if min_[i] in strs[j][i]:
count += 1
if count == len(strs):
res.append(min_[i])
else:
break
return ''.join(res)
if __name__ == '__main__':
a = ['abc', 'abcc', 'asc', 'abcd']
b = ['c', 'c']
print(Solution().longestCommonPrefix(b))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/7/14 下午6:06
# @Author : Huang HUi
# @Site :
# @File : Longest Common Prefix.py
# @Software: PyCharm
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if len(strs)==0:
return ''
if len(strs)==1 :
return strs
res=[]
min_=strs[0]
for i in range(len(strs)):
if min_>strs[i]:
min_=strs[i]
for i in range(len(min_)):
count=0
for j in range(len(strs)):
if min_[i] in strs[j][i]:
count+=1
if count==len(strs):
res.append(min_[i])
else:
break
return ''.join(res)
if __name__ == '__main__':
a=["abc","abcc","asc","abcd"]
b=["c","c"]
print(Solution().longestCommonPrefix(b))
| [
0,
1,
2,
3,
4
] |
1,821 | 63edbbbad9561ddae005d2b5e22a089819dc34c5 | <mask token>
class Agent(object):
<mask token>
def __init__(self, q, epsilon=0.8, discount=0.9, learningRate=0.5,
traceDecay=0.3):
possibleChangesPerMagnet = 0.01, 0.001, 0, -0.01, -0.001
self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for
x, y in product(possibleChangesPerMagnet, possibleChangesPerMagnet)
)
self.epsilon = epsilon
self.q = q
self.shortMemory = []
self.memorySize = 1
self.traceDecay = traceDecay
self.replayMemory = []
self.replayMemorySize = int(10000.0)
self.discount = discount
self.learningRate = learningRate
return
<mask token>
def bestAction(self, state, isTensor=False):
"""returns best action and it's rating"""
if not isTensor:
allActions = torch.stack(tuple(torch.cat((state.strengths,
state.focus, changes)) for changes in self.actionSet))
else:
allActions = torch.stack(tuple(torch.cat((state, changes)) for
changes in self.actionSet))
allValues = self.q.evaluateBunch(allActions)
bestIndex = allValues.argmax()
bestAction = allActions[bestIndex, -2:]
return bestAction, allValues[bestIndex]
def remember(self, transition):
"""place a transition in the memory"""
for memory in self.shortMemory:
memory *= self.traceDecay * self.discount
if len(self.shortMemory) < self.memorySize:
self.shortMemory.append(transition)
else:
del self.shortMemory[0]
self.shortMemory.append(transition)
return
def getShortMemory(self):
return self.shortMemory
<mask token>
def learn(self, netInput, labels):
"""train Q-function"""
self.q.trainer.applyUpdate(netInput, labels)
return
def getSarsaLambda(self, shortMemory):
"""generate TD lambda update targets from short memory"""
delta = shortMemory[-1].reward + self.discount * self.q.evaluate(self
.takeAction(shortMemory[-1].nextState)) - self.q.evaluate(
shortMemory[-1].action)
netInput = []
for memory in shortMemory:
netInput.append(torch.cat((memory.action.state.strengths,
memory.action.state.focus, memory.action.changes)))
netInput = torch.stack(netInput)
labels = []
for memory in shortMemory:
labels.append(self.learningRate * delta * memory.action.eligibility
)
labels = torch.tensor(labels)
labels = torch.unsqueeze(labels, 1)
return netInput, labels
def getDQN(self, shortMemory):
"""generates DQN update targets from short memory"""
sampleSize = 1
if len(shortMemory) < sampleSize:
sample = shortMemory
else:
sample = random.sample(shortMemory, sampleSize)
netInput = []
for memory in sample:
netInput.append(torch.cat((memory.action.state.strengths,
memory.action.state.focus, memory.action.changes)))
netInput = torch.stack(netInput)
labels = []
for memory in sample:
if memory.nextState:
labels.append(memory.reward)
else:
currentQ = self.q.evaluate(memory.action)
labels.append(currentQ + self.learningRate * (self.discount *
self.q.evaluateMax(memory.nextState, self.actionSet) -
currentQ))
labels = torch.tensor(labels)
labels = torch.unsqueeze(labels, 1)
return netInput.float(), labels.float()
| <mask token>
class Agent(object):
<mask token>
def __init__(self, q, epsilon=0.8, discount=0.9, learningRate=0.5,
traceDecay=0.3):
possibleChangesPerMagnet = 0.01, 0.001, 0, -0.01, -0.001
self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for
x, y in product(possibleChangesPerMagnet, possibleChangesPerMagnet)
)
self.epsilon = epsilon
self.q = q
self.shortMemory = []
self.memorySize = 1
self.traceDecay = traceDecay
self.replayMemory = []
self.replayMemorySize = int(10000.0)
self.discount = discount
self.learningRate = learningRate
return
def takeAction(self, state):
"""take an action according to current state"""
if random.uniform(0, 1) < self.epsilon:
allActions = torch.stack(tuple(torch.cat((state.strengths,
state.focus, changes)) for changes in self.actionSet))
evaluation = self.q.evaluateBunch(allActions)
action = Action(state, self.actionSet[evaluation.argmax()])
return action
else:
return Action(state, random.choice(self.actionSet))
def bestAction(self, state, isTensor=False):
"""returns best action and it's rating"""
if not isTensor:
allActions = torch.stack(tuple(torch.cat((state.strengths,
state.focus, changes)) for changes in self.actionSet))
else:
allActions = torch.stack(tuple(torch.cat((state, changes)) for
changes in self.actionSet))
allValues = self.q.evaluateBunch(allActions)
bestIndex = allValues.argmax()
bestAction = allActions[bestIndex, -2:]
return bestAction, allValues[bestIndex]
def remember(self, transition):
"""place a transition in the memory"""
for memory in self.shortMemory:
memory *= self.traceDecay * self.discount
if len(self.shortMemory) < self.memorySize:
self.shortMemory.append(transition)
else:
del self.shortMemory[0]
self.shortMemory.append(transition)
return
def getShortMemory(self):
return self.shortMemory
<mask token>
def learn(self, netInput, labels):
"""train Q-function"""
self.q.trainer.applyUpdate(netInput, labels)
return
def getSarsaLambda(self, shortMemory):
"""generate TD lambda update targets from short memory"""
delta = shortMemory[-1].reward + self.discount * self.q.evaluate(self
.takeAction(shortMemory[-1].nextState)) - self.q.evaluate(
shortMemory[-1].action)
netInput = []
for memory in shortMemory:
netInput.append(torch.cat((memory.action.state.strengths,
memory.action.state.focus, memory.action.changes)))
netInput = torch.stack(netInput)
labels = []
for memory in shortMemory:
labels.append(self.learningRate * delta * memory.action.eligibility
)
labels = torch.tensor(labels)
labels = torch.unsqueeze(labels, 1)
return netInput, labels
def getDQN(self, shortMemory):
"""generates DQN update targets from short memory"""
sampleSize = 1
if len(shortMemory) < sampleSize:
sample = shortMemory
else:
sample = random.sample(shortMemory, sampleSize)
netInput = []
for memory in sample:
netInput.append(torch.cat((memory.action.state.strengths,
memory.action.state.focus, memory.action.changes)))
netInput = torch.stack(netInput)
labels = []
for memory in sample:
if memory.nextState:
labels.append(memory.reward)
else:
currentQ = self.q.evaluate(memory.action)
labels.append(currentQ + self.learningRate * (self.discount *
self.q.evaluateMax(memory.nextState, self.actionSet) -
currentQ))
labels = torch.tensor(labels)
labels = torch.unsqueeze(labels, 1)
return netInput.float(), labels.float()
| <mask token>
class Agent(object):
<mask token>
def __init__(self, q, epsilon=0.8, discount=0.9, learningRate=0.5,
traceDecay=0.3):
possibleChangesPerMagnet = 0.01, 0.001, 0, -0.01, -0.001
self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for
x, y in product(possibleChangesPerMagnet, possibleChangesPerMagnet)
)
self.epsilon = epsilon
self.q = q
self.shortMemory = []
self.memorySize = 1
self.traceDecay = traceDecay
self.replayMemory = []
self.replayMemorySize = int(10000.0)
self.discount = discount
self.learningRate = learningRate
return
def takeAction(self, state):
"""take an action according to current state"""
if random.uniform(0, 1) < self.epsilon:
allActions = torch.stack(tuple(torch.cat((state.strengths,
state.focus, changes)) for changes in self.actionSet))
evaluation = self.q.evaluateBunch(allActions)
action = Action(state, self.actionSet[evaluation.argmax()])
return action
else:
return Action(state, random.choice(self.actionSet))
def bestAction(self, state, isTensor=False):
"""returns best action and it's rating"""
if not isTensor:
allActions = torch.stack(tuple(torch.cat((state.strengths,
state.focus, changes)) for changes in self.actionSet))
else:
allActions = torch.stack(tuple(torch.cat((state, changes)) for
changes in self.actionSet))
allValues = self.q.evaluateBunch(allActions)
bestIndex = allValues.argmax()
bestAction = allActions[bestIndex, -2:]
return bestAction, allValues[bestIndex]
def remember(self, transition):
"""place a transition in the memory"""
for memory in self.shortMemory:
memory *= self.traceDecay * self.discount
if len(self.shortMemory) < self.memorySize:
self.shortMemory.append(transition)
else:
del self.shortMemory[0]
self.shortMemory.append(transition)
return
def getShortMemory(self):
return self.shortMemory
def wipeShortMemory(self):
"""wipe all recent experience"""
self.shortMemory = []
return
def learn(self, netInput, labels):
"""train Q-function"""
self.q.trainer.applyUpdate(netInput, labels)
return
def getSarsaLambda(self, shortMemory):
"""generate TD lambda update targets from short memory"""
delta = shortMemory[-1].reward + self.discount * self.q.evaluate(self
.takeAction(shortMemory[-1].nextState)) - self.q.evaluate(
shortMemory[-1].action)
netInput = []
for memory in shortMemory:
netInput.append(torch.cat((memory.action.state.strengths,
memory.action.state.focus, memory.action.changes)))
netInput = torch.stack(netInput)
labels = []
for memory in shortMemory:
labels.append(self.learningRate * delta * memory.action.eligibility
)
labels = torch.tensor(labels)
labels = torch.unsqueeze(labels, 1)
return netInput, labels
def getDQN(self, shortMemory):
"""generates DQN update targets from short memory"""
sampleSize = 1
if len(shortMemory) < sampleSize:
sample = shortMemory
else:
sample = random.sample(shortMemory, sampleSize)
netInput = []
for memory in sample:
netInput.append(torch.cat((memory.action.state.strengths,
memory.action.state.focus, memory.action.changes)))
netInput = torch.stack(netInput)
labels = []
for memory in sample:
if memory.nextState:
labels.append(memory.reward)
else:
currentQ = self.q.evaluate(memory.action)
labels.append(currentQ + self.learningRate * (self.discount *
self.q.evaluateMax(memory.nextState, self.actionSet) -
currentQ))
labels = torch.tensor(labels)
labels = torch.unsqueeze(labels, 1)
return netInput.float(), labels.float()
| import torch
import random
from itertools import product
from Struct import Action
class Agent(object):
"""the agent"""
def __init__(self, q, epsilon=0.8, discount=0.9, learningRate=0.5,
traceDecay=0.3):
possibleChangesPerMagnet = 0.01, 0.001, 0, -0.01, -0.001
self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for
x, y in product(possibleChangesPerMagnet, possibleChangesPerMagnet)
)
self.epsilon = epsilon
self.q = q
self.shortMemory = []
self.memorySize = 1
self.traceDecay = traceDecay
self.replayMemory = []
self.replayMemorySize = int(10000.0)
self.discount = discount
self.learningRate = learningRate
return
def takeAction(self, state):
"""take an action according to current state"""
if random.uniform(0, 1) < self.epsilon:
allActions = torch.stack(tuple(torch.cat((state.strengths,
state.focus, changes)) for changes in self.actionSet))
evaluation = self.q.evaluateBunch(allActions)
action = Action(state, self.actionSet[evaluation.argmax()])
return action
else:
return Action(state, random.choice(self.actionSet))
def bestAction(self, state, isTensor=False):
"""returns best action and it's rating"""
if not isTensor:
allActions = torch.stack(tuple(torch.cat((state.strengths,
state.focus, changes)) for changes in self.actionSet))
else:
allActions = torch.stack(tuple(torch.cat((state, changes)) for
changes in self.actionSet))
allValues = self.q.evaluateBunch(allActions)
bestIndex = allValues.argmax()
bestAction = allActions[bestIndex, -2:]
return bestAction, allValues[bestIndex]
def remember(self, transition):
"""place a transition in the memory"""
for memory in self.shortMemory:
memory *= self.traceDecay * self.discount
if len(self.shortMemory) < self.memorySize:
self.shortMemory.append(transition)
else:
del self.shortMemory[0]
self.shortMemory.append(transition)
return
def getShortMemory(self):
return self.shortMemory
def wipeShortMemory(self):
"""wipe all recent experience"""
self.shortMemory = []
return
def learn(self, netInput, labels):
"""train Q-function"""
self.q.trainer.applyUpdate(netInput, labels)
return
def getSarsaLambda(self, shortMemory):
"""generate TD lambda update targets from short memory"""
delta = shortMemory[-1].reward + self.discount * self.q.evaluate(self
.takeAction(shortMemory[-1].nextState)) - self.q.evaluate(
shortMemory[-1].action)
netInput = []
for memory in shortMemory:
netInput.append(torch.cat((memory.action.state.strengths,
memory.action.state.focus, memory.action.changes)))
netInput = torch.stack(netInput)
labels = []
for memory in shortMemory:
labels.append(self.learningRate * delta * memory.action.eligibility
)
labels = torch.tensor(labels)
labels = torch.unsqueeze(labels, 1)
return netInput, labels
def getDQN(self, shortMemory):
"""generates DQN update targets from short memory"""
sampleSize = 1
if len(shortMemory) < sampleSize:
sample = shortMemory
else:
sample = random.sample(shortMemory, sampleSize)
netInput = []
for memory in sample:
netInput.append(torch.cat((memory.action.state.strengths,
memory.action.state.focus, memory.action.changes)))
netInput = torch.stack(netInput)
labels = []
for memory in sample:
if memory.nextState:
labels.append(memory.reward)
else:
currentQ = self.q.evaluate(memory.action)
labels.append(currentQ + self.learningRate * (self.discount *
self.q.evaluateMax(memory.nextState, self.actionSet) -
currentQ))
labels = torch.tensor(labels)
labels = torch.unsqueeze(labels, 1)
return netInput.float(), labels.float()
| import torch
import random
from itertools import product
from Struct import Action
class Agent(object):
"""the agent"""
def __init__(self, q, epsilon=0.8, discount=0.9, learningRate=0.5, traceDecay=0.3):
# action set
possibleChangesPerMagnet = (1e-2, 1e-3, 0, -1e-2, -1e-3)
# possibleChangesPerMagnet = (0, -1e-2, -1e-3)
self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for x, y in
product(possibleChangesPerMagnet, possibleChangesPerMagnet))
# probability to act greedy
self.epsilon = epsilon
# Q-function
self.q = q
# memory
self.shortMemory = []
self.memorySize = 1
self.traceDecay = traceDecay
self.replayMemory = []
self.replayMemorySize = int(1e4)
# learning
self.discount = discount
self.learningRate = learningRate
return
def takeAction(self, state):
"""take an action according to current state"""
# go greedy or not?
if random.uniform(0, 1) < self.epsilon:
# greedy selection
# find best action
allActions = torch.stack(
tuple(torch.cat((state.strengths, state.focus, changes)) for changes in self.actionSet))
evaluation = self.q.evaluateBunch(allActions)
action = Action(state, self.actionSet[evaluation.argmax()])
return action
else:
# random selection
return Action(state, random.choice(self.actionSet))
def bestAction(self, state, isTensor=False):
"""returns best action and it's rating"""
# get value for every possible action
if not isTensor:
allActions = torch.stack(
tuple(torch.cat((state.strengths, state.focus, changes)) for changes in self.actionSet))
else:
allActions = torch.stack(
tuple(torch.cat((state, changes)) for changes in self.actionSet))
allValues = self.q.evaluateBunch(allActions)
# determine index of highest value
bestIndex = allValues.argmax()
# get best action
bestAction = allActions[bestIndex, -2:]
return bestAction, allValues[bestIndex]
def remember(self, transition):
"""place a transition in the memory"""
# reduce eligibility for old memories
for memory in self.shortMemory:
memory *= self.traceDecay * self.discount
# add new memory
if len(self.shortMemory) < self.memorySize:
self.shortMemory.append(transition)
else:
del self.shortMemory[0]
self.shortMemory.append(transition)
return
def getShortMemory(self):
return self.shortMemory
def wipeShortMemory(self):
"""wipe all recent experience"""
self.shortMemory = []
return
def learn(self, netInput, labels):
"""train Q-function"""
self.q.trainer.applyUpdate(netInput, labels)
return
def getSarsaLambda(self, shortMemory):
"""generate TD lambda update targets from short memory"""
# get temporal difference error
delta = shortMemory[-1].reward + self.discount * self.q.evaluate(
self.takeAction(shortMemory[-1].nextState)) - self.q.evaluate(shortMemory[-1].action)
# states
netInput = []
for memory in shortMemory:
netInput.append(
torch.cat((memory.action.state.strengths, memory.action.state.focus, memory.action.changes)))
netInput = torch.stack(netInput)
# updates for every state in memory with respect to its eligibility
labels = []
for memory in shortMemory:
labels.append(self.learningRate * delta * memory.action.eligibility)
labels = torch.tensor(labels)
labels = torch.unsqueeze(labels, 1)
return netInput, labels
def getDQN(self, shortMemory):
"""generates DQN update targets from short memory"""
# sampleSize = self.memorySize // 5 # use only with traces (= short memory larger than 5 entries)
sampleSize = 1
if len(shortMemory) < sampleSize:
sample = shortMemory
else:
sample = random.sample(shortMemory, sampleSize)
# states
netInput = []
for memory in sample:
netInput.append(
torch.cat((memory.action.state.strengths, memory.action.state.focus, memory.action.changes)))
netInput = torch.stack(netInput)
# updates for Q-values
labels = []
for memory in sample:
if memory.nextState:
labels.append(memory.reward)
else:
currentQ = self.q.evaluate(memory.action)
labels.append(currentQ + self.learningRate * (
self.discount * self.q.evaluateMax(memory.nextState, self.actionSet) - currentQ))
labels = torch.tensor(labels)
labels = torch.unsqueeze(labels, 1)
return netInput.float(), labels.float() # casting added due to occasional occurrence of LongTensors <- why?
| [
8,
9,
10,
12,
13
] |
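# Hypothetical torch-free sketch, separate from the record above: takeAction splits
# between a greedy and a random choice, with epsilon as the probability of acting
# greedily (the reverse of the usual epsilon-greedy convention). Ratings here are made up.
import random

def pick(ratings, epsilon=0.8):
    if random.uniform(0, 1) < epsilon:
        return max(range(len(ratings)), key=lambda k: ratings[k])  # greedy branch
    return random.randrange(len(ratings))                          # random branch

print(pick([0.1, 0.7, 0.3]))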
1,822 | 41842e8b75860c65e87e9db1f7ae058957e37e45 | <mask token>
| <mask token>
load_dotenv()
<mask token>
print('Ready!')
@bot.command()
async def stop(ctx):
await ctx.message.delete()
await ctx.voice_client.disconnect()
@bot.command()
async def wew(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'wow.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def thicc(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'THICC.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def woof(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'barks.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def welcome(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'Welcome.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def grapefruit(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'grapefruit.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def hello(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'hello.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def winning(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'winning.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def basingstoke(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'basingstoke.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def milleb(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'milleb.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def jew(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'jew.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def whatisgoingonhere(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'here.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def BWEKFAST(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'bwekfast.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
bot.run(TOKEN)
| <mask token>
load_dotenv()
TOKEN = os.getenv('TOKEN')
bot = commands.Bot(command_prefix='.', case_insensitive=True)
print('Ready!')
@bot.command()
async def stop(ctx):
await ctx.message.delete()
await ctx.voice_client.disconnect()
@bot.command()
async def wew(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'wow.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def thicc(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'THICC.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def woof(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'barks.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def welcome(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'Welcome.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def grapefruit(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'grapefruit.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def hello(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'hello.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def winning(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'winning.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def basingstoke(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'basingstoke.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def milleb(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'milleb.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def jew(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'jew.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def whatisgoingonhere(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'here.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def BWEKFAST(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'bwekfast.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
bot.run(TOKEN)
| import discord
from discord.ext import commands
import asyncio
import glob
from dotenv import load_dotenv
import os
load_dotenv()
TOKEN = os.getenv('TOKEN')
bot = commands.Bot(command_prefix='.', case_insensitive=True)
print('Ready!')
@bot.command()
async def stop(ctx):
await ctx.message.delete()
await ctx.voice_client.disconnect()
@bot.command()
async def wew(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'wow.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def thicc(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'THICC.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def woof(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'barks.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def welcome(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'Welcome.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def grapefruit(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'grapefruit.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def hello(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'hello.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def winning(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'winning.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def basingstoke(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'basingstoke.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def milleb(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'milleb.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def jew(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'jew.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def whatisgoingonhere(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'here.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def BWEKFAST(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'bwekfast.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
bot.run(TOKEN)
| #Import Discord Package
import discord
from discord.ext import commands
import asyncio
import glob
from dotenv import load_dotenv
import os
load_dotenv() # Load your Discord Token
TOKEN = os.getenv("TOKEN")
bot = commands.Bot(command_prefix='.',case_insensitive=True)
print('Ready!')
@bot.command()
async def stop(ctx):
await ctx.message.delete()
await ctx.voice_client.disconnect()
@bot.command()
async def wew(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("wow.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def thicc(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("THICC.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def woof(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("barks.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def welcome(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("Welcome.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def grapefruit(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("grapefruit.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def hello(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("hello.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def winning(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("winning.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def basingstoke(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("basingstoke.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def milleb(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("milleb.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def jew(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("jew.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def whatisgoingonhere(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("here.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def BWEKFAST(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("bwekfast.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
bot.run (TOKEN)
| [
0,
1,
2,
3,
4
] |
1,823 | e3e6f1b6580a223558791cebfcb1a92d45553162 | <mask token>
| <mask token>
def digital_root(n):
if n < 10:
return n
return digital_root(digital_sum(n))
| def digital_sum(n):
if n < 10:
return n
return n % 10 + digital_sum(n // 10)
def digital_root(n):
if n < 10:
return n
return digital_root(digital_sum(n))
| null | null | [
0,
1,
2
] |
1,824 | c5ac37ce09f7cd76ccd9b93c64e602209a04c55c | <mask token>
| <mask token>
file.write(response.content)
file.close()
<mask token>
bot.start()
run()
| <mask token>
response = requests.get(BG_IMAGE)
file = open('./etc/tg_vc_bot.jpg', 'wb')
file.write(response.content)
file.close()
bot = Bot(':memory:', API_ID, API_HASH, bot_token=BOT_TOKEN, plugins=dict(
root='samantha.modules'))
bot.start()
run()
| import requests
from pyrogram import Client as Bot
from samantha.config import API_HASH, API_ID, BG_IMAGE, BOT_TOKEN
from samantha.services.callsmusic import run
response = requests.get(BG_IMAGE)
file = open('./etc/tg_vc_bot.jpg', 'wb')
file.write(response.content)
file.close()
bot = Bot(':memory:', API_ID, API_HASH, bot_token=BOT_TOKEN, plugins=dict(
root='samantha.modules'))
bot.start()
run()
| import requests
from pyrogram import Client as Bot
from samantha.config import API_HASH, API_ID, BG_IMAGE, BOT_TOKEN
from samantha.services.callsmusic import run
response = requests.get(BG_IMAGE)
file = open("./etc/tg_vc_bot.jpg", "wb")
file.write(response.content)
file.close()
bot = Bot(
":memory:",
API_ID,
API_HASH,
bot_token=BOT_TOKEN,
plugins=dict(root="samantha.modules"),
)
bot.start()
run()
| [
0,
1,
2,
3,
4
] |
1,825 | f4c90a6d6afdcf78ec6742b1924a5c854a5a4ed6 | <mask token>
| <mask token>
def take_shot(filename):
os.system('screencapture ' + filename + '.png')
| import os
def take_shot(filename):
os.system('screencapture ' + filename + '.png')
| import os
def take_shot(filename):
os.system("screencapture "+filename+".png")
| null | [
0,
1,
2,
3
] |
1,826 | 1e41cc5d2661f1fb4f3a356318fabcb2b742cbdf | <mask token>
@app.route('/')
def interactive_input():
return render_template('main.html')
@app.route('/food_1_star')
def food_1_star():
return render_template('food_1.html')
<mask token>
@app.route('/general_5_star')
def general_5_star():
return render_template('general_5.html')
@app.route('/food_1')
def food_1():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='food_1_star_large')
my_prediction = gpt2.generate(sess, run_name='food_1_star_large',
temperature=complexity, length=15, prefix=lang, sample_delim=
'<|endoftext|>', include_prefix=False, nsamples=3,
return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1=res1, result2=res2, result3=res3)
except Exception as e:
return str(e)
@app.route('/food_5')
def food_5():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='food_5_star_large')
my_prediction = gpt2.generate(sess, run_name='food_5_star_large',
temperature=complexity, length=15, prefix=lang, sample_delim=
'<|endoftext|>', include_prefix=False, nsamples=3,
return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1=res1, result2=res2, result3=res3)
except Exception as e:
return str(e)
<mask token>
@app.route('/general_5')
def general_5():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='general_5_star_large')
my_prediction = gpt2.generate(sess, run_name='general_5_star_large',
temperature=complexity, length=15, prefix=lang, sample_delim=
'<|endoftext|>', include_prefix=False, nsamples=3,
return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1=res1, result2=res2, result3=res3)
except Exception as e:
return str(e)
<mask token>
| <mask token>
@app.route('/')
def interactive_input():
return render_template('main.html')
@app.route('/food_1_star')
def food_1_star():
return render_template('food_1.html')
<mask token>
@app.route('/general_5_star')
def general_5_star():
return render_template('general_5.html')
@app.route('/food_1')
def food_1():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='food_1_star_large')
my_prediction = gpt2.generate(sess, run_name='food_1_star_large',
temperature=complexity, length=15, prefix=lang, sample_delim=
'<|endoftext|>', include_prefix=False, nsamples=3,
return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1=res1, result2=res2, result3=res3)
except Exception as e:
return str(e)
@app.route('/food_5')
def food_5():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='food_5_star_large')
my_prediction = gpt2.generate(sess, run_name='food_5_star_large',
temperature=complexity, length=15, prefix=lang, sample_delim=
'<|endoftext|>', include_prefix=False, nsamples=3,
return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1=res1, result2=res2, result3=res3)
except Exception as e:
return str(e)
@app.route('/general_1')
def general_1():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='general_1_star_large')
my_prediction = gpt2.generate(sess, run_name='general_1_star_large',
temperature=complexity, length=15, prefix=lang, sample_delim=
'<|endoftext|>', include_prefix=False, nsamples=3,
return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1=res1, result2=res2, result3=res3)
except Exception as e:
return str(e)
@app.route('/general_5')
def general_5():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='general_5_star_large')
my_prediction = gpt2.generate(sess, run_name='general_5_star_large',
temperature=complexity, length=15, prefix=lang, sample_delim=
'<|endoftext|>', include_prefix=False, nsamples=3,
return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1=res1, result2=res2, result3=res3)
except Exception as e:
return str(e)
<mask token>
| <mask token>
@app.route('/')
def interactive_input():
return render_template('main.html')
@app.route('/food_1_star')
def food_1_star():
return render_template('food_1.html')
@app.route('/food_5_star')
def food_5_star():
return render_template('food_5.html')
@app.route('/general_1_star')
def general_1_star():
return render_template('general_1.html')
@app.route('/general_5_star')
def general_5_star():
return render_template('general_5.html')
@app.route('/food_1')
def food_1():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='food_1_star_large')
my_prediction = gpt2.generate(sess, run_name='food_1_star_large',
temperature=complexity, length=15, prefix=lang, sample_delim=
'<|endoftext|>', include_prefix=False, nsamples=3,
return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1=res1, result2=res2, result3=res3)
except Exception as e:
return str(e)
@app.route('/food_5')
def food_5():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='food_5_star_large')
my_prediction = gpt2.generate(sess, run_name='food_5_star_large',
temperature=complexity, length=15, prefix=lang, sample_delim=
'<|endoftext|>', include_prefix=False, nsamples=3,
return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1=res1, result2=res2, result3=res3)
except Exception as e:
return str(e)
@app.route('/general_1')
def general_1():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='general_1_star_large')
my_prediction = gpt2.generate(sess, run_name='general_1_star_large',
temperature=complexity, length=15, prefix=lang, sample_delim=
'<|endoftext|>', include_prefix=False, nsamples=3,
return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1=res1, result2=res2, result3=res3)
except Exception as e:
return str(e)
@app.route('/general_5')
def general_5():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='general_5_star_large')
my_prediction = gpt2.generate(sess, run_name='general_5_star_large',
temperature=complexity, length=15, prefix=lang, sample_delim=
'<|endoftext|>', include_prefix=False, nsamples=3,
return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1=res1, result2=res2, result3=res3)
except Exception as e:
return str(e)
<mask token>
| <mask token>
app = Flask(__name__)
Bootstrap(app)
@app.route('/')
def interactive_input():
return render_template('main.html')
@app.route('/food_1_star')
def food_1_star():
return render_template('food_1.html')
@app.route('/food_5_star')
def food_5_star():
return render_template('food_5.html')
@app.route('/general_1_star')
def general_1_star():
return render_template('general_1.html')
@app.route('/general_5_star')
def general_5_star():
return render_template('general_5.html')
@app.route('/food_1')
def food_1():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='food_1_star_large')
my_prediction = gpt2.generate(sess, run_name='food_1_star_large',
temperature=complexity, length=15, prefix=lang, sample_delim=
'<|endoftext|>', include_prefix=False, nsamples=3,
return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1=res1, result2=res2, result3=res3)
except Exception as e:
return str(e)
@app.route('/food_5')
def food_5():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='food_5_star_large')
my_prediction = gpt2.generate(sess, run_name='food_5_star_large',
temperature=complexity, length=15, prefix=lang, sample_delim=
'<|endoftext|>', include_prefix=False, nsamples=3,
return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1=res1, result2=res2, result3=res3)
except Exception as e:
return str(e)
@app.route('/general_1')
def general_1():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='general_1_star_large')
my_prediction = gpt2.generate(sess, run_name='general_1_star_large',
temperature=complexity, length=15, prefix=lang, sample_delim=
'<|endoftext|>', include_prefix=False, nsamples=3,
return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1=res1, result2=res2, result3=res3)
except Exception as e:
return str(e)
@app.route('/general_5')
def general_5():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='general_5_star_large')
my_prediction = gpt2.generate(sess, run_name='general_5_star_large',
temperature=complexity, length=15, prefix=lang, sample_delim=
'<|endoftext|>', include_prefix=False, nsamples=3,
return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1=res1, result2=res2, result3=res3)
except Exception as e:
return str(e)
if __name__ == '__main__':
app.run(debug=True)
| from flask import Flask,Response,render_template,url_for,request,jsonify
from flask_bootstrap import Bootstrap
import pandas as pd
import gpt_2_simple as gpt2
import json
app = Flask(__name__)
Bootstrap(app)
#Main Page
@app.route('/')
def interactive_input():
return render_template('main.html')
#Creating the different routes
@app.route('/food_1_star')
def food_1_star():
return render_template('food_1.html')
@app.route('/food_5_star')
def food_5_star():
return render_template('food_5.html')
@app.route('/general_1_star')
def general_1_star():
return render_template('general_1.html')
@app.route('/general_5_star')
def general_5_star():
return render_template('general_5.html')
#Reactive function that will enable the code to run
@app.route('/food_1')
def food_1():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='food_1_star_large')
my_prediction = gpt2.generate(sess, run_name= 'food_1_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1 = res1, result2 = res2, result3 = res3)
except Exception as e:
return str(e)
#Reactive function that will enable the code to run
@app.route('/food_5')
def food_5():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='food_5_star_large')
my_prediction = gpt2.generate(sess, run_name= 'food_5_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1 = res1, result2 = res2, result3 = res3)
except Exception as e:
return str(e)
#Reactive function that will enable the code to run
@app.route('/general_1')
def general_1():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='general_1_star_large')
my_prediction = gpt2.generate(sess, run_name= 'general_1_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1 = res1, result2 = res2, result3 = res3)
except Exception as e:
return str(e)
#Reactive function that will enable the code to run
@app.route('/general_5')
def general_5():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='general_5_star_large')
my_prediction = gpt2.generate(sess, run_name= 'general_5_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1 = res1, result2 = res2, result3 = res3)
except Exception as e:
return str(e)
if __name__ == '__main__':
app.run(debug=True) | [
6,
7,
9,
11,
13
] |
1,827 | fab7ee8a7336ba2c044adce4cc8483af78b775ba | <mask token>
| <mask token>
setup(name='RBM', version='0.0.1', description=
'Restricted Boltzmann Machines', long_description='README',
install_requires=['numpy', 'pandas'])
| from distutils.core import setup
setup(name='RBM', version='0.0.1', description=
'Restricted Boltzmann Machines', long_description='README',
install_requires=['numpy', 'pandas'])
| #!/usr/bin/env python
from distutils.core import setup
setup(
name='RBM',
version='0.0.1',
description='Restricted Boltzmann Machines',
long_description='README',
install_requires=['numpy','pandas'],
)
| null | [
0,
1,
2,
3
] |
1,828 | 3329db63552592aabb751348efc5d983f2cc3f36 | <mask token>
class Controlador(object):
def __init__(self, vista, modelo, vista2):
self._mi_vista = vista
self._mi_modelo = modelo
self._mi2_ventana = vista2
def recibirruta(self, r):
self._mi_modelo.recibirruta(r)
def recibirtipodearchivo(self, tipefile):
self._mi_modelo.recibirtipodearchivo(tipefile)
def loadsignals(self, l):
mini, maxi = self._mi_modelo.loadsignals(l)
return mini, maxi
def graph(self, ch, m, mx):
senal = self._mi_modelo.graph(ch, m, mx)
return senal
<mask token>
<mask token>
def esposible(self):
return self._mi_modelo.possiblesave
<mask token>
| <mask token>
class Controlador(object):
def __init__(self, vista, modelo, vista2):
self._mi_vista = vista
self._mi_modelo = modelo
self._mi2_ventana = vista2
def recibirruta(self, r):
self._mi_modelo.recibirruta(r)
def recibirtipodearchivo(self, tipefile):
self._mi_modelo.recibirtipodearchivo(tipefile)
def loadsignals(self, l):
mini, maxi = self._mi_modelo.loadsignals(l)
return mini, maxi
def graph(self, ch, m, mx):
senal = self._mi_modelo.graph(ch, m, mx)
return senal
def filtrar(self, ch, tr, tw, tt):
senal, senalfiltrada = self._mi_modelo.filtrar(ch, tr, tw, tt)
return senal, senalfiltrada
<mask token>
def esposible(self):
return self._mi_modelo.possiblesave
<mask token>
| <mask token>
class Controlador(object):
def __init__(self, vista, modelo, vista2):
self._mi_vista = vista
self._mi_modelo = modelo
self._mi2_ventana = vista2
def recibirruta(self, r):
self._mi_modelo.recibirruta(r)
def recibirtipodearchivo(self, tipefile):
self._mi_modelo.recibirtipodearchivo(tipefile)
def loadsignals(self, l):
mini, maxi = self._mi_modelo.loadsignals(l)
return mini, maxi
def graph(self, ch, m, mx):
senal = self._mi_modelo.graph(ch, m, mx)
return senal
def filtrar(self, ch, tr, tw, tt):
senal, senalfiltrada = self._mi_modelo.filtrar(ch, tr, tw, tt)
return senal, senalfiltrada
def guardarfil(self, ch, archivo):
self._mi_modelo.guardarfil(ch, archivo)
def esposible(self):
return self._mi_modelo.possiblesave
<mask token>
| <mask token>
class Controlador(object):
def __init__(self, vista, modelo, vista2):
self._mi_vista = vista
self._mi_modelo = modelo
self._mi2_ventana = vista2
def recibirruta(self, r):
self._mi_modelo.recibirruta(r)
def recibirtipodearchivo(self, tipefile):
self._mi_modelo.recibirtipodearchivo(tipefile)
def loadsignals(self, l):
mini, maxi = self._mi_modelo.loadsignals(l)
return mini, maxi
def graph(self, ch, m, mx):
senal = self._mi_modelo.graph(ch, m, mx)
return senal
def filtrar(self, ch, tr, tw, tt):
senal, senalfiltrada = self._mi_modelo.filtrar(ch, tr, tw, tt)
return senal, senalfiltrada
def guardarfil(self, ch, archivo):
self._mi_modelo.guardarfil(ch, archivo)
def esposible(self):
return self._mi_modelo.possiblesave
if __name__ == '__main__':
app = QApplication(sys.argv)
mi_vista = Ventanainicio()
mi_modelo = ventanadentrada()
mi_2vista = dosventana()
mi_controlador = Controlador(mi_vista, mi_modelo, mi_2vista)
mi_vista.asignarcontrolador(mi_controlador)
mi_vista.show()
app.exec_()
if mi_modelo.changepage == 1:
mi_2vista.asignarcontrolador(mi_controlador)
mi_2vista.show()
sys.exit(app.exec_())
| # -*- coding: utf-8 -*-
"""
Created on Sat Mar 21 09:46:47 2020
@author: Carlos Jose Munoz
"""
# the model and the views are imported so they can communicate through the controller
from Modelo import ventanadentrada
from Vista import Ventanainicio,dosventana
import sys
from PyQt5.QtWidgets import QApplication
class Controlador(object): # object that receives the commands from the interface, forwards them to the model and carries out the required action
def __init__(self, vista,modelo,vista2):
        self._mi_vista=vista # attribute used to open the first window
        self._mi_modelo= modelo # opens the model
        self._mi2_ventana=vista2 # opens the second window
    def recibirruta(self, r): # receives the file path and passes it to the model
self._mi_modelo.recibirruta(r)
    def recibirtipodearchivo(self, tipefile): # receives the file type so the window change can be made
self._mi_modelo.recibirtipodearchivo(tipefile)
    def loadsignals(self,l): # returns the initial time values according to the signal type
mini, maxi=self._mi_modelo.loadsignals(l)
return mini, maxi
    def graph(self,ch,m,mx): # returns the signal (all channels or a single one) and the time values to be plotted
senal= self._mi_modelo.graph(ch,m,mx)
return senal
    def filtrar(self,ch,tr,tw,tt): # returns the original signal (channel) and the filtered signal returned by the model, depending on the filter type
senal, senalfiltrada= self._mi_modelo.filtrar(ch,tr,tw,tt)
return senal, senalfiltrada
    def guardarfil(self,ch,archivo): # receives the path, file name and channel to save the filtered signal
self._mi_modelo.guardarfil(ch,archivo)
    def esposible(self): # enables the button for saving the filtered signal
return self._mi_modelo.possiblesave
if __name__ == '__main__': # program entry point; this is the main program that runs
app=QApplication(sys.argv)
    mi_vista=Ventanainicio(); # object associated with the initial window
    mi_modelo=ventanadentrada(); # object associated with the model
    mi_2vista=dosventana(); # object associated with the visualization window
    mi_controlador= Controlador(mi_vista,mi_modelo,mi_2vista) # object that links the windows with the models
    # assign the controller to the view
    mi_vista.asignarcontrolador(mi_controlador) # used to make the link between the view and the controller
    mi_vista.show() # creates the initial window
app.exec_();
    if (mi_modelo.changepage==1): # if it is possible to move on to the second window, the secondary window is created
mi_2vista.asignarcontrolador(mi_controlador)
mi_2vista.show();
sys.exit(app.exec_());
| [
7,
8,
9,
10,
12
] |
1,829 | 05573b4ff68ca8638f8e13946b410df2a012840a | <mask token>
| <mask token>
def song_inference():
sp_total_model_path = 'sp_total'
train = pd.read_json('./dataset/train.json', typ='frame', encoding='utf-8')
song = pd.read_json('./dataset/song_meta.json', typ='frame', encoding=
'utf-8')
plylst_tag = train['tags']
tag_counter = Counter([tg for tgs in plylst_tag for tg in tgs])
tag_dict = {x: tag_counter[x] for x in tag_counter}
tag_id_tid = dict()
tag_tid_id = dict()
for i, t in enumerate(tag_dict):
tag_id_tid[t] = i
tag_tid_id[i] = t
n_tags = len(tag_dict)
plylst_song = train['songs']
song_dict = {x: x for x in song['id']}
n_songs = len(song_dict)
train['tags_id'] = train['tags'].map(lambda x: [tag_id_tid.get(t) for t in
x if tag_id_tid.get(t) != None])
song_cate = []
for i in range(len(train)):
gnr = []
songs = train.iloc[i, 3]
for j in songs:
for k in song.loc[j, 'song_gn_dtl_gnr_basket']:
gnr.append(k)
song_cate.append(gnr)
train['plylst_genre'] = song_cate
plylst_genre = train['plylst_genre']
genre_counter = Counter([gen for genre in plylst_genre for gen in genre])
genre_dict = {x: genre_counter[x] for x in genre_counter}
genre_id_tid = dict()
genre_tid_id = dict()
for i, t in enumerate(genre_dict):
genre_id_tid[t] = i
genre_tid_id[i] = t
n_genre = len(genre_dict)
train['plylst_genre_id'] = train['plylst_genre'].map(lambda x: [
genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])
gnr_array = np.zeros((len(train), n_genre))
for i, index in enumerate(train.index):
if i % 10000 == 0:
print(i)
counter = Counter(train.loc[index]['plylst_genre_id'])
for k, c in counter.items():
gnr_array[i][k] = c
gnr_array.shape
song['issue_date'] = song['issue_date'].astype('str').map(lambda x: x[:6])
plylst_use = train[['plylst_title', 'updt_date', 'tags_id', 'songs']]
plylst_use.loc[:, 'num_songs'] = plylst_use['songs'].map(len)
plylst_use.loc[:, 'num_tags'] = plylst_use['tags_id'].map(len)
plylst_train = plylst_use
n_train = len(plylst_train)
row = np.repeat(range(n_train), plylst_train['num_songs'])
col = [song for songs in plylst_train['songs'] for song in songs]
dat = np.repeat(1, plylst_train['num_songs'].sum())
train_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_train,
n_songs))
row = np.repeat(range(n_train), plylst_train['num_tags'])
col = [tag for tags in plylst_train['tags_id'] for tag in tags]
dat = np.repeat(1, plylst_train['num_tags'].sum())
train_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_train,
n_tags))
train_user_songs_A_T = train_user_songs_A.T.tocsr()
train_user_songs_A_T
train_user_tags_A_T = train_user_tags_A.T.tocsr()
train_user_tags_A_T
val = pd.read_json('./dataset/val.json', typ='frame', encoding='utf-8')
song_cate = []
for i in range(len(val)):
gnr = []
songs = val.iloc[i, 3]
for j in songs:
for k in song.loc[j, 'song_gn_dtl_gnr_basket']:
gnr.append(k)
song_cate.append(gnr)
val['plylst_genre'] = song_cate
val['tags_id'] = val['tags'].map(lambda x: [tag_id_tid.get(t) for t in
x if tag_id_tid.get(t) != None])
val['plylst_genre_id'] = val['plylst_genre'].map(lambda x: [
genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])
val.loc[:, 'num_songs'] = val['songs'].map(len)
val.loc[:, 'num_tags'] = val['tags_id'].map(len)
gnr_val = np.zeros((len(val), n_genre))
for i, index in enumerate(val.index):
if i % 10000 == 0:
print(i)
counter = Counter(val.loc[index]['plylst_genre_id'])
for k, c in counter.items():
gnr_val[i][k] = c
gnr_val.shape
n_val = len(val)
row = np.repeat(range(n_val), val['num_songs'])
col = [song for songs in val['songs'] for song in songs]
dat = np.repeat(1, val['num_songs'].sum())
val_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_songs)
)
row = np.repeat(range(n_val), val['num_tags'])
col = [tag for tags in val['tags_id'] for tag in tags]
dat = np.repeat(1, val['num_tags'].sum())
val_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_tags))
val_user_songs_A_T = val_user_songs_A.T.tocsr()
val_user_tags_A_T = val_user_tags_A.T.tocsr()
test = pd.read_json('./dataset/test.json', typ='frame', encoding='utf-8')
song_cate = []
for i in range(len(test)):
gnr = []
songs = test.iloc[i, 3]
for j in songs:
for k in song.loc[j, 'song_gn_dtl_gnr_basket']:
gnr.append(k)
song_cate.append(gnr)
test['plylst_genre'] = song_cate
test['tags_id'] = test['tags'].map(lambda x: [tag_id_tid.get(t) for t in
x if tag_id_tid.get(t) != None])
test['plylst_genre_id'] = test['plylst_genre'].map(lambda x: [
genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])
test.loc[:, 'num_songs'] = test['songs'].map(len)
test.loc[:, 'num_tags'] = test['tags_id'].map(len)
gnr_test = np.zeros((len(test), n_genre))
for i, index in enumerate(test.index):
if i % 10000 == 0:
print(i)
counter = Counter(test.loc[index]['plylst_genre_id'])
for k, c in counter.items():
gnr_test[i][k] = c
gnr_test.shape
n_test = len(test)
row = np.repeat(range(n_test), test['num_songs'])
col = [song for songs in test['songs'] for song in songs]
dat = np.repeat(1, test['num_songs'].sum())
test_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_test,
n_songs))
row = np.repeat(range(n_test), test['num_tags'])
col = [tag for tags in test['tags_id'] for tag in tags]
dat = np.repeat(1, test['num_tags'].sum())
test_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_test, n_tags)
)
test_user_songs_A_T = test_user_songs_A.T.tocsr()
test_user_tags_A_T = test_user_tags_A.T.tocsr()
data_all = pd.concat([train, val, test])
data_all.index = range(len(data_all))
arts = song['artist_id_basket'].map(lambda x: x[0])
arts = pd.DataFrame(arts)
art_counts = arts['artist_id_basket'].value_counts().reset_index()
art_counts.columns = ['artist_id_basket', 'counts']
arts2 = pd.merge(arts, art_counts, how='left', on=['artist_id_basket'])
song_art = song.iloc[arts2.query('counts >= 12')['artist_id_basket'].index]
song_art = song_art[['artist_id_basket']]
ART_cate = []
for i in tqdm_notebook(range(len(data_all))):
ART = []
songs = data_all.loc[i, 'songs']
for j in songs:
if j in song_art.index:
for k in song_art.loc[j, 'artist_id_basket']:
ART.append(k)
ART_cate.append(ART)
data_all['plylst_ARTIST'] = ART_cate
plylst_ARTIST = data_all['plylst_ARTIST']
ARTIST_counter = Counter([ART for ARTIST in plylst_ARTIST for ART in
ARTIST])
ARTIST_dict = {x: ARTIST_counter[x] for x in ARTIST_counter}
ARTIST_id_tid = dict()
ARTIST_tid_id = dict()
for i, t in enumerate(ARTIST_dict):
ARTIST_id_tid[t] = i
ARTIST_tid_id[i] = t
n_ARTIST = len(ARTIST_dict)
data_all['plylst_ARTIST_id'] = data_all['plylst_ARTIST'].map(lambda x:
[ARTIST_id_tid.get(s) for s in x if ARTIST_id_tid.get(s) != None])
ART_data_all = np.zeros((len(data_all), n_ARTIST))
for i, index in enumerate(data_all.index):
if i % 10000 == 0:
print(i)
counter = Counter(data_all.loc[index]['plylst_ARTIST_id'])
for k, c in counter.items():
ART_data_all[i][k] = c
ART_data_all.shape
ART_array = ART_data_all[:len(train)]
ART_val = ART_data_all[len(train):len(train) + len(val)]
ART_test = ART_data_all[len(train) + len(val):len(train) + len(val) +
len(test)]
del ART_data_all
ART_array = sparse.csr_matrix(ART_array)
ART_val = sparse.csr_matrix(ART_val)
ART_test = sparse.csr_matrix(ART_test)
tim_cate = []
for i in tqdm_notebook(range(len(data_all))):
tim = []
songs = data_all.loc[i, 'songs']
for j in songs:
tim.append(song.loc[j, 'issue_date'])
tim_cate.append(tim)
data_all['plylst_times'] = tim_cate
plylst_times = data_all['plylst_times']
times_counter = Counter([tim for times in plylst_times for tim in times])
times_dict = {x: times_counter[x] for x in times_counter}
times_id_tid = dict()
times_tid_id = dict()
for i, t in enumerate(times_dict):
times_id_tid[t] = i
times_tid_id[i] = t
n_times = len(times_dict)
data_all['plylst_times_id'] = data_all['plylst_times'].map(lambda x: [
times_id_tid.get(s) for s in x if times_id_tid.get(s) != None])
tim_data_all = np.zeros((len(data_all), n_times))
for i, index in enumerate(data_all.index):
if i % 10000 == 0:
print(i)
counter = Counter(data_all.loc[index]['plylst_times_id'])
for k, c in counter.items():
tim_data_all[i][k] = c
tim_array = tim_data_all[:len(train)]
tim_val = tim_data_all[len(train):len(train) + len(val)]
tim_test = tim_data_all[len(train) + len(val):len(train) + len(val) +
len(test)]
del tim_data_all
tim_array = sparse.csr_matrix(tim_array)
tim_val = sparse.csr_matrix(tim_val)
tim_test = sparse.csr_matrix(tim_test)
GEN_cate = []
for i in tqdm_notebook(range(len(data_all))):
GEN = []
songs = data_all.loc[i, 'songs']
for j in songs:
for k in song.loc[j, 'song_gn_gnr_basket']:
GEN.append(k)
GEN_cate.append(GEN)
data_all['plylst_GENRE'] = GEN_cate
plylst_GENRE = data_all['plylst_GENRE']
GENRE_counter = Counter([GEN for GENRE in plylst_GENRE for GEN in GENRE])
GENRE_dict = {x: GENRE_counter[x] for x in GENRE_counter}
GENRE_id_tid = dict()
GENRE_tid_id = dict()
for i, t in enumerate(GENRE_dict):
GENRE_id_tid[t] = i
GENRE_tid_id[i] = t
n_GENRE = len(GENRE_dict)
data_all['plylst_GENRE_id'] = data_all['plylst_GENRE'].map(lambda x: [
GENRE_id_tid.get(s) for s in x if GENRE_id_tid.get(s) != None])
GEN_data_all = np.zeros((len(data_all), n_GENRE))
for i, index in enumerate(data_all.index):
if i % 10000 == 0:
print(i)
counter = Counter(data_all.loc[index]['plylst_GENRE_id'])
for k, c in counter.items():
GEN_data_all[i][k] = c
GEN_array = GEN_data_all[:len(train)]
GEN_val = GEN_data_all[len(train):len(train) + len(val)]
GEN_test = GEN_data_all[len(train) + len(val):len(train) + len(val) +
len(test)]
del GEN_data_all
GEN_array = sparse.csr_matrix(GEN_array)
GEN_val = sparse.csr_matrix(GEN_val)
GEN_test = sparse.csr_matrix(GEN_test)
content = data_all['plylst_title']
if '{}.model'.format(sp_total_model_path) not in os.listdir():
makeSentencepieceModel(data_all, sp_total_model_path)
sp = SentencePieceProcessor()
sp.Load('{}.model'.format(sp_total_model_path))
cv = CountVectorizer(max_features=3000, tokenizer=sp.encode_as_pieces)
content = data_all['plylst_title']
tdm = cv.fit_transform(content)
title_tdm = tdm.toarray()
title_tr = title_tdm[:len(train)]
title_va = title_tdm[len(train):len(train) + len(val)]
title_ts = title_tdm[len(train) + len(val):len(train) + len(val) + len(
test)]
title_gnr = np.concatenate((gnr_array, title_tr), axis=1)
val_title_gnr = np.concatenate((gnr_val, title_va), axis=1)
test_title_gnr = np.concatenate((gnr_test, title_ts), axis=1)
title_sp = sparse.csr_matrix(title_tdm)
title_gnr = sparse.csr_matrix(title_gnr)
val_title_gnr = sparse.csr_matrix(val_title_gnr)
test_title_gnr = sparse.csr_matrix(test_title_gnr)
title_gnr = vstack([title_gnr, val_title_gnr, test_title_gnr])
song_sp = vstack([train_user_songs_A, val_user_songs_A, test_user_songs_A])
tag_sp = vstack([train_user_tags_A, val_user_tags_A, test_user_tags_A])
times_sp = vstack([tim_array, tim_val, tim_test])
GEN_sp = vstack([GEN_array, GEN_val, GEN_test])
ART_sp = vstack([ART_array, ART_val, ART_test])
model_knn_song25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_tag25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_title25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_title_gnr25 = NearestNeighbors(metric='cosine', algorithm=
'brute', n_neighbors=25, n_jobs=-1)
model_knn_times25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_GEN25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_ART25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_song40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_tag40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_title40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_title_gnr40 = NearestNeighbors(metric='cosine', algorithm=
'brute', n_neighbors=40, n_jobs=-1)
model_knn_times40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_GEN40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_ART40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_song25.fit(song_sp)
model_knn_tag25.fit(tag_sp)
model_knn_title25.fit(title_sp)
model_knn_title_gnr25.fit(title_gnr)
model_knn_times25.fit(times_sp)
model_knn_GEN25.fit(GEN_sp)
model_knn_ART25.fit(ART_sp)
model_knn_song40.fit(song_sp)
model_knn_tag40.fit(tag_sp)
model_knn_title40.fit(title_sp)
model_knn_title_gnr40.fit(title_gnr)
model_knn_times40.fit(times_sp)
model_knn_GEN40.fit(GEN_sp)
model_knn_ART40.fit(ART_sp)
train.loc[:, 'num_songs'] = train['songs'].map(len)
train.loc[:, 'num_tags'] = train['tags_id'].map(len)
data_all = pd.concat([train, val, test])
data_all.index = range(len(data_all))
res = []
for i in tqdm_notebook(range(len(test))):
data = test.iloc[i]
pid = i
if len(data['songs']) >= 2 and len(data['tags_id']) >= 2:
p = np.zeros((707989, 1))
p[data['songs']] = 1
pp = np.zeros((n_tags, 1))
pp[data['tags_id']] = 1
tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]
row = np.repeat(range(25), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
tra_tag = data_all.iloc[model_knn_tag25.kneighbors(pp.T)[1][0]]
row = np.repeat(range(25), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:i +
1])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:i + 1])[
1][0]]
tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:i + 1])[
1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
songs_already = data['songs']
tags_already = data['tags_id']
test_song = cosine_similarity(tra_song_sp, p.T)
test_tag = cosine_similarity(tra_tag_sp, pp.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = (test_song * test_tag * test_title_genre * test_tim *
test_GEN * test_ART)
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False]
cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]
row = np.repeat(range(40), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
tra_tag = data_all.iloc[model_knn_tag40.kneighbors(pp.T)[1][0]]
row = np.repeat(range(40), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:i +
1])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:i + 1])[
1][0]]
tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:i + 1])[
1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
test_song = cosine_similarity(tra_song_sp, p.T)
test_tag = cosine_similarity(tra_tag_sp, pp.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = (test_song * test_tag * test_title_genre * test_tim *
test_GEN * test_ART)
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False]
cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
cand_all = pd.merge(cand1, cand2, how='outer', on='index')
cand_all = cand_all.fillna(0)
cand_all['pred'] = (cand_all['0_x'] + cand_all['0_y']) / 2
cand_song_idx = list(cand_all.sort_values(by=['pred'],
ascending=False)[:100]['index'])
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
elif len(data['songs']) != 0:
p = np.zeros((707989, 1))
p[data['songs']] = 1
tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]
row = np.repeat(range(25), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(25), tra_song['num_tags'])
col = [tag for tags in tra_song['tags_id'] for tag in tags]
dat = np.repeat(1, tra_song['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:i +
1])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:i + 1])[
1][0]]
tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:i + 1])[
1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
songs_already = data['songs']
tags_already = data['tags_id']
test_song = cosine_similarity(tra_song_sp, p.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = (test_song * test_title_genre * test_tim * test_GEN *
test_ART)
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False]
cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]
row = np.repeat(range(40), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(40), tra_song['num_tags'])
col = [tag for tags in tra_song['tags_id'] for tag in tags]
dat = np.repeat(1, tra_song['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:i +
1])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:i + 1])[
1][0]]
tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:i + 1])[
1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
test_song = cosine_similarity(tra_song_sp, p.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = (test_song * test_title_genre * test_tim * test_GEN *
test_ART)
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False]
cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
cand_all = pd.merge(cand1, cand2, how='outer', on='index')
cand_all = cand_all.fillna(0)
cand_all['pred'] = (cand_all['0_x'] + cand_all['0_y']) / 2
cand_song_idx = list(cand_all.sort_values(by=['pred'],
ascending=False)[:100]['index'])
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
elif len(data['tags_id']) != 0:
p = np.zeros((n_tags, 1))
p[data['tags_id']] = 1
tra_tag = data_all.iloc[model_knn_tag25.kneighbors(p.T)[1][0]]
row = np.repeat(range(25), tra_tag['num_songs'])
col = [song for songs in tra_tag['songs'] for song in songs]
dat = np.repeat(1, tra_tag['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(25), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
songs_already = data['songs']
tags_already = data['tags_id']
testi = cosine_similarity(tra_tag_sp, p.T)  # similarity of neighbor playlists to this playlist's own tag vector
if len(data['plylst_title']) != 0:
tra_title_gnr = title_tdm[model_knn_title25.kneighbors(
title_ts[i:i + 1])[1][0]]
testi_title = cosine_similarity(tra_title_gnr, title_ts[i:i +
1])
testi = testi * testi_title
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False][:100]
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res.append({'id': test.loc[pid, 'id'], 'songs': list(
cand_song_idx), 'tags': rec_tag_idx})
else:
cand_song = []
for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i
:i + 1])[1][0]].songs.to_list():
for j in li:
cand_song.append(j)
cand_tag = []
for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i
:i + 1])[1][0]].tags.to_list():
for j in li:
cand_tag.append(j)
cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[
:100].index)
rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10
].index)
res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
for i in range(len(res)):
if len(res[i]['songs']) != 100:
print('songs: error at playlist index {}'.format(i))
if len(res[i]['tags']) != 10:
print('tags: error at playlist index {}'.format(i))
rec = []
for i in range(len(res)):
rec.append({'id': res[i]['id'], 'songs': list(res[i]['songs']),
'tags': res[i]['tags']})
result1 = pd.DataFrame(rec)
model_knn_song = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_tag = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_title = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_title_gnr = NearestNeighbors(metric='cosine', algorithm=
'brute', n_neighbors=50, n_jobs=-1)
model_knn_times = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_GEN = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_ART = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_song.fit(song_sp)
model_knn_tag.fit(tag_sp)
model_knn_title.fit(title_sp)
model_knn_title_gnr.fit(title_gnr)
model_knn_times.fit(times_sp)
model_knn_GEN.fit(GEN_sp)
model_knn_ART.fit(ART_sp)
res2 = []
for i in tqdm_notebook([1960, 6361, 8705, 9310, 10498]):
data = test.iloc[i]
pid = i
if len(data['songs']) != 0 and len(data['tags_id']) != 0:
p = np.zeros((707989, 1))
p[data['songs']] = 1
pp = np.zeros((n_tags, 1))
pp[data['tags_id']] = 1
tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]
row = np.repeat(range(50), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
tra_tag = data_all.iloc[model_knn_tag.kneighbors(pp.T)[1][0]]
row = np.repeat(range(50), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:i + 1]
)[1][0]]
tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:i + 1])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
songs_already = data['songs']
tags_already = data['tags_id']
test_song = cosine_similarity(tra_song_sp, p.T)
test_tag = cosine_similarity(tra_tag_sp, pp.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = test_song * test_tag * test_title_genre * test_GEN
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False][:100]
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
elif len(data['songs']) != 0:
p = np.zeros((707989, 1))
p[data['songs']] = 1
tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]
row = np.repeat(range(50), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(50), tra_song['num_tags'])
col = [tag for tags in tra_song['tags_id'] for tag in tags]
dat = np.repeat(1, tra_song['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
songs_already = data['songs']
tags_already = data['tags_id']
tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:i + 1]
)[1][0]]
tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:i + 1])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
test_song = cosine_similarity(tra_song_sp, p.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = test_song * test_title_genre * test_tim * test_GEN
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-200:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False][:100]
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
elif len(data['tags_id']) != 0:
p = np.zeros((n_tags, 1))
p[data['tags_id']] = 1
tra_tag = data_all.iloc[model_knn_tag.kneighbors(p.T)[1][0]]
row = np.repeat(range(50), tra_tag['num_songs'])
col = [song for songs in tra_tag['songs'] for song in songs]
dat = np.repeat(1, tra_tag['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(50), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
songs_already = data['songs']
tags_already = data['tags_id']
testi = cosine_similarity(tra_tag_sp, p.T)  # similarity of neighbor playlists to this playlist's own tag vector
if len(data['plylst_title']) != 0:
tra_title_gnr = title_tdm[model_knn_title.kneighbors(
title_ts[i:i + 1])[1][0]]
testi_title = cosine_similarity(tra_title_gnr, title_ts[i:i +
1])
testi = testi * testi_title
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False][:100]
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
else:
cand_song = []
for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:i +
1])[1][0]].songs.to_list():
for j in li:
cand_song.append(j)
cand_tag = []
for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:i +
1])[1][0]].tags.to_list():
for j in li:
cand_tag.append(j)
cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[
:100].index)
rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10
].index)
res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
pd.DataFrame(res2)
rec2 = []
for i in range(len(res2)):
rec2.append({'id': res2[i]['id'], 'songs': list(res2[i]['songs']),
'tags': res2[i]['tags']})
result2 = pd.DataFrame(rec2)['songs']
n_index = [10498, 6361, 1960, 8705, 9310]
result2.index = n_index
result1.loc[n_index, 'songs'] = result2
result1['songs'].apply(len).sort_values()
s = []
for song in train.songs.tolist():
s += song
r1 = dict(Counter(s))
r_song = sorted(r1.items(), key=lambda x: -x[1])
r_song_top = r_song[:100]
list_song = list(dict(r_song_top).keys())
len(list_song)
sub = []
for j in range(len(result1)):
sub.append(result1.loc[j].to_dict())
sub[6361]['songs'] = list_song
pd.DataFrame(sub)['songs'].apply(len).sort_values()
write_json(sub, 'final_songs.json')
return sub
<mask token>
| <mask token>
def song_inference():
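# Content-based kNN recommender: build playlist co-occurrence features (songs, tags,
# genres, artists, issue dates, SentencePiece title tokens) over train+val+test,
# fit brute-force cosine NearestNeighbors models, and blend neighborhood scores to
# recommend 100 songs and 10 tags for each test playlist.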
sp_total_model_path = 'sp_total'
train = pd.read_json('./dataset/train.json', typ='frame', encoding='utf-8')
song = pd.read_json('./dataset/song_meta.json', typ='frame', encoding=
'utf-8')
plylst_tag = train['tags']
tag_counter = Counter([tg for tgs in plylst_tag for tg in tgs])
tag_dict = {x: tag_counter[x] for x in tag_counter}
tag_id_tid = dict()
tag_tid_id = dict()
for i, t in enumerate(tag_dict):
tag_id_tid[t] = i
tag_tid_id[i] = t
n_tags = len(tag_dict)
plylst_song = train['songs']
song_dict = {x: x for x in song['id']}
n_songs = len(song_dict)
train['tags_id'] = train['tags'].map(lambda x: [tag_id_tid.get(t) for t in
x if tag_id_tid.get(t) != None])
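# Collect the detailed genre codes (song_gn_dtl_gnr_basket) of every song in each
# training playlist and index them, mirroring the tag id mapping above.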
song_cate = []
for i in range(len(train)):
gnr = []
songs = train.iloc[i, 3]
for j in songs:
for k in song.loc[j, 'song_gn_dtl_gnr_basket']:
gnr.append(k)
song_cate.append(gnr)
train['plylst_genre'] = song_cate
plylst_genre = train['plylst_genre']
genre_counter = Counter([gen for genre in plylst_genre for gen in genre])
genre_dict = {x: genre_counter[x] for x in genre_counter}
genre_id_tid = dict()
genre_tid_id = dict()
for i, t in enumerate(genre_dict):
genre_id_tid[t] = i
genre_tid_id[i] = t
n_genre = len(genre_dict)
train['plylst_genre_id'] = train['plylst_genre'].map(lambda x: [
genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])
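# Dense playlist-by-genre count matrix for the training playlists.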
gnr_array = np.zeros((len(train), n_genre))
for i, index in enumerate(train.index):
if i % 10000 == 0:
print(i)
counter = Counter(train.loc[index]['plylst_genre_id'])
for k, c in counter.items():
gnr_array[i][k] = c
gnr_array.shape
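# Truncate each song's issue date to year+month (YYYYMM).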
song['issue_date'] = song['issue_date'].astype('str').map(lambda x: x[:6])
plylst_use = train[['plylst_title', 'updt_date', 'tags_id', 'songs']]
plylst_use.loc[:, 'num_songs'] = plylst_use['songs'].map(len)
plylst_use.loc[:, 'num_tags'] = plylst_use['tags_id'].map(len)
plylst_train = plylst_use
n_train = len(plylst_train)
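# Sparse playlist-by-song and playlist-by-tag incidence matrices (1 = present).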
row = np.repeat(range(n_train), plylst_train['num_songs'])
col = [song for songs in plylst_train['songs'] for song in songs]
dat = np.repeat(1, plylst_train['num_songs'].sum())
train_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_train,
n_songs))
row = np.repeat(range(n_train), plylst_train['num_tags'])
col = [tag for tags in plylst_train['tags_id'] for tag in tags]
dat = np.repeat(1, plylst_train['num_tags'].sum())
train_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_train,
n_tags))
train_user_songs_A_T = train_user_songs_A.T.tocsr()
train_user_songs_A_T
train_user_tags_A_T = train_user_tags_A.T.tocsr()
train_user_tags_A_T
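# Repeat the same genre counting and song/tag incidence-matrix construction for the
# validation split.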
val = pd.read_json('./dataset/val.json', typ='frame', encoding='utf-8')
song_cate = []
for i in range(len(val)):
gnr = []
songs = val.iloc[i, 3]
for j in songs:
for k in song.loc[j, 'song_gn_dtl_gnr_basket']:
gnr.append(k)
song_cate.append(gnr)
val['plylst_genre'] = song_cate
val['tags_id'] = val['tags'].map(lambda x: [tag_id_tid.get(t) for t in
x if tag_id_tid.get(t) != None])
val['plylst_genre_id'] = val['plylst_genre'].map(lambda x: [
genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])
val.loc[:, 'num_songs'] = val['songs'].map(len)
val.loc[:, 'num_tags'] = val['tags_id'].map(len)
gnr_val = np.zeros((len(val), n_genre))
for i, index in enumerate(val.index):
if i % 10000 == 0:
print(i)
counter = Counter(val.loc[index]['plylst_genre_id'])
for k, c in counter.items():
gnr_val[i][k] = c
gnr_val.shape
n_val = len(val)
row = np.repeat(range(n_val), val['num_songs'])
col = [song for songs in val['songs'] for song in songs]
dat = np.repeat(1, val['num_songs'].sum())
val_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_songs)
)
row = np.repeat(range(n_val), val['num_tags'])
col = [tag for tags in val['tags_id'] for tag in tags]
dat = np.repeat(1, val['num_tags'].sum())
val_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_tags))
val_user_songs_A_T = val_user_songs_A.T.tocsr()
val_user_tags_A_T = val_user_tags_A.T.tocsr()
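# ... and again for the test split.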
test = pd.read_json('./dataset/test.json', typ='frame', encoding='utf-8')
song_cate = []
for i in range(len(test)):
gnr = []
songs = test.iloc[i, 3]
for j in songs:
for k in song.loc[j, 'song_gn_dtl_gnr_basket']:
gnr.append(k)
song_cate.append(gnr)
test['plylst_genre'] = song_cate
test['tags_id'] = test['tags'].map(lambda x: [tag_id_tid.get(t) for t in
x if tag_id_tid.get(t) != None])
test['plylst_genre_id'] = test['plylst_genre'].map(lambda x: [
genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])
test.loc[:, 'num_songs'] = test['songs'].map(len)
test.loc[:, 'num_tags'] = test['tags_id'].map(len)
gnr_test = np.zeros((len(test), n_genre))
for i, index in enumerate(test.index):
if i % 10000 == 0:
print(i)
counter = Counter(test.loc[index]['plylst_genre_id'])
for k, c in counter.items():
gnr_test[i][k] = c
gnr_test.shape
n_test = len(test)
row = np.repeat(range(n_test), test['num_songs'])
col = [song for songs in test['songs'] for song in songs]
dat = np.repeat(1, test['num_songs'].sum())
test_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_test,
n_songs))
row = np.repeat(range(n_test), test['num_tags'])
col = [tag for tags in test['tags_id'] for tag in tags]
dat = np.repeat(1, test['num_tags'].sum())
test_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_test, n_tags)
)
test_user_songs_A_T = test_user_songs_A.T.tocsr()
test_user_tags_A_T = test_user_tags_A.T.tocsr()
data_all = pd.concat([train, val, test])
data_all.index = range(len(data_all))
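# Artist features over the combined train+val+test playlists: keep only artists
# credited on at least 12 songs, then count their occurrences per playlist.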
arts = song['artist_id_basket'].map(lambda x: x[0])
arts = pd.DataFrame(arts)
art_counts = arts['artist_id_basket'].value_counts().reset_index()
art_counts.columns = ['artist_id_basket', 'counts']
arts2 = pd.merge(arts, art_counts, how='left', on=['artist_id_basket'])
song_art = song.iloc[arts2.query('counts >= 12')['artist_id_basket'].index]
song_art = song_art[['artist_id_basket']]
ART_cate = []
for i in tqdm_notebook(range(len(data_all))):
ART = []
songs = data_all.loc[i, 'songs']
for j in songs:
if j in song_art.index:
for k in song_art.loc[j, 'artist_id_basket']:
ART.append(k)
ART_cate.append(ART)
data_all['plylst_ARTIST'] = ART_cate
plylst_ARTIST = data_all['plylst_ARTIST']
ARTIST_counter = Counter([ART for ARTIST in plylst_ARTIST for ART in
ARTIST])
ARTIST_dict = {x: ARTIST_counter[x] for x in ARTIST_counter}
ARTIST_id_tid = dict()
ARTIST_tid_id = dict()
for i, t in enumerate(ARTIST_dict):
ARTIST_id_tid[t] = i
ARTIST_tid_id[i] = t
n_ARTIST = len(ARTIST_dict)
data_all['plylst_ARTIST_id'] = data_all['plylst_ARTIST'].map(lambda x:
[ARTIST_id_tid.get(s) for s in x if ARTIST_id_tid.get(s) != None])
ART_data_all = np.zeros((len(data_all), n_ARTIST))
for i, index in enumerate(data_all.index):
if i % 10000 == 0:
print(i)
counter = Counter(data_all.loc[index]['plylst_ARTIST_id'])
for k, c in counter.items():
ART_data_all[i][k] = c
ART_data_all.shape
ART_array = ART_data_all[:len(train)]
ART_val = ART_data_all[len(train):len(train) + len(val)]
ART_test = ART_data_all[len(train) + len(val):len(train) + len(val) +
len(test)]
del ART_data_all
ART_array = sparse.csr_matrix(ART_array)
ART_val = sparse.csr_matrix(ART_val)
ART_test = sparse.csr_matrix(ART_test)
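# Issue-date (YYYYMM) count features per playlist.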
tim_cate = []
for i in tqdm_notebook(range(len(data_all))):
tim = []
songs = data_all.loc[i, 'songs']
for j in songs:
tim.append(song.loc[j, 'issue_date'])
tim_cate.append(tim)
data_all['plylst_times'] = tim_cate
plylst_times = data_all['plylst_times']
times_counter = Counter([tim for times in plylst_times for tim in times])
times_dict = {x: times_counter[x] for x in times_counter}
times_id_tid = dict()
times_tid_id = dict()
for i, t in enumerate(times_dict):
times_id_tid[t] = i
times_tid_id[i] = t
n_times = len(times_dict)
data_all['plylst_times_id'] = data_all['plylst_times'].map(lambda x: [
times_id_tid.get(s) for s in x if times_id_tid.get(s) != None])
tim_data_all = np.zeros((len(data_all), n_times))
for i, index in enumerate(data_all.index):
if i % 10000 == 0:
print(i)
counter = Counter(data_all.loc[index]['plylst_times_id'])
for k, c in counter.items():
tim_data_all[i][k] = c
tim_array = tim_data_all[:len(train)]
tim_val = tim_data_all[len(train):len(train) + len(val)]
tim_test = tim_data_all[len(train) + len(val):len(train) + len(val) +
len(test)]
del tim_data_all
tim_array = sparse.csr_matrix(tim_array)
tim_val = sparse.csr_matrix(tim_val)
tim_test = sparse.csr_matrix(tim_test)
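# Coarse genre (song_gn_gnr_basket) count features per playlist.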
GEN_cate = []
for i in tqdm_notebook(range(len(data_all))):
GEN = []
songs = data_all.loc[i, 'songs']
for j in songs:
for k in song.loc[j, 'song_gn_gnr_basket']:
GEN.append(k)
GEN_cate.append(GEN)
data_all['plylst_GENRE'] = GEN_cate
plylst_GENRE = data_all['plylst_GENRE']
GENRE_counter = Counter([GEN for GENRE in plylst_GENRE for GEN in GENRE])
GENRE_dict = {x: GENRE_counter[x] for x in GENRE_counter}
GENRE_id_tid = dict()
GENRE_tid_id = dict()
for i, t in enumerate(GENRE_dict):
GENRE_id_tid[t] = i
GENRE_tid_id[i] = t
n_GENRE = len(GENRE_dict)
data_all['plylst_GENRE_id'] = data_all['plylst_GENRE'].map(lambda x: [
GENRE_id_tid.get(s) for s in x if GENRE_id_tid.get(s) != None])
GEN_data_all = np.zeros((len(data_all), n_GENRE))
for i, index in enumerate(data_all.index):
if i % 10000 == 0:
print(i)
counter = Counter(data_all.loc[index]['plylst_GENRE_id'])
for k, c in counter.items():
GEN_data_all[i][k] = c
GEN_array = GEN_data_all[:len(train)]
GEN_val = GEN_data_all[len(train):len(train) + len(val)]
GEN_test = GEN_data_all[len(train) + len(val):len(train) + len(val) +
len(test)]
del GEN_data_all
GEN_array = sparse.csr_matrix(GEN_array)
GEN_val = sparse.csr_matrix(GEN_val)
GEN_test = sparse.csr_matrix(GEN_test)
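# Playlist-title features: train a SentencePiece model if one is not present, build a
# 3000-term bag-of-pieces matrix, and concatenate it with the genre counts.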
content = data_all['plylst_title']
if '{}.model'.format(sp_total_model_path) not in os.listdir():
makeSentencepieceModel(data_all, sp_total_model_path)
sp = SentencePieceProcessor()
sp.Load('{}.model'.format(sp_total_model_path))
cv = CountVectorizer(max_features=3000, tokenizer=sp.encode_as_pieces)
content = data_all['plylst_title']
tdm = cv.fit_transform(content)
title_tdm = tdm.toarray()
title_tr = title_tdm[:len(train)]
title_va = title_tdm[len(train):len(train) + len(val)]
title_ts = title_tdm[len(train) + len(val):len(train) + len(val) + len(
test)]
title_gnr = np.concatenate((gnr_array, title_tr), axis=1)
val_title_gnr = np.concatenate((gnr_val, title_va), axis=1)
test_title_gnr = np.concatenate((gnr_test, title_ts), axis=1)
title_sp = sparse.csr_matrix(title_tdm)
title_gnr = sparse.csr_matrix(title_gnr)
val_title_gnr = sparse.csr_matrix(val_title_gnr)
test_title_gnr = sparse.csr_matrix(test_title_gnr)
title_gnr = vstack([title_gnr, val_title_gnr, test_title_gnr])
song_sp = vstack([train_user_songs_A, val_user_songs_A, test_user_songs_A])
tag_sp = vstack([train_user_tags_A, val_user_tags_A, test_user_tags_A])
times_sp = vstack([tim_array, tim_val, tim_test])
GEN_sp = vstack([GEN_array, GEN_val, GEN_test])
ART_sp = vstack([ART_array, ART_val, ART_test])
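# Two families of brute-force cosine kNN models (k=25 and k=40) whose candidate
# song scores are averaged later in the inference loop.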
model_knn_song25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_tag25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_title25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_title_gnr25 = NearestNeighbors(metric='cosine', algorithm=
'brute', n_neighbors=25, n_jobs=-1)
model_knn_times25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_GEN25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_ART25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_song40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_tag40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_title40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_title_gnr40 = NearestNeighbors(metric='cosine', algorithm=
'brute', n_neighbors=40, n_jobs=-1)
model_knn_times40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_GEN40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_ART40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_song25.fit(song_sp)
model_knn_tag25.fit(tag_sp)
model_knn_title25.fit(title_sp)
model_knn_title_gnr25.fit(title_gnr)
model_knn_times25.fit(times_sp)
model_knn_GEN25.fit(GEN_sp)
model_knn_ART25.fit(ART_sp)
model_knn_song40.fit(song_sp)
model_knn_tag40.fit(tag_sp)
model_knn_title40.fit(title_sp)
model_knn_title_gnr40.fit(title_gnr)
model_knn_times40.fit(times_sp)
model_knn_GEN40.fit(GEN_sp)
model_knn_ART40.fit(ART_sp)
train.loc[:, 'num_songs'] = train['songs'].map(len)
train.loc[:, 'num_tags'] = train['tags_id'].map(len)
data_all = pd.concat([train, val, test])
data_all.index = range(len(data_all))
res = []
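# Main inference loop over test playlists. Branches: songs+tags, songs only,
# tags only (optionally combined with the title), and title only.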
for i in tqdm_notebook(range(len(test))):
data = test.iloc[i]
pid = i
if len(data['songs']) >= 2 and len(data['tags_id']) >= 2:
p = np.zeros((707989, 1))
p[data['songs']] = 1
pp = np.zeros((n_tags, 1))
pp[data['tags_id']] = 1
tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]
row = np.repeat(range(25), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
tra_tag = data_all.iloc[model_knn_tag25.kneighbors(pp.T)[1][0]]
row = np.repeat(range(25), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:i +
1])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:i + 1])[
1][0]]
tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:i + 1])[
1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
songs_already = data['songs']
tags_already = data['tags_id']
test_song = cosine_similarity(tra_song_sp, p.T)
test_tag = cosine_similarity(tra_tag_sp, pp.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = (test_song * test_tag * test_title_genre * test_tim *
test_GEN * test_ART)
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False]
cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]
row = np.repeat(range(40), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
tra_tag = data_all.iloc[model_knn_tag40.kneighbors(pp.T)[1][0]]
row = np.repeat(range(40), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:i +
1])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:i + 1])[
1][0]]
tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:i + 1])[
1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
test_song = cosine_similarity(tra_song_sp, p.T)
test_tag = cosine_similarity(tra_tag_sp, pp.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = (test_song * test_tag * test_title_genre * test_tim *
test_GEN * test_ART)
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False]
cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
cand_all = pd.merge(cand1, cand2, how='outer', on='index')
cand_all = cand_all.fillna(0)
cand_all['pred'] = (cand_all['0_x'] + cand_all['0_y']) / 2
cand_song_idx = list(cand_all.sort_values(by=['pred'],
ascending=False)[:100]['index'])
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
elif len(data['songs']) != 0:
p = np.zeros((707989, 1))
p[data['songs']] = 1
tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]
row = np.repeat(range(25), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(25), tra_song['num_tags'])
col = [tag for tags in tra_song['tags_id'] for tag in tags]
dat = np.repeat(1, tra_song['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:i +
1])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:i + 1])[
1][0]]
tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:i + 1])[
1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
songs_already = data['songs']
tags_already = data['tags_id']
test_song = cosine_similarity(tra_song_sp, p.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = (test_song * test_title_genre * test_tim * test_GEN *
test_ART)
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False]
cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]
row = np.repeat(range(40), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(40), tra_song['num_tags'])
col = [tag for tags in tra_song['tags_id'] for tag in tags]
dat = np.repeat(1, tra_song['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:i +
1])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:i + 1])[
1][0]]
tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:i + 1])[
1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
test_song = cosine_similarity(tra_song_sp, p.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = (test_song * test_title_genre * test_tim * test_GEN *
test_ART)
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False]
cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
cand_all = pd.merge(cand1, cand2, how='outer', on='index')
cand_all = cand_all.fillna(0)
cand_all['pred'] = (cand_all['0_x'] + cand_all['0_y']) / 2
cand_song_idx = list(cand_all.sort_values(by=['pred'],
ascending=False)[:100]['index'])
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
elif len(data['tags_id']) != 0:
p = np.zeros((n_tags, 1))
p[data['tags_id']] = 1
tra_tag = data_all.iloc[model_knn_tag25.kneighbors(p.T)[1][0]]
row = np.repeat(range(25), tra_tag['num_songs'])
col = [song for songs in tra_tag['songs'] for song in songs]
dat = np.repeat(1, tra_tag['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(25), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
songs_already = data['songs']
tags_already = data['tags_id']
testi = cosine_similarity(tra_tag_sp, p.T)  # similarity of neighbor playlists to this playlist's own tag vector
if len(data['plylst_title']) != 0:
tra_title_gnr = title_tdm[model_knn_title25.kneighbors(
title_ts[i:i + 1])[1][0]]
testi_title = cosine_similarity(tra_title_gnr, title_ts[i:i +
1])
testi = testi * testi_title
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False][:100]
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res.append({'id': test.loc[pid, 'id'], 'songs': list(
cand_song_idx), 'tags': rec_tag_idx})
else:
cand_song = []
for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i
:i + 1])[1][0]].songs.to_list():
for j in li:
cand_song.append(j)
cand_tag = []
for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i
:i + 1])[1][0]].tags.to_list():
for j in li:
cand_tag.append(j)
cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[
:100].index)
rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10
].index)
res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
for i in range(len(res)):
if len(res[i]['songs']) != 100:
print('songs: error at playlist index {}'.format(i))
if len(res[i]['tags']) != 10:
print('tags: error at playlist index {}'.format(i))
rec = []
for i in range(len(res)):
rec.append({'id': res[i]['id'], 'songs': list(res[i]['songs']),
'tags': res[i]['tags']})
result1 = pd.DataFrame(rec)
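# Re-run five specific playlists with wider k=50 neighborhood models below and
# overwrite their song recommendations.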
model_knn_song = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_tag = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_title = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_title_gnr = NearestNeighbors(metric='cosine', algorithm=
'brute', n_neighbors=50, n_jobs=-1)
model_knn_times = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_GEN = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_ART = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_song.fit(song_sp)
model_knn_tag.fit(tag_sp)
model_knn_title.fit(title_sp)
model_knn_title_gnr.fit(title_gnr)
model_knn_times.fit(times_sp)
model_knn_GEN.fit(GEN_sp)
model_knn_ART.fit(ART_sp)
res2 = []
for i in tqdm_notebook([1960, 6361, 8705, 9310, 10498]):
data = test.iloc[i]
pid = i
if len(data['songs']) != 0 and len(data['tags_id']) != 0:
p = np.zeros((707989, 1))
p[data['songs']] = 1
pp = np.zeros((n_tags, 1))
pp[data['tags_id']] = 1
tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]
row = np.repeat(range(50), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
tra_tag = data_all.iloc[model_knn_tag.kneighbors(pp.T)[1][0]]
row = np.repeat(range(50), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:i + 1]
)[1][0]]
tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:i + 1])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
songs_already = data['songs']
tags_already = data['tags_id']
test_song = cosine_similarity(tra_song_sp, p.T)
test_tag = cosine_similarity(tra_tag_sp, pp.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = test_song * test_tag * test_title_genre * test_GEN
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False][:100]
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
elif len(data['songs']) != 0:
p = np.zeros((707989, 1))
p[data['songs']] = 1
tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]
row = np.repeat(range(50), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(50), tra_song['num_tags'])
col = [tag for tags in tra_song['tags_id'] for tag in tags]
dat = np.repeat(1, tra_song['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
songs_already = data['songs']
tags_already = data['tags_id']
tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:i + 1]
)[1][0]]
tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:i + 1])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
test_song = cosine_similarity(tra_song_sp, p.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = test_song * test_title_genre * test_tim * test_GEN
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-200:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False][:100]
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
elif len(data['tags_id']) != 0:
p = np.zeros((n_tags, 1))
p[data['tags_id']] = 1
tra_tag = data_all.iloc[model_knn_tag.kneighbors(p.T)[1][0]]
row = np.repeat(range(50), tra_tag['num_songs'])
col = [song for songs in tra_tag['songs'] for song in songs]
dat = np.repeat(1, tra_tag['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(50), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
songs_already = data['songs']
tags_already = data['tags_id']
testi = cosine_similarity(tra_tag_sp, p.T)  # similarity of neighbor playlists to this playlist's own tag vector
if len(data['plylst_title']) != 0:
tra_title_gnr = title_tdm[model_knn_title.kneighbors(
title_ts[i:i + 1])[1][0]]
testi_title = cosine_similarity(tra_title_gnr, title_ts[i:i +
1])
testi = testi * testi_title
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False][:100]
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
else:
cand_song = []
for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:i +
1])[1][0]].songs.to_list():
for j in li:
cand_song.append(j)
cand_tag = []
for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:i +
1])[1][0]].tags.to_list():
for j in li:
cand_tag.append(j)
cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[
:100].index)
rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10
].index)
res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
pd.DataFrame(res2)
rec2 = []
for i in range(len(res2)):
rec2.append({'id': res2[i]['id'], 'songs': list(res2[i]['songs']),
'tags': res2[i]['tags']})
result2 = pd.DataFrame(rec2)['songs']
n_index = [10498, 6361, 1960, 8705, 9310]
result2.index = n_index
result1.loc[n_index, 'songs'] = result2
result1['songs'].apply(len).sort_values()
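# Fallback: replace playlist 6361's songs with the 100 most frequent songs in the
# training data, then write the per-playlist song predictions to final_songs.json.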
s = []
for song in train.songs.tolist():
s += song
r1 = dict(Counter(s))
r_song = sorted(r1.items(), key=lambda x: -x[1])
r_song_top = r_song[:100]
list_song = list(dict(r_song_top).keys())
len(list_song)
sub = []
for j in range(len(result1)):
sub.append(result1.loc[j].to_dict())
sub[6361]['songs'] = list_song
pd.DataFrame(sub)['songs'].apply(len).sort_values()
write_json(sub, 'final_songs.json')
return sub
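# Entry point: run tag preprocessing, word2vec-based tag recommendation, and song
# inference, then merge songs and tags into the final results.json submission.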
if __name__ == '__main__':
_data = Dataset()
pre_tag.run(_data.test, _data.n_songs, _data.n_tags, _data.spr_list,
_data.tag_tid_id)
final_tags = word2vec_for_tag.run(_data.total, _data.test)
final_songs = song_inference()
result = []
for f_songs, f_tags in zip(final_songs, final_tags):
result.append({'id': f_songs['id'], 'songs': f_songs['songs'],
'tags': f_tags['tags']})
write_json(result, 'results.json')
| from datetime import timedelta, datetime
import glob
import json
import os
import re
import pickle
import time
import pandas as pd
import numpy as np
from collections import Counter
from sentencepiece import SentencePieceTrainer
from sentencepiece import SentencePieceProcessor
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from scipy.sparse import vstack
from scipy import sparse
import scipy.sparse as spr
from util import write_json, makeSentencepieceModel
from tqdm import tqdm_notebook
from sklearn.neighbors import NearestNeighbors
from Dataset import Dataset
import pre_tag, word2vec_for_tag
def song_inference():
sp_total_model_path = 'sp_total'
train = pd.read_json('./dataset/train.json', typ='frame', encoding='utf-8')
song = pd.read_json('./dataset/song_meta.json', typ='frame', encoding=
'utf-8')
plylst_tag = train['tags']
tag_counter = Counter([tg for tgs in plylst_tag for tg in tgs])
tag_dict = {x: tag_counter[x] for x in tag_counter}
tag_id_tid = dict()
tag_tid_id = dict()
for i, t in enumerate(tag_dict):
tag_id_tid[t] = i
tag_tid_id[i] = t
n_tags = len(tag_dict)
plylst_song = train['songs']
song_dict = {x: x for x in song['id']}
n_songs = len(song_dict)
train['tags_id'] = train['tags'].map(lambda x: [tag_id_tid.get(t) for t in
x if tag_id_tid.get(t) != None])
song_cate = []
for i in range(len(train)):
gnr = []
songs = train.iloc[i, 3]
for j in songs:
for k in song.loc[j, 'song_gn_dtl_gnr_basket']:
gnr.append(k)
song_cate.append(gnr)
train['plylst_genre'] = song_cate
plylst_genre = train['plylst_genre']
genre_counter = Counter([gen for genre in plylst_genre for gen in genre])
genre_dict = {x: genre_counter[x] for x in genre_counter}
genre_id_tid = dict()
genre_tid_id = dict()
for i, t in enumerate(genre_dict):
genre_id_tid[t] = i
genre_tid_id[i] = t
n_genre = len(genre_dict)
train['plylst_genre_id'] = train['plylst_genre'].map(lambda x: [
genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])
gnr_array = np.zeros((len(train), n_genre))
for i, index in enumerate(train.index):
if i % 10000 == 0:
print(i)
counter = Counter(train.loc[index]['plylst_genre_id'])
for k, c in counter.items():
gnr_array[i][k] = c
gnr_array.shape
song['issue_date'] = song['issue_date'].astype('str').map(lambda x: x[:6])
plylst_use = train[['plylst_title', 'updt_date', 'tags_id', 'songs']]
plylst_use.loc[:, 'num_songs'] = plylst_use['songs'].map(len)
plylst_use.loc[:, 'num_tags'] = plylst_use['tags_id'].map(len)
plylst_train = plylst_use
n_train = len(plylst_train)
row = np.repeat(range(n_train), plylst_train['num_songs'])
col = [song for songs in plylst_train['songs'] for song in songs]
dat = np.repeat(1, plylst_train['num_songs'].sum())
train_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_train,
n_songs))
row = np.repeat(range(n_train), plylst_train['num_tags'])
col = [tag for tags in plylst_train['tags_id'] for tag in tags]
dat = np.repeat(1, plylst_train['num_tags'].sum())
train_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_train,
n_tags))
train_user_songs_A_T = train_user_songs_A.T.tocsr()
train_user_songs_A_T
train_user_tags_A_T = train_user_tags_A.T.tocsr()
train_user_tags_A_T
val = pd.read_json('./dataset/val.json', typ='frame', encoding='utf-8')
song_cate = []
for i in range(len(val)):
gnr = []
songs = val.iloc[i, 3]
for j in songs:
for k in song.loc[j, 'song_gn_dtl_gnr_basket']:
gnr.append(k)
song_cate.append(gnr)
val['plylst_genre'] = song_cate
val['tags_id'] = val['tags'].map(lambda x: [tag_id_tid.get(t) for t in
x if tag_id_tid.get(t) != None])
val['plylst_genre_id'] = val['plylst_genre'].map(lambda x: [
genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])
val.loc[:, 'num_songs'] = val['songs'].map(len)
val.loc[:, 'num_tags'] = val['tags_id'].map(len)
gnr_val = np.zeros((len(val), n_genre))
for i, index in enumerate(val.index):
if i % 10000 == 0:
print(i)
counter = Counter(val.loc[index]['plylst_genre_id'])
for k, c in counter.items():
gnr_val[i][k] = c
gnr_val.shape
n_val = len(val)
row = np.repeat(range(n_val), val['num_songs'])
col = [song for songs in val['songs'] for song in songs]
dat = np.repeat(1, val['num_songs'].sum())
val_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_songs)
)
row = np.repeat(range(n_val), val['num_tags'])
col = [tag for tags in val['tags_id'] for tag in tags]
dat = np.repeat(1, val['num_tags'].sum())
val_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_tags))
val_user_songs_A_T = val_user_songs_A.T.tocsr()
val_user_tags_A_T = val_user_tags_A.T.tocsr()
test = pd.read_json('./dataset/test.json', typ='frame', encoding='utf-8')
song_cate = []
for i in range(len(test)):
gnr = []
songs = test.iloc[i, 3]
for j in songs:
for k in song.loc[j, 'song_gn_dtl_gnr_basket']:
gnr.append(k)
song_cate.append(gnr)
test['plylst_genre'] = song_cate
test['tags_id'] = test['tags'].map(lambda x: [tag_id_tid.get(t) for t in
x if tag_id_tid.get(t) != None])
test['plylst_genre_id'] = test['plylst_genre'].map(lambda x: [
genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])
test.loc[:, 'num_songs'] = test['songs'].map(len)
test.loc[:, 'num_tags'] = test['tags_id'].map(len)
gnr_test = np.zeros((len(test), n_genre))
for i, index in enumerate(test.index):
if i % 10000 == 0:
print(i)
counter = Counter(test.loc[index]['plylst_genre_id'])
for k, c in counter.items():
gnr_test[i][k] = c
gnr_test.shape
n_test = len(test)
row = np.repeat(range(n_test), test['num_songs'])
col = [song for songs in test['songs'] for song in songs]
dat = np.repeat(1, test['num_songs'].sum())
test_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_test,
n_songs))
row = np.repeat(range(n_test), test['num_tags'])
col = [tag for tags in test['tags_id'] for tag in tags]
dat = np.repeat(1, test['num_tags'].sum())
test_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_test, n_tags)
)
test_user_songs_A_T = test_user_songs_A.T.tocsr()
test_user_tags_A_T = test_user_tags_A.T.tocsr()
data_all = pd.concat([train, val, test])
data_all.index = range(len(data_all))
arts = song['artist_id_basket'].map(lambda x: x[0])
arts = pd.DataFrame(arts)
art_counts = arts['artist_id_basket'].value_counts().reset_index()
art_counts.columns = ['artist_id_basket', 'counts']
arts2 = pd.merge(arts, art_counts, how='left', on=['artist_id_basket'])
song_art = song.iloc[arts2.query('counts >= 12')['artist_id_basket'].index]
song_art = song_art[['artist_id_basket']]
ART_cate = []
for i in tqdm_notebook(range(len(data_all))):
ART = []
songs = data_all.loc[i, 'songs']
for j in songs:
if j in song_art.index:
for k in song_art.loc[j, 'artist_id_basket']:
ART.append(k)
ART_cate.append(ART)
data_all['plylst_ARTIST'] = ART_cate
plylst_ARTIST = data_all['plylst_ARTIST']
ARTIST_counter = Counter([ART for ARTIST in plylst_ARTIST for ART in
ARTIST])
ARTIST_dict = {x: ARTIST_counter[x] for x in ARTIST_counter}
ARTIST_id_tid = dict()
ARTIST_tid_id = dict()
for i, t in enumerate(ARTIST_dict):
ARTIST_id_tid[t] = i
ARTIST_tid_id[i] = t
n_ARTIST = len(ARTIST_dict)
data_all['plylst_ARTIST_id'] = data_all['plylst_ARTIST'].map(lambda x:
[ARTIST_id_tid.get(s) for s in x if ARTIST_id_tid.get(s) != None])
ART_data_all = np.zeros((len(data_all), n_ARTIST))
for i, index in enumerate(data_all.index):
if i % 10000 == 0:
print(i)
counter = Counter(data_all.loc[index]['plylst_ARTIST_id'])
for k, c in counter.items():
ART_data_all[i][k] = c
ART_data_all.shape
ART_array = ART_data_all[:len(train)]
ART_val = ART_data_all[len(train):len(train) + len(val)]
ART_test = ART_data_all[len(train) + len(val):len(train) + len(val) +
len(test)]
del ART_data_all
ART_array = sparse.csr_matrix(ART_array)
ART_val = sparse.csr_matrix(ART_val)
ART_test = sparse.csr_matrix(ART_test)
tim_cate = []
for i in tqdm_notebook(range(len(data_all))):
tim = []
songs = data_all.loc[i, 'songs']
for j in songs:
tim.append(song.loc[j, 'issue_date'])
tim_cate.append(tim)
data_all['plylst_times'] = tim_cate
plylst_times = data_all['plylst_times']
times_counter = Counter([tim for times in plylst_times for tim in times])
times_dict = {x: times_counter[x] for x in times_counter}
times_id_tid = dict()
times_tid_id = dict()
for i, t in enumerate(times_dict):
times_id_tid[t] = i
times_tid_id[i] = t
n_times = len(times_dict)
data_all['plylst_times_id'] = data_all['plylst_times'].map(lambda x: [
times_id_tid.get(s) for s in x if times_id_tid.get(s) != None])
tim_data_all = np.zeros((len(data_all), n_times))
for i, index in enumerate(data_all.index):
if i % 10000 == 0:
print(i)
counter = Counter(data_all.loc[index]['plylst_times_id'])
for k, c in counter.items():
tim_data_all[i][k] = c
tim_array = tim_data_all[:len(train)]
tim_val = tim_data_all[len(train):len(train) + len(val)]
tim_test = tim_data_all[len(train) + len(val):len(train) + len(val) +
len(test)]
del tim_data_all
tim_array = sparse.csr_matrix(tim_array)
tim_val = sparse.csr_matrix(tim_val)
tim_test = sparse.csr_matrix(tim_test)
GEN_cate = []
for i in tqdm_notebook(range(len(data_all))):
GEN = []
songs = data_all.loc[i, 'songs']
for j in songs:
for k in song.loc[j, 'song_gn_gnr_basket']:
GEN.append(k)
GEN_cate.append(GEN)
data_all['plylst_GENRE'] = GEN_cate
plylst_GENRE = data_all['plylst_GENRE']
GENRE_counter = Counter([GEN for GENRE in plylst_GENRE for GEN in GENRE])
GENRE_dict = {x: GENRE_counter[x] for x in GENRE_counter}
GENRE_id_tid = dict()
GENRE_tid_id = dict()
for i, t in enumerate(GENRE_dict):
GENRE_id_tid[t] = i
GENRE_tid_id[i] = t
n_GENRE = len(GENRE_dict)
data_all['plylst_GENRE_id'] = data_all['plylst_GENRE'].map(lambda x: [
GENRE_id_tid.get(s) for s in x if GENRE_id_tid.get(s) != None])
GEN_data_all = np.zeros((len(data_all), n_GENRE))
for i, index in enumerate(data_all.index):
if i % 10000 == 0:
print(i)
counter = Counter(data_all.loc[index]['plylst_GENRE_id'])
for k, c in counter.items():
GEN_data_all[i][k] = c
GEN_array = GEN_data_all[:len(train)]
GEN_val = GEN_data_all[len(train):len(train) + len(val)]
GEN_test = GEN_data_all[len(train) + len(val):len(train) + len(val) +
len(test)]
del GEN_data_all
GEN_array = sparse.csr_matrix(GEN_array)
GEN_val = sparse.csr_matrix(GEN_val)
GEN_test = sparse.csr_matrix(GEN_test)
content = data_all['plylst_title']
if '{}.model'.format(sp_total_model_path) not in os.listdir():
makeSentencepieceModel(data_all, sp_total_model_path)
sp = SentencePieceProcessor()
sp.Load('{}.model'.format(sp_total_model_path))
cv = CountVectorizer(max_features=3000, tokenizer=sp.encode_as_pieces)
content = data_all['plylst_title']
tdm = cv.fit_transform(content)
title_tdm = tdm.toarray()
title_tr = title_tdm[:len(train)]
title_va = title_tdm[len(train):len(train) + len(val)]
title_ts = title_tdm[len(train) + len(val):len(train) + len(val) + len(
test)]
title_gnr = np.concatenate((gnr_array, title_tr), axis=1)
val_title_gnr = np.concatenate((gnr_val, title_va), axis=1)
test_title_gnr = np.concatenate((gnr_test, title_ts), axis=1)
title_sp = sparse.csr_matrix(title_tdm)
title_gnr = sparse.csr_matrix(title_gnr)
val_title_gnr = sparse.csr_matrix(val_title_gnr)
test_title_gnr = sparse.csr_matrix(test_title_gnr)
title_gnr = vstack([title_gnr, val_title_gnr, test_title_gnr])
song_sp = vstack([train_user_songs_A, val_user_songs_A, test_user_songs_A])
tag_sp = vstack([train_user_tags_A, val_user_tags_A, test_user_tags_A])
times_sp = vstack([tim_array, tim_val, tim_test])
GEN_sp = vstack([GEN_array, GEN_val, GEN_test])
ART_sp = vstack([ART_array, ART_val, ART_test])
model_knn_song25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_tag25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_title25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_title_gnr25 = NearestNeighbors(metric='cosine', algorithm=
'brute', n_neighbors=25, n_jobs=-1)
model_knn_times25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_GEN25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_ART25 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=25, n_jobs=-1)
model_knn_song40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_tag40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_title40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_title_gnr40 = NearestNeighbors(metric='cosine', algorithm=
'brute', n_neighbors=40, n_jobs=-1)
model_knn_times40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_GEN40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_ART40 = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=40, n_jobs=-1)
model_knn_song25.fit(song_sp)
model_knn_tag25.fit(tag_sp)
model_knn_title25.fit(title_sp)
model_knn_title_gnr25.fit(title_gnr)
model_knn_times25.fit(times_sp)
model_knn_GEN25.fit(GEN_sp)
model_knn_ART25.fit(ART_sp)
model_knn_song40.fit(song_sp)
model_knn_tag40.fit(tag_sp)
model_knn_title40.fit(title_sp)
model_knn_title_gnr40.fit(title_gnr)
model_knn_times40.fit(times_sp)
model_knn_GEN40.fit(GEN_sp)
model_knn_ART40.fit(ART_sp)
train.loc[:, 'num_songs'] = train['songs'].map(len)
train.loc[:, 'num_tags'] = train['tags_id'].map(len)
data_all = pd.concat([train, val, test])
data_all.index = range(len(data_all))
res = []
for i in tqdm_notebook(range(len(test))):
data = test.iloc[i]
pid = i
if len(data['songs']) >= 2 and len(data['tags_id']) >= 2:
p = np.zeros((707989, 1))
p[data['songs']] = 1
pp = np.zeros((n_tags, 1))
pp[data['tags_id']] = 1
tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]
row = np.repeat(range(25), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
tra_tag = data_all.iloc[model_knn_tag25.kneighbors(pp.T)[1][0]]
row = np.repeat(range(25), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:i +
1])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:i + 1])[
1][0]]
tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:i + 1])[
1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
songs_already = data['songs']
tags_already = data['tags_id']
test_song = cosine_similarity(tra_song_sp, p.T)
test_tag = cosine_similarity(tra_tag_sp, pp.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = (test_song * test_tag * test_title_genre * test_tim *
test_GEN * test_ART)
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False]
cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]
row = np.repeat(range(40), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
tra_tag = data_all.iloc[model_knn_tag40.kneighbors(pp.T)[1][0]]
row = np.repeat(range(40), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:i +
1])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:i + 1])[
1][0]]
tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:i + 1])[
1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
test_song = cosine_similarity(tra_song_sp, p.T)
test_tag = cosine_similarity(tra_tag_sp, pp.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = (test_song * test_tag * test_title_genre * test_tim *
test_GEN * test_ART)
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False]
cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
cand_all = pd.merge(cand1, cand2, how='outer', on='index')
cand_all = cand_all.fillna(0)
cand_all['pred'] = (cand_all['0_x'] + cand_all['0_y']) / 2
cand_song_idx = list(cand_all.sort_values(by=['pred'],
ascending=False)[:100]['index'])
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
elif len(data['songs']) != 0:
p = np.zeros((707989, 1))
p[data['songs']] = 1
tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]
row = np.repeat(range(25), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(25), tra_song['num_tags'])
col = [tag for tags in tra_song['tags_id'] for tag in tags]
dat = np.repeat(1, tra_song['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:i +
1])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:i + 1])[
1][0]]
tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:i + 1])[
1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
songs_already = data['songs']
tags_already = data['tags_id']
test_song = cosine_similarity(tra_song_sp, p.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = (test_song * test_title_genre * test_tim * test_GEN *
test_ART)
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False]
cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]
row = np.repeat(range(40), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(40), tra_song['num_tags'])
col = [tag for tags in tra_song['tags_id'] for tag in tags]
dat = np.repeat(1, tra_song['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:i +
1])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:i + 1])[
1][0]]
tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:i + 1])[
1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
test_song = cosine_similarity(tra_song_sp, p.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = (test_song * test_title_genre * test_tim * test_GEN *
test_ART)
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False]
cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
cand_all = pd.merge(cand1, cand2, how='outer', on='index')
cand_all = cand_all.fillna(0)
cand_all['pred'] = (cand_all['0_x'] + cand_all['0_y']) / 2
cand_song_idx = list(cand_all.sort_values(by=['pred'],
ascending=False)[:100]['index'])
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
elif len(data['tags_id']) != 0:
p = np.zeros((n_tags, 1))
p[data['tags_id']] = 1
tra_tag = data_all.iloc[model_knn_tag25.kneighbors(p.T)[1][0]]
row = np.repeat(range(25), tra_tag['num_songs'])
col = [song for songs in tra_tag['songs'] for song in songs]
dat = np.repeat(1, tra_tag['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(25), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
songs_already = data['songs']
tags_already = data['tags_id']
            testi = cosine_similarity(tra_tag_sp, p.T)
if len(data['plylst_title']) != 0:
tra_title_gnr = title_tdm[model_knn_title25.kneighbors(
title_ts[i:i + 1])[1][0]]
testi_title = cosine_similarity(tra_title_gnr, title_ts[i:i +
1])
testi = testi * testi_title
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False][:100]
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res.append({'id': test.loc[pid, 'id'], 'songs': list(
cand_song_idx), 'tags': rec_tag_idx})
else:
cand_song = []
for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i
:i + 1])[1][0]].songs.to_list():
for j in li:
cand_song.append(j)
cand_tag = []
for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i
:i + 1])[1][0]].tags.to_list():
for j in li:
cand_tag.append(j)
cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[
:100].index)
rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10
].index)
res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
for i in range(len(res)):
if len(res[i]['songs']) != 100:
            print('song error at index {}'.format(i))
        if len(res[i]['tags']) != 10:
            print('tag error at index {}'.format(i))
rec = []
for i in range(len(res)):
rec.append({'id': res[i]['id'], 'songs': list(res[i]['songs']),
'tags': res[i]['tags']})
result1 = pd.DataFrame(rec)
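    # A few playlists still come back with fewer than 100 songs; they are re-scored below with
    # wider 50-neighbor models.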
model_knn_song = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_tag = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_title = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_title_gnr = NearestNeighbors(metric='cosine', algorithm=
'brute', n_neighbors=50, n_jobs=-1)
model_knn_times = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_GEN = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_ART = NearestNeighbors(metric='cosine', algorithm='brute',
n_neighbors=50, n_jobs=-1)
model_knn_song.fit(song_sp)
model_knn_tag.fit(tag_sp)
model_knn_title.fit(title_sp)
model_knn_title_gnr.fit(title_gnr)
model_knn_times.fit(times_sp)
model_knn_GEN.fit(GEN_sp)
model_knn_ART.fit(ART_sp)
res2 = []
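    # Second pass: re-run inference with 50 neighbors for the five playlists that were not
    # filled on the first pass.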
for i in tqdm_notebook([1960, 6361, 8705, 9310, 10498]):
data = test.iloc[i]
pid = i
if len(data['songs']) != 0 and len(data['tags_id']) != 0:
p = np.zeros((707989, 1))
p[data['songs']] = 1
pp = np.zeros((n_tags, 1))
pp[data['tags_id']] = 1
tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]
row = np.repeat(range(50), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
tra_tag = data_all.iloc[model_knn_tag.kneighbors(pp.T)[1][0]]
row = np.repeat(range(50), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:i + 1]
)[1][0]]
tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:i + 1])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
songs_already = data['songs']
tags_already = data['tags_id']
test_song = cosine_similarity(tra_song_sp, p.T)
test_tag = cosine_similarity(tra_tag_sp, pp.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = test_song * test_tag * test_title_genre * test_GEN
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False][:100]
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
elif len(data['songs']) != 0:
p = np.zeros((707989, 1))
p[data['songs']] = 1
tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]
row = np.repeat(range(50), tra_song['num_songs'])
col = [song for songs in tra_song['songs'] for song in songs]
dat = np.repeat(1, tra_song['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(50), tra_song['num_tags'])
col = [tag for tags in tra_song['tags_id'] for tag in tags]
dat = np.repeat(1, tra_song['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
songs_already = data['songs']
tags_already = data['tags_id']
tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:i + 1]
)[1][0]]
tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:i + 1])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(
test_title_gnr[i:i + 1])[1][0]]
test_song = cosine_similarity(tra_song_sp, p.T)
test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])
test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])
test_title_genre = cosine_similarity(tra_title_gnr,
test_title_gnr[i:i + 1])
testi = test_song * test_title_genre * test_tim * test_GEN
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-200:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False][:100]
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
elif len(data['tags_id']) != 0:
p = np.zeros((n_tags, 1))
p[data['tags_id']] = 1
tra_tag = data_all.iloc[model_knn_tag.kneighbors(p.T)[1][0]]
row = np.repeat(range(50), tra_tag['num_songs'])
col = [song for songs in tra_tag['songs'] for song in songs]
dat = np.repeat(1, tra_tag['num_songs'].sum())
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)
)
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(50), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
songs_already = data['songs']
tags_already = data['tags_id']
            testi = cosine_similarity(tra_tag_sp, p.T)
if len(data['plylst_title']) != 0:
tra_title_gnr = title_tdm[model_knn_title.kneighbors(
title_ts[i:i + 1])[1][0]]
testi_title = cosine_similarity(tra_title_gnr, title_ts[i:i +
1])
testi = testi * testi_title
cand_song = tra_song_sp_T.dot(testi)
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]
cand_song_idx = cand_song_idx[np.isin(cand_song_idx,
songs_already) == False][:100]
cand_tag = tra_tag_sp_T.dot(testi)
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==
False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
else:
cand_song = []
for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:i +
1])[1][0]].songs.to_list():
for j in li:
cand_song.append(j)
cand_tag = []
for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:i +
1])[1][0]].tags.to_list():
for j in li:
cand_tag.append(j)
cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[
:100].index)
rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10
].index)
res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,
'tags': rec_tag_idx})
pd.DataFrame(res2)
rec2 = []
for i in range(len(res2)):
rec2.append({'id': res2[i]['id'], 'songs': list(res2[i]['songs']),
'tags': res2[i]['tags']})
result2 = pd.DataFrame(rec2)['songs']
n_index = [10498, 6361, 1960, 8705, 9310]
result2.index = n_index
result1.loc[n_index, 'songs'] = result2
result1['songs'].apply(len).sort_values()
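    # Playlist 6361 still has no candidates, so it falls back to the 100 most played training songs.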
s = []
for song in train.songs.tolist():
s += song
r1 = dict(Counter(s))
r_song = sorted(r1.items(), key=lambda x: -x[1])
r_song_top = r_song[:100]
list_song = list(dict(r_song_top).keys())
len(list_song)
sub = []
for j in range(len(result1)):
sub.append(result1.loc[j].to_dict())
sub[6361]['songs'] = list_song
pd.DataFrame(sub)['songs'].apply(len).sort_values()
write_json(sub, 'final_songs.json')
return sub
if __name__ == '__main__':
_data = Dataset()
pre_tag.run(_data.test, _data.n_songs, _data.n_tags, _data.spr_list,
_data.tag_tid_id)
final_tags = word2vec_for_tag.run(_data.total, _data.test)
final_songs = song_inference()
result = []
for f_songs, f_tags in zip(final_songs, final_tags):
result.append({'id': f_songs['id'], 'songs': f_songs['songs'],
'tags': f_tags['tags']})
write_json(result, 'results.json')
| from datetime import timedelta, datetime
import glob
import json
import os
import re
import pickle
import os,time
import pandas as pd
import numpy as np
from collections import Counter
from sentencepiece import SentencePieceTrainer
from sentencepiece import SentencePieceProcessor
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from scipy.sparse import vstack
from scipy import sparse
import scipy.sparse as spr
from scipy.sparse import vstack
from scipy import sparse
from util import write_json,makeSentencepieceModel
from sklearn.feature_extraction.text import CountVectorizer
from tqdm import tqdm_notebook
from sklearn.neighbors import NearestNeighbors
from Dataset import Dataset
import pre_tag,word2vec_for_tag
def song_inference():
sp_total_model_path = "sp_total"
train = pd.read_json('./dataset/train.json', typ = 'frame',encoding='utf-8')
song = pd.read_json('./dataset/song_meta.json', typ = 'frame',encoding='utf-8')
plylst_tag = train['tags']
tag_counter = Counter([tg for tgs in plylst_tag for tg in tgs])
tag_dict = {x: tag_counter[x] for x in tag_counter}
tag_id_tid = dict()
tag_tid_id = dict()
for i, t in enumerate(tag_dict):
tag_id_tid[t] = i
tag_tid_id[i] = t
n_tags = len(tag_dict)
plylst_song = train['songs']
song_dict = {x: x for x in song['id']}
n_songs = len(song_dict)
train['tags_id'] = train['tags'].map(lambda x: [tag_id_tid.get(t) for t in x if tag_id_tid.get(t) != None])
    # Collect the detailed genre codes of each playlist's songs.
song_cate = []
for i in range(len(train)):
gnr = []
songs = train.iloc[i,3]
for j in songs:
for k in song.loc[j,'song_gn_dtl_gnr_basket']:
gnr.append(k)
song_cate.append(gnr)
train['plylst_genre'] = song_cate
plylst_genre = train['plylst_genre']
genre_counter = Counter([gen for genre in plylst_genre for gen in genre])
genre_dict = {x: genre_counter[x] for x in genre_counter}
genre_id_tid = dict()
genre_tid_id = dict()
for i, t in enumerate(genre_dict):
genre_id_tid[t] = i
genre_tid_id[i] = t
n_genre = len(genre_dict)
train['plylst_genre_id'] = train['plylst_genre'].map(lambda x: [genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])
gnr_array = np.zeros((len(train),n_genre))
for i,index in enumerate(train.index):
if i%10000 == 0:
print(i)
counter = Counter(train.loc[index]['plylst_genre_id'])
for (k,c) in counter.items():
gnr_array[i][k] = c
gnr_array.shape
song['issue_date'] = song['issue_date'].astype('str').map(lambda x : x[:6])
plylst_use = train[['plylst_title','updt_date','tags_id','songs']]
plylst_use.loc[:,'num_songs'] = plylst_use['songs'].map(len)
plylst_use.loc[:,'num_tags'] = plylst_use['tags_id'].map(len)
plylst_train = plylst_use
n_train = len(plylst_train)
    row = np.repeat(range(n_train), plylst_train['num_songs']) # repeat each playlist index once per song it contains
    col = [song for songs in plylst_train['songs'] for song in songs] # flattened song ids
    dat = np.repeat(1, plylst_train['num_songs'].sum()) # a 1 for every (playlist, song) pair
    train_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_train, n_songs)) # build the playlist-by-song csr_matrix
row = np.repeat(range(n_train), plylst_train['num_tags'])
col = [tag for tags in plylst_train['tags_id'] for tag in tags]
dat = np.repeat(1, plylst_train['num_tags'].sum())
train_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_train, n_tags))
train_user_songs_A_T = train_user_songs_A.T.tocsr()
    train_user_songs_A_T # rows: songs, columns: playlists
    train_user_tags_A_T = train_user_tags_A.T.tocsr()
    train_user_tags_A_T # rows: tags, columns: playlists
val = pd.read_json('./dataset/val.json', typ = 'frame',encoding='utf-8')
song_cate = []
for i in range(len(val)):
gnr = []
songs = val.iloc[i,3]
for j in songs:
for k in song.loc[j,'song_gn_dtl_gnr_basket']:
gnr.append(k)
song_cate.append(gnr)
val['plylst_genre'] = song_cate
val['tags_id'] = val['tags'].map(lambda x: [tag_id_tid.get(t) for t in x if tag_id_tid.get(t) != None])
val['plylst_genre_id'] = val['plylst_genre'].map(lambda x: [genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])
val.loc[:,'num_songs'] = val['songs'].map(len)
val.loc[:,'num_tags'] = val['tags_id'].map(len)
# val_title = cv.transform(val['plylst_title']).toarray()
gnr_val = np.zeros((len(val),n_genre))
for i,index in enumerate(val.index):
if i%10000 == 0:
print(i)
counter = Counter(val.loc[index]['plylst_genre_id'])
for (k,c) in counter.items():
gnr_val[i][k] = c
gnr_val.shape
n_val = len(val)
    row = np.repeat(range(n_val), val['num_songs']) # repeat each playlist index once per song it contains
    col = [song for songs in val['songs'] for song in songs] # flattened song ids
    dat = np.repeat(1, val['num_songs'].sum()) # a 1 for every (playlist, song) pair
    val_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_songs)) # build the playlist-by-song csr_matrix
row = np.repeat(range(n_val), val['num_tags'])
col = [tag for tags in val['tags_id'] for tag in tags]
dat = np.repeat(1, val['num_tags'].sum())
val_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_tags))
val_user_songs_A_T = val_user_songs_A.T.tocsr()
val_user_tags_A_T = val_user_tags_A.T.tocsr()
test = pd.read_json('./dataset/test.json', typ = 'frame',encoding='utf-8')
song_cate = []
for i in range(len(test)):
gnr = []
songs = test.iloc[i,3]
for j in songs:
for k in song.loc[j,'song_gn_dtl_gnr_basket']:
gnr.append(k)
song_cate.append(gnr)
test['plylst_genre'] = song_cate
test['tags_id'] = test['tags'].map(lambda x: [tag_id_tid.get(t) for t in x if tag_id_tid.get(t) != None])
test['plylst_genre_id'] = test['plylst_genre'].map(lambda x: [genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])
test.loc[:,'num_songs'] = test['songs'].map(len)
test.loc[:,'num_tags'] = test['tags_id'].map(len)
# test_title = cv.transform(test['plylst_title']).toarray()
gnr_test = np.zeros((len(test),n_genre))
for i,index in enumerate(test.index):
if i%10000 == 0:
print(i)
counter = Counter(test.loc[index]['plylst_genre_id'])
for (k,c) in counter.items():
gnr_test[i][k] = c
gnr_test.shape
n_test = len(test)
    row = np.repeat(range(n_test), test['num_songs']) # repeat each playlist index once per song it contains
    col = [song for songs in test['songs'] for song in songs] # flattened song ids
    dat = np.repeat(1, test['num_songs'].sum()) # a 1 for every (playlist, song) pair
    test_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_test, n_songs)) # build the playlist-by-song csr_matrix
row = np.repeat(range(n_test), test['num_tags'])
col = [tag for tags in test['tags_id'] for tag in tags]
dat = np.repeat(1, test['num_tags'].sum())
test_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_test, n_tags))
test_user_songs_A_T = test_user_songs_A.T.tocsr()
test_user_tags_A_T = test_user_tags_A.T.tocsr()
data_all = pd.concat([train,val,test])
data_all.index = range(len(data_all))
arts = song['artist_id_basket'].map(lambda x : x[0])
arts = pd.DataFrame(arts)
art_counts = arts['artist_id_basket'].value_counts().reset_index()
art_counts.columns = ['artist_id_basket','counts']
arts2 = pd.merge(arts,art_counts,how='left',on=['artist_id_basket'])
song_art = song.iloc[arts2.query('counts >= 12')['artist_id_basket'].index]
song_art = song_art[['artist_id_basket']]
    # Artist features (only artists that appear on at least 12 songs)
ART_cate = []
for i in tqdm_notebook(range(len(data_all))):
ART = []
songs = data_all.loc[i,'songs']
for j in songs:
if j in song_art.index :
for k in song_art.loc[j,'artist_id_basket'] :
ART.append(k)
ART_cate.append(ART)
data_all['plylst_ARTIST'] = ART_cate
plylst_ARTIST = data_all['plylst_ARTIST']
ARTIST_counter = Counter([ART for ARTIST in plylst_ARTIST for ART in ARTIST])
ARTIST_dict = {x: ARTIST_counter[x] for x in ARTIST_counter}
ARTIST_id_tid = dict()
ARTIST_tid_id = dict()
for i, t in enumerate(ARTIST_dict):
ARTIST_id_tid[t] = i
ARTIST_tid_id[i] = t
n_ARTIST = len(ARTIST_dict)
data_all['plylst_ARTIST_id'] = data_all['plylst_ARTIST'].map(lambda x: [ARTIST_id_tid.get(s) for s in x if ARTIST_id_tid.get(s) != None])
ART_data_all = np.zeros((len(data_all),n_ARTIST))
for i,index in enumerate(data_all.index):
if i%10000 == 0:
print(i)
counter = Counter(data_all.loc[index]['plylst_ARTIST_id'])
for (k,c) in counter.items():
ART_data_all[i][k] = c
ART_data_all.shape
ART_array = ART_data_all[:len(train)]
ART_val = ART_data_all[len(train):len(train)+len(val)]
ART_test = ART_data_all[len(train)+len(val):len(train)+len(val)+len(test)]
# ART_data_all = sparse.csr_matrix(ART_data_all)
del ART_data_all
ART_array = sparse.csr_matrix(ART_array)
ART_val = sparse.csr_matrix(ART_val)
ART_test = sparse.csr_matrix(ART_test)
    # Collect the issue dates of each playlist's songs.
tim_cate = []
for i in tqdm_notebook(range(len(data_all))):
tim = []
songs = data_all.loc[i,'songs']
for j in songs:
tim.append(song.loc[j,'issue_date'])
tim_cate.append(tim)
data_all['plylst_times'] = tim_cate
plylst_times = data_all['plylst_times']
times_counter = Counter([tim for times in plylst_times for tim in times])
times_dict = {x: times_counter[x] for x in times_counter}
times_id_tid = dict()
times_tid_id = dict()
for i, t in enumerate(times_dict):
times_id_tid[t] = i
times_tid_id[i] = t
n_times = len(times_dict)
data_all['plylst_times_id'] = data_all['plylst_times'].map(lambda x: [times_id_tid.get(s) for s in x if times_id_tid.get(s) != None])
tim_data_all = np.zeros((len(data_all),n_times))
for i,index in enumerate(data_all.index):
if i%10000 == 0:
print(i)
counter = Counter(data_all.loc[index]['plylst_times_id'])
for (k,c) in counter.items():
tim_data_all[i][k] = c
tim_array = tim_data_all[:len(train)]
tim_val = tim_data_all[len(train):len(train)+len(val)]
tim_test = tim_data_all[len(train)+len(val):len(train)+len(val)+len(test)]
# tim_data_all = sparse.csr_matrix(tim_data_all)
del tim_data_all
tim_array = sparse.csr_matrix(tim_array)
tim_val = sparse.csr_matrix(tim_val)
tim_test = sparse.csr_matrix(tim_test)
    # Top-level genre features per playlist
GEN_cate = []
for i in tqdm_notebook(range(len(data_all))):
GEN = []
songs = data_all.loc[i,'songs']
for j in songs:
for k in song.loc[j,'song_gn_gnr_basket'] :
GEN.append(k)
GEN_cate.append(GEN)
data_all['plylst_GENRE'] = GEN_cate
plylst_GENRE = data_all['plylst_GENRE']
GENRE_counter = Counter([GEN for GENRE in plylst_GENRE for GEN in GENRE])
GENRE_dict = {x: GENRE_counter[x] for x in GENRE_counter}
GENRE_id_tid = dict()
GENRE_tid_id = dict()
for i, t in enumerate(GENRE_dict):
GENRE_id_tid[t] = i
GENRE_tid_id[i] = t
n_GENRE = len(GENRE_dict)
data_all['plylst_GENRE_id'] = data_all['plylst_GENRE'].map(lambda x: [GENRE_id_tid.get(s) for s in x if GENRE_id_tid.get(s) != None])
GEN_data_all = np.zeros((len(data_all),n_GENRE))
for i,index in enumerate(data_all.index):
if i%10000 == 0:
print(i)
counter = Counter(data_all.loc[index]['plylst_GENRE_id'])
for (k,c) in counter.items():
GEN_data_all[i][k] = c
GEN_array = GEN_data_all[:len(train)]
GEN_val = GEN_data_all[len(train):len(train)+len(val)]
GEN_test = GEN_data_all[len(train)+len(val):len(train)+len(val)+len(test)]
# GEN_data_all = sparse.csr_matrix(GEN_data_all)
del GEN_data_all
GEN_array = sparse.csr_matrix(GEN_array)
GEN_val = sparse.csr_matrix(GEN_val)
GEN_test = sparse.csr_matrix(GEN_test)
content = data_all['plylst_title']
if "{}.model".format(sp_total_model_path) not in os.listdir():
makeSentencepieceModel(data_all,sp_total_model_path)
sp = SentencePieceProcessor()
sp.Load("{}.model".format(sp_total_model_path))
cv = CountVectorizer(max_features=3000, tokenizer=sp.encode_as_pieces)
content = data_all['plylst_title']
tdm = cv.fit_transform(content)
title_tdm = tdm.toarray()
title_tr = title_tdm[:len(train)]
title_va = title_tdm[len(train):len(train)+len(val)]
title_ts = title_tdm[len(train)+len(val):len(train)+len(val)+len(test)]
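    # Playlist titles are tokenized with SentencePiece and count-vectorized (max 3000 features);
    # the title counts are then concatenated with the genre counts to form a title+genre view.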
title_gnr = np.concatenate((gnr_array,title_tr),axis=1)
val_title_gnr = np.concatenate((gnr_val,title_va),axis=1)
test_title_gnr = np.concatenate((gnr_test,title_ts),axis=1)
title_sp = sparse.csr_matrix(title_tdm)
title_gnr = sparse.csr_matrix(title_gnr)
val_title_gnr = sparse.csr_matrix(val_title_gnr)
test_title_gnr = sparse.csr_matrix(test_title_gnr)
title_gnr = vstack([title_gnr,val_title_gnr,test_title_gnr])
song_sp = vstack([train_user_songs_A,val_user_songs_A,test_user_songs_A])
tag_sp = vstack([train_user_tags_A,val_user_tags_A,test_user_tags_A])
times_sp = vstack([tim_array,tim_val,tim_test])
GEN_sp = vstack([GEN_array,GEN_val,GEN_test])
ART_sp = vstack([ART_array,ART_val,ART_test])
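    # Train/val/test rows are stacked so every KNN model indexes into one combined playlist space.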
# song_sp_T = song_sp.T.tocsr()
# tag_sp_T = tag_sp.T.tocsr()
model_knn_song25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)
model_knn_tag25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)
model_knn_title25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)
model_knn_title_gnr25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)
model_knn_times25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)
model_knn_GEN25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)
model_knn_ART25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)
model_knn_song40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)
model_knn_tag40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)
model_knn_title40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)
model_knn_title_gnr40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)
model_knn_times40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)
model_knn_GEN40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)
model_knn_ART40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)
model_knn_song25.fit(song_sp)
model_knn_tag25.fit(tag_sp)
model_knn_title25.fit(title_sp)
model_knn_title_gnr25.fit(title_gnr)
model_knn_times25.fit(times_sp)
model_knn_GEN25.fit(GEN_sp)
model_knn_ART25.fit(ART_sp)
model_knn_song40.fit(song_sp)
model_knn_tag40.fit(tag_sp)
model_knn_title40.fit(title_sp)
model_knn_title_gnr40.fit(title_gnr)
model_knn_times40.fit(times_sp)
model_knn_GEN40.fit(GEN_sp)
model_knn_ART40.fit(ART_sp)
train.loc[:,'num_songs'] = train['songs'].map(len)
train.loc[:,'num_tags'] = train['tags_id'].map(len)
data_all = pd.concat([train,val,test])
data_all.index = range(len(data_all))
res = []
for i in tqdm_notebook(range(len(test))):
data = test.iloc[i]
pid = i
if len(data['songs']) >= 2 and len(data['tags_id']) >=2 :
p = np.zeros((707989,1))
p[data['songs']] = 1
pp = np.zeros((n_tags,1))
pp[data['tags_id']] = 1
tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]
            row = np.repeat(range(25), tra_song['num_songs']) # repeat each neighbor index once per song it contains
            col = [song for songs in tra_song['songs'] for song in songs] # flattened song ids
            dat = np.repeat(1, tra_song['num_songs'].sum()) # a 1 for every (neighbor, song) pair
            tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)) # neighbor-by-song csr_matrix
tra_song_sp_T = tra_song_sp.T.tocsr()
tra_tag = data_all.iloc[model_knn_tag25.kneighbors(pp.T)[1][0]]
row = np.repeat(range(25), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:(i+1)])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:(i+1)])[1][0]]
tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:(i+1)])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(test_title_gnr[i:(i+1)])[1][0]]
songs_already = data["songs"]
tags_already = data["tags_id"]
test_song = cosine_similarity(tra_song_sp,p.T)
test_tag = cosine_similarity(tra_tag_sp,pp.T)
test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])
test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])
test_ART = cosine_similarity(tra_ART,ART_test[i:(i+1)])
test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])
testi = test_song * test_tag * test_title_genre * test_tim * test_GEN * test_ART
            cand_song = tra_song_sp_T.dot(testi) # (songs x neighbors) dot neighbor similarities -> songs liked by similar playlists score high
            cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # take the top-scoring candidate songs
            cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False] # drop songs already in the playlist
cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
####### 40 ####################################################
tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]
            row = np.repeat(range(40), tra_song['num_songs']) # repeat each neighbor index once per song it contains
            col = [song for songs in tra_song['songs'] for song in songs] # flattened song ids
            dat = np.repeat(1, tra_song['num_songs'].sum()) # a 1 for every (neighbor, song) pair
            tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)) # neighbor-by-song csr_matrix
tra_song_sp_T = tra_song_sp.T.tocsr()
tra_tag = data_all.iloc[model_knn_tag40.kneighbors(pp.T)[1][0]]
row = np.repeat(range(40), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:(i+1)])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:(i+1)])[1][0]]
tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:(i+1)])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(test_title_gnr[i:(i+1)])[1][0]]
test_song = cosine_similarity(tra_song_sp,p.T)
test_tag = cosine_similarity(tra_tag_sp,pp.T)
test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])
test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])
test_ART = cosine_similarity(tra_ART,ART_test[i:(i+1)])
test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])
testi = test_song * test_tag * test_title_genre * test_tim * test_GEN * test_ART
            cand_song = tra_song_sp_T.dot(testi) # (songs x neighbors) dot neighbor similarities -> songs liked by similar playlists score high
            cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # take the top-scoring candidate songs
            cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False] # drop songs already in the playlist
cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
cand_all = pd.merge(cand1,cand2,how='outer',on='index')
cand_all = cand_all.fillna(0)
cand_all['pred'] = (cand_all['0_x'] + cand_all['0_y'])/2
cand_song_idx = list(cand_all.sort_values(by=['pred'],ascending=False)[:100]['index'])
######tag######
            cand_tag = tra_tag_sp_T.dot(testi) # same procedure for tags
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res.append({
"id": test.loc[pid,'id'],
"songs": cand_song_idx,
"tags": rec_tag_idx
})
elif len(data['songs']) != 0:
p = np.zeros((707989,1))
p[data['songs']] = 1
tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]
            row = np.repeat(range(25), tra_song['num_songs']) # repeat each neighbor index once per song it contains
            col = [song for songs in tra_song['songs'] for song in songs] # flattened song ids
            dat = np.repeat(1, tra_song['num_songs'].sum()) # a 1 for every (neighbor, song) pair
            tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)) # neighbor-by-song csr_matrix
tra_song_sp_T = tra_song_sp.T.tocsr()
# tra_tag = data_all.iloc[model_knn_tag25.kneighbors(pp.T)[1][0]]
row = np.repeat(range(25), tra_song['num_tags'])
col = [tag for tags in tra_song['tags_id'] for tag in tags]
dat = np.repeat(1, tra_song['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:(i+1)])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:(i+1)])[1][0]]
tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:(i+1)])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(test_title_gnr[i:(i+1)])[1][0]]
songs_already = data["songs"]
tags_already = data["tags_id"]
test_song = cosine_similarity(tra_song_sp,p.T)
test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])
test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])
test_ART = cosine_similarity(tra_ART,ART_test[i:(i+1)])
test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])
testi = test_song*test_title_genre*test_tim*test_GEN * test_ART
            cand_song = tra_song_sp_T.dot(testi) # (songs x neighbors) dot neighbor similarities -> songs liked by similar playlists score high
            cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # take the top-scoring candidate songs
            cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False] # drop songs already in the playlist
cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
####### 40 ####################################################
tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]
            row = np.repeat(range(40), tra_song['num_songs']) # repeat each neighbor index once per song it contains
            col = [song for songs in tra_song['songs'] for song in songs] # flattened song ids
            dat = np.repeat(1, tra_song['num_songs'].sum()) # a 1 for every (neighbor, song) pair
            tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)) # neighbor-by-song csr_matrix
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(40), tra_song['num_tags'])
col = [tag for tags in tra_song['tags_id'] for tag in tags]
dat = np.repeat(1, tra_song['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:(i+1)])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:(i+1)])[1][0]]
tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:(i+1)])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(test_title_gnr[i:(i+1)])[1][0]]
test_song = cosine_similarity(tra_song_sp,p.T)
test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])
test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])
test_ART = cosine_similarity(tra_ART,ART_test[i:(i+1)])
test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])
testi = test_song * test_title_genre * test_tim * test_GEN * test_ART
            cand_song = tra_song_sp_T.dot(testi) # (songs x neighbors) dot neighbor similarities -> songs liked by similar playlists score high
            cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # take the top-scoring candidate songs
            cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False] # drop songs already in the playlist
cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
cand_all = pd.merge(cand1,cand2,how='outer',on='index')
cand_all = cand_all.fillna(0)
cand_all['pred'] = (cand_all['0_x'] + cand_all['0_y'])/2
cand_song_idx = list(cand_all.sort_values(by=['pred'],ascending=False)[:100]['index'])
#######tag########
            cand_tag = tra_tag_sp_T.dot(testi) # same procedure for tags
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res.append({
"id": test.loc[pid,'id'],
"songs": cand_song_idx,
"tags": rec_tag_idx
})
elif len(data['tags_id']) !=0:
p = np.zeros((n_tags,1))
p[data['tags_id']] = 1
tra_tag = data_all.iloc[model_knn_tag25.kneighbors(p.T)[1][0]]
            row = np.repeat(range(25), tra_tag['num_songs']) # repeat each neighbor index once per song it contains
            col = [song for songs in tra_tag['songs'] for song in songs] # flattened song ids
            dat = np.repeat(1, tra_tag['num_songs'].sum()) # a 1 for every (neighbor, song) pair
            tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)) # neighbor-by-song csr_matrix
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(25), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
songs_already = data["songs"]
tags_already = data["tags_id"]
            testi = cosine_similarity(tra_tag_sp,p.T)
if len(data['plylst_title']) != 0 :
tra_title_gnr = title_tdm[model_knn_title25.kneighbors(title_ts[i:(i+1)])[1][0]]
testi_title = cosine_similarity(tra_title_gnr,title_ts[i:(i+1)])
testi = testi * testi_title
            cand_song = tra_song_sp_T.dot(testi) # (songs x neighbors) dot neighbor similarities -> songs liked by similar playlists score high
            cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # take the top-scoring candidate songs
            cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False][:100] # drop songs already in the playlist and keep 100
            cand_tag = tra_tag_sp_T.dot(testi) # same procedure for tags
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res.append({
"id": test.loc[pid,'id'],
"songs": list(cand_song_idx),
"tags": rec_tag_idx
})
else :
cand_song = []
for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i:(i+1)])[1][0]].songs.to_list():
for j in li:
cand_song.append(j)
cand_tag = []
for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i:(i+1)])[1][0]].tags.to_list():
for j in li:
cand_tag.append(j)
cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[:100].index)
rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10].index)
res.append({
"id": test.loc[pid,'id'],
"songs": cand_song_idx,
"tags": rec_tag_idx
})
for i in range(len(res)):
if len(res[i]['songs']) != 100:
            print('song error at index {}'.format(i))
        if len(res[i]['tags']) != 10:
            print('tag error at index {}'.format(i))
rec = []
for i in range(len(res)):
rec.append({
"id": res[i]['id'],
"songs": list(res[i]['songs']),
"tags": res[i]['tags']
})
result1 = pd.DataFrame(rec)
model_knn_song = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)
model_knn_tag = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)
model_knn_title = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)
model_knn_title_gnr = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)
model_knn_times = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)
model_knn_GEN = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)
model_knn_ART = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)
model_knn_song.fit(song_sp)
model_knn_tag.fit(tag_sp)
model_knn_title.fit(title_sp)
model_knn_title_gnr.fit(title_gnr)
model_knn_times.fit(times_sp)
model_knn_GEN.fit(GEN_sp)
model_knn_ART.fit(ART_sp)
res2 = []
for i in tqdm_notebook([1960, 6361, 8705, 9310, 10498]):
data = test.iloc[i]
pid = i
if len(data['songs']) != 0 and len(data['tags_id']) != 0:
p = np.zeros((707989,1))
p[data['songs']] = 1
pp = np.zeros((n_tags,1))
pp[data['tags_id']] = 1
tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]
            row = np.repeat(range(50), tra_song['num_songs']) # repeat each neighbor index once per song it contains
            col = [song for songs in tra_song['songs'] for song in songs] # flattened song ids
            dat = np.repeat(1, tra_song['num_songs'].sum()) # a 1 for every (neighbor, song) pair
            tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)) # neighbor-by-song csr_matrix
tra_song_sp_T = tra_song_sp.T.tocsr()
tra_tag = data_all.iloc[model_knn_tag.kneighbors(pp.T)[1][0]]
row = np.repeat(range(50), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:(i+1)])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:(i+1)])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(test_title_gnr[i:(i+1)])[1][0]]
songs_already = data["songs"]
tags_already = data["tags_id"]
test_song = cosine_similarity(tra_song_sp,p.T)
test_tag = cosine_similarity(tra_tag_sp,pp.T)
test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])
test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])
test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])
testi = test_song * test_tag * test_title_genre * test_GEN
            cand_song = tra_song_sp_T.dot(testi) # (songs x neighbors) dot neighbor similarities -> songs liked by similar playlists score high
            cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # take the top-scoring candidate songs
            cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False][:100] # drop songs already in the playlist and keep 100
            cand_tag = tra_tag_sp_T.dot(testi) # same procedure for tags
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res2.append({
"id": test.loc[pid,'id'],
"songs": cand_song_idx,
"tags": rec_tag_idx
})
elif len(data['songs']) != 0:
p = np.zeros((707989,1))
p[data['songs']] = 1
tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]
            row = np.repeat(range(50), tra_song['num_songs']) # repeat each neighbor index once per song it contains
            col = [song for songs in tra_song['songs'] for song in songs] # flattened song ids
            dat = np.repeat(1, tra_song['num_songs'].sum()) # a 1 for every (neighbor, song) pair
            tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)) # neighbor-by-song csr_matrix
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(50), tra_song['num_tags'])
col = [tag for tags in tra_song['tags_id'] for tag in tags]
dat = np.repeat(1, tra_song['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
songs_already = data["songs"]
tags_already = data["tags_id"]
tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:(i+1)])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:(i+1)])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(test_title_gnr[i:(i+1)])[1][0]]
test_song = cosine_similarity(tra_song_sp,p.T)
test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])
test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])
test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])
testi = test_song*test_title_genre*test_tim*test_GEN
            cand_song = tra_song_sp_T.dot(testi) # (songs x neighbors) dot neighbor similarities -> songs liked by similar playlists score high
            cand_song_idx = cand_song.reshape(-1).argsort()[-200:][::-1] # take the top-scoring candidate songs
            cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False][:100] # drop songs already in the playlist and keep 100
            cand_tag = tra_tag_sp_T.dot(testi) # same procedure for tags
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res2.append({
"id": test.loc[pid,'id'],
"songs": cand_song_idx,
"tags": rec_tag_idx
})
elif len(data['tags_id']) !=0:
p = np.zeros((n_tags,1))
p[data['tags_id']] = 1
tra_tag = data_all.iloc[model_knn_tag.kneighbors(p.T)[1][0]]
            row = np.repeat(range(50), tra_tag['num_songs']) # repeat each neighbor index once per song it contains
            col = [song for songs in tra_tag['songs'] for song in songs] # flattened song ids
            dat = np.repeat(1, tra_tag['num_songs'].sum()) # a 1 for every (neighbor, song) pair
            tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)) # neighbor-by-song csr_matrix
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(50), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
songs_already = data["songs"]
tags_already = data["tags_id"]
            testi = cosine_similarity(tra_tag_sp,p.T)
if len(data['plylst_title']) != 0 :
tra_title_gnr = title_tdm[model_knn_title.kneighbors(title_ts[i:(i+1)])[1][0]]
testi_title = cosine_similarity(tra_title_gnr,title_ts[i:(i+1)])
testi = testi * testi_title
            cand_song = tra_song_sp_T.dot(testi) # (songs x neighbors) dot neighbor similarities -> songs liked by similar playlists score high
            cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # take the top-scoring candidate songs
            cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False][:100] # drop songs already in the playlist and keep 100
            cand_tag = tra_tag_sp_T.dot(testi) # same procedure for tags
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res2.append({
"id": test.loc[pid,'id'],
"songs": cand_song_idx,
"tags": rec_tag_idx
})
else:
cand_song = []
for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:(i+1)])[1][0]].songs.to_list():
for j in li:
cand_song.append(j)
cand_tag = []
for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:(i+1)])[1][0]].tags.to_list():
for j in li:
cand_tag.append(j)
cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[:100].index)
rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10].index)
res2.append({
"id": test.loc[pid,'id'],
"songs": cand_song_idx,
"tags": rec_tag_idx
})
pd.DataFrame(res2)
rec2 = []
for i in range(len(res2)):
rec2.append({
"id": res2[i]['id'],
"songs": list(res2[i]['songs']),
"tags": res2[i]['tags']
})
result2 = pd.DataFrame(rec2)['songs']
n_index = [10498,6361,1960,8705,9310]
result2.index = n_index
result1.loc[n_index,'songs'] = result2
result1['songs'].apply(len).sort_values()
    # Playlist 6361 is still empty, so recommend the overall top-100 most frequent training songs
s = []
for song in train.songs.tolist():
s += song
r1 = dict(Counter(s))
r_song = sorted(r1.items(), key=lambda x: -x[1])
    r_song_top = r_song[:100] # top-100; the cutoff is a tunable choice
list_song = list(dict(r_song_top).keys())
len(list_song)
sub= []
for j in range(len(result1)) :
sub.append(result1.loc[j].to_dict())
sub[6361]['songs'] = list_song
pd.DataFrame(sub)['songs'].apply(len).sort_values()
write_json(sub,'final_songs.json')
return sub
if __name__ == '__main__':
_data = Dataset()
pre_tag.run(_data.test,_data.n_songs,_data.n_tags,_data.spr_list,_data.tag_tid_id)
final_tags = word2vec_for_tag.run(_data.total,_data.test)
final_songs = song_inference()
result = []
for f_songs, f_tags in zip(final_songs,final_tags):
result.append({
'id':f_songs['id'],
'songs':f_songs['songs'],
'tags':f_tags['tags']
})
write_json(result, 'results.json')
| [
0,
1,
2,
3,
4
] |
1,830 | b29f85ccf396640c2a63bf634b549a3eaa0dbb1b | <mask token>
def extract2(inp):
two = 0
while inp % 2 == 0:
inp //= 2
two += 1
return inp, two
<mask token>
def solve():
x = RSA.importKey(open('pub.pem', 'rb').read())
    r = 304
    mod = 2 ** r
    d0 = THE_LEAKED_PRIVATE_KEY % mod
e = x.e
N = x.n
d0e = d0 * e
cnt = 0
now = timer()
total_time = 0
for k in range(1, e, 2):
print('k : ', k)
k_left, two_right = extract2(k)
k_left_1 = gmpy2.invert(k_left, mod)
left = N + 1 + k_left_1 - k_left_1 * d0e
left %= mod
_, two_left = extract2(left)
assert two_left - two_right > 0
poss_s = []
random_length = two_left - two_right - 1
poss_set = it.product('01', repeat=random_length)
poss_set = map(''.join, poss_set)
os.system('rm -rf ./ans')
for s in poss_set:
s += bin(left)[2:].rjust(r, '0')
assert len(s) == r
os.system('python3.6 ./tools_on_git/Hensel.py {} {}'.format(int
(s, 2), N))
os.system('sage ./tools_on_git/coppersmith.sage {}'.format(N))
cnt += 1
total_time += timer() - now
now = timer()
print('\tcnt : ', cnt)
print('\tavg : ', total_time * 1.0 / cnt)
if os.path.isfile('ans'):
print('answer found !')
exit()
<mask token>
| <mask token>
def extract2(inp):
two = 0
while inp % 2 == 0:
inp //= 2
two += 1
return inp, two
def genkey():
while 1:
while 1:
p = getPrime(512)
q = getPrime(512)
if len(bin(abs(p - q))[2:]) < 450 or p + q & 1 != 0:
continue
phi = (p - 1) * (q - 1)
e = 257
print(gmpy2.gcd(phi, e))
try:
d = int(gmpy2.invert(e, phi))
except:
continue
assert d * e % phi == 1
ret = (d * e - 1) // phi
break
print('d : ', d)
r = 256
mod = 2 ** r
d0 = d % mod
d0e = d0 * e
print(bin(d0e)[-10:])
if d0e & 1 << 2:
x = RSA.construct((p * q, e, d, p, q))
output = x.exportKey('PEM')
with open('pri.pem', 'w') as f:
f.write(output)
output = x.publickey().exportKey('PEM')
with open('pub.pem', 'w') as f:
f.write(output)
break
return ret
def solve():
x = RSA.importKey(open('pub.pem', 'rb').read())
    r = 304
    mod = 2 ** r
    d0 = THE_LEAKED_PRIVATE_KEY % mod
e = x.e
N = x.n
d0e = d0 * e
cnt = 0
now = timer()
total_time = 0
for k in range(1, e, 2):
print('k : ', k)
k_left, two_right = extract2(k)
k_left_1 = gmpy2.invert(k_left, mod)
left = N + 1 + k_left_1 - k_left_1 * d0e
left %= mod
_, two_left = extract2(left)
assert two_left - two_right > 0
poss_s = []
random_length = two_left - two_right - 1
poss_set = it.product('01', repeat=random_length)
poss_set = map(''.join, poss_set)
os.system('rm -rf ./ans')
for s in poss_set:
s += bin(left)[2:].rjust(r, '0')
assert len(s) == r
os.system('python3.6 ./tools_on_git/Hensel.py {} {}'.format(int
(s, 2), N))
os.system('sage ./tools_on_git/coppersmith.sage {}'.format(N))
cnt += 1
total_time += timer() - now
now = timer()
print('\tcnt : ', cnt)
print('\tavg : ', total_time * 1.0 / cnt)
if os.path.isfile('ans'):
print('answer found !')
exit()
<mask token>
| <mask token>
def extract2(inp):
two = 0
while inp % 2 == 0:
inp //= 2
two += 1
return inp, two
def genkey():
while 1:
while 1:
p = getPrime(512)
q = getPrime(512)
if len(bin(abs(p - q))[2:]) < 450 or p + q & 1 != 0:
continue
phi = (p - 1) * (q - 1)
e = 257
print(gmpy2.gcd(phi, e))
try:
d = int(gmpy2.invert(e, phi))
except:
continue
assert d * e % phi == 1
ret = (d * e - 1) // phi
break
print('d : ', d)
r = 256
mod = 2 ** r
d0 = d % mod
d0e = d0 * e
print(bin(d0e)[-10:])
if d0e & 1 << 2:
x = RSA.construct((p * q, e, d, p, q))
output = x.exportKey('PEM')
with open('pri.pem', 'w') as f:
f.write(output)
output = x.publickey().exportKey('PEM')
with open('pub.pem', 'w') as f:
f.write(output)
break
return ret
def solve():
x = RSA.importKey(open('pub.pem', 'rb').read())
    r = 304
    mod = 2 ** r
    d0 = THE_LEAKED_PRIVATE_KEY % mod
e = x.e
N = x.n
d0e = d0 * e
cnt = 0
now = timer()
total_time = 0
for k in range(1, e, 2):
print('k : ', k)
k_left, two_right = extract2(k)
k_left_1 = gmpy2.invert(k_left, mod)
left = N + 1 + k_left_1 - k_left_1 * d0e
left %= mod
_, two_left = extract2(left)
assert two_left - two_right > 0
poss_s = []
random_length = two_left - two_right - 1
poss_set = it.product('01', repeat=random_length)
poss_set = map(''.join, poss_set)
os.system('rm -rf ./ans')
for s in poss_set:
s += bin(left)[2:].rjust(r, '0')
assert len(s) == r
os.system('python3.6 ./tools_on_git/Hensel.py {} {}'.format(int
(s, 2), N))
os.system('sage ./tools_on_git/coppersmith.sage {}'.format(N))
cnt += 1
total_time += timer() - now
now = timer()
print('\tcnt : ', cnt)
print('\tavg : ', total_time * 1.0 / cnt)
if os.path.isfile('ans'):
print('answer found !')
exit()
solve()
| from Crypto.PublicKey import RSA
from Crypto.Util.number import *
from timeit import default_timer as timer
import os
import gmpy2
import itertools as it
def extract2(inp):
two = 0
while inp % 2 == 0:
inp //= 2
two += 1
return inp, two
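# genkey(): generate a 1024-bit RSA key with e = 257, keep it only if d*e has the required
# low-bit pattern, write pri.pem / pub.pem, and return the multiplier k = (d*e - 1) // phi.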
def genkey():
while 1:
while 1:
p = getPrime(512)
q = getPrime(512)
if len(bin(abs(p - q))[2:]) < 450 or p + q & 1 != 0:
continue
phi = (p - 1) * (q - 1)
e = 257
print(gmpy2.gcd(phi, e))
try:
d = int(gmpy2.invert(e, phi))
except:
continue
assert d * e % phi == 1
ret = (d * e - 1) // phi
break
print('d : ', d)
r = 256
mod = 2 ** r
d0 = d % mod
d0e = d0 * e
print(bin(d0e)[-10:])
if d0e & 1 << 2:
x = RSA.construct((p * q, e, d, p, q))
output = x.exportKey('PEM')
with open('pri.pem', 'w') as f:
f.write(output)
output = x.publickey().exportKey('PEM')
with open('pub.pem', 'w') as f:
f.write(output)
break
return ret
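# solve(): partial key exposure attack. Since e*d = 1 + k*phi(N) for some k < e, the leaked low
# bits of d pin down a candidate for (p + q) mod 2^r for each odd k; the few unknown top bits are
# brute-forced, and the external Hensel.py / coppersmith.sage helpers are then used to lift a
# root mod 2^r and recover the factorization.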
def solve():
x = RSA.importKey(open('pub.pem', 'rb').read())
    r = 304
    mod = 2 ** r
    d0 = THE_LEAKED_PRIVATE_KEY % mod
e = x.e
N = x.n
d0e = d0 * e
cnt = 0
now = timer()
total_time = 0
for k in range(1, e, 2):
print('k : ', k)
k_left, two_right = extract2(k)
k_left_1 = gmpy2.invert(k_left, mod)
left = N + 1 + k_left_1 - k_left_1 * d0e
left %= mod
_, two_left = extract2(left)
assert two_left - two_right > 0
poss_s = []
random_length = two_left - two_right - 1
poss_set = it.product('01', repeat=random_length)
poss_set = map(''.join, poss_set)
os.system('rm -rf ./ans')
for s in poss_set:
s += bin(left)[2:].rjust(r, '0')
assert len(s) == r
os.system('python3.6 ./tools_on_git/Hensel.py {} {}'.format(int
(s, 2), N))
os.system('sage ./tools_on_git/coppersmith.sage {}'.format(N))
cnt += 1
total_time += timer() - now
now = timer()
print('\tcnt : ', cnt)
print('\tavg : ', total_time * 1.0 / cnt)
if os.path.isfile('ans'):
print('answer found !')
exit()
solve()
| #!/usr/bin/env python2
from Crypto.PublicKey import RSA
from Crypto.Util.number import *
from timeit import default_timer as timer
import os
import gmpy2
import itertools as it
def extract2(inp):
two = 0
while inp % 2 == 0:
inp //= 2
two += 1
return inp, two
def genkey():
while 1:
while 1:
p = getPrime(512)
q = getPrime(512)
if len(bin(abs(p - q))[2:]) < 450 or (p + q) & 1 != 0:
continue
phi = (p - 1) * (q - 1)
e = 257
print (gmpy2.gcd(phi, e))
try:
d = int(gmpy2.invert(e, phi))
except:
continue
assert (d * e) % phi == 1
ret = (d*e - 1) // phi
break
print ('d : ', d)
r = 256
mod = 2 ** r
d0 = d % mod
d0e = d0 * e
print (bin(d0e)[-10:])
if d0e & (1 << 2):
x = RSA.construct((p*q, e, d, p, q))
output = x.exportKey("PEM")
with open('pri.pem', 'w') as f:
f.write(output)
output = x.publickey().exportKey("PEM")
with open('pub.pem', 'w') as f:
f.write(output)
break
return ret
def solve():
x = RSA.importKey(open('pub.pem', 'rb').read())
r = 304
mod = 2 ** r
# LSb you can get
# you should put the leaked private key here
d0 = THE_LEAKED_PRIVATE_KEY % mod
e = x.e
N = x.n
d0e = d0 * e
cnt = 0
now = timer()
total_time = 0
for k in range(1, e, 2):
print ('k : ', k)
k_left, two_right = extract2(k)
k_left_1 = gmpy2.invert(k_left, mod)
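# for the true k in e*d = 1 + k*phi(N), reducing mod 2**r gives (p+q) = N + 1 + k^(-1)*(1 - e*d0) (mod 2**r); 'left' below is that candidate sum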
left = N + 1 + k_left_1 - k_left_1 * d0e
left %= mod
_, two_left = extract2(left)
assert two_left - two_right > 0
poss_s = []
random_length = two_left - two_right - 1
poss_set = it.product('01', repeat=random_length)
poss_set = map(''.join, poss_set)
os.system('rm -rf ./ans')
for s in poss_set:
s += bin(left)[2:].rjust(r, '0')
assert len(s) == r
# Hensel
os.system('python3.6 ./tools_on_git/Hensel.py {} {}'.format(int(s, 2), N))
# solving univariate polynomial, similar to sage's small_roots
os.system('sage ./tools_on_git/coppersmith.sage {}'.format(N))
cnt += 1
total_time += timer() - now
now = timer()
print ('\tcnt : ', cnt)
print ('\tavg : ', total_time * 1.0 / cnt)
if os.path.isfile('ans'):
print ('answer found !')
exit()
#ret = genkey()
#print ('mutiplier : ', ret)
solve()
| [
2,
3,
4,
5,
6
] |
1,831 | f1fbbbe4258d0fb0a43505f4718730934fd595ec | <mask token>
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
while True:
try:
message = client.recv(1024)
broadcast(message)
except:
index = clients.index(client)
clients.remove(client)
client.close()
name = client_names[index]
broadcast(f'{name} left the chat!'.encode('ascii'))
client_names.remove(name)
break
def recieve():
while True:
client, address = server.accept()
print(f'Connected with {str(address)}!')
client.send('YO'.encode('ascii'))
name = client.recv(1024).decode('ascii')
client_names.append(name)
clients.append(client)
print(f'Name of the client is {name}')
broadcast(f'{name} joined the chat!'.encode('ascii'))
client.send("Connected to the Noob Coder's Server!".encode('ascii'))
thread = threading.Thread(target=handle, args=(client,))
thread.start()
<mask token>
| <mask token>
server.bind(('', port))
server.listen()
<mask token>
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
while True:
try:
message = client.recv(1024)
broadcast(message)
except:
index = clients.index(client)
clients.remove(client)
client.close()
name = client_names[index]
broadcast(f'{name} left the chat!'.encode('ascii'))
client_names.remove(name)
break
def recieve():
while True:
client, address = server.accept()
print(f'Connected with {str(address)}!')
client.send('YO'.encode('ascii'))
name = client.recv(1024).decode('ascii')
client_names.append(name)
clients.append(client)
print(f'Name of the client is {name}')
broadcast(f'{name} joined the chat!'.encode('ascii'))
client.send("Connected to the Noob Coder's Server!".encode('ascii'))
thread = threading.Thread(target=handle, args=(client,))
thread.start()
print('SERVER STARTED...')
recieve()
| <mask token>
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 12321
server.bind(('', port))
server.listen()
client_names = []
clients = []
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
while True:
try:
message = client.recv(1024)
broadcast(message)
except:
index = clients.index(client)
clients.remove(client)
client.close()
name = client_names[index]
broadcast(f'{name} left the chat!'.encode('ascii'))
client_names.remove(name)
break
def recieve():
while True:
client, address = server.accept()
print(f'Connected with {str(address)}!')
client.send('YO'.encode('ascii'))
name = client.recv(1024).decode('ascii')
client_names.append(name)
clients.append(client)
print(f'Name of the client is {name}')
broadcast(f'{name} joined the chat!'.encode('ascii'))
client.send("Connected to the Noob Coder's Server!".encode('ascii'))
thread = threading.Thread(target=handle, args=(client,))
thread.start()
print('SERVER STARTED...')
recieve()
| import socket
import threading
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 12321
server.bind(('', port))
server.listen()
client_names = []
clients = []
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
while True:
try:
message = client.recv(1024)
broadcast(message)
except:
index = clients.index(client)
clients.remove(client)
client.close()
name = client_names[index]
broadcast(f'{name} left the chat!'.encode('ascii'))
client_names.remove(name)
break
def recieve():
while True:
client, address = server.accept()
print(f'Connected with {str(address)}!')
client.send('YO'.encode('ascii'))
name = client.recv(1024).decode('ascii')
client_names.append(name)
clients.append(client)
print(f'Name of the client is {name}')
broadcast(f'{name} joined the chat!'.encode('ascii'))
client.send("Connected to the Noob Coder's Server!".encode('ascii'))
thread = threading.Thread(target=handle, args=(client,))
thread.start()
print('SERVER STARTED...')
recieve()
| import socket
import threading
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 12321
server.bind(('', port))
server.listen()
client_names = []
clients = []
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
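# per-client loop: relay every received message to all clients; on any error, drop the client and broadcast that they left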
while True:
try:
message = client.recv(1024)
broadcast(message)
except:
index = clients.index(client)
clients.remove(client)
client.close()
name = client_names[index]
broadcast(f"{client_name} left the chat!".encode('ascii'))
client_names.remove(name)
break
def recieve():
while True:
client, address = server.accept()
print(f"Connected with {str(address)}!")
client.send('YO'.encode('ascii'))
name = client.recv(1024).decode('ascii')
client_names.append(name)
clients.append(client)
print(f"Name of the client is {name}")
broadcast(f"{name} joined the chat!".encode("ascii"))
client.send("Connected to the Noob Coder's Server!".encode("ascii"))
thread = threading.Thread(target=handle, args=(client,))
thread.start()
print("SERVER STARTED...")
recieve()
| [
3,
4,
5,
6,
7
] |
1,832 | 018b9533074d2766dc5010ff9c5e70888d249b45 | <mask token>
| <mask token>
[(x * x) for x in a]
| a = range(10)
[(x * x) for x in a]
| a = range(10)
[x*x for x in a]
| null | [
0,
1,
2,
3
] |
1,833 | 7f33effa86fc3a80fce0e5e1ecf97ab4ca80402d | <mask token>
def rating_msg(rating):
if rating > 80:
return 'You should watch this movie right now!\n'
elif rating < 50:
return 'Avoid this movie at all cost!\n'
else:
return ''
<mask token>
| <mask token>
def rating_msg(rating):
if rating > 80:
return 'You should watch this movie right now!\n'
elif rating < 50:
return 'Avoid this movie at all cost!\n'
else:
return ''
<mask token>
if r.status_code == requests.status_codes.codes.ok:
movie_data = json.loads(r.text)
if 'Error' in movie_data:
print(movie_data['Error'])
exit(1)
print(f"\nTitle: {movie_data['Title']}")
print(f"Year: {movie_data['Year']}")
print(f"Rating: {movie_data['Rated']}")
print(f"Running Time: {movie_data['Runtime']}")
print(f"\nDescription: {movie_data['Plot']}")
print('\n' + rating_msg(int(movie_data['Metascore'])), end='')
else:
print(r)
| <mask token>
BASE_URL = 'http://www.omdbapi.com/'
def rating_msg(rating):
if rating > 80:
return 'You should watch this movie right now!\n'
elif rating < 50:
return 'Avoid this movie at all cost!\n'
else:
return ''
api_key = sys.argv[1]
title = input('Enter the name of a movie: ')
data = {'apikey': api_key, 't': title}
r = requests.get(BASE_URL, data)
if r.status_code == requests.status_codes.codes.ok:
movie_data = json.loads(r.text)
if 'Error' in movie_data:
print(movie_data['Error'])
exit(1)
print(f"\nTitle: {movie_data['Title']}")
print(f"Year: {movie_data['Year']}")
print(f"Rating: {movie_data['Rated']}")
print(f"Running Time: {movie_data['Runtime']}")
print(f"\nDescription: {movie_data['Plot']}")
print('\n' + rating_msg(int(movie_data['Metascore'])), end='')
else:
print(r)
| import sys
import json
import requests
BASE_URL = 'http://www.omdbapi.com/'
def rating_msg(rating):
if rating > 80:
return 'You should watch this movie right now!\n'
elif rating < 50:
return 'Avoid this movie at all cost!\n'
else:
return ''
api_key = sys.argv[1]
title = input('Enter the name of a movie: ')
data = {'apikey': api_key, 't': title}
r = requests.get(BASE_URL, data)
if r.status_code == requests.status_codes.codes.ok:
movie_data = json.loads(r.text)
if 'Error' in movie_data:
print(movie_data['Error'])
exit(1)
print(f"\nTitle: {movie_data['Title']}")
print(f"Year: {movie_data['Year']}")
print(f"Rating: {movie_data['Rated']}")
print(f"Running Time: {movie_data['Runtime']}")
print(f"\nDescription: {movie_data['Plot']}")
print('\n' + rating_msg(int(movie_data['Metascore'])), end='')
else:
print(r)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
import requests
BASE_URL = 'http://www.omdbapi.com/'
def rating_msg(rating):
if rating > 80:
return 'You should watch this movie right now!\n'
elif rating < 50:
return 'Avoid this movie at all cost!\n'
else:
return ''
api_key = sys.argv[1]
title = input('Enter the name of a movie: ')
data = {'apikey': api_key, 't': title}
r = requests.get(BASE_URL, data)
if r.status_code == requests.status_codes.codes.ok:
movie_data = json.loads(r.text)
if 'Error' in movie_data:
print(movie_data['Error'])
exit(1)
print(f'\nTitle: {movie_data["Title"]}')
print(f'Year: {movie_data["Year"]}')
print(f'Rating: {movie_data["Rated"]}')
print(f'Running Time: {movie_data["Runtime"]}')
print(f'\nDescription: {movie_data["Plot"]}')
print('\n' + rating_msg(int(movie_data['Metascore'])), end="")
else:
print(r)
| [
1,
2,
3,
4,
5
] |
1,834 | 23bcef07326db084d4e0e6337beb00faba329193 | # Time Complexity : O(n)
# Space Complexity : O(n)
# Did this code successfully run on Leetcode : Yes
# Any problem you faced while coding this : No
# Your code here along with comments explaining your approach
class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
res=[]
left=[]
right=[]
product=1
for i in range(len(nums)): # calculate all the products in left of the index element
left.append(product)
product=product*nums[i]
product=1
for i in range(len(nums)-1,-1,-1): # calculate all the products in right of the index element starting from reverse
right.insert(0,product)
product=product*nums[i]
print(left,right)
for i in range(len(nums)): # calculate result by multiplying the left and right of every index
res.append(left[i]*right[i])
return res | null | null | null | null | [
0
] |
1,835 | 7d5f41cfa2d5423c6db2678f1eb8160638b50c02 | <mask token>
class CoordinatesDataParser:
<mask token>
<mask token>
| <mask token>
class CoordinatesDataParser:
def __init__(self):
return
<mask token>
| <mask token>
class CoordinatesDataParser:
def __init__(self):
return
def get_coords(self, response):
html = response.xpath('.//body').extract_first()
longitude = re.search('-\\d+\\.\\d{5,}', html)
longitude = longitude.group() if longitude else None
if longitude:
html_section = html.split(longitude)
for section in html_section:
latitude = re.search('\\d{2}\\.\\d{5,}', section)
latitude = latitude.group() if latitude else None
if latitude:
break
return {'latitude': latitude, 'longitude': longitude}
return None
| import re
class CoordinatesDataParser:
def __init__(self):
return
def get_coords(self, response):
html = response.xpath('.//body').extract_first()
longitude = re.search('-\\d+\\.\\d{5,}', html)
longitude = longitude.group() if longitude else None
if longitude:
html_section = html.split(longitude)
for section in html_section:
latitude = re.search('\\d{2}\\.\\d{5,}', section)
latitude = latitude.group() if latitude else None
if latitude:
break
return {'latitude': latitude, 'longitude': longitude}
return None
| import re
class CoordinatesDataParser:
def __init__(self):
return
def get_coords(self, response):
html = response.xpath('.//body').extract_first()
longitude = re.search(r'-\d+\.\d{5,}', html)
longitude = longitude.group() if longitude else None
if longitude:
# split response.html into a list of 2 parts
html_section = html.split(longitude)
# find latitude based on location of longitude
for section in html_section:
latitude = re.search(r'\d{2}\.\d{5,}', section)
latitude = latitude.group() if latitude else None
if latitude:
break
return {'latitude': latitude, 'longitude': longitude}
return None
| [
1,
2,
3,
4,
5
] |
1,836 | b720a52f1c2e6e6be7c0887cd94441d248382242 | <mask token>
| <mask token>
def main():
paths = ['..', '.']
absOfEntries = [i.abs for i in Easy.WalkAnIter(paths)]
for i in absOfEntries:
print(i)
<mask token>
| <mask token>
def main():
paths = ['..', '.']
absOfEntries = [i.abs for i in Easy.WalkAnIter(paths)]
for i in absOfEntries:
print(i)
if __name__ == '__main__':
main()
<mask token>
| from joecceasy import Easy
def main():
paths = ['..', '.']
absOfEntries = [i.abs for i in Easy.WalkAnIter(paths)]
for i in absOfEntries:
print(i)
if __name__ == '__main__':
main()
<mask token>
| from joecceasy import Easy
def main():
paths = ['..','.']
absOfEntries = [ i.abs for i in Easy.WalkAnIter(paths) ]
for i in absOfEntries:
print( i )
if __name__=='__main__':
main()
"""
def main(maxEntries = 99):
i = -1
print( "Walker test, Walking current directory:" )
for entry in Easy.WalkAnIter( ['.'] ):
i += 1 ## because i start at -1, 1st run of line will be 0
if i > maxEntries:
break
print(entry.abs)
print( ' \n ' )
"""
#isFileByPython = os.path.isfile(entry.abs)
# print( 'entry: ', entry.name, 'f', entry.isFile, 'd', entry.isDir,
# 'fa', entry.isFileAt, 'da', entry.isDirAt, 'pf', isFileByPython, se#p=' ')
#end='' )
#print( entry.abs, entry.isFileAt, entry.isDirAt, sep=' ' )
#print( entry.__dict__ ) | [
0,
1,
2,
3,
4
] |
1,837 | de884413dcbd0e89e8bfcf5657fe189156d9a661 | <mask token>
| <mask token>
setup(name='sk_processor', packages=find_packages())
| from setuptools import setup, find_packages
setup(name='sk_processor', packages=find_packages())
| from setuptools import setup, find_packages
setup(name="sk_processor", packages=find_packages()) | null | [
0,
1,
2,
3
] |
1,838 | b3bba1119bfaf0c1e684e8835259ec6fa8c42cf7 | <mask token>
| <mask token>
if __name__ == '__main__':
data = load_data('train.json')
words = text_to_words(get_all_text(data), as_set=False)
cnt = Counter(words)
save_il_to_word_cloud_file('cloudofw.txt', cnt, len(words), call_R=True)
| from text_to_word_cloud import *
from collections import Counter
from preprocess import *
if __name__ == '__main__':
data = load_data('train.json')
words = text_to_words(get_all_text(data), as_set=False)
cnt = Counter(words)
save_il_to_word_cloud_file('cloudofw.txt', cnt, len(words), call_R=True)
| from text_to_word_cloud import *
from collections import Counter
from preprocess import *
if __name__ == '__main__':
data = load_data('train.json')
words = text_to_words(get_all_text(data), as_set=False)
cnt = Counter(words)
save_il_to_word_cloud_file("cloudofw.txt",cnt,len(words),call_R=True)
| null | [
0,
1,
2,
3
] |
1,839 | c67cd3c16c15d6aab02a07736c83bbdd5bd98514 | <mask token>
def metas_to_json(req, q):
def flatten(arr):
if len(arr) == 1:
return arr[0]
else:
return arr
for page, metas in iter_metas(req, q):
flattened = [(key, flatten(val)) for key, val in metas.items()]
yield json.dumps(dict(flattened + [('gwikipagename', page)]))
class MetaStreamer(object):
def __init__(self, iterator):
self.iterator = iterator
self.done = False
def read(self, *args):
if not self.done:
try:
row = self.iterator.next()
return 'data: ' + row + '\n\n'
except StopIteration:
self.done = True
return 'event: done\ndata: \n\n'
else:
return None
def close(self):
self.done = True
<mask token>
| <mask token>
def metas_to_json(req, q):
def flatten(arr):
if len(arr) == 1:
return arr[0]
else:
return arr
for page, metas in iter_metas(req, q):
flattened = [(key, flatten(val)) for key, val in metas.items()]
yield json.dumps(dict(flattened + [('gwikipagename', page)]))
class MetaStreamer(object):
def __init__(self, iterator):
self.iterator = iterator
self.done = False
def read(self, *args):
if not self.done:
try:
row = self.iterator.next()
return 'data: ' + row + '\n\n'
except StopIteration:
self.done = True
return 'event: done\ndata: \n\n'
else:
return None
def close(self):
self.done = True
def execute(pagename, request):
form = values_to_form(request.values)
query = form.get('q', [None])[0]
output_format = form.get('type', [''])[0]
try:
json_rows = metas_to_json(request, query)
accepts = unicode(request.request.accept_mimetypes)
if output_format == 'stream' or 'text/event-stream' in accepts:
request.content_type = 'text/event-stream'
request.send_file(MetaStreamer(json_rows))
else:
request.content_type = 'application/json;boundary=NL'
for row in json_rows:
request.write(row + '\n')
except ImportError:
request.status_code = 501
request.write(u'abusehelper package not available')
except ValueError:
request.status_code = 400
request.write(u"invalid query '" + query + u"'")
| <mask token>
try:
import simplejson as json
except ImportError:
import json
def metas_to_json(req, q):
def flatten(arr):
if len(arr) == 1:
return arr[0]
else:
return arr
for page, metas in iter_metas(req, q):
flattened = [(key, flatten(val)) for key, val in metas.items()]
yield json.dumps(dict(flattened + [('gwikipagename', page)]))
class MetaStreamer(object):
def __init__(self, iterator):
self.iterator = iterator
self.done = False
def read(self, *args):
if not self.done:
try:
row = self.iterator.next()
return 'data: ' + row + '\n\n'
except StopIteration:
self.done = True
return 'event: done\ndata: \n\n'
else:
return None
def close(self):
self.done = True
def execute(pagename, request):
form = values_to_form(request.values)
query = form.get('q', [None])[0]
output_format = form.get('type', [''])[0]
try:
json_rows = metas_to_json(request, query)
accepts = unicode(request.request.accept_mimetypes)
if output_format == 'stream' or 'text/event-stream' in accepts:
request.content_type = 'text/event-stream'
request.send_file(MetaStreamer(json_rows))
else:
request.content_type = 'application/json;boundary=NL'
for row in json_rows:
request.write(row + '\n')
except ImportError:
request.status_code = 501
request.write(u'abusehelper package not available')
except ValueError:
request.status_code = 400
request.write(u"invalid query '" + query + u"'")
| <mask token>
from graphingwiki import values_to_form
from graphingwiki.editing import iter_metas
try:
import simplejson as json
except ImportError:
import json
def metas_to_json(req, q):
def flatten(arr):
if len(arr) == 1:
return arr[0]
else:
return arr
for page, metas in iter_metas(req, q):
flattened = [(key, flatten(val)) for key, val in metas.items()]
yield json.dumps(dict(flattened + [('gwikipagename', page)]))
class MetaStreamer(object):
def __init__(self, iterator):
self.iterator = iterator
self.done = False
def read(self, *args):
if not self.done:
try:
row = self.iterator.next()
return 'data: ' + row + '\n\n'
except StopIteration:
self.done = True
return 'event: done\ndata: \n\n'
else:
return None
def close(self):
self.done = True
def execute(pagename, request):
form = values_to_form(request.values)
query = form.get('q', [None])[0]
output_format = form.get('type', [''])[0]
try:
json_rows = metas_to_json(request, query)
accepts = unicode(request.request.accept_mimetypes)
if output_format == 'stream' or 'text/event-stream' in accepts:
request.content_type = 'text/event-stream'
request.send_file(MetaStreamer(json_rows))
else:
request.content_type = 'application/json;boundary=NL'
for row in json_rows:
request.write(row + '\n')
except ImportError:
request.status_code = 501
request.write(u'abusehelper package not available')
except ValueError:
request.status_code = 400
request.write(u"invalid query '" + query + u"'")
| # -*- coding: utf-8 -*-"
"""
getMetaStream action for graphingwiki
- alternative meta retrieval action that uses
abuse-sa query language for filtering metas
and returns Line Delimeted JSON or event-stream
@copyright: 2015 Lauri Pokka <[email protected]>
@license: MIT <http://www.opensource.org/licenses/mit-license.php>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from graphingwiki import values_to_form
from graphingwiki.editing import iter_metas
try:
import simplejson as json
except ImportError:
import json
def metas_to_json(req, q):
def flatten(arr):
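# meta values arrive as lists; collapse single-element lists to a bare value so the JSON stays compact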
if len(arr) == 1:
return arr[0]
else:
return arr
for page, metas in iter_metas(req, q):
flattened = [(key, flatten(val)) for key, val in metas.items()]
yield json.dumps(dict(flattened + [('gwikipagename', page)]))
class MetaStreamer(object):
def __init__(self, iterator):
self.iterator = iterator
self.done = False
def read(self, *args):
if not self.done:
try:
row = self.iterator.next()
return "data: " + row + "\n\n"
except StopIteration:
self.done = True
return "event: done\ndata: \n\n"
else:
return None
def close(self):
self.done = True
def execute(pagename, request):
form = values_to_form(request.values)
query = form.get('q', [None])[0]
output_format = form.get('type', [""])[0]
try:
json_rows = metas_to_json(request, query)
accepts = unicode(request.request.accept_mimetypes)
if output_format == "stream" or "text/event-stream" in accepts:
request.content_type = "text/event-stream"
## send_file seems to be the least hacky way
## for sending streamed content in MoinMoin
request.send_file(MetaStreamer(json_rows))
else:
request.content_type = "application/json;boundary=NL"
for row in json_rows:
request.write(row + "\n")
except ImportError:
request.status_code = 501
request.write(u"abusehelper package not available")
except ValueError:
request.status_code = 400
request.write(u"invalid query '" + query + u"'") | [
5,
6,
7,
8,
9
] |
1,840 | 7f72f6a2ff0c7ceacb0f893d04c20402e850421a | <mask token>
| <mask token>
print(len(ss) - ss.count(' '))
| ss = str(input())
print(len(ss) - ss.count(' '))
| null | null | [
0,
1,
2
] |
1,841 | 1972e3733918da654cd156a500432a35a239aed4 | <mask token>
| <mask token>
for ti in range(tn):
rn, cn = [int(x) for x in input().split()]
evenRow = '-'.join(['+'] * (cn + 1))
oddRow = '.'.join(['|'] * (cn + 1))
artrn = rn * 2 + 1
print(f'Case #{ti + 1}:')
for ri in range(artrn):
defaultRow = evenRow if ri % 2 == 0 else oddRow
if ri // 2 == 0:
print('..' + defaultRow[2:])
else:
print(defaultRow)
| tn = int(input())
for ti in range(tn):
rn, cn = [int(x) for x in input().split()]
evenRow = '-'.join(['+'] * (cn + 1))
oddRow = '.'.join(['|'] * (cn + 1))
artrn = rn * 2 + 1
print(f'Case #{ti + 1}:')
for ri in range(artrn):
defaultRow = evenRow if ri % 2 == 0 else oddRow
if ri // 2 == 0:
print('..' + defaultRow[2:])
else:
print(defaultRow)
| tn=int(input())
for ti in range(tn):
#ans = work()
rn,cn = [int(x) for x in input().split()]
evenRow='-'.join(['+']*(cn+1))
oddRow='.'.join(['|']*(cn+1))
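# rows alternate '+-+-...-+' and '|.|...|'; artrn rows total, and the first two printed rows get their top-left corner blanked with '..'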
artrn = rn*2+1
print(f'Case #{ti+1}:')
for ri in range(artrn):
defaultRow = evenRow if ri%2==0 else oddRow
if ri//2==0:
print('..'+defaultRow[2:])
else:
print(defaultRow)
| null | [
0,
1,
2,
3
] |
1,842 | 07a0ba3ded8a2d4a980cfb8e3dbd6fd491ea24b0 | <mask token>
class survey:
<mask token>
class index:
def GET(self):
i = web.input(enter=None)
date = datetime.datetime.now().ctime()
hour = datetime.datetime.now().hour
return render.index(i.enter, date, hour)
<mask token>
| <mask token>
class survey:
def GET(self):
return render.survey()
class index:
def GET(self):
i = web.input(enter=None)
date = datetime.datetime.now().ctime()
hour = datetime.datetime.now().hour
return render.index(i.enter, date, hour)
<mask token>
| <mask token>
class survey:
def GET(self):
return render.survey()
class index:
def GET(self):
i = web.input(enter=None)
date = datetime.datetime.now().ctime()
hour = datetime.datetime.now().hour
return render.index(i.enter, date, hour)
if __name__ == '__main__':
app = web.application(urls, globals())
app.run()
| import web
import datetime
urls = '/', 'index', '/survey', 'survey'
render = web.template.render('templates/')
class survey:
def GET(self):
return render.survey()
class index:
def GET(self):
i = web.input(enter=None)
date = datetime.datetime.now().ctime()
hour = datetime.datetime.now().hour
return render.index(i.enter, date, hour)
if __name__ == '__main__':
app = web.application(urls, globals())
app.run()
| import web
import datetime
# run with sudo to run on port 80 and use GPIO
urls = ('/', 'index', '/survey', 'survey')
render = web.template.render('templates/')
class survey:
def GET(self):
return render.survey()
class index:
def GET(self):
i = web.input(enter=None)
#if i.enter=='allow':
# do something
#elif i.enter=='deny':
# do something else
date = datetime.datetime.now().ctime()
hour = datetime.datetime.now().hour
return render.index(i.enter, date, hour)
if __name__ == "__main__":
app = web.application(urls, globals())
app.run()
| [
3,
4,
5,
7,
8
] |
1,843 | d07046e33bbfa404c354fef3e8990a3fa0203060 | #Task 4 - writing a code that prints all the commit message from repository
import requests
r = requests.get('https://api.github.com/repos/smeiklej/secu2002_2017/commits')
text = r.json()
#asking the code to print out the commit message for all rows in the text
for row in text:
print row['commit']['message']
| null | null | null | null | [
0
] |
1,844 | 72d41f939a586fbd8459927983d9d62a96b650e2 | <mask token>
class IntegratedRegressor:
<mask token>
<mask token>
def fit(self, X, y):
self.regs = []
for target in y.columns:
tmp = deepcopy(self.reg)
if self.predict_log:
tmp.fit(X, np.log1p(y[target]))
else:
tmp.fit(X, y[target])
self.regs.append(tmp)
<mask token>
class DayNightRegressor:
def __init__(self, reg):
self.night_reg = deepcopy(reg)
self.day_reg = deepcopy(reg)
def fit(self, X, y):
self.night_reg.fit(X[X['night'] == 1], y[X['night'] == 1])
self.day_reg.fit(X[X['night'] == 0], y[X['night'] == 0])
def predict(self, X):
pred = []
pred = np.append(pred, self.night_reg.predict(X[X['night'] == 1]))
pred = np.append(pred, self.day_reg.predict(X[X['night'] == 0]))
idx = X[X['night'] == 1].index.tolist() + X[X['night'] == 0].index.tolist()
return np.intp([x for _, x in sorted(zip(idx, pred))])
| <mask token>
class IntegratedRegressor:
<mask token>
def __init__(self, reg, predict_log=True):
self.reg = reg
self.predict_log = predict_log
def fit(self, X, y):
self.regs = []
for target in y.columns:
tmp = deepcopy(self.reg)
if self.predict_log:
tmp.fit(X, np.log1p(y[target]))
else:
tmp.fit(X, y[target])
self.regs.append(tmp)
<mask token>
class DayNightRegressor:
def __init__(self, reg):
self.night_reg = deepcopy(reg)
self.day_reg = deepcopy(reg)
def fit(self, X, y):
self.night_reg.fit(X[X['night'] == 1], y[X['night'] == 1])
self.day_reg.fit(X[X['night'] == 0], y[X['night'] == 0])
def predict(self, X):
pred = []
pred = np.append(pred, self.night_reg.predict(X[X['night'] == 1]))
pred = np.append(pred, self.day_reg.predict(X[X['night'] == 0]))
idx = X[X['night'] == 1].index.tolist() + X[X['night'] == 0].index.tolist()
return np.intp([x for _, x in sorted(zip(idx, pred))])
| <mask token>
class IntegratedRegressor:
<mask token>
def __init__(self, reg, predict_log=True):
self.reg = reg
self.predict_log = predict_log
def fit(self, X, y):
self.regs = []
for target in y.columns:
tmp = deepcopy(self.reg)
if self.predict_log:
tmp.fit(X, np.log1p(y[target]))
else:
tmp.fit(X, y[target])
self.regs.append(tmp)
def predict(self, X):
pred = np.zeros((X.shape[0],))
for reg in self.regs:
if self.predict_log:
pred += np.expm1(reg.predict(X))
else:
pred += reg.predict(X)
return np.intp(pred.round())
class DayNightRegressor:
def __init__(self, reg):
self.night_reg = deepcopy(reg)
self.day_reg = deepcopy(reg)
def fit(self, X, y):
self.night_reg.fit(X[X['night'] == 1], y[X['night'] == 1])
self.day_reg.fit(X[X['night'] == 0], y[X['night'] == 0])
def predict(self, X):
pred = []
pred = np.append(pred, self.night_reg.predict(X[X['night'] == 1]))
pred = np.append(pred, self.day_reg.predict(X[X['night'] == 0]))
idx = X[X['night'] == 1].index.tolist() + X[X['night'] == 0].index.tolist()
return np.intp([x for _, x in sorted(zip(idx, pred))])
| from __future__ import division, print_function
import numpy as np
from copy import deepcopy
class IntegratedRegressor:
regs = []
def __init__(self, reg, predict_log=True):
self.reg = reg
self.predict_log = predict_log
def fit(self, X, y):
self.regs = []
for target in y.columns:
tmp = deepcopy(self.reg)
if self.predict_log:
tmp.fit(X, np.log1p(y[target]))
else:
tmp.fit(X, y[target])
self.regs.append(tmp)
def predict(self, X):
pred = np.zeros((X.shape[0],))
for reg in self.regs:
if self.predict_log:
pred += np.expm1(reg.predict(X))
else:
pred += reg.predict(X)
return np.intp(pred.round())
class DayNightRegressor:
def __init__(self, reg):
self.night_reg = deepcopy(reg)
self.day_reg = deepcopy(reg)
def fit(self, X, y):
self.night_reg.fit(X[X['night'] == 1], y[X['night'] == 1])
self.day_reg.fit(X[X['night'] == 0], y[X['night'] == 0])
def predict(self, X):
pred = []
pred = np.append(pred, self.night_reg.predict(X[X['night'] == 1]))
pred = np.append(pred, self.day_reg.predict(X[X['night'] == 0]))
idx = X[X['night'] == 1].index.tolist() + X[X['night'] == 0].index.tolist()
return np.intp([x for _, x in sorted(zip(idx, pred))])
| from __future__ import division, print_function
import numpy as np
from copy import deepcopy
class IntegratedRegressor():
regs = []
def __init__(self, reg, predict_log=True):
self.reg = reg
self.predict_log = predict_log
def fit(self, X, y):
self.regs = []
for target in y.columns:
tmp = deepcopy(self.reg)
if self.predict_log:
tmp.fit(X, np.log1p(y[target]))
else:
tmp.fit(X, y[target])
self.regs.append(tmp)
def predict(self, X):
pred = np.zeros((X.shape[0],))
for reg in self.regs:
if self.predict_log:
pred += np.expm1(reg.predict(X))
else:
pred += reg.predict(X)
return np.intp(pred.round())
class DayNightRegressor():
def __init__(self, reg):
self.night_reg = deepcopy(reg)
self.day_reg = deepcopy(reg)
def fit(self, X, y):
self.night_reg.fit(X[X['night'] == 1], y[X['night'] == 1])
self.day_reg.fit(X[X['night'] == 0], y[X['night'] == 0])
def predict(self, X):
pred = []
pred = np.append(pred, self.night_reg.predict(X[X['night'] == 1]))
pred = np.append(pred, self.day_reg.predict(X[X['night'] == 0]))
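# predictions above are ordered night rows first, then day rows; pair each with its original index and sort to restore the input row order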
idx = X[X['night'] == 1].index.tolist() + X[X['night'] == 0].index.tolist()
return np.intp([x for (_, x) in sorted(zip(idx, pred))])
| [
6,
7,
8,
10,
11
] |
1,845 | 6946601050802aaaa559d25612d0d4f5116559eb | <mask token>
def article_list(request):
articles = Article.objects.all()
return render(request, 'board/list.html', {'articles': articles})
def article_detail(request, article_id):
article = get_object_or_404(Article, id=article_id)
comments = article.comment_set.all()
return render(request, 'board/detail.html', {'article': article,
'comments': comments})
<mask token>
def update_article(request, article_id):
if request.method == 'GET':
article = get_object_or_404(Article, id=article_id)
return render(request, 'board/edit.html', {'article': article})
else:
article = get_object_or_404(Article, id=article_id)
article.title = request.POST.get('title')
article.content = request.POST.get('content')
article.save()
return redirect('board:article_detail', article.id)
<mask token>
def create_comment(request, article_id):
if request.method == 'POST':
comment = Comment()
comment.article = get_object_or_404(Article, id=article_id)
comment.content = request.POST.get('comment')
comment.save()
return redirect('board:article_detail', article_id)
def delete_comment(request, article_id, comment_id):
if request.method == 'POST':
comment = get_object_or_404(Comment, id=comment_id)
comment.delete()
return redirect('board:article_detail', article_id)
| <mask token>
def article_list(request):
articles = Article.objects.all()
return render(request, 'board/list.html', {'articles': articles})
def article_detail(request, article_id):
article = get_object_or_404(Article, id=article_id)
comments = article.comment_set.all()
return render(request, 'board/detail.html', {'article': article,
'comments': comments})
def create_article(request):
if request.method == 'GET':
return render(request, 'board/new.html')
else:
article = Article()
article.title = request.POST.get('title')
article.content = request.POST.get('content')
article.save()
return redirect('board:article_detail', article.id)
def update_article(request, article_id):
if request.method == 'GET':
article = get_object_or_404(Article, id=article_id)
return render(request, 'board/edit.html', {'article': article})
else:
article = get_object_or_404(Article, id=article_id)
article.title = request.POST.get('title')
article.content = request.POST.get('content')
article.save()
return redirect('board:article_detail', article.id)
<mask token>
def create_comment(request, article_id):
if request.method == 'POST':
comment = Comment()
comment.article = get_object_or_404(Article, id=article_id)
comment.content = request.POST.get('comment')
comment.save()
return redirect('board:article_detail', article_id)
def delete_comment(request, article_id, comment_id):
if request.method == 'POST':
comment = get_object_or_404(Comment, id=comment_id)
comment.delete()
return redirect('board:article_detail', article_id)
| <mask token>
def article_list(request):
articles = Article.objects.all()
return render(request, 'board/list.html', {'articles': articles})
def article_detail(request, article_id):
article = get_object_or_404(Article, id=article_id)
comments = article.comment_set.all()
return render(request, 'board/detail.html', {'article': article,
'comments': comments})
def create_article(request):
if request.method == 'GET':
return render(request, 'board/new.html')
else:
article = Article()
article.title = request.POST.get('title')
article.content = request.POST.get('content')
article.save()
return redirect('board:article_detail', article.id)
def update_article(request, article_id):
if request.method == 'GET':
article = get_object_or_404(Article, id=article_id)
return render(request, 'board/edit.html', {'article': article})
else:
article = get_object_or_404(Article, id=article_id)
article.title = request.POST.get('title')
article.content = request.POST.get('content')
article.save()
return redirect('board:article_detail', article.id)
def delete_article(request, article_id):
if request.method == 'POST':
article = get_object_or_404(Article, id=article_id)
article.delete()
return redirect('board:article_list')
def create_comment(request, article_id):
if request.method == 'POST':
comment = Comment()
comment.article = get_object_or_404(Article, id=article_id)
comment.content = request.POST.get('comment')
comment.save()
return redirect('board:article_detail', article_id)
def delete_comment(request, article_id, comment_id):
if request.method == 'POST':
comment = get_object_or_404(Comment, id=comment_id)
comment.delete()
return redirect('board:article_detail', article_id)
| from django.shortcuts import render, redirect, get_object_or_404
from .models import Article, Comment
def article_list(request):
articles = Article.objects.all()
return render(request, 'board/list.html', {'articles': articles})
def article_detail(request, article_id):
article = get_object_or_404(Article, id=article_id)
comments = article.comment_set.all()
return render(request, 'board/detail.html', {'article': article,
'comments': comments})
def create_article(request):
if request.method == 'GET':
return render(request, 'board/new.html')
else:
article = Article()
article.title = request.POST.get('title')
article.content = request.POST.get('content')
article.save()
return redirect('board:article_detail', article.id)
def update_article(request, article_id):
if request.method == 'GET':
article = get_object_or_404(Article, id=article_id)
return render(request, 'board/edit.html', {'article': article})
else:
article = get_object_or_404(Article, id=article_id)
article.title = request.POST.get('title')
article.content = request.POST.get('content')
article.save()
return redirect('board:article_detail', article.id)
def delete_article(request, article_id):
if request.method == 'POST':
article = get_object_or_404(Article, id=article_id)
article.delete()
return redirect('board:article_list')
def create_comment(request, article_id):
if request.method == 'POST':
comment = Comment()
comment.article = get_object_or_404(Article, id=article_id)
comment.content = request.POST.get('comment')
comment.save()
return redirect('board:article_detail', article_id)
def delete_comment(request, article_id, comment_id):
if request.method == 'POST':
comment = get_object_or_404(Comment, id=comment_id)
comment.delete()
return redirect('board:article_detail', article_id)
| from django.shortcuts import render, redirect, get_object_or_404
from .models import Article, Comment
# from IPython import embed
# Create your views here.
def article_list(request):
articles = Article.objects.all()
return render(request, 'board/list.html', {
'articles': articles,
})
def article_detail(request, article_id):
article = get_object_or_404(Article, id=article_id)
comments = article.comment_set.all()
return render(request, 'board/detail.html', {
'article': article,
'comments': comments,
})
# def new_article(request):
# return render(request, 'board/new.html')
def create_article(request):
if request.method == 'GET':
return render(request, 'board/new.html')
else: # request.method == 'POST'
article = Article()
article.title = request.POST.get('title')
article.content = request.POST.get('content')
article.save()
return redirect('board:article_detail', article.id)
# def edit_article(request, article_id):
# pass
def update_article(request, article_id):
if request.method == 'GET':
article = get_object_or_404(Article, id=article_id)
return render(request, 'board/edit.html', {
'article': article,
})
else: # request.method == 'POST'
article = get_object_or_404(Article, id=article_id)
article.title = request.POST.get('title')
article.content = request.POST.get('content')
article.save()
return redirect('board:article_detail', article.id)
def delete_article(request, article_id):
if request.method == 'POST':
article = get_object_or_404(Article, id=article_id)
article.delete()
return redirect('board:article_list')
def create_comment(request, article_id):
if request.method == 'POST':
comment = Comment()
comment.article = get_object_or_404(Article, id=article_id)
comment.content = request.POST.get('comment')
comment.save()
return redirect('board:article_detail', article_id)
def delete_comment(request, article_id, comment_id):
if request.method == 'POST':
comment = get_object_or_404(Comment, id=comment_id)
comment.delete()
return redirect('board:article_detail', article_id)
| [
5,
6,
7,
8,
9
] |
1,846 | 81da2aab9ca11e63dafdd4eefc340d37b326fc6f | <mask token>
| <mask token>
def PlotFunctions(phi_orthonormalized_list, StartFunctionIndex, Interval):
PlotSettings()
t_array = numpy.logspace(-7, numpy.log10(Interval[1]), 1000)
NumFunctions = len(phi_orthonormalized_list)
f = numpy.zeros((NumFunctions, t_array.size), dtype=float)
for j in range(NumFunctions):
f_lambdify = sympy.lambdify(t, phi_orthonormalized_list[j], 'numpy')
f[j, :] = f_lambdify(t_array)
fig, ax = plt.subplots(figsize=(7, 4.8))
for j in range(NumFunctions):
ax.semilogx(t_array, f[j, :], label='$i = %d$' % (j +
StartFunctionIndex))
ax.legend(ncol=3, loc='lower left', borderpad=0.5, frameon=False)
ax.set_xlim([t_array[0], t_array[-1]])
ax.set_ylim([-1, 1])
ax.set_yticks([-1, 0, 1])
ax.set_xlabel('$t$')
ax.set_ylabel('$\\phi_i^{\\perp}(t)$')
ax.set_title('Orthogonal functions')
ax.grid(axis='y')
FileDirectory = os.path.dirname(__file__)
ParentDirectory = os.path.dirname(FileDirectory)
SaveDir = os.path.join(ParentDirectory, 'doc', 'images')
if not os.path.isdir(SaveDir) or not os.access(SaveDir, os.W_OK):
SaveDir = os.getcwd()
if os.access(SaveDir, os.W_OK):
SaveFullname_SVG = os.path.join(SaveDir, 'OrthogonalFunctions.svg')
SaveFullname_PDF = os.path.join(SaveDir, 'OrthogonalFunctions.pdf')
plt.savefig(SaveFullname_SVG, transparent=True, bbox_inches='tight')
plt.savefig(SaveFullname_PDF, transparent=True, bbox_inches='tight')
print('')
print('Plot saved to "%s".' % SaveFullname_SVG)
print('Plot saved to "%s".' % SaveFullname_PDF)
else:
print('Cannot save plot to %s. Directory is not writable.' % SaveDir)
if matplotlib.get_backend() != 'agg':
plt.show()
| <mask token>
def PlotSettings():
"""
General settings for the plot.
"""
import seaborn as sns
sns.set(font_scale=1.2)
if find_executable('latex'):
plt.rc('text', usetex=True)
matplotlib.font_manager._rebuild()
sns.set_style('white')
sns.set_style('ticks')
plt.rc('font', family='serif')
plt.rcParams['svg.fonttype'] = 'none'
def PlotFunctions(phi_orthonormalized_list, StartFunctionIndex, Interval):
PlotSettings()
t_array = numpy.logspace(-7, numpy.log10(Interval[1]), 1000)
NumFunctions = len(phi_orthonormalized_list)
f = numpy.zeros((NumFunctions, t_array.size), dtype=float)
for j in range(NumFunctions):
f_lambdify = sympy.lambdify(t, phi_orthonormalized_list[j], 'numpy')
f[j, :] = f_lambdify(t_array)
fig, ax = plt.subplots(figsize=(7, 4.8))
for j in range(NumFunctions):
ax.semilogx(t_array, f[j, :], label='$i = %d$' % (j +
StartFunctionIndex))
ax.legend(ncol=3, loc='lower left', borderpad=0.5, frameon=False)
ax.set_xlim([t_array[0], t_array[-1]])
ax.set_ylim([-1, 1])
ax.set_yticks([-1, 0, 1])
ax.set_xlabel('$t$')
ax.set_ylabel('$\\phi_i^{\\perp}(t)$')
ax.set_title('Orthogonal functions')
ax.grid(axis='y')
FileDirectory = os.path.dirname(__file__)
ParentDirectory = os.path.dirname(FileDirectory)
SaveDir = os.path.join(ParentDirectory, 'doc', 'images')
if not os.path.isdir(SaveDir) or not os.access(SaveDir, os.W_OK):
SaveDir = os.getcwd()
if os.access(SaveDir, os.W_OK):
SaveFullname_SVG = os.path.join(SaveDir, 'OrthogonalFunctions.svg')
SaveFullname_PDF = os.path.join(SaveDir, 'OrthogonalFunctions.pdf')
plt.savefig(SaveFullname_SVG, transparent=True, bbox_inches='tight')
plt.savefig(SaveFullname_PDF, transparent=True, bbox_inches='tight')
print('')
print('Plot saved to "%s".' % SaveFullname_SVG)
print('Plot saved to "%s".' % SaveFullname_PDF)
else:
print('Cannot save plot to %s. Directory is not writable.' % SaveDir)
if matplotlib.get_backend() != 'agg':
plt.show()
| import os
import sympy
import numpy
from distutils.spawn import find_executable
import matplotlib
import matplotlib.pyplot as plt
from .Declarations import n, t
def PlotSettings():
"""
General settings for the plot.
"""
import seaborn as sns
sns.set(font_scale=1.2)
if find_executable('latex'):
plt.rc('text', usetex=True)
matplotlib.font_manager._rebuild()
sns.set_style('white')
sns.set_style('ticks')
plt.rc('font', family='serif')
plt.rcParams['svg.fonttype'] = 'none'
def PlotFunctions(phi_orthonormalized_list, StartFunctionIndex, Interval):
PlotSettings()
t_array = numpy.logspace(-7, numpy.log10(Interval[1]), 1000)
NumFunctions = len(phi_orthonormalized_list)
f = numpy.zeros((NumFunctions, t_array.size), dtype=float)
for j in range(NumFunctions):
f_lambdify = sympy.lambdify(t, phi_orthonormalized_list[j], 'numpy')
f[j, :] = f_lambdify(t_array)
fig, ax = plt.subplots(figsize=(7, 4.8))
for j in range(NumFunctions):
ax.semilogx(t_array, f[j, :], label='$i = %d$' % (j +
StartFunctionIndex))
ax.legend(ncol=3, loc='lower left', borderpad=0.5, frameon=False)
ax.set_xlim([t_array[0], t_array[-1]])
ax.set_ylim([-1, 1])
ax.set_yticks([-1, 0, 1])
ax.set_xlabel('$t$')
ax.set_ylabel('$\\phi_i^{\\perp}(t)$')
ax.set_title('Orthogonal functions')
ax.grid(axis='y')
FileDirectory = os.path.dirname(__file__)
ParentDirectory = os.path.dirname(FileDirectory)
SaveDir = os.path.join(ParentDirectory, 'doc', 'images')
if not os.path.isdir(SaveDir) or not os.access(SaveDir, os.W_OK):
SaveDir = os.getcwd()
if os.access(SaveDir, os.W_OK):
SaveFullname_SVG = os.path.join(SaveDir, 'OrthogonalFunctions.svg')
SaveFullname_PDF = os.path.join(SaveDir, 'OrthogonalFunctions.pdf')
plt.savefig(SaveFullname_SVG, transparent=True, bbox_inches='tight')
plt.savefig(SaveFullname_PDF, transparent=True, bbox_inches='tight')
print('')
print('Plot saved to "%s".' % SaveFullname_SVG)
print('Plot saved to "%s".' % SaveFullname_PDF)
else:
print('Cannot save plot to %s. Directory is not writable.' % SaveDir)
if matplotlib.get_backend() != 'agg':
plt.show()
| # =======
# Imports
# =======
import os
import sympy
import numpy
from distutils.spawn import find_executable
import matplotlib
import matplotlib.pyplot as plt
from .Declarations import n,t
# =============
# Plot Settings
# =============
def PlotSettings():
"""
General settings for the plot.
"""
# Color palette
import seaborn as sns
# sns.set()
# Axes font size
sns.set(font_scale=1.2)
# LaTeX
if find_executable('latex'):
plt.rc('text',usetex=True)
matplotlib.font_manager._rebuild()
# Style sheet
sns.set_style("white")
sns.set_style("ticks")
# Font (Note: this should be AFTER the plt.style.use)
plt.rc('font', family='serif')
plt.rcParams['svg.fonttype'] = 'none' # text in svg file will be text not path.
# ==============
# Plot Functions
# ==============
def PlotFunctions(phi_orthonormalized_list,StartFunctionIndex,Interval):
# Run plot settings
PlotSettings()
# Axis
t_array = numpy.logspace(-7,numpy.log10(Interval[1]),1000)
# Evaluate functions
NumFunctions = len(phi_orthonormalized_list)
f = numpy.zeros((NumFunctions,t_array.size),dtype=float)
for j in range(NumFunctions):
f_lambdify = sympy.lambdify(t,phi_orthonormalized_list[j],'numpy')
f[j,:] = f_lambdify(t_array)
# Plot
fig,ax = plt.subplots(figsize=(7,4.8))
for j in range(NumFunctions):
ax.semilogx(t_array,f[j,:],label=r'$i = %d$'%(j+StartFunctionIndex))
ax.legend(ncol=3,loc='lower left',borderpad=0.5,frameon=False)
ax.set_xlim([t_array[0],t_array[-1]])
ax.set_ylim([-1,1])
ax.set_yticks([-1,0,1])
ax.set_xlabel(r'$t$')
ax.set_ylabel(r'$\phi_i^{\perp}(t)$')
ax.set_title('Orthogonal functions')
ax.grid(axis='y')
# Get the root directory of the package (parent directory of this script)
FileDirectory = os.path.dirname(__file__)
ParentDirectory = os.path.dirname(FileDirectory)
# Try to save in the doc/images dirctory. Check if exists and writable
SaveDir = os.path.join(ParentDirectory,'doc','images')
if (not os.path.isdir(SaveDir)) or (not os.access(SaveDir,os.W_OK)):
# Write in the current working directory
SaveDir = os.getcwd()
# Save plot in both svg and pdf format
if os.access(SaveDir,os.W_OK):
SaveFullname_SVG = os.path.join(SaveDir,'OrthogonalFunctions.svg')
SaveFullname_PDF = os.path.join(SaveDir,'OrthogonalFunctions.pdf')
plt.savefig(SaveFullname_SVG,transparent=True,bbox_inches='tight')
plt.savefig(SaveFullname_PDF,transparent=True,bbox_inches='tight')
print('')
print('Plot saved to "%s".'%(SaveFullname_SVG))
print('Plot saved to "%s".'%(SaveFullname_PDF))
else:
print('Cannot save plot to %s. Directory is not writable.'%SaveDir)
# If no display backend is enabled, do not plot in the interactive mode
if matplotlib.get_backend() != 'agg':
plt.show()
| [
0,
1,
2,
3,
4
] |
1,847 | 3bfe4021d5cf9bd24c0fb778b252bc04c6ac47ed | <mask token>
class Bitcoin:
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class Bitcoin:
<mask token>
def __init__(self):
pass
<mask token>
<mask token>
| <mask token>
class Bitcoin:
<mask token>
def __init__(self):
pass
def get_current_price(self, url=coindesk):
self.resp = requests.get(url)
if self.resp.status_code == 200:
return json.loads(self.resp.content.decode('utf-8'))
else:
return None
def float_price(self, json_response):
if json_response is not None:
rate = json_response['bpi']['EUR']['rate_float']
try:
return float(rate)
except:
return None
else:
return None
| <mask token>
class Bitcoin:
coindesk = 'https://api.coindesk.com/v1/bpi/currentprice.json'
def __init__(self):
pass
def get_current_price(self, url=coindesk):
self.resp = requests.get(url)
if self.resp.status_code == 200:
return json.loads(self.resp.content.decode('utf-8'))
else:
return None
def float_price(self, json_response):
if json_response is not None:
rate = json_response['bpi']['EUR']['rate_float']
try:
return float(rate)
except:
return None
else:
return None
| import json
import requests
class Bitcoin:
coindesk = 'https://api.coindesk.com/v1/bpi/currentprice.json'
def __init__(self):
pass
def get_current_price(self, url=coindesk):
self.resp = requests.get(url)
if self.resp.status_code == 200:
return json.loads(self.resp.content.decode('utf-8'))
else:
return None
def float_price(self, json_response):
if json_response is not None:
rate = json_response['bpi']['EUR']['rate_float']
try:
return float(rate)
except:
return None
else:
return None
| [
1,
2,
4,
5,
6
] |
1,848 | 1dd62264aafe8ee745a3cfdfb994ac6a40c1af42 | <mask token>
class LogisticRegression:
<mask token>
def __init__(self, max_iter=2000, learning_rate=0.01):
self.max_iter = max_iter
self.learning_rate = learning_rate
print('LogisticRegression Model(learning_rate={}, max_iteration={})'
.format(self.learning_rate, self.max_iter))
<mask token>
def initialize_with_zeros(self, dim):
w = np.zeros((dim, 1))
b = 0
assert w.shape == (dim, 1)
assert isinstance(b, float) or isinstance(b, int)
return w, b
def propagate(self, w, b, X, Y):
m = X.shape[0]
A = self.sigmoid(np.dot(X, w) + b)
cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
dw = 1 / m * np.dot(X.T, A - Y)
db = 1 / m * np.sum(A - Y)
assert dw.shape == w.shape
assert db.dtype == float
cost = np.squeeze(cost)
assert cost.shape == ()
grads = {'dw': dw, 'db': db}
return grads, cost
def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):
costs = []
for i in range(1, max_iter + 1):
grads, cost = self.propagate(w, b, X, Y)
w -= learning_rate * grads['dw']
b -= learning_rate * grads['db']
if i % 100 == 0:
costs.append(cost)
if print_cost:
print('Cost after iteration %i: %f' % (i, cost))
return w, b, costs
<mask token>
def predict_proba(self, X):
return self.sigmoid(np.dot(X, self.w) + self.b)
def predict(self, X):
proba = self.predict_proba(X)
pre = np.zeros_like(proba, dtype=np.int)
pre[proba > 0.5] = 1
pre = np.squeeze(pre)
return pre
<mask token>
<mask token>
<mask token>
| <mask token>
class LogisticRegression:
<mask token>
def __init__(self, max_iter=2000, learning_rate=0.01):
self.max_iter = max_iter
self.learning_rate = learning_rate
print('LogisticRegression Model(learning_rate={}, max_iteration={})'
.format(self.learning_rate, self.max_iter))
def sigmoid(self, z):
return 1 / (1 + np.exp(-z))
def initialize_with_zeros(self, dim):
w = np.zeros((dim, 1))
b = 0
assert w.shape == (dim, 1)
assert isinstance(b, float) or isinstance(b, int)
return w, b
def propagate(self, w, b, X, Y):
m = X.shape[0]
A = self.sigmoid(np.dot(X, w) + b)
cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
dw = 1 / m * np.dot(X.T, A - Y)
db = 1 / m * np.sum(A - Y)
assert dw.shape == w.shape
assert db.dtype == float
cost = np.squeeze(cost)
assert cost.shape == ()
grads = {'dw': dw, 'db': db}
return grads, cost
def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):
costs = []
for i in range(1, max_iter + 1):
grads, cost = self.propagate(w, b, X, Y)
w -= learning_rate * grads['dw']
b -= learning_rate * grads['db']
if i % 100 == 0:
costs.append(cost)
if print_cost:
print('Cost after iteration %i: %f' % (i, cost))
return w, b, costs
def fit(self, X, Y, print_cost=False):
print('Fit starting:')
w, b = self.initialize_with_zeros(X.shape[1])
iter_time = 0
self.w, self.b, self.costs = self.optimize(w, b, X, Y, self.
max_iter, self.learning_rate, print_cost)
print('Fit complished!')
def predict_proba(self, X):
return self.sigmoid(np.dot(X, self.w) + self.b)
def predict(self, X):
proba = self.predict_proba(X)
pre = np.zeros_like(proba, dtype=np.int)
pre[proba > 0.5] = 1
pre = np.squeeze(pre)
return pre
def score(self, X_test, Y_test):
Y_pre = self.predict(X_test)
score = np.sum(Y_pre == Y_test) / len(Y_pre)
return score
def __str__(self):
return ('LogisticRegression Model(learning_rate={}, max_iteration={})'
.format(self.learning_rate, self.max_iter))
<mask token>
| <mask token>
matplotlib.use('TkAgg')
<mask token>
class LogisticRegression:
"""LogisticRegression for binary classification
max_iter: the maximum iteration times for training
learning_rate: learing rate for gradiend decsend training
Input's shape should be [sample_nums, data_dims]
attrs:
max_iter
learning_rate
(after fit)
w
b
costs
methods:
fit
predict
predict_proba
score
"""
def __init__(self, max_iter=2000, learning_rate=0.01):
self.max_iter = max_iter
self.learning_rate = learning_rate
print('LogisticRegression Model(learning_rate={}, max_iteration={})'
.format(self.learning_rate, self.max_iter))
def sigmoid(self, z):
return 1 / (1 + np.exp(-z))
def initialize_with_zeros(self, dim):
w = np.zeros((dim, 1))
b = 0
assert w.shape == (dim, 1)
assert isinstance(b, float) or isinstance(b, int)
return w, b
def propagate(self, w, b, X, Y):
m = X.shape[0]
A = self.sigmoid(np.dot(X, w) + b)
cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
dw = 1 / m * np.dot(X.T, A - Y)
db = 1 / m * np.sum(A - Y)
assert dw.shape == w.shape
assert db.dtype == float
cost = np.squeeze(cost)
assert cost.shape == ()
grads = {'dw': dw, 'db': db}
return grads, cost
def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):
costs = []
for i in range(1, max_iter + 1):
grads, cost = self.propagate(w, b, X, Y)
w -= learning_rate * grads['dw']
b -= learning_rate * grads['db']
if i % 100 == 0:
costs.append(cost)
if print_cost:
print('Cost after iteration %i: %f' % (i, cost))
return w, b, costs
def fit(self, X, Y, print_cost=False):
print('Fit starting:')
w, b = self.initialize_with_zeros(X.shape[1])
iter_time = 0
self.w, self.b, self.costs = self.optimize(w, b, X, Y, self.
max_iter, self.learning_rate, print_cost)
print('Fit complished!')
def predict_proba(self, X):
return self.sigmoid(np.dot(X, self.w) + self.b)
def predict(self, X):
proba = self.predict_proba(X)
pre = np.zeros_like(proba, dtype=np.int)
pre[proba > 0.5] = 1
pre = np.squeeze(pre)
return pre
def score(self, X_test, Y_test):
Y_pre = self.predict(X_test)
score = np.sum(Y_pre == Y_test) / len(Y_pre)
return score
def __str__(self):
return ('LogisticRegression Model(learning_rate={}, max_iteration={})'
.format(self.learning_rate, self.max_iter))
if __name__ == '__main__':
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
x = data.data[:100, [0, 1]]
y = np.array([(1 if i > 0 else 0) for i in data.target[:100]])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
model = LogisticRegression()
model.fit(x_train, y_train.reshape(len(y_train), -1), True)
print('Train Score:{}'.format(model.score(x_train, y_train)))
print('Test Score:{}'.format(model.score(x_test, y_test)))
plt.subplot(211)
x_samples = np.linspace(4, 7, 500)
y_samples = (-model.b - model.w[0] * x_samples) / model.w[1]
plt.plot(x_samples, y_samples, 'r')
plt.scatter(x[:50, 0], x[:50, 1], label='negative')
plt.scatter(x[50:100, 0], x[50:100, 1], label='positive')
plt.xlabel('petal length')
plt.ylabel('petal width')
plt.title('LosRes Results on iris datasets')
plt.legend()
plt.subplots_adjust(hspace=0.5, wspace=0.25)
plt.subplot(212)
plt.plot(range(len(model.costs)), model.costs, '-o')
plt.xlabel('steps')
plt.ylabel('loss')
plt.title('loss function')
plt.show()
| import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
class LogisticRegression:
"""LogisticRegression for binary classification
max_iter: the maximum iteration times for training
learning_rate: learing rate for gradiend decsend training
Input's shape should be [sample_nums, data_dims]
attrs:
max_iter
learning_rate
(after fit)
w
b
costs
methods:
fit
predict
predict_proba
score
"""
def __init__(self, max_iter=2000, learning_rate=0.01):
self.max_iter = max_iter
self.learning_rate = learning_rate
print('LogisticRegression Model(learning_rate={}, max_iteration={})'
.format(self.learning_rate, self.max_iter))
def sigmoid(self, z):
return 1 / (1 + np.exp(-z))
def initialize_with_zeros(self, dim):
w = np.zeros((dim, 1))
b = 0
assert w.shape == (dim, 1)
assert isinstance(b, float) or isinstance(b, int)
return w, b
def propagate(self, w, b, X, Y):
m = X.shape[0]
A = self.sigmoid(np.dot(X, w) + b)
cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
dw = 1 / m * np.dot(X.T, A - Y)
db = 1 / m * np.sum(A - Y)
assert dw.shape == w.shape
assert db.dtype == float
cost = np.squeeze(cost)
assert cost.shape == ()
grads = {'dw': dw, 'db': db}
return grads, cost
def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):
costs = []
for i in range(1, max_iter + 1):
grads, cost = self.propagate(w, b, X, Y)
w -= learning_rate * grads['dw']
b -= learning_rate * grads['db']
if i % 100 == 0:
costs.append(cost)
if print_cost:
print('Cost after iteration %i: %f' % (i, cost))
return w, b, costs
def fit(self, X, Y, print_cost=False):
print('Fit starting:')
w, b = self.initialize_with_zeros(X.shape[1])
iter_time = 0
self.w, self.b, self.costs = self.optimize(w, b, X, Y, self.
max_iter, self.learning_rate, print_cost)
print('Fit complished!')
def predict_proba(self, X):
return self.sigmoid(np.dot(X, self.w) + self.b)
def predict(self, X):
proba = self.predict_proba(X)
pre = np.zeros_like(proba, dtype=np.int)
pre[proba > 0.5] = 1
pre = np.squeeze(pre)
return pre
def score(self, X_test, Y_test):
Y_pre = self.predict(X_test)
score = np.sum(Y_pre == Y_test) / len(Y_pre)
return score
def __str__(self):
return ('LogisticRegression Model(learning_rate={}, max_iteration={})'
.format(self.learning_rate, self.max_iter))
if __name__ == '__main__':
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
x = data.data[:100, [0, 1]]
y = np.array([(1 if i > 0 else 0) for i in data.target[:100]])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
model = LogisticRegression()
model.fit(x_train, y_train.reshape(len(y_train), -1), True)
print('Train Score:{}'.format(model.score(x_train, y_train)))
print('Test Score:{}'.format(model.score(x_test, y_test)))
plt.subplot(211)
x_samples = np.linspace(4, 7, 500)
y_samples = (-model.b - model.w[0] * x_samples) / model.w[1]
plt.plot(x_samples, y_samples, 'r')
plt.scatter(x[:50, 0], x[:50, 1], label='negative')
plt.scatter(x[50:100, 0], x[50:100, 1], label='positive')
plt.xlabel('petal length')
plt.ylabel('petal width')
plt.title('LosRes Results on iris datasets')
plt.legend()
plt.subplots_adjust(hspace=0.5, wspace=0.25)
plt.subplot(212)
plt.plot(range(len(model.costs)), model.costs, '-o')
plt.xlabel('steps')
plt.ylabel('loss')
plt.title('loss function')
plt.show()
| import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
class LogisticRegression:
'''LogisticRegression for binary classification
max_iter: the maximum iteration times for training
learning_rate: learing rate for gradiend decsend training
Input's shape should be [sample_nums, data_dims]
attrs:
max_iter
learning_rate
(after fit)
w
b
costs
methods:
fit
predict
predict_proba
score
'''
def __init__(self, max_iter=2000, learning_rate=0.01):
self.max_iter = max_iter
self.learning_rate = learning_rate
print('LogisticRegression Model(learning_rate={}, max_iteration={})'.format(
self.learning_rate, self.max_iter))
def sigmoid(self, z):
return 1 / (1 + np.exp(-z))
def initialize_with_zeros(self, dim):
w = np.zeros((dim, 1))
b = 0
assert (w.shape == (dim, 1))
assert (isinstance(b, float) or isinstance(b, int))
return w, b
def propagate(self, w, b, X, Y):
m = X.shape[0]
A = self.sigmoid(np.dot(X, w) + b)
cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
dw = 1 / m * np.dot(X.T, A - Y)
db = 1 / m * np.sum(A - Y)
assert (dw.shape == w.shape)
assert (db.dtype == float)
cost = np.squeeze(cost)
assert (cost.shape == ())
grads = {'dw': dw,
'db': db}
return grads, cost
def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):
costs = []
for i in range(1, max_iter+1):
grads, cost = self.propagate(w, b, X, Y)
w -= learning_rate * grads['dw']
b -= learning_rate * grads['db']
if i % 100 == 0:
costs.append(cost)
if print_cost:
print('Cost after iteration %i: %f'%(i, cost))
return w, b, costs
def fit(self, X, Y, print_cost=False):
print('Fit starting:')
w, b = self.initialize_with_zeros(X.shape[1])
iter_time = 0
self.w, self.b, self.costs = self.optimize(w, b, X, Y, self.max_iter, self.learning_rate, print_cost)
print('Fit complished!')
def predict_proba(self, X):
return self.sigmoid(np.dot(X, self.w) + self.b)
def predict(self, X):
proba = self.predict_proba(X)
pre = np.zeros_like(proba, dtype=np.int)
pre[proba > 0.5] = 1
pre = np.squeeze(pre)
return pre
def score(self, X_test, Y_test):
Y_pre = self.predict(X_test)
score = np.sum(Y_pre == Y_test) / len(Y_pre)
return score
def __str__(self):
return 'LogisticRegression Model(learning_rate={}, max_iteration={})'.format(
self.learning_rate, self.max_iter)
if __name__ == '__main__':
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
x = data.data[:100, [0,1]]
# x = np.hstack([np.ones((100, 1)), x])
y = np.array([1 if i > 0 else 0 for i in data.target[:100]])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
model = LogisticRegression()
model.fit(x_train, y_train.reshape(len(y_train), -1), True)
print('Train Score:{}'.format(model.score(x_train, y_train)))
print('Test Score:{}'.format(model.score(x_test, y_test)))
plt.subplot(211)
x_samples = np.linspace(4, 7, 500)
y_samples = (- model.b - model.w[0]*x_samples) / model.w[1]
plt.plot(x_samples, y_samples, 'r')
plt.scatter(x[:50, 0], x[:50, 1], label='negative')
plt.scatter(x[50:100, 0], x[50:100, 1], label='positive')
plt.xlabel('petal length')
plt.ylabel('petal width')
plt.title('LosRes Results on iris datasets')
plt.legend()
plt.subplots_adjust(hspace=0.5,wspace=0.25)
plt.subplot(212)
plt.plot(range(len(model.costs)), model.costs, '-o')
plt.xlabel('steps')
plt.ylabel('loss')
plt.title('loss function')
plt.show() | [
7,
11,
13,
14,
15
] |
1,849 | aa817b86e26cf8cd9771aeb276914a1f5869c737 | #!/usr/bin/python
import csv
import sys
import os.path
# Version 2 of the RawZones generator
#global
DIR_PYGEN = "/bid/_temporario_/pythonGeneration/"
DIR_INTGR_PAR = "/bid/integration_layer/par/"
DIR_INTGR_JOB = "/bid/integration_layer/job/"
def Sqoop(filename,source_database,source_table, split_field, sourcesystem, tablename):
nomearquivo_INI = DIR_INTGR_PAR + filename + ".ini"
nomearquivo_SH = DIR_INTGR_JOB + filename + ".sh"
with open(DIR_PYGEN + "Par_Examples/HD008.ini") as f, open(nomearquivo_INI, "w") as f1:
for line in f:
if "# HD008.ini" in line:
Modifiedline = "# " + filename + ".ini\n"
f1.write(Modifiedline)
elif "export SOURCE_DATABASE=" in line:
Modifiedline2 = "export SOURCE_DATABASE=\"" + source_database + "\"\n"
f1.write(Modifiedline2)
elif "export SOURCE_TABLE=" in line:
Modifiedline = "export SOURCE_TABLE=\"" + source_table + "\"\n"
f1.write(Modifiedline)
elif "export SPLIT_FIELD=" in line:
Modifiedline = "export SPLIT_FIELD=\"" + split_field + "\"\n"
f1.write(Modifiedline)
elif "export SOURCESYSTEM=" in line:
Modifiedline = "export SOURCESYSTEM=\"" + sourcesystem + "\"\n"
f1.write(Modifiedline)
elif "export TABLENAME=" in line:
Modifiedline = "export TABLENAME=\"" + tablename + "\"\n"
f1.write(Modifiedline)
else:
f1.write(line)
print "01 : Created the ini file " + nomearquivo_INI + " sucessfully."
GenerateSH("01",nomearquivo_SH,DIR_PYGEN + "Job_Examples/HD008.sh")
def TransferirArquivos(filename,database,tablename):
nomearquivo_INI = DIR_INTGR_PAR + filename + ".ini"
nomearquivo_SH = DIR_INTGR_JOB + filename + ".sh"
with open(DIR_PYGEN + "Par_Examples/HD001.ini") as f, open(nomearquivo_INI, "w") as f1:
for line in f:
if "# SCRIPTNAMEGOESHERE" in line:
Modifiedline = "# " + filename + ".ini\n"
f1.write(Modifiedline)
elif "export DATABASE=" in line:
Modifiedline = "export DATABASE=\"" + database + "\"\n"
f1.write(Modifiedline)
elif "export TABLENAME=" in line:
Modifiedline2 = "export TABLENAME=\"" + tablename + "\"\n"
f1.write(Modifiedline2)
else:
f1.write(line)
print "01 : Created the " + nomearquivo_INI + " sucessfully."
GenerateSH("01",nomearquivo_SH,DIR_PYGEN + "Job_Examples/HD001.sh")
def CargaRawZone(filename,database,tablename):
nomearquivo_INI = DIR_INTGR_PAR + filename + ".ini"
nomearquivo_SH = DIR_INTGR_JOB + filename + ".sh"
with open(DIR_PYGEN + "Par_Examples/HD002.ini") as f, open(nomearquivo_INI, "w") as f1:
for line in f:
if "# SCRIPTNAMEGOESHERE" in line:
Modifiedline = "# " + filename + ".ini\n"
f1.write(Modifiedline)
elif "export DATABASE=" in line:
Modifiedline = "export DATABASE=\"" + database + "\"\n"
f1.write(Modifiedline)
elif "export TABLENAME=" in line:
Modifiedline2 = "export TABLENAME=\"" + tablename + "\"\n"
f1.write(Modifiedline2)
else:
f1.write(line)
print "02 : Created the " + nomearquivo_INI + " sucessfully."
GenerateSH("02",nomearquivo_SH,DIR_PYGEN + "Job_Examples/HD002.sh")
def Profile(filename,database,tablename,columnsnames):
nomearquivo_INI = DIR_INTGR_PAR + filename + ".ini"
nomearquivo_SH = DIR_INTGR_JOB + filename + ".sh"
with open(DIR_PYGEN + "Par_Examples/HD003.ini") as f, open(nomearquivo_INI, "w") as f1:
for line in f:
if "# SCRIPTNAMEGOESHERE" in line:
Modifiedline = "# " + filename + ".ini\n"
f1.write(Modifiedline)
elif "export DATABASE=" in line:
Modifiedline = "export DATABASE=\"" + database + "\"\n"
f1.write(Modifiedline)
elif "export TABLENAME=" in line:
Modifiedline2 = "export TABLENAME=\"" + tablename + "\"\n"
f1.write(Modifiedline2)
elif "export COLUNA=" in line:
Modifiedline2 = "export COLUNA=\"" + columnsnames + "\"\n"
f1.write(Modifiedline2)
else:
f1.write(line)
print "03 : Created the " + nomearquivo_INI + " sucessfully."
GenerateSH("03",nomearquivo_SH,DIR_PYGEN + "Job_Examples/HD003.sh")
def Stats(filename,database,tablename,opcao):
nomearquivo_INI = DIR_INTGR_PAR + filename + ".ini"
nomearquivo_SH = DIR_INTGR_JOB + filename + ".sh"
with open(DIR_PYGEN + "Par_Examples/HD004.ini") as f, open(nomearquivo_INI, "w") as f1:
for line in f:
if "# SCRIPTNAMEGOESHERE" in line:
Modifiedline = "# " + filename + ".ini\n"
f1.write(Modifiedline)
elif "export DATABASE=" in line:
Modifiedline = "export DATABASE=\"" + database + "\"\n"
f1.write(Modifiedline)
elif "export TABLENAME=" in line:
Modifiedline2 = "export TABLENAME=\"" + tablename + "\"\n"
f1.write(Modifiedline2)
elif "export TP_CHAMADA=" in line:
Modifiedline2 = "export TP_CHAMADA=\"" + opcao + "\"\n"
f1.write(Modifiedline2)
else:
f1.write(line)
print " Created the " + nomearquivo_INI + " sucessfully."
GenerateSH("04",nomearquivo_SH,DIR_PYGEN + "Job_Examples/HD004.sh")
FourStepIntegrated(filename)
def GenerateSH(stepNumber,nomearquivo_SH,moduleExample_sh):
with open(moduleExample_sh) as f2, open(nomearquivo_SH,"w") as sh:
for line in f2:
sh.write(line)
print "Created the " + nomearquivo_SH + " sucessfully."
def Refined_Trusted(filename,database,tablename,qtd_steps):
qtd = 0
if 1 == qtd_steps:
OneStepIntegrated(filename)
if 2 == qtd_steps:
TwoStepIntegrated(filename)
if 3 == qtd_steps:
ThreeStepIntegrated(filename)
    if 4 == qtd_steps:
        FourStepIntegrated(filename)
if 5 == qtd_steps:
FiveStepIntegrated(filename)
if 6 == qtd_steps:
SixStepIntegrated(filename)
if 7 == qtd_steps:
SevenStepIntegrated(filename)
if 8 == qtd_steps:
EightStepIntegrated(filename)
if 9 == qtd_steps:
NineStepIntegrated(filename)
if qtd_steps > 9:
maisde10steps(filename,qtd_steps)
while (qtd < int(qtd_steps)):
qtd = 1 + qtd
nomearquivo_INI = DIR_INTGR_PAR + filename + "{0:02d}".format(qtd) + ".ini"
nomearquivo_SH = DIR_INTGR_JOB + filename + "{0:02d}".format(qtd) + ".sh"
with open(DIR_PYGEN + "Par_Examples/HD007.ini") as f, open(nomearquivo_INI, "w") as f1:
for line in f:
if "# SCRIPTNAMEGOESHERE" in line:
Modifiedline = "# " + filename + ".ini\n"
f1.write(Modifiedline)
elif "export DATABASE=" in line:
Modifiedline = "export DATABASE=\"" + database + "\"\n"
f1.write(Modifiedline)
elif "export TABLENAME=" in line:
Modifiedline2 = "export TABLENAME=\"" + tablename + "\"\n"
f1.write(Modifiedline2)
else:
f1.write(line)
GenerateSH("04",nomearquivo_SH,DIR_PYGEN + "Job_Examples/HD007.sh")
if qtd == qtd_steps:
break
def OneStepIntegrated(filename):
fname = filename[:8]
filenamecompled = DIR_INTGR_JOB + fname + ".sh"
with open(DIR_PYGEN + "template/Template_1Job.sh") as f, open(filenamecompled, "w") as ag:
for line in f:
if "# SCRIPTNAMEGOESHERE" in line:
Modifiedline = "# " + fname + ".ini\n"
ag.write(Modifiedline)
elif "/step01" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "01" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
else:
ag.write(line)
print "Created the directory " + DIR_INTGR_JOB + fname + ".sh"
def TwoStepIntegrated(filename):
fname = filename[:8]
filenamecompled = DIR_INTGR_JOB + fname + ".sh"
with open(DIR_PYGEN + "template/Template_2Job.sh") as f, open(filenamecompled, "w") as ag:
for line in f:
if "# SCRIPTNAMEGOESHERE" in line:
Modifiedline = "# " + fname + ".ini\n"
ag.write(Modifiedline)
elif "/step01" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "01" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step02" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "02" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
else:
ag.write(line)
print "Created the directory " + DIR_INTGR_JOB + fname + ".sh"
def ThreeStepIntegrated(filename):
fname = filename[:8]
filenamecompled = DIR_INTGR_JOB + fname + ".sh"
with open(DIR_PYGEN + "template/Template_3Job.sh") as f, open(filenamecompled, "w") as ag:
for line in f:
if "# SCRIPTNAMEGOESHERE" in line:
Modifiedline = "# " + fname + ".ini\n"
ag.write(Modifiedline)
elif "/step01" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "01" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step02" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "02" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step03" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "03" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
else:
ag.write(line)
print "Created the directory " + DIR_INTGR_JOB + fname + ".sh"
def FourStepIntegrated(filename):
fname = filename[:8]
filenamecompled = DIR_INTGR_JOB + fname + ".sh"
with open(DIR_PYGEN + "template/Template_4Job.sh") as f, open(filenamecompled, "w") as ag:
for line in f:
if "# SCRIPTNAMEGOESHERE" in line:
Modifiedline = "# " + fname + ".ini\n"
ag.write(Modifiedline)
elif "/step01" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "01" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step02" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "02" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step03" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "03" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step04" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "04" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
else:
ag.write(line)
print "Created the directory " + DIR_INTGR_JOB + fname + ".sh"
def FiveStepIntegrated(filename):
fname = filename[:8]
filenamecompled = DIR_INTGR_JOB + fname + ".sh"
with open(DIR_PYGEN + "template/Template_5Job.sh") as f, open(filenamecompled, "w") as ag:
for line in f:
if "# SCRIPTNAMEGOESHERE" in line:
Modifiedline = "# " + fname + ".ini\n"
ag.write(Modifiedline)
elif "/step01" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "01" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step02" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "02" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step03" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "03" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step04" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "04" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step05" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "05" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
else:
ag.write(line)
print "Created the directory " + DIR_INTGR_JOB + fname + ".sh"
def SixStepIntegrated(filename):
fname = filename[:8]
filenamecompled = DIR_INTGR_JOB + fname + ".sh"
with open(DIR_PYGEN + "template/Template_6Job.sh") as f, open(filenamecompled, "w") as ag:
for line in f:
if "# SCRIPTNAMEGOESHERE" in line:
Modifiedline = "# " + fname + ".ini\n"
ag.write(Modifiedline)
elif "/step01" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "01" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step02" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "02" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step03" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "03" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step04" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "04" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step05" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "05" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step06" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "06" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
else:
ag.write(line)
print "Created the directory " + DIR_INTGR_JOB + fname + ".sh"
def SevenStepIntegrated(filename):
fname = filename[:8]
filenamecompled = DIR_INTGR_JOB + fname + ".sh"
with open(DIR_PYGEN + "template/Template_7Job.sh") as f, open(filenamecompled, "w") as ag:
for line in f:
if "# SCRIPTNAMEGOESHERE" in line:
Modifiedline = "# " + fname + ".ini\n"
ag.write(Modifiedline)
elif "/step01" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "01" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step02" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "02" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step03" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "03" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step04" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "04" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step05" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "05" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step06" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "06" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step07" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "07" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
else:
ag.write(line)
print "Created the directory " + DIR_INTGR_JOB + fname + ".sh"
def EightStepIntegrated(filename):
fname = filename[:8]
filenamecompled = DIR_INTGR_JOB + fname + ".sh"
with open(DIR_PYGEN + "template/Template_8Job.sh") as f, open(filenamecompled, "w") as ag:
for line in f:
if "# SCRIPTNAMEGOESHERE" in line:
Modifiedline = "# " + fname + ".ini\n"
ag.write(Modifiedline)
elif "/step01" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "01" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step02" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "02" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step03" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "03" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step04" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "04" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step05" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "05" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step06" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "06" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step07" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "07" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step08" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "08" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
else:
ag.write(line)
print "Created the directory " + DIR_INTGR_JOB + fname + ".sh"
def NineStepIntegrated(filename):
fname = filename[:8]
filenamecompled = DIR_INTGR_JOB + fname + ".sh"
with open(DIR_PYGEN + "template/Template_9Job.sh") as f, open(filenamecompled, "w") as ag:
for line in f:
if "# SCRIPTNAMEGOESHERE" in line:
Modifiedline = "# " + fname + ".ini\n"
ag.write(Modifiedline)
elif "/step01" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "01" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step02" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "02" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step03" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "03" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step04" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "04" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step05" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "05" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step06" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "06" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step07" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "07" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step08" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "08" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
elif "/step09" in line:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + fname + "09" + ".sh ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
else:
ag.write(line)
print "Created the directory " + DIR_INTGR_JOB + fname + ".sh"
def maisde10steps(filename,qtd_steps):
fname = filename[:8]
filenamecompled = DIR_INTGR_JOB + fname + ".sh"
steps, step_n, flg_replace_steps = range(1,qtd_steps+1),0,True
if not os.path.exists(DIR_PYGEN + "template/Template_{0}Job.sh".format(qtd_steps)):
with open(DIR_PYGEN + "template/Template_{0:02d}Job.sh".format(qtd_steps),"w+") as f:
f.write("#\n")
f.write("# SCRIPTNAMEGOESHERE\n")
f.write("#\n")
f.write("# AUTOR........: Fabricio Quiles - [email protected]\n")
f.write("# DATA.........: 18/01/2018\n")
f.write("# VERSAO.......: 1.0\n")
f.write("# COMENTARIO...: Criacao de shell\n")
f.write("#\n")
f.write("# OBJETIVO: Job para execucao com STEP\n")
f.write("#\n")
f.write("# OBSERVACOES: Parametros de chamada:\n")
f.write("#\n")
f.write("# 1 - Movto do tws formato YYYYMMDD\n")
f.write("# 2 - Sistema (verificar se pode setar no proprio TWS)\n")
f.write("# 3 - Diretorio Job (verificar se pode setar no proprio TWS)\n")
f.write("# 4 - Diretorio arquivo configuracao global (verificar se pode setar no proprio TWS)\n")
f.write("# 5 - Step a ser executado (opcional)\n")
f.write("###################################################################################################################################\n")
f.write("\n")
f.write("echo Inicio do processamento - `date`\n")
f.write("echo --------------------------------------------------------------------------\n")
f.write("\n")
f.write("export MOVTO=$1\n")
f.write("export HOME_BID_COMMON_PAR=$2\n")
f.write("export JOBNAME=$(basename $0 .sh)\n")
f.write("export step=$3\n")
f.write("source ${HOME_BID_COMMON_PAR}/config_global.ini\n")
f.write("source ${HOME_BID_COMMON_FUNCTION}/function.cfg\n")
f.write("source ${HOME_BID_COMMON_PAR}/config_local.ini\n")
f.write("\n")
f.write("bn=$0\n")
f.write("\n")
f.write("if [ \"$step\" == \"\" ]; then\n")
f.write("\n")
f.write(" echo \"Iniciando script $bn - MOVTO $MOVTO a partir do inicio\"\n")
f.write("\n")
f.write("else\n")
f.write("\n")
f.write(" echo \"Iniciando script $bn - MOVTO $MOVTO a partir do step $step\"\n")
f.write("\n")
f.write("fi\n")
f.write("\n")
f.write("case $step in\n")
f.write("\n")
f.write("\"\")\n")
f.write("\n")
f.write(" echo Inicio do script $bn;&\n")
f.write("\n")
f.write("\"step00\")\n")
f.write("\n")
f.write(" echo Inicio do step step00 - `date`\n")
f.write(" echo Testando se existe arquivo de controle ${JOBNAME}.ok ....\n")
f.write("\n")
f.write(" if [ -f \"${HOME_INTEGRATION_LAYER_JOB}/${JOBNAME}.ok\" ]; then\n")
f.write("\n")
f.write(" echo Arquivo ${JOBNAME}.ok encontrado em ${HOME_INTEGRATION_LAYER_JOB}.\n")
f.write(" echo Removendo arquivo de controle...\n")
f.write("\n")
f.write(" rm ${HOME_INTEGRATION_LAYER_JOB}/${JOBNAME}.ok\n")
f.write(" status=$?\n")
f.write("\n")
f.write(" if [ ${status} -ne 0 ]; then\n")
f.write("\n")
f.write(" echo Final do step step00 com erro - `date`\n")
f.write(" echo Falha ao tentar remover o arquivo de controle ${JOBNAME}.ok de ${HOME_INTEGRATION_LAYER_JOB}\n")
f.write(" echo --------------------------------------------------------------------------\n")
f.write(" exit 10\n")
f.write("\n")
f.write(" else\n")
f.write("\n")
f.write(" echo Final do step step00 com sucesso - `date`\n")
f.write(" echo --------------------------------------------------------------------------\n")
f.write("\n")
f.write(" fi\n")
f.write("\n")
f.write(" else\n")
f.write("\n")
f.write(" echo Final do step step00 com erro - `date`\n")
f.write(" echo Nao foi encontrado o arquivo de controle ${JOBNAME}.ok em ${HOME_INTEGRATION_LAYER_JOB}\n")
f.write(" echo Isto pode indicar que a execucao anterior nao finalizou com sucesso\n")
f.write(" echo --------------------------------------------------------------------------\n")
f.write(" exit 10\n")
f.write("\n")
f.write(" fi\n")
f.write("\n")
f.write(" ;&\n")
for i in steps:
f.write("\n")
f.write("\"step{0:02d}\")\n".format(i))
f.write("\n")
f.write(" echo Inicio do step step{0:02d} - `date`\n".format(i))
f.write("\n")
f.write(" # colocar o nome correto do job. Ex.: BI03A0101.txt\n")
f.write(" ${HOME_INTEGRATION_LAYER_JOB}/step" + "{0:02d}\n".format(i))
f.write(" status=$?\n")
f.write("\n")
f.write(" if [ ${status} -ne 0 ]; then\n")
f.write("\n")
f.write(" echo Final do step step{0:02d} com erro - `date`\n".format(i))
f.write(" echo Falha ao tentar executar o job ${HOME_INTEGRATION_LAYER_JOB}/teste_shell_pcp.txt\n")
f.write(" echo --------------------------------------------------------------------------\n")
f.write(" exit 10\n")
f.write("\n")
f.write(" else\n")
f.write("\n")
f.write(" echo Final do step step{0:02d} com sucesso - `date`\n".format(i))
f.write(" echo --------------------------------------------------------------------------\n")
f.write("\n")
f.write(" fi\n")
f.write("\n")
f.write(" ;&\n")
f.write("\n")
f.write("\"step99\")\n")
f.write("\n")
f.write(" echo Inicio do step step99 - `date`\n")
f.write(" touch ${HOME_INTEGRATION_LAYER_JOB}/${JOBNAME}.ok\n")
f.write(" status=$?\n")
f.write("\n")
f.write(" if [ ${status} -ne 0 ]; then\n")
f.write("\n")
f.write(" echo Final do step step99 com erro - `date`\n")
f.write(" echo --------------------------------------------------------------------------\n")
f.write(" exit 10\n")
f.write("\n")
f.write(" else\n")
f.write("\n")
f.write(" echo Final do step step99 com sucesso - `date`\n")
f.write(" echo --------------------------------------------------------------------------\n")
f.write("\n")
f.write(" fi\n")
f.write(" ;;\n")
f.write("\n")
f.write("*)\n")
f.write("\n")
f.write(" echo ------ step $step nao existe --------\n")
f.write(" exit 10\n")
f.write(" ;;\n")
f.write("\n")
f.write("esac\n")
with open(DIR_PYGEN + "template/Template_{0}Job.sh".format(qtd_steps)) as f, open(filenamecompled, "w") as ag:
for line in f:
if "# SCRIPTNAMEGOESHERE" in line:
Modifiedline = "# " + fname + ".ini\n"
ag.write(Modifiedline)
elif "/step{0:02d}".format(steps[step_n]) in line and flg_replace_steps:
Modifiedline = " ${HOME_INTEGRATION_LAYER_JOB}/" + "{0}{1:02d}.sh".format(fname,steps[step_n]) + " ${MOVTO} ${HOME_BID_COMMON_PAR}\n"
ag.write(Modifiedline)
if step_n+1 == qtd_steps:
flg_replace_steps = False
else:
step_n += 1
else:
ag.write(line)
print "Created file" + DIR_INTGR_JOB + fname + ".sh"
def main():
print ' '
print '---------------------------------- Inicio Do Programa ----------------------------------'
print ' '
with open(DIR_PYGEN + "Step01.csv") as myFile:
reader = csv.reader(myFile)
meuArquivo = list(reader)
for x in meuArquivo:
createScript_Yes_or_No = str(x[0])
step = str(x[1])
if createScript_Yes_or_No == 'yes':
if '1' in step:
TransferirArquivos(str(x[2]),str(x[3]),str(x[4]))
if '2' in step:
CargaRawZone(str(x[2]),str(x[3]),str(x[4]))
if '3' in step:
Profile(str(x[2]),str(x[3]),str(x[4]),str(x[5]))
if '4' in step:
Stats(str(x[2]),str(x[3]),str(x[4]),str(x[5]))
if '5' in step:
Refined_Trusted(str(x[2]),str(x[3]),str(x[4]),int(x[5]))
if '6' in step:
Sqoop(str(x[2]),str(x[3]),str(x[4]),str(x[5]),str(x[6]),str(x[7]))
print ' '
print '---------------------------------- Fim Do Programa ----------------------------------'
main()
| null | null | null | null | [
0
] |
1,850 | 29298ee7ddb4e524a23000abf86854d72f49954c | <mask token>
class Posts(db.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __repr__(self):
return '<Posts {}>'.format(self.name)
@validates('title')
def validate_title(self, key, title):
if not title:
raise AssertionError('Title is required')
if len(title) < 20:
raise AssertionError('Title must be at least 20 character')
return title
@validates('content')
def validate_content(self, key, content):
if not content:
raise AssertionError('Content is required')
if len(content) < 200:
raise AssertionError('Content must be at least 200 character')
return content
@validates('category')
def validate_category(self, key, category):
if not category:
raise AssertionError('Category is required')
if len(category) < 3:
raise AssertionError('Category must be at least 3 character')
return category
<mask token>
| <mask token>
class Posts(db.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __repr__(self):
return '<Posts {}>'.format(self.name)
@validates('title')
def validate_title(self, key, title):
if not title:
raise AssertionError('Title is required')
if len(title) < 20:
raise AssertionError('Title must be at least 20 character')
return title
@validates('content')
def validate_content(self, key, content):
if not content:
raise AssertionError('Content is required')
if len(content) < 200:
raise AssertionError('Content must be at least 200 character')
return content
@validates('category')
def validate_category(self, key, category):
if not category:
raise AssertionError('Category is required')
if len(category) < 3:
raise AssertionError('Category must be at least 3 character')
return category
@validates('status')
def validate_status(self, key, status):
if not status:
raise AssertionError('Status is required')
elif status not in ['Publish', 'Draft', 'Thrash']:
raise AssertionError('{} is not supported status'.format(status))
return status
| <mask token>
class Posts(db.Model):
id = db.Column(db.BigInteger, primary_key=True, autoincrement=True)
title = db.Column(db.String(200))
content = db.Column(db.Text)
category = db.Column(db.String(100))
created_date = db.Column(db.DateTime, default=datetime.utcnow)
updated_date = db.Column(db.DateTime, default=datetime.utcnow)
status = db.Column(db.Enum('Publish', 'Draft', 'Thrash', name=
'postsStatus'), default='Draft')
def __repr__(self):
return '<Posts {}>'.format(self.name)
@validates('title')
def validate_title(self, key, title):
if not title:
raise AssertionError('Title is required')
if len(title) < 20:
raise AssertionError('Title must be at least 20 character')
return title
@validates('content')
def validate_content(self, key, content):
if not content:
raise AssertionError('Content is required')
if len(content) < 200:
raise AssertionError('Content must be at least 200 character')
return content
@validates('category')
def validate_category(self, key, category):
if not category:
raise AssertionError('Category is required')
if len(category) < 3:
raise AssertionError('Category must be at least 3 character')
return category
@validates('status')
def validate_status(self, key, status):
if not status:
raise AssertionError('Status is required')
elif status not in ['Publish', 'Draft', 'Thrash']:
raise AssertionError('{} is not supported status'.format(status))
return status
| from app import db
from datetime import datetime
from sqlalchemy.orm import validates
class Posts(db.Model):
id = db.Column(db.BigInteger, primary_key=True, autoincrement=True)
title = db.Column(db.String(200))
content = db.Column(db.Text)
category = db.Column(db.String(100))
created_date = db.Column(db.DateTime, default=datetime.utcnow)
updated_date = db.Column(db.DateTime, default=datetime.utcnow)
status = db.Column(db.Enum('Publish', 'Draft', 'Thrash', name=
'postsStatus'), default='Draft')
def __repr__(self):
return '<Posts {}>'.format(self.name)
@validates('title')
def validate_title(self, key, title):
if not title:
raise AssertionError('Title is required')
if len(title) < 20:
raise AssertionError('Title must be at least 20 character')
return title
@validates('content')
def validate_content(self, key, content):
if not content:
raise AssertionError('Content is required')
if len(content) < 200:
raise AssertionError('Content must be at least 200 character')
return content
@validates('category')
def validate_category(self, key, category):
if not category:
raise AssertionError('Category is required')
if len(category) < 3:
raise AssertionError('Category must be at least 3 character')
return category
@validates('status')
def validate_status(self, key, status):
if not status:
raise AssertionError('Status is required')
elif status not in ['Publish', 'Draft', 'Thrash']:
raise AssertionError('{} is not supported status'.format(status))
return status
| from app import db
from datetime import datetime
from sqlalchemy.orm import validates
class Posts(db.Model):
id = db.Column(db.BigInteger, primary_key=True, autoincrement=True)
title = db.Column(db.String(200))
content = db.Column(db.Text)
category = db.Column(db.String(100))
created_date = db.Column(db.DateTime, default=datetime.utcnow)
updated_date = db.Column(db.DateTime, default=datetime.utcnow)
status = db.Column(db.Enum('Publish', 'Draft', 'Thrash', name='postsStatus'), default='Draft')
def __repr__(self):
return '<Posts {}>'.format(self.name)
@validates("title")
def validate_title(self, key, title):
if not title:
raise AssertionError("Title is required")
if len(title) < 20:
raise AssertionError("Title must be at least 20 character")
return title
@validates("content")
def validate_content(self, key, content):
if not content:
raise AssertionError("Content is required")
if len(content) < 200:
raise AssertionError("Content must be at least 200 character")
return content
@validates("category")
def validate_category(self, key, category):
if not category:
raise AssertionError("Category is required")
if len(category) < 3:
raise AssertionError("Category must be at least 3 character")
return category
@validates("status")
def validate_status(self, key, status):
if not status:
raise AssertionError("Status is required")
elif status not in ['Publish', 'Draft', 'Thrash']:
raise AssertionError("{} is not supported status".format(status))
return status
| [
5,
6,
7,
8,
9
] |
1,851 | 20ac73789fa7297a9230a6a2b814349d2b7da5fb | <mask token>
| <mask token>
print(b)
print(c)
| <mask token>
a = hashlib.pbkdf2_hmac('sha256', b'hallo', b'salt', 1)
b = hashlib.pbkdf2_hmac('sha256', a, b'salt', 1)
c = hashlib.pbkdf2_hmac('sha256', b'hallo', b'salt', 2)
print(b)
print(c)
| import hashlib
a = hashlib.pbkdf2_hmac('sha256', b'hallo', b'salt', 1)
b = hashlib.pbkdf2_hmac('sha256', a, b'salt', 1)
c = hashlib.pbkdf2_hmac('sha256', b'hallo', b'salt', 2)
print(b)
print(c)
| import hashlib
a = hashlib.pbkdf2_hmac("sha256", b"hallo", b"salt", 1)
b = hashlib.pbkdf2_hmac("sha256", a, b"salt", 1)
c = hashlib.pbkdf2_hmac("sha256", b"hallo", b"salt", 2)
print(b)
print(c) | [
0,
1,
2,
3,
4
] |
1,852 | 7040db119f8fd6da78499fc732e291280228ca10 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('problem', '0002_problem_video')]
operations = [migrations.AddField(model_name='problem', name='likes',
field=models.ManyToManyField(blank=True, related_name=
'problemLikes', to=settings.AUTH_USER_MODEL)), migrations.
AlterField(model_name='problem', name='author', field=models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'accounts.user')), migrations.CreateModel(name='ProblemLike',
fields=[('id', models.BigAutoField(auto_created=True, primary_key=
True, serialize=False, verbose_name='ID')), ('problem', models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'problem.problem')), ('user', models.ForeignKey(on_delete=django.db
.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL))])]
| from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('problem', '0002_problem_video')]
operations = [migrations.AddField(model_name='problem', name='likes',
field=models.ManyToManyField(blank=True, related_name=
'problemLikes', to=settings.AUTH_USER_MODEL)), migrations.
AlterField(model_name='problem', name='author', field=models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'accounts.user')), migrations.CreateModel(name='ProblemLike',
fields=[('id', models.BigAutoField(auto_created=True, primary_key=
True, serialize=False, verbose_name='ID')), ('problem', models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'problem.problem')), ('user', models.ForeignKey(on_delete=django.db
.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL))])]
| # Generated by Django 3.2.3 on 2021-05-16 07:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('problem', '0002_problem_video'),
]
operations = [
migrations.AddField(
model_name='problem',
name='likes',
field=models.ManyToManyField(blank=True, related_name='problemLikes', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='problem',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.user'),
),
migrations.CreateModel(
name='ProblemLike',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('problem', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='problem.problem')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
0,
1,
2,
3,
4
] |
1,853 | e267108177841110493061a4f84ae3d29850d028 | <mask token>
| <mask token>
urlpatterns = [path('', views.home), path('teams', views.showTeams), path(
'teams/new', views.new), path('teams/<teamname>', views.showSpecificTeam)]
| from django.urls import path
from . import views
urlpatterns = [path('', views.home), path('teams', views.showTeams), path(
'teams/new', views.new), path('teams/<teamname>', views.showSpecificTeam)]
| from django.urls import path
from . import views
urlpatterns = [
# @app.route("/")
path('', views.home),
path("teams", views.showTeams),
path("teams/new", views.new),
path("teams/<teamname>", views.showSpecificTeam),
# path("allfood", views.showAllFoodItems),
# path("team/<teamname>", views.showSpecificTeam)
] | null | [
0,
1,
2,
3
] |
1,854 | 080110e404cf5edfe53622a5942b53f9188ddd76 | <mask token>
| <mask token>
if __name__ == '__main__':
with open('id_generator.bin', 'rb') as f:
print(pickle.load(f))
| import pickle
if __name__ == '__main__':
with open('id_generator.bin', 'rb') as f:
print(pickle.load(f))
| null | null | [
0,
1,
2
] |
1,855 | e9a6baf10efc5b6bd07af1fe352b0b17ecc172bd | <mask token>
class LinearBot(object):
def __init__(self, player, player_name, weights_file, basis):
self.reader = StateEncapsulator(player, player_name)
with open(STATE_FILENAME, 'r') as f:
data = json.load(f)
self.state = self.reader.parse_state(data)
with open(weights_file, 'rb') as pkl:
self.weights = pickle.load(pkl)
self.action_mapper = ActionMapper()
self.basis = basis
self.command = ''
<mask token>
def write_action(self):
action_list = self.__get_next_action(self.state)
if not np.all(action_list == DO_NOTHING_ACTION) and action_list[2
] != -1:
self.command = str(action_list[0]) + ',' + str(action_list[1]
) + ',' + str(action_list[2])
with open('command.txt', 'w') as outfl:
outfl.write(self.command)
<mask token>
| <mask token>
class LinearBot(object):
def __init__(self, player, player_name, weights_file, basis):
self.reader = StateEncapsulator(player, player_name)
with open(STATE_FILENAME, 'r') as f:
data = json.load(f)
self.state = self.reader.parse_state(data)
with open(weights_file, 'rb') as pkl:
self.weights = pickle.load(pkl)
self.action_mapper = ActionMapper()
self.basis = basis
self.command = ''
def __get_next_action(self, sp):
sp = sp.flatten()
q_values = []
for action in self.action_mapper.triples:
sp_ap = np.array(list(sp) + list(action))
sp_ap = self.basis(sp_ap)
q_values.append(np.dot(sp_ap, self.weights))
return list(self.action_mapper.triples[np.argmax(q_values)])
def write_action(self):
action_list = self.__get_next_action(self.state)
if not np.all(action_list == DO_NOTHING_ACTION) and action_list[2
] != -1:
self.command = str(action_list[0]) + ',' + str(action_list[1]
) + ',' + str(action_list[2])
with open('command.txt', 'w') as outfl:
outfl.write(self.command)
if __name__ == '__main__':
with open(CONFIG_FILENAME, 'r') as f:
data = json.load(f)
player_name = data['nickName']
player = 'A' if player_name == 'Guido' else 'B'
bot = LinearBot(player, player_name, WEIGHTS_FILENAME, actions_cubic_basis)
bot.write_action()
| <mask token>
STATE_FILENAME = 'state3.json'
CONFIG_FILENAME = 'bot.json'
WEIGHTS_FILENAME = 'weights.pkl'
DO_NOTHING_ACTION = [-1, -1, -1]
class LinearBot(object):
def __init__(self, player, player_name, weights_file, basis):
self.reader = StateEncapsulator(player, player_name)
with open(STATE_FILENAME, 'r') as f:
data = json.load(f)
self.state = self.reader.parse_state(data)
with open(weights_file, 'rb') as pkl:
self.weights = pickle.load(pkl)
self.action_mapper = ActionMapper()
self.basis = basis
self.command = ''
def __get_next_action(self, sp):
sp = sp.flatten()
q_values = []
for action in self.action_mapper.triples:
sp_ap = np.array(list(sp) + list(action))
sp_ap = self.basis(sp_ap)
q_values.append(np.dot(sp_ap, self.weights))
return list(self.action_mapper.triples[np.argmax(q_values)])
def write_action(self):
action_list = self.__get_next_action(self.state)
if not np.all(action_list == DO_NOTHING_ACTION) and action_list[2
] != -1:
self.command = str(action_list[0]) + ',' + str(action_list[1]
) + ',' + str(action_list[2])
with open('command.txt', 'w') as outfl:
outfl.write(self.command)
if __name__ == '__main__':
with open(CONFIG_FILENAME, 'r') as f:
data = json.load(f)
player_name = data['nickName']
player = 'A' if player_name == 'Guido' else 'B'
bot = LinearBot(player, player_name, WEIGHTS_FILENAME, actions_cubic_basis)
bot.write_action()
| import numpy as np
import json
import random
from encapsulate_state import StateEncapsulator
from scalar_to_action import ActionMapper
import pickle
from basis_functions import identity_basis, interactive_basis, actions_only_basis, actions_cubic_basis, BASIS_MAP
import matplotlib.pyplot as plt
STATE_FILENAME = 'state3.json'
CONFIG_FILENAME = 'bot.json'
WEIGHTS_FILENAME = 'weights.pkl'
DO_NOTHING_ACTION = [-1, -1, -1]
class LinearBot(object):
def __init__(self, player, player_name, weights_file, basis):
self.reader = StateEncapsulator(player, player_name)
with open(STATE_FILENAME, 'r') as f:
data = json.load(f)
self.state = self.reader.parse_state(data)
with open(weights_file, 'rb') as pkl:
self.weights = pickle.load(pkl)
self.action_mapper = ActionMapper()
self.basis = basis
self.command = ''
def __get_next_action(self, sp):
sp = sp.flatten()
q_values = []
for action in self.action_mapper.triples:
sp_ap = np.array(list(sp) + list(action))
sp_ap = self.basis(sp_ap)
q_values.append(np.dot(sp_ap, self.weights))
return list(self.action_mapper.triples[np.argmax(q_values)])
def write_action(self):
action_list = self.__get_next_action(self.state)
if not np.all(action_list == DO_NOTHING_ACTION) and action_list[2
] != -1:
self.command = str(action_list[0]) + ',' + str(action_list[1]
) + ',' + str(action_list[2])
with open('command.txt', 'w') as outfl:
outfl.write(self.command)
if __name__ == '__main__':
with open(CONFIG_FILENAME, 'r') as f:
data = json.load(f)
player_name = data['nickName']
player = 'A' if player_name == 'Guido' else 'B'
bot = LinearBot(player, player_name, WEIGHTS_FILENAME, actions_cubic_basis)
bot.write_action()
| import numpy as np
import json
import random
from encapsulate_state import StateEncapsulator
from scalar_to_action import ActionMapper
import pickle
from basis_functions import identity_basis, interactive_basis, actions_only_basis, actions_cubic_basis, BASIS_MAP
import matplotlib.pyplot as plt
STATE_FILENAME = "state3.json"
CONFIG_FILENAME = "bot.json"
WEIGHTS_FILENAME = "weights.pkl"
DO_NOTHING_ACTION = [-1, -1, -1]
class LinearBot(object):
def __init__(self, player, player_name, weights_file, basis):
self.reader = StateEncapsulator(player, player_name)
with open(STATE_FILENAME, "r") as f:
data = json.load(f)
self.state = self.reader.parse_state(data)
with open(weights_file, "rb") as pkl:
self.weights = pickle.load(pkl)
self.action_mapper = ActionMapper()
self.basis = basis
self.command = ""
# Expects as input a 3D tensor representing the state, un-flattened; returns a list action
def __get_next_action(self, sp):
sp = sp.flatten()
q_values = []
for action in self.action_mapper.triples:
sp_ap = np.array(list(sp) + list(action))
sp_ap = self.basis(sp_ap)
q_values.append(np.dot(sp_ap, self.weights))
return list(self.action_mapper.triples[np.argmax(q_values)])
def write_action(self):
action_list = self.__get_next_action(self.state)
if (not np.all(action_list == DO_NOTHING_ACTION)) and action_list[2] != -1:
self.command = str(action_list[0]) + "," + str(action_list[1]) + "," + str(action_list[2])
with open("command.txt", "w") as outfl:
outfl.write(self.command)
############################################################################################
if __name__ == "__main__":
with open(CONFIG_FILENAME, "r") as f:
data = json.load(f)
player_name = data["nickName"]
player = "A" if player_name == "Guido" else "B"
bot = LinearBot(player, player_name, WEIGHTS_FILENAME, actions_cubic_basis)
bot.write_action()
| [
3,
5,
6,
7,
8
] |
1,856 | 365e2059d5ed3d7f8d9dbb4e44f563b79d68b087 | <mask token>
def get_labelled_data_from_directories(data_dir, maxlen=None):
texts = []
labels_index = {}
labels = []
for name in sorted(os.listdir(data_dir)):
path = os.path.join(data_dir, name)
if os.path.isdir(path):
label_id = len(labels_index)
labels_index[name] = label_id
for fname in os.listdir(path):
fpath = os.path.join(path, fname)
f = open(fpath)
t = f.read()
if maxlen is not None:
t = get_first_n_words(t, maxlen)
texts.append(t)
f.close()
labels.append(label_id)
return texts, labels_index, labels
| <mask token>
def filter_not_punctuation():
return '"#$%&()*+-/:;<=>@[\\]^_`{|}~\t\n'
<mask token>
def get_labelled_data_from_directories(data_dir, maxlen=None):
texts = []
labels_index = {}
labels = []
for name in sorted(os.listdir(data_dir)):
path = os.path.join(data_dir, name)
if os.path.isdir(path):
label_id = len(labels_index)
labels_index[name] = label_id
for fname in os.listdir(path):
fpath = os.path.join(path, fname)
f = open(fpath)
t = f.read()
if maxlen is not None:
t = get_first_n_words(t, maxlen)
texts.append(t)
f.close()
labels.append(label_id)
return texts, labels_index, labels
| <mask token>
def filter_not_punctuation():
return '"#$%&()*+-/:;<=>@[\\]^_`{|}~\t\n'
def get_first_n_words(text, n):
string_sequence = text_to_word_sequence(text, filters=
filter_not_punctuation())
truncated_string = ''
for word in string_sequence[:n]:
truncated_string = truncated_string + word + ' '
return truncated_string
def get_labelled_data_from_directories(data_dir, maxlen=None):
texts = []
labels_index = {}
labels = []
for name in sorted(os.listdir(data_dir)):
path = os.path.join(data_dir, name)
if os.path.isdir(path):
label_id = len(labels_index)
labels_index[name] = label_id
for fname in os.listdir(path):
fpath = os.path.join(path, fname)
f = open(fpath)
t = f.read()
if maxlen is not None:
t = get_first_n_words(t, maxlen)
texts.append(t)
f.close()
labels.append(label_id)
return texts, labels_index, labels
| from keras.preprocessing.text import text_to_word_sequence
import os
def filter_not_punctuation():
return '"#$%&()*+-/:;<=>@[\\]^_`{|}~\t\n'
def get_first_n_words(text, n):
string_sequence = text_to_word_sequence(text, filters=
filter_not_punctuation())
truncated_string = ''
for word in string_sequence[:n]:
truncated_string = truncated_string + word + ' '
return truncated_string
def get_labelled_data_from_directories(data_dir, maxlen=None):
texts = []
labels_index = {}
labels = []
for name in sorted(os.listdir(data_dir)):
path = os.path.join(data_dir, name)
if os.path.isdir(path):
label_id = len(labels_index)
labels_index[name] = label_id
for fname in os.listdir(path):
fpath = os.path.join(path, fname)
f = open(fpath)
t = f.read()
if maxlen is not None:
t = get_first_n_words(t, maxlen)
texts.append(t)
f.close()
labels.append(label_id)
return texts, labels_index, labels
| from keras.preprocessing.text import text_to_word_sequence
import os
# keras NLP tools filter out certain tokens by default
# this function replaces the default with a smaller set of things to filter out
def filter_not_punctuation():
return '"#$%&()*+-/:;<=>@[\\]^_`{|}~\t\n'
def get_first_n_words(text, n):
string_sequence = text_to_word_sequence(text, filters=filter_not_punctuation())
truncated_string = ''
for word in string_sequence[:n]:
truncated_string = truncated_string + word + ' '
return truncated_string
# gets text data from files with only maxlen words from each file. Gets whole file if maxlen is None
def get_labelled_data_from_directories(data_dir, maxlen=None):
texts = [] # list of text samples
labels_index = {} # dictionary mapping label name to numeric id
labels = [] # list of label ids
for name in sorted(os.listdir(data_dir)):
path = os.path.join(data_dir, name)
if os.path.isdir(path):
label_id = len(labels_index)
labels_index[name] = label_id
for fname in os.listdir(path):
fpath = os.path.join(path, fname)
f = open(fpath)
t = f.read()
if maxlen is not None:
t = get_first_n_words(t, maxlen)
texts.append(t)
f.close()
labels.append(label_id)
return texts, labels_index, labels
| [
1,
2,
3,
4,
5
] |
1,857 | d17f1176ac60a3f6836c706883ab1847b61f50bf | import ConfigParser
''' Merge as many as ConfigParser as you want'''
def Config_Append(SRC_Config ,DST_Config):
import tempfile
temp_src = tempfile.NamedTemporaryFile(delete=True)
temp_dst = tempfile.NamedTemporaryFile(delete=True)
with open(temp_src.name,'wb') as src, open(temp_dst.name,'wb') as dst:
SRC_Config.write(src)
DST_Config.write(dst)
DST_Config.read([temp_src.name,temp_dst.name])
return DST_Config
if __name__ == '__main__':
# initial
config_one = ConfigParser.RawConfigParser()
config_two = ConfigParser.RawConfigParser()
config_three = ConfigParser.RawConfigParser()
:
:
:
# read config
config_one.read('one.ini')
config_two.read('two.ini')
config_three.read('three.ini')
:
:
:
# data manipulation
blah blah
# config merge
config_final = reduce(Config_Append, (config_one ,config_two, config_three, ...))
# show
for i in config_final.sections():
print '[',i,']'
print config_final.items(i)
| null | null | null | null | [
0
] |
1,858 | afacc2c54584c070963c4cb3cabbae64bb0e3159 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('course', '0002_auto_20201103_1648')]
operations = [migrations.AddField(model_name='course', name=
'course_video', field=models.FileField(blank=True, max_length=255,
null=True, upload_to='video', verbose_name='封面视频')), migrations.
AlterField(model_name='course', name='brief', field=
ckeditor_uploader.fields.RichTextUploadingField(blank=True,
max_length=2048, null=True, verbose_name='详情介绍'))]
| import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('course', '0002_auto_20201103_1648')]
operations = [migrations.AddField(model_name='course', name=
'course_video', field=models.FileField(blank=True, max_length=255,
null=True, upload_to='video', verbose_name='封面视频')), migrations.
AlterField(model_name='course', name='brief', field=
ckeditor_uploader.fields.RichTextUploadingField(blank=True,
max_length=2048, null=True, verbose_name='详情介绍'))]
| # Generated by Django 2.2.16 on 2020-11-04 12:48
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0002_auto_20201103_1648'),
]
operations = [
migrations.AddField(
model_name='course',
name='course_video',
field=models.FileField(blank=True, max_length=255, null=True, upload_to='video', verbose_name='封面视频'),
),
migrations.AlterField(
model_name='course',
name='brief',
field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, max_length=2048, null=True, verbose_name='详情介绍'),
),
]
| [
0,
1,
2,
3,
4
] |
1,859 | 3135483c68880eeeaf7ebc085a6cd3c0c7f0550c | <mask token>
| <mask token>
def main():
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
password='383240gyz', db='bycicle', charset='utf8')
print(conn)
try:
with conn.cursor() as cursor:
cursor.execute('drop table if exists pymysql')
cursor.execute(' create table pymysql (a int,b int)')
cursor.execute('insert into pymysql(a,b) values(1,1) ')
conn.commit()
except pymysql.MySQLError as e:
print(e)
conn.rollback()
finally:
conn.close()
<mask token>
| <mask token>
def main():
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
password='383240gyz', db='bycicle', charset='utf8')
print(conn)
try:
with conn.cursor() as cursor:
cursor.execute('drop table if exists pymysql')
cursor.execute(' create table pymysql (a int,b int)')
cursor.execute('insert into pymysql(a,b) values(1,1) ')
conn.commit()
except pymysql.MySQLError as e:
print(e)
conn.rollback()
finally:
conn.close()
if __name__ == '__main__':
main()
| import pymysql
def main():
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
password='383240gyz', db='bycicle', charset='utf8')
print(conn)
try:
with conn.cursor() as cursor:
cursor.execute('drop table if exists pymysql')
cursor.execute(' create table pymysql (a int,b int)')
cursor.execute('insert into pymysql(a,b) values(1,1) ')
conn.commit()
except pymysql.MySQLError as e:
print(e)
conn.rollback()
finally:
conn.close()
if __name__ == '__main__':
main()
| import pymysql
def main():
conn = pymysql.connect(host='127.0.0.1', port=3306,user='root',password='383240gyz',db='bycicle',charset='utf8')
print(conn)
try:
with conn.cursor() as cursor: # 上下文语法否则需要 # cursor.close()
cursor.execute('''drop table if exists pymysql''')
cursor.execute(''' create table pymysql (a int,b int)''')
cursor.execute('''insert into pymysql(a,b) values(1,1) ''')
conn.commit()
except pymysql.MySQLError as e:
print(e)
conn.rollback()
finally:
conn.close()
if __name__ == '__main__':
main()
| [
0,
1,
2,
3,
4
] |
1,860 | 9666c87b4d4dc721683ea33fdbbeadefc65a0cd1 | <mask token>
| <mask token>
if number <= 100:
print('Your number is smaller than equal to 100')
else:
print('Your number is greater than 100')
| number = int(input('Enter an integer'))
if number <= 100:
print('Your number is smaller than equal to 100')
else:
print('Your number is greater than 100')
| #!/usr/bin/env python
number=int(input("Enter an integer"))
if number<=100:
print("Your number is smaller than equal to 100")
else:
print("Your number is greater than 100")
| null | [
0,
1,
2,
3
] |
1,861 | 6e5b8be6182f39f185f4547f0abd84a4e404bf34 | <mask token>
| <mask token>
print(mydict)
print(mylist0)
print(mylist1)
for c in ('0', '1'):
if c in mydict:
mydict[c] += mylist0
else:
mydict[c] = mylist0
print(mydict)
for c in ('0', '1'):
if c in mydict:
mydict[c] += mylist1
else:
mydict[c] = mylist1
print(mydict)
| <mask token>
mydict = {}
mylist0 = np.array([1, 2, 3, 4, 5])
mylist1 = np.array([2, 3, 4, 5, 6])
print(mydict)
print(mylist0)
print(mylist1)
for c in ('0', '1'):
if c in mydict:
mydict[c] += mylist0
else:
mydict[c] = mylist0
print(mydict)
for c in ('0', '1'):
if c in mydict:
mydict[c] += mylist1
else:
mydict[c] = mylist1
print(mydict)
| import numpy as np
mydict = {}
mylist0 = np.array([1, 2, 3, 4, 5])
mylist1 = np.array([2, 3, 4, 5, 6])
print(mydict)
print(mylist0)
print(mylist1)
for c in ('0', '1'):
if c in mydict:
mydict[c] += mylist0
else:
mydict[c] = mylist0
print(mydict)
for c in ('0', '1'):
if c in mydict:
mydict[c] += mylist1
else:
mydict[c] = mylist1
print(mydict)
| null | [
0,
1,
2,
3
] |
1,862 | 0527dc2b6fa0fe703b604c6e28fba44fe6def83b | def main():
# defaults to 0
print a
a = 7
a *= 6
print a
| null | null | null | null | [
0
] |
1,863 | 6424fccb7990b0a1722d5d787e7eb5acb4ff1a74 | <mask token>
| <mask token>
print(response)
| <mask token>
ec2 = boto3.resource('ec2')
response = client.allocate_address(Domain='standard')
print(response)
| import boto3
ec2 = boto3.resource('ec2')
response = client.allocate_address(Domain='standard')
print(response)
| null | [
0,
1,
2,
3
] |
1,864 | 2561db1264fe399db85460e9f32213b70ddf03ff | <mask token>
| <mask token>
def encode(strs):
"""Encodes a list of strings to a single string.
:type strs: List[str]
:rtype: str
"""
res = ''
for string in strs.split():
res += str(len(string)) + ':' + string
return res
<mask token>
| <mask token>
def encode(strs):
"""Encodes a list of strings to a single string.
:type strs: List[str]
:rtype: str
"""
res = ''
for string in strs.split():
res += str(len(string)) + ':' + string
return res
def decode(string):
strs = []
i = 0
while i < len(string):
index = string.find(':', i)
size = int(string[i:index])
strs.append(string[index + 1:index + 1 + size])
i = index + 1 + size
return strs
<mask token>
| <mask token>
def encode(strs):
"""Encodes a list of strings to a single string.
:type strs: List[str]
:rtype: str
"""
res = ''
for string in strs.split():
res += str(len(string)) + ':' + string
return res
def decode(string):
strs = []
i = 0
while i < len(string):
index = string.find(':', i)
size = int(string[i:index])
strs.append(string[index + 1:index + 1 + size])
i = index + 1 + size
return strs
if __name__ == '__main__':
strs = 'keno is awesome'
r = encode(strs)
print(r)
r = decode(r)
print(r)
| #!/usr/bin/env python
# encoding: utf-8
"""
@author: swensun
@github:https://github.com/yunshuipiao
@software: python
@file: encode_decode.py
@desc: string encoding and decoding
@hint:
"""
def encode(strs):
"""Encodes a list of strings to a single string.
:type strs: List[str]
:rtype: str
"""
res = ''
for string in strs.split():
res += str(len(string)) + ":" + string
return res
def decode(string):
strs = []
i = 0
while i < len(string):
index = string.find(":", i)
# print(index)
size = int(string[i:index])
strs.append(string[index + 1: index + 1 + size])
i = index + 1 + size
return strs
if __name__ == '__main__':
strs = "keno is awesome"
r = encode(strs)
print(r)
r = decode(r)
print(r)
| [
0,
1,
2,
3,
4
] |
1,865 | fc5a4c27a21c2bd3900a6ad0bff68c249fe29d7a | <mask token>
| <mask token>
driver.get('https://www.zhaopin.com/')
time.sleep(5)
driver.find_element_by_id('KeyWord_kw2').send_keys('技术')
driver.find_element_by_class_name('doSearch').click()
time.sleep(5)
| <mask token>
driver = webdriver.Chrome(executable_path='/home/bc/桌面/chromedriver')
driver.get('https://www.zhaopin.com/')
time.sleep(5)
driver.find_element_by_id('KeyWord_kw2').send_keys('技术')
driver.find_element_by_class_name('doSearch').click()
time.sleep(5)
| from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import requests
import time
driver = webdriver.Chrome(executable_path='/home/bc/桌面/chromedriver')
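# open the Zhaopin home page, type the search keyword, and submit the search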
driver.get('https://www.zhaopin.com/')
time.sleep(5)
driver.find_element_by_id('KeyWord_kw2').send_keys('技术')
driver.find_element_by_class_name('doSearch').click()
time.sleep(5)
| null | [
0,
1,
2,
3
] |
1,866 | 03a024140d8d0136bf9838f8942539f6d19bb351 | <mask token>
class CMD(Cmd):
def __init__(self):
pass
<mask token>
<mask token>
<mask token>
def do_drawCard(self):
pass
<mask token>
def do_fight(self):
pass
def do_save(self, fileName):
self.game.save(fileName)
def do_load(self, fileName):
self.game.load(fileName)
def do_quit(self):
return True
<mask token>
<mask token>
| <mask token>
class CMD(Cmd):
def __init__(self):
pass
def do_move(self, direction):
if direction == 'up':
self.game.movePlayer(0, 1)
elif direction == 'down':
self.game.movePlayer(0, -1)
elif direction == 'left':
self.game.movePlayer(-1, 0)
elif direction == 'right':
self.game.movePlayer(1, 0)
else:
print('No valid direction given.')
def do_rotateTile(self, rotation):
pass
<mask token>
def do_drawCard(self):
pass
<mask token>
def do_fight(self):
pass
def do_save(self, fileName):
self.game.save(fileName)
def do_load(self, fileName):
self.game.load(fileName)
def do_quit(self):
return True
def validateCommands(self):
pass
def loadFile(filePath):
self.game.loadFile(filePath)
| <mask token>
class CMD(Cmd):
def __init__(self):
pass
def do_move(self, direction):
if direction == 'up':
self.game.movePlayer(0, 1)
elif direction == 'down':
self.game.movePlayer(0, -1)
elif direction == 'left':
self.game.movePlayer(-1, 0)
elif direction == 'right':
self.game.movePlayer(1, 0)
else:
print('No valid direction given.')
def do_rotateTile(self, rotation):
pass
<mask token>
def do_drawCard(self):
pass
def do_run(self):
pass
def do_fight(self):
pass
def do_save(self, fileName):
self.game.save(fileName)
def do_load(self, fileName):
self.game.load(fileName)
def do_quit(self):
return True
def validateCommands(self):
pass
def loadFile(filePath):
self.game.loadFile(filePath)
| <mask token>
class CMD(Cmd):
def __init__(self):
pass
def do_move(self, direction):
if direction == 'up':
self.game.movePlayer(0, 1)
elif direction == 'down':
self.game.movePlayer(0, -1)
elif direction == 'left':
self.game.movePlayer(-1, 0)
elif direction == 'right':
self.game.movePlayer(1, 0)
else:
print('No valid direction given.')
def do_rotateTile(self, rotation):
pass
def do_placeTile(self):
pass
def do_drawCard(self):
pass
def do_run(self):
pass
def do_fight(self):
pass
def do_save(self, fileName):
self.game.save(fileName)
def do_load(self, fileName):
self.game.load(fileName)
def do_quit(self):
return True
def validateCommands(self):
pass
def loadFile(filePath):
self.game.loadFile(filePath)
| from cmd import Cmd
class CMD(Cmd):
def __init__(self):
pass
def do_move(self, direction):
if direction == "up":
self.game.movePlayer(0, 1)
elif direction == "down":
self.game.movePlayer(0, -1)
elif direction == "left":
self.game.movePlayer(-1, 0)
elif direction == "right":
self.game.movePlayer(1, 0)
else:
print("No valid direction given.")
def do_rotateTile(self, rotation):
pass
def do_placeTile(self):
pass
def do_drawCard(self):
pass
def do_run(self):
pass
def do_fight(self):
pass
def do_save(self, fileName):
self.game.save(fileName)
def do_load(self, fileName):
self.game.load(fileName)
def do_quit(self):
return True
def validateCommands(self):
pass
# New
def loadFile(filePath):
self.game.loadFile(filePath)
# End New
| [
7,
11,
12,
13,
15
] |
1,867 | 8fe71e87512dfd2ccfcd21c9c175cb50274d9661 | <mask token>
def test_uri_manager_mock_write():
mock_file = mock.Mock()
opener = mock.Mock(spec=open, return_value=mock_file)
manager = URIManager(opener, 'filename')
f = manager.acquire()
f.write('contents')
manager.close()
opener.assert_called_once_with('filename', mode='r')
mock_file.write.assert_called_once_with('contents')
mock_file.close.assert_called_once_with()
def test_uri_manager_mock_write__threaded():
mock_file = mock.Mock()
opener = mock.Mock(spec=open, return_value=mock_file)
manager = URIManager(opener, 'filename')
def write(iter):
nonlocal manager
fh = manager.acquire()
fh.write('contents')
manager._local.thread_manager = None
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
for result in executor.map(write, range(5)):
pass
gc.collect()
opener.assert_has_calls([mock.call('filename', mode='r') for _ in range(5)]
)
mock_file.write.assert_has_calls([mock.call('contents') for _ in range(5)])
mock_file.close.assert_has_calls([mock.call() for _ in range(5)])
@pytest.mark.parametrize('expected_warning', [None, RuntimeWarning])
def test_uri_manager_autoclose(expected_warning):
mock_file = mock.Mock()
opener = mock.Mock(return_value=mock_file)
manager = URIManager(opener, 'filename')
manager.acquire()
del manager
gc.collect()
mock_file.close.assert_called_once_with()
def test_uri_manager_write_concurrent(tmpdir):
path = str(tmpdir.join('testing.txt'))
manager = URIManager(open, path, mode='w')
f1 = manager.acquire()
f2 = manager.acquire()
f3 = manager.acquire()
assert f1 is f2
assert f2 is f3
f1.write('foo')
f1.flush()
f2.write('bar')
f2.flush()
f3.write('baz')
f3.flush()
del manager
gc.collect()
with open(path) as f:
assert f.read() == 'foobarbaz'
def test_uri_manager_write_pickle(tmpdir):
path = str(tmpdir.join('testing.txt'))
manager = URIManager(open, path, mode='a')
f = manager.acquire()
f.write('foo')
f.flush()
manager2 = pickle.loads(pickle.dumps(manager))
f2 = manager2.acquire()
f2.write('bar')
del manager
del manager2
gc.collect()
with open(path) as f:
assert f.read() == 'foobar'
<mask token>
| <mask token>
def test_uri_manager_mock_write():
mock_file = mock.Mock()
opener = mock.Mock(spec=open, return_value=mock_file)
manager = URIManager(opener, 'filename')
f = manager.acquire()
f.write('contents')
manager.close()
opener.assert_called_once_with('filename', mode='r')
mock_file.write.assert_called_once_with('contents')
mock_file.close.assert_called_once_with()
def test_uri_manager_mock_write__threaded():
mock_file = mock.Mock()
opener = mock.Mock(spec=open, return_value=mock_file)
manager = URIManager(opener, 'filename')
def write(iter):
nonlocal manager
fh = manager.acquire()
fh.write('contents')
manager._local.thread_manager = None
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
for result in executor.map(write, range(5)):
pass
gc.collect()
opener.assert_has_calls([mock.call('filename', mode='r') for _ in range(5)]
)
mock_file.write.assert_has_calls([mock.call('contents') for _ in range(5)])
mock_file.close.assert_has_calls([mock.call() for _ in range(5)])
@pytest.mark.parametrize('expected_warning', [None, RuntimeWarning])
def test_uri_manager_autoclose(expected_warning):
mock_file = mock.Mock()
opener = mock.Mock(return_value=mock_file)
manager = URIManager(opener, 'filename')
manager.acquire()
del manager
gc.collect()
mock_file.close.assert_called_once_with()
def test_uri_manager_write_concurrent(tmpdir):
path = str(tmpdir.join('testing.txt'))
manager = URIManager(open, path, mode='w')
f1 = manager.acquire()
f2 = manager.acquire()
f3 = manager.acquire()
assert f1 is f2
assert f2 is f3
f1.write('foo')
f1.flush()
f2.write('bar')
f2.flush()
f3.write('baz')
f3.flush()
del manager
gc.collect()
with open(path) as f:
assert f.read() == 'foobarbaz'
def test_uri_manager_write_pickle(tmpdir):
path = str(tmpdir.join('testing.txt'))
manager = URIManager(open, path, mode='a')
f = manager.acquire()
f.write('foo')
f.flush()
manager2 = pickle.loads(pickle.dumps(manager))
f2 = manager2.acquire()
f2.write('bar')
del manager
del manager2
gc.collect()
with open(path) as f:
assert f.read() == 'foobar'
def test_uri_manager_read(tmpdir):
path = str(tmpdir.join('testing.txt'))
with open(path, 'w') as f:
f.write('foobar')
manager = URIManager(open, path)
f = manager.acquire()
assert f.read() == 'foobar'
manager.close()
<mask token>
| <mask token>
def test_uri_manager_mock_write():
mock_file = mock.Mock()
opener = mock.Mock(spec=open, return_value=mock_file)
manager = URIManager(opener, 'filename')
f = manager.acquire()
f.write('contents')
manager.close()
opener.assert_called_once_with('filename', mode='r')
mock_file.write.assert_called_once_with('contents')
mock_file.close.assert_called_once_with()
def test_uri_manager_mock_write__threaded():
mock_file = mock.Mock()
opener = mock.Mock(spec=open, return_value=mock_file)
manager = URIManager(opener, 'filename')
def write(iter):
nonlocal manager
fh = manager.acquire()
fh.write('contents')
manager._local.thread_manager = None
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
for result in executor.map(write, range(5)):
pass
gc.collect()
opener.assert_has_calls([mock.call('filename', mode='r') for _ in range(5)]
)
mock_file.write.assert_has_calls([mock.call('contents') for _ in range(5)])
mock_file.close.assert_has_calls([mock.call() for _ in range(5)])
@pytest.mark.parametrize('expected_warning', [None, RuntimeWarning])
def test_uri_manager_autoclose(expected_warning):
mock_file = mock.Mock()
opener = mock.Mock(return_value=mock_file)
manager = URIManager(opener, 'filename')
manager.acquire()
del manager
gc.collect()
mock_file.close.assert_called_once_with()
def test_uri_manager_write_concurrent(tmpdir):
path = str(tmpdir.join('testing.txt'))
manager = URIManager(open, path, mode='w')
f1 = manager.acquire()
f2 = manager.acquire()
f3 = manager.acquire()
assert f1 is f2
assert f2 is f3
f1.write('foo')
f1.flush()
f2.write('bar')
f2.flush()
f3.write('baz')
f3.flush()
del manager
gc.collect()
with open(path) as f:
assert f.read() == 'foobarbaz'
def test_uri_manager_write_pickle(tmpdir):
path = str(tmpdir.join('testing.txt'))
manager = URIManager(open, path, mode='a')
f = manager.acquire()
f.write('foo')
f.flush()
manager2 = pickle.loads(pickle.dumps(manager))
f2 = manager2.acquire()
f2.write('bar')
del manager
del manager2
gc.collect()
with open(path) as f:
assert f.read() == 'foobar'
def test_uri_manager_read(tmpdir):
path = str(tmpdir.join('testing.txt'))
with open(path, 'w') as f:
f.write('foobar')
manager = URIManager(open, path)
f = manager.acquire()
assert f.read() == 'foobar'
manager.close()
def test_uri_manager_acquire_context(tmpdir):
path = str(tmpdir.join('testing.txt'))
with open(path, 'w') as f:
f.write('foobar')
class AcquisitionError(Exception):
pass
manager = URIManager(open, path)
with pytest.raises(AcquisitionError):
with manager.acquire_context() as f:
assert f.read() == 'foobar'
raise AcquisitionError
with manager.acquire_context() as f:
assert f.read() == 'foobar'
with pytest.raises(AcquisitionError):
with manager.acquire_context() as f:
f.seek(0)
assert f.read() == 'foobar'
raise AcquisitionError
manager.close()
| <mask token>
import concurrent.futures
import gc
import pickle
from unittest import mock
import pytest
from rioxarray._io import URIManager
def test_uri_manager_mock_write():
mock_file = mock.Mock()
opener = mock.Mock(spec=open, return_value=mock_file)
manager = URIManager(opener, 'filename')
f = manager.acquire()
f.write('contents')
manager.close()
opener.assert_called_once_with('filename', mode='r')
mock_file.write.assert_called_once_with('contents')
mock_file.close.assert_called_once_with()
def test_uri_manager_mock_write__threaded():
mock_file = mock.Mock()
opener = mock.Mock(spec=open, return_value=mock_file)
manager = URIManager(opener, 'filename')
def write(iter):
nonlocal manager
fh = manager.acquire()
fh.write('contents')
manager._local.thread_manager = None
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
for result in executor.map(write, range(5)):
pass
gc.collect()
opener.assert_has_calls([mock.call('filename', mode='r') for _ in range(5)]
)
mock_file.write.assert_has_calls([mock.call('contents') for _ in range(5)])
mock_file.close.assert_has_calls([mock.call() for _ in range(5)])
@pytest.mark.parametrize('expected_warning', [None, RuntimeWarning])
def test_uri_manager_autoclose(expected_warning):
mock_file = mock.Mock()
opener = mock.Mock(return_value=mock_file)
manager = URIManager(opener, 'filename')
manager.acquire()
del manager
gc.collect()
mock_file.close.assert_called_once_with()
def test_uri_manager_write_concurrent(tmpdir):
path = str(tmpdir.join('testing.txt'))
manager = URIManager(open, path, mode='w')
f1 = manager.acquire()
f2 = manager.acquire()
f3 = manager.acquire()
assert f1 is f2
assert f2 is f3
f1.write('foo')
f1.flush()
f2.write('bar')
f2.flush()
f3.write('baz')
f3.flush()
del manager
gc.collect()
with open(path) as f:
assert f.read() == 'foobarbaz'
def test_uri_manager_write_pickle(tmpdir):
path = str(tmpdir.join('testing.txt'))
manager = URIManager(open, path, mode='a')
f = manager.acquire()
f.write('foo')
f.flush()
manager2 = pickle.loads(pickle.dumps(manager))
f2 = manager2.acquire()
f2.write('bar')
del manager
del manager2
gc.collect()
with open(path) as f:
assert f.read() == 'foobar'
def test_uri_manager_read(tmpdir):
path = str(tmpdir.join('testing.txt'))
with open(path, 'w') as f:
f.write('foobar')
manager = URIManager(open, path)
f = manager.acquire()
assert f.read() == 'foobar'
manager.close()
def test_uri_manager_acquire_context(tmpdir):
path = str(tmpdir.join('testing.txt'))
with open(path, 'w') as f:
f.write('foobar')
class AcquisitionError(Exception):
pass
manager = URIManager(open, path)
with pytest.raises(AcquisitionError):
with manager.acquire_context() as f:
assert f.read() == 'foobar'
raise AcquisitionError
with manager.acquire_context() as f:
assert f.read() == 'foobar'
with pytest.raises(AcquisitionError):
with manager.acquire_context() as f:
f.seek(0)
assert f.read() == 'foobar'
raise AcquisitionError
manager.close()
| """
Tests based on: https://github.com/pydata/xarray/blob/071da2a900702d65c47d265192bc7e424bb57932/xarray/tests/test_backends_file_manager.py
"""
import concurrent.futures
import gc
import pickle
from unittest import mock
import pytest
from rioxarray._io import URIManager
def test_uri_manager_mock_write():
mock_file = mock.Mock()
opener = mock.Mock(spec=open, return_value=mock_file)
manager = URIManager(opener, "filename")
f = manager.acquire()
f.write("contents")
manager.close()
opener.assert_called_once_with("filename", mode="r")
mock_file.write.assert_called_once_with("contents")
mock_file.close.assert_called_once_with()
def test_uri_manager_mock_write__threaded():
mock_file = mock.Mock()
opener = mock.Mock(spec=open, return_value=mock_file)
manager = URIManager(opener, "filename")
def write(iter):
nonlocal manager
fh = manager.acquire()
fh.write("contents")
manager._local.thread_manager = None
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
for result in executor.map(write, range(5)):
pass
gc.collect()
opener.assert_has_calls([mock.call("filename", mode="r") for _ in range(5)])
mock_file.write.assert_has_calls([mock.call("contents") for _ in range(5)])
mock_file.close.assert_has_calls([mock.call() for _ in range(5)])
@pytest.mark.parametrize("expected_warning", [None, RuntimeWarning])
def test_uri_manager_autoclose(expected_warning):
mock_file = mock.Mock()
opener = mock.Mock(return_value=mock_file)
manager = URIManager(opener, "filename")
manager.acquire()
del manager
gc.collect()
mock_file.close.assert_called_once_with()
def test_uri_manager_write_concurrent(tmpdir):
path = str(tmpdir.join("testing.txt"))
manager = URIManager(open, path, mode="w")
f1 = manager.acquire()
f2 = manager.acquire()
f3 = manager.acquire()
assert f1 is f2
assert f2 is f3
f1.write("foo")
f1.flush()
f2.write("bar")
f2.flush()
f3.write("baz")
f3.flush()
del manager
gc.collect()
with open(path) as f:
assert f.read() == "foobarbaz"
def test_uri_manager_write_pickle(tmpdir):
path = str(tmpdir.join("testing.txt"))
manager = URIManager(open, path, mode="a")
f = manager.acquire()
f.write("foo")
f.flush()
manager2 = pickle.loads(pickle.dumps(manager))
f2 = manager2.acquire()
f2.write("bar")
del manager
del manager2
gc.collect()
with open(path) as f:
assert f.read() == "foobar"
def test_uri_manager_read(tmpdir):
path = str(tmpdir.join("testing.txt"))
with open(path, "w") as f:
f.write("foobar")
manager = URIManager(open, path)
f = manager.acquire()
assert f.read() == "foobar"
manager.close()
def test_uri_manager_acquire_context(tmpdir):
path = str(tmpdir.join("testing.txt"))
with open(path, "w") as f:
f.write("foobar")
class AcquisitionError(Exception):
pass
manager = URIManager(open, path)
with pytest.raises(AcquisitionError):
with manager.acquire_context() as f:
assert f.read() == "foobar"
raise AcquisitionError
with manager.acquire_context() as f:
assert f.read() == "foobar"
with pytest.raises(AcquisitionError):
with manager.acquire_context() as f:
f.seek(0)
assert f.read() == "foobar"
raise AcquisitionError
manager.close()
| [
5,
6,
7,
8,
9
] |
1,868 | 9627e8a468d3a75787c5a9e01856913fc8beb3c4 | <mask token>
| <mask token>
arrayMapPath = 'C:\\Python27\\Lib\\site-packages\\ticketpitcher\\data'
tempPath = 'd:\\temp\\'
| # -*- coding: utf-8 -*-
"""
Created on Fri Nov 14 22:09:56 2014
@author: duhan
"""
#arrayMapPath = r'/usr/local/lib/python2.7/dist-packages/ticketpitcher/data/3'
arrayMapPath = r'C:\Python27\Lib\site-packages\ticketpitcher\data'
#tempPath = r'/tmp/'
tempPath = 'd:\\temp\\'
| null | null | [
0,
1,
2
] |
1,869 | 97857c1c5468a96187d44abc23ffaaf2a7ead1a6 | <mask token>
| <mask token>
def add():
print(a)
<mask token>
| <mask token>
def add():
print(a)
add()
| a = 'aa'
def add():
print(a)
add()
| # data={
# "name":"Alby",
# "age":23
# }
# print (data['age'])
# def foo():
# print("Hellow world")
# return 1
# print (foo())
a="aa"
def add():
print(a)
add() | [
0,
1,
2,
3,
4
] |
1,870 | ed4c97913a9dba5cf6be56050a8d2ce24dbd6033 | <mask token>
| <mask token>
for i in range(a, b + 1):
true_prime = True
for num in range(2, i):
if i % num == 0:
true_prime = False
if true_prime:
count += 1
print(count)
| <mask token>
a = int(input())
b = int(input())
count = 0
for i in range(a, b + 1):
true_prime = True
for num in range(2, i):
if i % num == 0:
true_prime = False
if true_prime:
count += 1
print(count)
| '''
A prime number is a natural number greater than 1 that has no positive divisors other than 1 and itself. Given two integers A and B,
print the number of primes between them, inclusively.
'''
a = int(input())
b = int(input())
count = 0
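# trial division: for each i in [a, b], count i when no integer in [2, i) divides it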
for i in range(a, b+1):
true_prime = True
for num in range(2, i):
if i % num == 0:
true_prime = False
if true_prime:
count += 1
print(count) | null | [
0,
1,
2,
3
] |
1,871 | c435b0f162512bb2bc0c35e1817f64c5ef9ff7bc | <mask token>
| <mask token>
BINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)
), 'bindings')
TMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux')
DEFAULT_SYSTEM_CONFIG_DIR = None
| from __future__ import absolute_import, unicode_literals, print_function
import os
BINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)
), 'bindings')
TMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux')
DEFAULT_SYSTEM_CONFIG_DIR = None
| # vim:fileencoding=utf-8:noet
from __future__ import absolute_import, unicode_literals, print_function
import os
BINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bindings')
TMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux')
DEFAULT_SYSTEM_CONFIG_DIR = None
| null | [
0,
1,
2,
3
] |
1,872 | cf5a9b8dad5a02610fa5ce2a849b6f9fc50a0aa8 | class Customer:
def __init__(self, name, phoneno, address, pin, accno, balance):
self._name = name
self._pno = phoneno
self._add = address
self._pin = pin
self._acc = accno
self._bal = balance
<mask token>
<mask token>
<mask token>
def transfer(self):
name = input('Enter Recipient name : ')
acc = input('Enter account number : ')
if len(acc) == 16:
amt = int(input('Enter amount to transfer : '))
if amt > self._d['CustomerBalance']:
print('Insufficient Balance')
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
self._d['CustomerBalance'] -= amt
print('Transfer amount successfully')
print('Your a/c is debited for INR ', amt)
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
print('Invalid Account Number\n')
def mini(self):
print('Name : ', self._d['CustomerName'])
print('Account Balance is ', self._d['CustomerBalance'])
print()
def __del__(self):
print('Thank You')
pass
| class Customer:
def __init__(self, name, phoneno, address, pin, accno, balance):
self._name = name
self._pno = phoneno
self._add = address
self._pin = pin
self._acc = accno
self._bal = balance
<mask token>
def deposit(self):
amt = int(input('Enter Deposit amount : '))
self._d['CustomerBalance'] += amt
print('Your a/c is credited for INR ', amt)
print('Account Balance is ', self._d['CustomerBalance'])
print()
<mask token>
def transfer(self):
name = input('Enter Recipient name : ')
acc = input('Enter account number : ')
if len(acc) == 16:
amt = int(input('Enter amount to transfer : '))
if amt > self._d['CustomerBalance']:
print('Insufficient Balance')
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
self._d['CustomerBalance'] -= amt
print('Transfer amount successfully')
print('Your a/c is debited for INR ', amt)
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
print('Invalid Account Number\n')
def mini(self):
print('Name : ', self._d['CustomerName'])
print('Account Balance is ', self._d['CustomerBalance'])
print()
def __del__(self):
print('Thank You')
pass
| class Customer:
def __init__(self, name, phoneno, address, pin, accno, balance):
self._name = name
self._pno = phoneno
self._add = address
self._pin = pin
self._acc = accno
self._bal = balance
def add(self):
self._d = {}
self._d['CustomerName'] = self._name
self._d['CustomerPhonenumber'] = self._pno
self._d['CustomerAddress'] = self._add
self._d['CustomerPin'] = self._pin
self._d['CustomerAccountNumber'] = self._acc
self._d['CustomerBalance'] = self._bal
print('Customer Details Add Successfully')
def deposit(self):
amt = int(input('Enter Deposit amount : '))
self._d['CustomerBalance'] += amt
print('Your a/c is credited for INR ', amt)
print('Account Balance is ', self._d['CustomerBalance'])
print()
<mask token>
def transfer(self):
name = input('Enter Recipient name : ')
acc = input('Enter account number : ')
if len(acc) == 16:
amt = int(input('Enter amount to transfer : '))
if amt > self._d['CustomerBalance']:
print('Insufficient Balance')
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
self._d['CustomerBalance'] -= amt
print('Transfer amount successfully')
print('Your a/c is debited for INR ', amt)
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
print('Invalid Account Number\n')
def mini(self):
print('Name : ', self._d['CustomerName'])
print('Account Balance is ', self._d['CustomerBalance'])
print()
def __del__(self):
print('Thank You')
pass
| class Customer:
def __init__(self, name, phoneno, address, pin, accno, balance):
self._name = name
self._pno = phoneno
self._add = address
self._pin = pin
self._acc = accno
self._bal = balance
def add(self):
self._d = {}
self._d['CustomerName'] = self._name
self._d['CustomerPhonenumber'] = self._pno
self._d['CustomerAddress'] = self._add
self._d['CustomerPin'] = self._pin
self._d['CustomerAccountNumber'] = self._acc
self._d['CustomerBalance'] = self._bal
print('Customer Details Add Successfully')
def deposit(self):
amt = int(input('Enter Deposit amount : '))
self._d['CustomerBalance'] += amt
print('Your a/c is credited for INR ', amt)
print('Account Balance is ', self._d['CustomerBalance'])
print()
def withdraw(self):
amt = int(input('Enter Withdraw amount : '))
if amt > self._d['CustomerBalance']:
print('Insufficient Balance')
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
self._d['CustomerBalance'] -= amt
print('Your a/c is debited for INR ', amt)
print('Account Balance is ', self._d['CustomerBalance'])
print()
def transfer(self):
name = input('Enter Recipient name : ')
acc = input('Enter account number : ')
if len(acc) == 16:
amt = int(input('Enter amount to transfer : '))
if amt > self._d['CustomerBalance']:
print('Insufficient Balance')
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
self._d['CustomerBalance'] -= amt
print('Transfer amount successfully')
print('Your a/c is debited for INR ', amt)
print('Account Balance is ', self._d['CustomerBalance'])
print()
else:
print('Invalid Account Number\n')
def mini(self):
print('Name : ', self._d['CustomerName'])
print('Account Balance is ', self._d['CustomerBalance'])
print()
def __del__(self):
print('Thank You')
pass
#2) write a program for a banking system: develop the business logic
#in one module and call the functionality from another .py file
class Customer: #user defined class
def __init__(self,name,phoneno,address,pin,accno,balance) : #constructor with multiple arguments
self._name=name
self._pno=phoneno
self._add=address
self._pin=pin
self._acc=accno
self._bal=balance#protected variable
def add(self) : #user defined method
self._d={} #create empty dictionary
self._d['CustomerName']=self._name #add values to the dictionary using key names
self._d['CustomerPhonenumber']=self._pno
self._d['CustomerAddress']=self._add
self._d['CustomerPin']=self._pin
self._d['CustomerAccountNumber']=self._acc
self._d['CustomerBalance']=self._bal
print('Customer Details Add Successfully')
def deposit(self):
amt=int(input('Enter Deposit amount : '))
self._d['CustomerBalance']+=amt
print('Your a/c is credited for INR ',amt)
print('Account Balance is ',self._d['CustomerBalance'])
print()
def withdraw(self):
amt=int(input('Enter Withdraw amount : '))
if amt>self._d['CustomerBalance'] :
print('Insufficient Balance')
print('Account Balance is ',self._d['CustomerBalance'])
print()
else:
self._d['CustomerBalance']-=amt
print('Your a/c is debited for INR ',amt)
print('Account Balance is ',self._d['CustomerBalance'])
print()
def transfer(self):
name=input('Enter Recipient name : ')
acc=input('Enter account number : ')
if len(acc)==16:
amt=int(input('Enter amount to transfer : '))
if amt>self._d['CustomerBalance'] :
print('Insufficient Balance')
print('Account Balance is ',self._d['CustomerBalance'])
print()
else:
self._d['CustomerBalance']-=amt
print('Transfer amount successfully')
print('Your a/c is debited for INR ',amt)
print('Account Balance is ',self._d['CustomerBalance'])
print()
else:
print('Invalid Account Number\n')
def mini(self):
print('Name : ',self._d['CustomerName'])
print('Account Balance is ',self._d['CustomerBalance'])
print()
def __del__(self): #destructor
print('Thank You')
pass
| [
5,
6,
7,
8,
9
] |
1,873 | 0f0b3eea9dc397d32e81749304041abaf6651e94 | <mask token>
@ddt.ddt
class TestAddress(unittest.TestCase):
<mask token>
<mask token>
<mask token>
<mask token>
def test_02_check_address(self):
url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/list'
data = {'session': self.session}
response = Address.check_address(url, data)
addr_list = Address.get_value(response, 'data')
sql = f'select * from ecs_user_address where user_id = {self.user_id}'
sql_addr = self.op_database.get_all(sql)
self.assertEqual(len(sql_addr), len(addr_list), msg='断言失败')
<mask token>
<mask token>
<mask token>
| <mask token>
@ddt.ddt
class TestAddress(unittest.TestCase):
def setUp(self) ->None:
login_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/user/signin'
login_data = {'name': 'tester', 'password': '123456'}
login = Login(url=login_url)
self.session = login.get_session(login_data)
self.user_id = int(GetKeyword.get_keyword(self.session, 'uid'))
self.op_database = OpDatabase()
@classmethod
def setUpClass(cls) ->None:
op_database = OpDatabase()
op_database.clear_mysql()
@classmethod
def tearDownClass(cls) ->None:
op_database = OpDatabase()
op_database.clear_mysql()
@ddt.data(*test_data1)
def test_01_add_address(self, data):
sql = f'select * from ecs_user_address where user_id = {self.user_id}'
before = self.op_database.get_all(sql)
add_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/add'
add_data = {'address': {'default_address': 0, 'consignee':
f"{data['consignee']}", 'tel': f"{data['tel']}", 'zipcode':
f"{data['postcode']}", 'country': '1', 'city': '271', 'id': 0,
'email': f"{data['email']}", 'address': f"{data['detail']}",
'province': '', 'district': '', 'mobile': ''}, 'session': self.
session}
Address.add_address(url=add_url, data=add_data)
after = self.op_database.get_all(sql)
result = len(after) - len(before)
self.assertEqual(data['expect'], result, msg='断言失败')
def test_02_check_address(self):
url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/list'
data = {'session': self.session}
response = Address.check_address(url, data)
addr_list = Address.get_value(response, 'data')
sql = f'select * from ecs_user_address where user_id = {self.user_id}'
sql_addr = self.op_database.get_all(sql)
self.assertEqual(len(sql_addr), len(addr_list), msg='断言失败')
@ddt.data(*test_data2)
def test_03_modify_address(self, data):
sql = (
f'select address_id from ecs_user_address where user_id = {self.user_id}'
)
id_list = self.op_database.get_all(sql)
url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/update'
modify_data = {'address': {'default_address': 0, 'consignee':
f"{data['consignee']}", 'tel': f"{data['tel']}", 'zipcode':
f"{data['postcode']}", 'country': '1', 'city': '271', 'id': 0,
'email': f"{data['email']}", 'address': f"{data['detail']}",
'province': '0', 'district': '0', 'mobile': f"{data['mobile']}"
}, 'address_id': id_list[0]['address_id'], 'session': self.session}
response = Address.modify_address(url, modify_data)
succeed = Address.get_value(response, 'succeed')
self.assertEqual(data['expect'], succeed, msg='断言失败')
def test_04_delete_address(self):
sql = (
f'select address_id from ecs_user_address where user_id = {self.user_id}'
)
id_list = self.op_database.get_all(sql)
url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/delete'
delete_data = {'address_id': id_list[0]['address_id'], 'session':
self.session}
response = Address.delete_address(url, delete_data)
succeed = Address.get_value(response, 'succeed')
sql = (
f"select * from ecs_user_address where address_id = {id_list[0]['address_id']}"
)
info = self.op_database.get_one(sql)
result = False if info != None else True
self.assertEqual(result, succeed, msg='断言失败')
<mask token>
| <mask token>
@ddt.ddt
class TestAddress(unittest.TestCase):
def setUp(self) ->None:
login_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/user/signin'
login_data = {'name': 'tester', 'password': '123456'}
login = Login(url=login_url)
self.session = login.get_session(login_data)
self.user_id = int(GetKeyword.get_keyword(self.session, 'uid'))
self.op_database = OpDatabase()
@classmethod
def setUpClass(cls) ->None:
op_database = OpDatabase()
op_database.clear_mysql()
@classmethod
def tearDownClass(cls) ->None:
op_database = OpDatabase()
op_database.clear_mysql()
@ddt.data(*test_data1)
def test_01_add_address(self, data):
sql = f'select * from ecs_user_address where user_id = {self.user_id}'
before = self.op_database.get_all(sql)
add_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/add'
add_data = {'address': {'default_address': 0, 'consignee':
f"{data['consignee']}", 'tel': f"{data['tel']}", 'zipcode':
f"{data['postcode']}", 'country': '1', 'city': '271', 'id': 0,
'email': f"{data['email']}", 'address': f"{data['detail']}",
'province': '', 'district': '', 'mobile': ''}, 'session': self.
session}
Address.add_address(url=add_url, data=add_data)
after = self.op_database.get_all(sql)
result = len(after) - len(before)
self.assertEqual(data['expect'], result, msg='断言失败')
def test_02_check_address(self):
url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/list'
data = {'session': self.session}
response = Address.check_address(url, data)
addr_list = Address.get_value(response, 'data')
sql = f'select * from ecs_user_address where user_id = {self.user_id}'
sql_addr = self.op_database.get_all(sql)
self.assertEqual(len(sql_addr), len(addr_list), msg='断言失败')
@ddt.data(*test_data2)
def test_03_modify_address(self, data):
sql = (
f'select address_id from ecs_user_address where user_id = {self.user_id}'
)
id_list = self.op_database.get_all(sql)
url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/update'
modify_data = {'address': {'default_address': 0, 'consignee':
f"{data['consignee']}", 'tel': f"{data['tel']}", 'zipcode':
f"{data['postcode']}", 'country': '1', 'city': '271', 'id': 0,
'email': f"{data['email']}", 'address': f"{data['detail']}",
'province': '0', 'district': '0', 'mobile': f"{data['mobile']}"
}, 'address_id': id_list[0]['address_id'], 'session': self.session}
response = Address.modify_address(url, modify_data)
succeed = Address.get_value(response, 'succeed')
self.assertEqual(data['expect'], succeed, msg='断言失败')
def test_04_delete_address(self):
sql = (
f'select address_id from ecs_user_address where user_id = {self.user_id}'
)
id_list = self.op_database.get_all(sql)
url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/delete'
delete_data = {'address_id': id_list[0]['address_id'], 'session':
self.session}
response = Address.delete_address(url, delete_data)
succeed = Address.get_value(response, 'succeed')
sql = (
f"select * from ecs_user_address where address_id = {id_list[0]['address_id']}"
)
info = self.op_database.get_one(sql)
result = False if info != None else True
self.assertEqual(result, succeed, msg='断言失败')
if __name__ == '__main__':
unittest.main()
| <mask token>
op_excel = OperationExcel()
add_file = (
'D:\\pyCharm\\Demo\\pycode\\Requests\\20191109\\课堂练习\\ECShop_interface\\data\\add_address.xlsx'
)
modify_file = (
'D:\\pyCharm\\Demo\\pycode\\Requests\\20191109\\课堂练习\\ECShop_interface\\data\\modify_address.xlsx'
)
test_data1 = op_excel.get_data(add_file)
test_data2 = op_excel.get_data(modify_file)
@ddt.ddt
class TestAddress(unittest.TestCase):
def setUp(self) ->None:
login_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/user/signin'
login_data = {'name': 'tester', 'password': '123456'}
login = Login(url=login_url)
self.session = login.get_session(login_data)
self.user_id = int(GetKeyword.get_keyword(self.session, 'uid'))
self.op_database = OpDatabase()
@classmethod
def setUpClass(cls) ->None:
op_database = OpDatabase()
op_database.clear_mysql()
@classmethod
def tearDownClass(cls) ->None:
op_database = OpDatabase()
op_database.clear_mysql()
@ddt.data(*test_data1)
def test_01_add_address(self, data):
sql = f'select * from ecs_user_address where user_id = {self.user_id}'
before = self.op_database.get_all(sql)
add_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/add'
add_data = {'address': {'default_address': 0, 'consignee':
f"{data['consignee']}", 'tel': f"{data['tel']}", 'zipcode':
f"{data['postcode']}", 'country': '1', 'city': '271', 'id': 0,
'email': f"{data['email']}", 'address': f"{data['detail']}",
'province': '', 'district': '', 'mobile': ''}, 'session': self.
session}
Address.add_address(url=add_url, data=add_data)
after = self.op_database.get_all(sql)
result = len(after) - len(before)
self.assertEqual(data['expect'], result, msg='断言失败')
def test_02_check_address(self):
url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/list'
data = {'session': self.session}
response = Address.check_address(url, data)
addr_list = Address.get_value(response, 'data')
sql = f'select * from ecs_user_address where user_id = {self.user_id}'
sql_addr = self.op_database.get_all(sql)
self.assertEqual(len(sql_addr), len(addr_list), msg='断言失败')
@ddt.data(*test_data2)
def test_03_modify_address(self, data):
sql = (
f'select address_id from ecs_user_address where user_id = {self.user_id}'
)
id_list = self.op_database.get_all(sql)
url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/update'
modify_data = {'address': {'default_address': 0, 'consignee':
f"{data['consignee']}", 'tel': f"{data['tel']}", 'zipcode':
f"{data['postcode']}", 'country': '1', 'city': '271', 'id': 0,
'email': f"{data['email']}", 'address': f"{data['detail']}",
'province': '0', 'district': '0', 'mobile': f"{data['mobile']}"
}, 'address_id': id_list[0]['address_id'], 'session': self.session}
response = Address.modify_address(url, modify_data)
succeed = Address.get_value(response, 'succeed')
self.assertEqual(data['expect'], succeed, msg='断言失败')
def test_04_delete_address(self):
sql = (
f'select address_id from ecs_user_address where user_id = {self.user_id}'
)
id_list = self.op_database.get_all(sql)
url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/delete'
delete_data = {'address_id': id_list[0]['address_id'], 'session':
self.session}
response = Address.delete_address(url, delete_data)
succeed = Address.get_value(response, 'succeed')
sql = (
f"select * from ecs_user_address where address_id = {id_list[0]['address_id']}"
)
info = self.op_database.get_one(sql)
result = False if info != None else True
self.assertEqual(result, succeed, msg='断言失败')
if __name__ == '__main__':
unittest.main()
| from common.get_keyword import GetKeyword
from common.operation_Excel import OperationExcel
from common.op_database import OpDatabase
from interface.login import Login
from interface.address import Address
import unittest
import ddt
# Test data
op_excel = OperationExcel()
add_file = r'D:\pyCharm\Demo\pycode\Requests\20191109\课堂练习\ECShop_interface\data\add_address.xlsx'
modify_file = r'D:\pyCharm\Demo\pycode\Requests\20191109\课堂练习\ECShop_interface\data\modify_address.xlsx'
test_data1 = op_excel.get_data(add_file)
test_data2 = op_excel.get_data(modify_file)
@ddt.ddt
class TestAddress(unittest.TestCase):
    # Test fixtures
def setUp(self) -> None:
        # Login data
login_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/user/signin'
login_data = {"name": "tester", "password": "123456"}
        # Instantiate the login object
login = Login(url=login_url)
self.session = login.get_session(login_data)
self.user_id = int(GetKeyword.get_keyword(self.session, 'uid'))
        # Instantiate the database helper object
self.op_database = OpDatabase()
@classmethod
def setUpClass(cls) -> None:
        # Clear the test data
op_database = OpDatabase()
op_database.clear_mysql()
@classmethod
def tearDownClass(cls) -> None:
        # Clear the test data
op_database = OpDatabase()
op_database.clear_mysql()
    # Test cases
    # Add a shipping address
@ddt.data(*test_data1)
def test_01_add_address(self, data):
        # SQL statement
sql = f'select * from ecs_user_address where user_id = {self.user_id}'
        # Get the number of this user's addresses in the address table
before = self.op_database.get_all(sql)
        # Request data for adding a shipping address
add_url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/add'
add_data = {
"address": {"default_address": 0, "consignee": f"{data['consignee']}", "tel": f"{data['tel']}",
"zipcode": f"{data['postcode']}", "country": "1", "city": "271", "id": 0,
"email": f"{data['email']}", "address": f"{data['detail']}",
"province": "", "district": "", "mobile": ""}, "session": self.session
}
        # Add the shipping address
Address.add_address(url=add_url, data=add_data)
        # Get the number of this user's addresses in the address table
after = self.op_database.get_all(sql)
        result = len(after) - len(before)  # actual result
        # Assert
self.assertEqual(data['expect'], result, msg='断言失败')
    # View shipping addresses
def test_02_check_address(self):
        # Request data for viewing shipping addresses
url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/list'
data = {"session": self.session}
        # View the shipping addresses
response = Address.check_address(url, data)
        # Get the value of 'data' from the response
        addr_list = Address.get_value(response, 'data')  # actual result
        # SQL statement
sql = f'select * from ecs_user_address where user_id = {self.user_id}'
        # Get the number of this user's addresses in the address table
        sql_addr = self.op_database.get_all(sql)  # expected result
        # Assert
self.assertEqual(len(sql_addr), len(addr_list), msg='断言失败')
    # Modify a shipping address
@ddt.data(*test_data2)
def test_03_modify_address(self, data):
        # Read the address_id values from the address table
sql = f'select address_id from ecs_user_address where user_id = {self.user_id}'
id_list = self.op_database.get_all(sql)
        # Request data for modifying the shipping address
url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/update'
modify_data = {
"address": {"default_address": 0, "consignee": f"{data['consignee']}", "tel": f"{data['tel']}",
"zipcode": f"{data['postcode']}", "country": "1", "city": "271", "id": 0, "email": f"{data['email']}",
"address": f"{data['detail']}", "province": "0", "district": "0", "mobile": f"{data['mobile']}"},
"address_id": id_list[0]['address_id'], "session": self.session
}
        # Modify the shipping address
response = Address.modify_address(url, modify_data)
        # Get 'succeed' from the response data
succeed = Address.get_value(response, 'succeed')
        # Assert ---- database verification code is still missing
self.assertEqual(data['expect'], succeed, msg='断言失败')
    # Delete a shipping address
def test_04_delete_address(self):
        # Read the address_id values from the address table
sql = f'select address_id from ecs_user_address where user_id = {self.user_id}'
id_list = self.op_database.get_all(sql)
        # Request data for deleting the shipping address
url = 'http://ecshop.itsoso.cn/ECMobile/?url=/address/delete'
delete_data = {"address_id": id_list[0]['address_id'], "session": self.session}
        # Delete the shipping address
response = Address.delete_address(url, delete_data)
        # Get 'succeed' from the response data
        succeed = Address.get_value(response, 'succeed')  # actual result
        # Query the address table for this address's record
sql = f"select * from ecs_user_address where address_id = {id_list[0]['address_id']}"
info = self.op_database.get_one(sql)
        result = False if info != None else True  # expected result
        # Assert
self.assertEqual(result, succeed, msg='断言失败')
if __name__ == '__main__':
unittest.main()
| [
2,
8,
9,
10,
12
] |
1,874 | f3bfa30f51c4a91844457c72fbf2b2b8368d8476 | <mask token>
def get_timestamp_from_interval(interval_number):
return interval_number * interval_length_minutes * 60
def get_datetime_from_utc_timestamp(utc_timestamp):
return datetime.datetime.utcfromtimestamp(utc_timestamp).replace(tzinfo
=datetime.timezone.utc)
<mask token>
| <mask token>
def get_timestamp_from_interval(interval_number):
return interval_number * interval_length_minutes * 60
def get_datetime_from_utc_timestamp(utc_timestamp):
return datetime.datetime.utcfromtimestamp(utc_timestamp).replace(tzinfo
=datetime.timezone.utc)
<mask token>
def get_string_from_datetime(date_time):
return date_time.strftime('%Y-%m-%d %H:%M:%S %Z')
| <mask token>
def get_timestamp_from_interval(interval_number):
return interval_number * interval_length_minutes * 60
def get_datetime_from_utc_timestamp(utc_timestamp):
return datetime.datetime.utcfromtimestamp(utc_timestamp).replace(tzinfo
=datetime.timezone.utc)
def get_local_datetime(date_time):
return date_time.astimezone(datetime.datetime.utcnow().astimezone().tzinfo)
def get_string_from_datetime(date_time):
return date_time.strftime('%Y-%m-%d %H:%M:%S %Z')
| <mask token>
interval_length_minutes = 10
tek_rolling_period = 144
def get_timestamp_from_interval(interval_number):
return interval_number * interval_length_minutes * 60
def get_datetime_from_utc_timestamp(utc_timestamp):
return datetime.datetime.utcfromtimestamp(utc_timestamp).replace(tzinfo
=datetime.timezone.utc)
def get_local_datetime(date_time):
return date_time.astimezone(datetime.datetime.utcnow().astimezone().tzinfo)
def get_string_from_datetime(date_time):
return date_time.strftime('%Y-%m-%d %H:%M:%S %Z')
| import datetime
interval_length_minutes = 10 # 10 minutes per interval
tek_rolling_period = 144 # 24*60//10 - 24 hours per day, 60 minutes per hour, 10 minutes per interval
def get_timestamp_from_interval(interval_number):
return interval_number * interval_length_minutes * 60 # 60 seconds per minute
def get_datetime_from_utc_timestamp(utc_timestamp):
return datetime.datetime.utcfromtimestamp(utc_timestamp).replace(tzinfo=datetime.timezone.utc)
def get_local_datetime(date_time):
return date_time.astimezone(datetime.datetime.utcnow().astimezone().tzinfo)
def get_string_from_datetime(date_time):
return date_time.strftime('%Y-%m-%d %H:%M:%S %Z')
| [
2,
3,
4,
5,
7
] |
1,875 | 6eecf0ff1ad762089db6e9498e906e68b507370c | <mask token>
| <mask token>
with ____.____(____):
doc = ____
print(____)
| <mask token>
nlp = spacy.load('en_core_web_sm')
text = (
'Chick-fil-A is an American fast food restaurant chain headquartered in the city of College Park, Georgia, specializing in chicken sandwiches.'
)
with ____.____(____):
doc = ____
print(____)
| import spacy
nlp = spacy.load('en_core_web_sm')
text = (
'Chick-fil-A is an American fast food restaurant chain headquartered in the city of College Park, Georgia, specializing in chicken sandwiches.'
)
with ____.____(____):
doc = ____
print(____)
| import spacy
nlp = spacy.load("en_core_web_sm")
text = (
"Chick-fil-A is an American fast food restaurant chain headquartered in "
"the city of College Park, Georgia, specializing in chicken sandwiches."
)
# Disable the tagger and parser
with ____.____(____):
# Process the text
doc = ____
# Print the entities in the doc
print(____)
| [
0,
1,
2,
3,
4
] |
1,876 | b3f62c331ff4ae9f909fc90cc7303997b32daceb | <mask token>
| <mask token>
class Solution:
<mask token>
<mask token>
| <mask token>
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
seenum = dict()
for idx, val in enumerate(nums):
if target - val in seenum:
return [seenum[target - val], idx]
seenum[val] = idx
return [-1, -1]
<mask token>
| <mask token>
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
seenum = dict()
for idx, val in enumerate(nums):
if target - val in seenum:
return [seenum[target - val], idx]
seenum[val] = idx
return [-1, -1]
if __name__ == '__main__':
nums = [2, 7, 11, 15]
target = 9
sol = Solution()
print(sol.twoSum(nums, target))
| '''
O(n) time complexity
O(n) space complexity
'''
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
seenum = dict()
for idx, val in enumerate(nums):
if target - val in seenum:
return [seenum[target-val], idx]
seenum[val] = idx
return [-1, -1]
if __name__ == "__main__":
nums = [2,7,11,15]
target = 9
sol = Solution()
print(sol.twoSum(nums, target)) | [
0,
1,
2,
3,
4
] |
1,877 | 471cab65aac29f5b47de0ffef8f032dbbadf8dd0 | <mask token>
def add_logs_to_response(response):
response['logs'] = ClientLogger.get_logs()
ClientLogger.clear_logs()
return response
@app.route('/generate/melody', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_melody():
ClientLogger.log('Generating new melody...')
content = request.get_json()
melody_generator = Generator(content)
result = melody_generator.generate_melody()
response = {'generationResult': result}
return json.dumps(add_logs_to_response(response))
@app.route('/generate/chords', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_chords():
content = request.get_json()
chord_generator = Generator(content)
result_chords, result_chord_names = chord_generator.generate_chords()
DawState['chord_names'] = result_chord_names
response = {'generationResult': result_chords}
return json.dumps(add_logs_to_response(response))
<mask token>
@app.route('/constants', methods=['GET'])
@crossdomain(origin='*')
def get_constants():
return json.dumps(constants.constants, default=set_default)
@app.route('/midi', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def create_midi_file():
content = request.get_json()
filename, fp = midi_tools.create_midi_file(content)
return send_file(filename, mimetype='audio/midi audio/x-midi',
as_attachment=True, attachment_filename=filename)
@app.errorhandler(404)
def page_not_found(error):
return render_template('page_not_found.html'), 404
| <mask token>
def set_default(obj):
if isinstance(obj, set):
return list(obj)
raise TypeError
def add_logs_to_response(response):
response['logs'] = ClientLogger.get_logs()
ClientLogger.clear_logs()
return response
@app.route('/generate/melody', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_melody():
ClientLogger.log('Generating new melody...')
content = request.get_json()
melody_generator = Generator(content)
result = melody_generator.generate_melody()
response = {'generationResult': result}
return json.dumps(add_logs_to_response(response))
@app.route('/generate/chords', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_chords():
content = request.get_json()
chord_generator = Generator(content)
result_chords, result_chord_names = chord_generator.generate_chords()
DawState['chord_names'] = result_chord_names
response = {'generationResult': result_chords}
return json.dumps(add_logs_to_response(response))
@app.route('/daw-state', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def update_daw_state():
content = request.get_json()
key = content['key'].replace('#', 's')
scale = content['scale']
tempo = content['tempo']
tracks = content['tracks']
DawState['scale'] = scale
DawState['key'] = key
DawState['tempo'] = tempo
DawState['tracks'] = tracks
chord_names, chord_degrees = name_chords_in_tracks(tracks, key, scale)
DawState['chord_names'] = chord_names
DawState['chord_degrees'] = chord_degrees
response = DawState
return json.dumps(add_logs_to_response(response))
@app.route('/constants', methods=['GET'])
@crossdomain(origin='*')
def get_constants():
return json.dumps(constants.constants, default=set_default)
@app.route('/midi', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def create_midi_file():
content = request.get_json()
filename, fp = midi_tools.create_midi_file(content)
return send_file(filename, mimetype='audio/midi audio/x-midi',
as_attachment=True, attachment_filename=filename)
@app.errorhandler(404)
def page_not_found(error):
return render_template('page_not_found.html'), 404
| <mask token>
CORS(app)
<mask token>
def set_default(obj):
if isinstance(obj, set):
return list(obj)
raise TypeError
def add_logs_to_response(response):
response['logs'] = ClientLogger.get_logs()
ClientLogger.clear_logs()
return response
@app.route('/generate/melody', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_melody():
ClientLogger.log('Generating new melody...')
content = request.get_json()
melody_generator = Generator(content)
result = melody_generator.generate_melody()
response = {'generationResult': result}
return json.dumps(add_logs_to_response(response))
@app.route('/generate/chords', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_chords():
content = request.get_json()
chord_generator = Generator(content)
result_chords, result_chord_names = chord_generator.generate_chords()
DawState['chord_names'] = result_chord_names
response = {'generationResult': result_chords}
return json.dumps(add_logs_to_response(response))
@app.route('/daw-state', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def update_daw_state():
content = request.get_json()
key = content['key'].replace('#', 's')
scale = content['scale']
tempo = content['tempo']
tracks = content['tracks']
DawState['scale'] = scale
DawState['key'] = key
DawState['tempo'] = tempo
DawState['tracks'] = tracks
chord_names, chord_degrees = name_chords_in_tracks(tracks, key, scale)
DawState['chord_names'] = chord_names
DawState['chord_degrees'] = chord_degrees
response = DawState
return json.dumps(add_logs_to_response(response))
@app.route('/constants', methods=['GET'])
@crossdomain(origin='*')
def get_constants():
return json.dumps(constants.constants, default=set_default)
@app.route('/midi', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def create_midi_file():
content = request.get_json()
filename, fp = midi_tools.create_midi_file(content)
return send_file(filename, mimetype='audio/midi audio/x-midi',
as_attachment=True, attachment_filename=filename)
@app.errorhandler(404)
def page_not_found(error):
return render_template('page_not_found.html'), 404
| from flask import Flask, render_template, send_from_directory
from flask import request, send_file
from flask_cors import CORS
import os
import json
from crossdomain import crossdomain
import constants
import generation_tools
from music_theory import name_chords_in_tracks
import midi_tools
from client_logging import ClientLogger
from generation_tools import Generator
app = Flask(__name__)
CORS(app)
BASE_URL = os.path.abspath(os.path.dirname(__file__))
CLIENT_APP_FOLDER = os.path.join(BASE_URL, 'ClientApp')
DawState = {}
ClientLogger = ClientLogger()
def set_default(obj):
if isinstance(obj, set):
return list(obj)
raise TypeError
def add_logs_to_response(response):
response['logs'] = ClientLogger.get_logs()
ClientLogger.clear_logs()
return response
@app.route('/generate/melody', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_melody():
ClientLogger.log('Generating new melody...')
content = request.get_json()
melody_generator = Generator(content)
result = melody_generator.generate_melody()
response = {'generationResult': result}
return json.dumps(add_logs_to_response(response))
@app.route('/generate/chords', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_chords():
content = request.get_json()
chord_generator = Generator(content)
result_chords, result_chord_names = chord_generator.generate_chords()
DawState['chord_names'] = result_chord_names
response = {'generationResult': result_chords}
return json.dumps(add_logs_to_response(response))
@app.route('/daw-state', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def update_daw_state():
content = request.get_json()
key = content['key'].replace('#', 's')
scale = content['scale']
tempo = content['tempo']
tracks = content['tracks']
DawState['scale'] = scale
DawState['key'] = key
DawState['tempo'] = tempo
DawState['tracks'] = tracks
chord_names, chord_degrees = name_chords_in_tracks(tracks, key, scale)
DawState['chord_names'] = chord_names
DawState['chord_degrees'] = chord_degrees
response = DawState
return json.dumps(add_logs_to_response(response))
@app.route('/constants', methods=['GET'])
@crossdomain(origin='*')
def get_constants():
return json.dumps(constants.constants, default=set_default)
@app.route('/midi', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def create_midi_file():
content = request.get_json()
filename, fp = midi_tools.create_midi_file(content)
return send_file(filename, mimetype='audio/midi audio/x-midi',
as_attachment=True, attachment_filename=filename)
@app.errorhandler(404)
def page_not_found(error):
return render_template('page_not_found.html'), 404
| from flask import Flask, render_template, send_from_directory
from flask import request, send_file
from flask_cors import CORS
import os
import json
from crossdomain import crossdomain
import constants
import generation_tools
from music_theory import name_chords_in_tracks
import midi_tools
from client_logging import ClientLogger
from generation_tools import Generator
app = Flask(__name__)
CORS(app)
BASE_URL = os.path.abspath(os.path.dirname(__file__))
CLIENT_APP_FOLDER = os.path.join(BASE_URL, "ClientApp")
DawState = {}
ClientLogger = ClientLogger()
def set_default(obj):
if isinstance(obj, set):
return list(obj)
raise TypeError
def add_logs_to_response(response):
response['logs'] = ClientLogger.get_logs()
ClientLogger.clear_logs()
return response
@app.route('/generate/melody', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_melody():
ClientLogger.log('Generating new melody...')
content = request.get_json()
melody_generator = Generator(content)
result = melody_generator.generate_melody()
response = {'generationResult' : result}
return json.dumps(add_logs_to_response(response))
@app.route('/generate/chords', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_chords():
content = request.get_json()
chord_generator = Generator(content)
result_chords, result_chord_names = chord_generator.generate_chords()
DawState['chord_names'] = result_chord_names
response = {'generationResult' : result_chords}
return json.dumps(add_logs_to_response(response))
@app.route('/daw-state', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def update_daw_state():
content = request.get_json()
key = content['key'].replace('#', 's')
scale = content['scale']
tempo = content['tempo']
tracks = content['tracks']
DawState['scale'] = scale
DawState['key'] = key
DawState['tempo'] = tempo
DawState['tracks'] = tracks
chord_names, chord_degrees = name_chords_in_tracks(tracks, key, scale)
DawState['chord_names'] = chord_names
DawState['chord_degrees'] = chord_degrees
response = DawState
return json.dumps(add_logs_to_response(response))
@app.route('/constants', methods=['GET'])
@crossdomain(origin='*')
def get_constants():
return json.dumps(constants.constants, default=set_default)
@app.route('/midi', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def create_midi_file():
content = request.get_json()
filename, fp = midi_tools.create_midi_file(content)
return send_file(filename,
mimetype='audio/midi audio/x-midi',
as_attachment=True,
attachment_filename=filename)
@app.errorhandler(404)
def page_not_found(error):
return render_template('page_not_found.html'), 404 | [
6, 8, 9, 11, 12 ] |
1,878 | da69fd937153fe2112b9f64411882527274247ef | <mask token>
def clusterVacio():
arreAux = []
busca = 1
bandera = True
for i in range(len(clusters)):
clu = clusters[i]
arreAux.append(int(clu[0]))
print(arreAux)
while bandera:
if busca in arreAux:
busca = busca + 1
else:
bandera = False
return busca
def tablaArchivos():
global archivos
global tams
global clusters
archivos = []
tams = []
clusters = []
file = open('fiunamfs.img', 'r+')
file.seek(2048)
for i in range(64):
archivos.append(file.read(15))
tams.append(file.read(8))
clusters.append(file.read(5))
file.seek(file.tell() + 36)
file.close()
def info():
print('Nombre del Sistema: ' + nombre)
print('Version: ' + version)
print('Etiqueta del Volumen: ' + etiqueta)
print('Tamano del cluster en bytes: ' + cluster)
print('Numero de clusters que mide el directorio: ' + numero)
print('Numero de cluster que mide la unidad completa: ' + numeroCompleto)
def listar():
file = open('fiunamfs.img', 'r')
file.seek(2048)
for i in range(64):
name = file.read(15)
if name != 'Xx.xXx.xXx.xXx.':
print(name)
file.seek(file.tell() + 49)
file.close()
def borrar(archivo):
borrado = False
file = open('fiunamfs.img', 'r+')
file.seek(2048)
for i in range(64):
name = file.read(15)
aux = name.strip()
if aux == archivo:
file.seek(file.tell() - 15)
file.write('Xx.xXx.xXx.xXx.')
borrado = True
file.seek(file.tell() + 49)
file.close()
return borrado
def tamaArchivo(path):
si = stat(path).st_size
return si
<mask token>
def deSistemaAPc(archivo, nombre):
tam = 0
clu = 0
file = open('fiunamfs.img', 'r')
file.seek(2048)
new = open(archivo, 'r+')
for i in range(64):
name = file.read(15)
aux = name.strip()
if aux == nombre:
tam = file.read(8)
clu = file.read(5)
file.close()
aux2 = 2048 * clu
file = open('fiunamfs.img', 'r')
file.seek(aux2)
new.write(file.read(tam))
def nombreArchivo(path):
tam = len(path)
slash = 0
name = ''
name2 = ''
for i in range(tam):
if path[i] == '/':
slash = i
for i in range(slash + 1, tam):
name = name + path[i]
espaces = 15 - len(name)
for i in range(espaces):
name2 = name2 + ' '
return name2 + name
<mask token>
| <mask token>
def clusterVacio():
arreAux = []
busca = 1
bandera = True
for i in range(len(clusters)):
clu = clusters[i]
arreAux.append(int(clu[0]))
print(arreAux)
while bandera:
if busca in arreAux:
busca = busca + 1
else:
bandera = False
return busca
def tablaArchivos():
global archivos
global tams
global clusters
archivos = []
tams = []
clusters = []
file = open('fiunamfs.img', 'r+')
file.seek(2048)
for i in range(64):
archivos.append(file.read(15))
tams.append(file.read(8))
clusters.append(file.read(5))
file.seek(file.tell() + 36)
file.close()
def info():
print('Nombre del Sistema: ' + nombre)
print('Version: ' + version)
print('Etiqueta del Volumen: ' + etiqueta)
print('Tamano del cluster en bytes: ' + cluster)
print('Numero de clusters que mide el directorio: ' + numero)
print('Numero de cluster que mide la unidad completa: ' + numeroCompleto)
def listar():
file = open('fiunamfs.img', 'r')
file.seek(2048)
for i in range(64):
name = file.read(15)
if name != 'Xx.xXx.xXx.xXx.':
print(name)
file.seek(file.tell() + 49)
file.close()
def borrar(archivo):
borrado = False
file = open('fiunamfs.img', 'r+')
file.seek(2048)
for i in range(64):
name = file.read(15)
aux = name.strip()
if aux == archivo:
file.seek(file.tell() - 15)
file.write('Xx.xXx.xXx.xXx.')
borrado = True
file.seek(file.tell() + 49)
file.close()
return borrado
def tamaArchivo(path):
si = stat(path).st_size
return si
def dePcASistema(path, nombre):
posicion = 0
actual = 0
try:
new = open(path, 'r+')
file = open('fiunamfs.img', 'r+')
file.seek(2048)
bandera = False
tam = stat(path).st_size
while bandera == False:
name = file.read(15)
if name == 'Xx.xXx.xXx.xXx.':
file.seek(file.tell() - 15)
file.write(nombre)
actual = file.tell()
print('El archivo fue copiado')
bandera = True
file.seek(file.tell() + 49)
file.close()
file = open('fiunamfs.img', 'r+')
pa = clusterVacio()
inde = 2048 * pa
tamano = tamaArchivo(path)
file.seek(inde)
file.write(new.read(tamano))
file.close()
file = open('fiunamfs.img', 'r+')
file.seek(actual)
file.write(str(pa))
file.close()
except:
print('Este archivo no existe')
def deSistemaAPc(archivo, nombre):
tam = 0
clu = 0
file = open('fiunamfs.img', 'r')
file.seek(2048)
new = open(archivo, 'r+')
for i in range(64):
name = file.read(15)
aux = name.strip()
if aux == nombre:
tam = file.read(8)
clu = file.read(5)
file.close()
aux2 = 2048 * clu
file = open('fiunamfs.img', 'r')
file.seek(aux2)
new.write(file.read(tam))
def nombreArchivo(path):
tam = len(path)
slash = 0
name = ''
name2 = ''
for i in range(tam):
if path[i] == '/':
slash = i
for i in range(slash + 1, tam):
name = name + path[i]
espaces = 15 - len(name)
for i in range(espaces):
name2 = name2 + ' '
return name2 + name
<mask token>
| <mask token>
file.seek(10)
<mask token>
file.seek(20)
<mask token>
file.seek(40)
<mask token>
file.seek(47)
<mask token>
file.seek(52)
<mask token>
file.close()
<mask token>
def clusterVacio():
arreAux = []
busca = 1
bandera = True
for i in range(len(clusters)):
clu = clusters[i]
arreAux.append(int(clu[0]))
print(arreAux)
while bandera:
if busca in arreAux:
busca = busca + 1
else:
bandera = False
return busca
def tablaArchivos():
global archivos
global tams
global clusters
archivos = []
tams = []
clusters = []
file = open('fiunamfs.img', 'r+')
file.seek(2048)
for i in range(64):
archivos.append(file.read(15))
tams.append(file.read(8))
clusters.append(file.read(5))
file.seek(file.tell() + 36)
file.close()
def info():
print('Nombre del Sistema: ' + nombre)
print('Version: ' + version)
print('Etiqueta del Volumen: ' + etiqueta)
print('Tamano del cluster en bytes: ' + cluster)
print('Numero de clusters que mide el directorio: ' + numero)
print('Numero de cluster que mide la unidad completa: ' + numeroCompleto)
def listar():
file = open('fiunamfs.img', 'r')
file.seek(2048)
for i in range(64):
name = file.read(15)
if name != 'Xx.xXx.xXx.xXx.':
print(name)
file.seek(file.tell() + 49)
file.close()
def borrar(archivo):
borrado = False
file = open('fiunamfs.img', 'r+')
file.seek(2048)
for i in range(64):
name = file.read(15)
aux = name.strip()
if aux == archivo:
file.seek(file.tell() - 15)
file.write('Xx.xXx.xXx.xXx.')
borrado = True
file.seek(file.tell() + 49)
file.close()
return borrado
def tamaArchivo(path):
si = stat(path).st_size
return si
def dePcASistema(path, nombre):
posicion = 0
actual = 0
try:
new = open(path, 'r+')
file = open('fiunamfs.img', 'r+')
file.seek(2048)
bandera = False
tam = stat(path).st_size
while bandera == False:
name = file.read(15)
if name == 'Xx.xXx.xXx.xXx.':
file.seek(file.tell() - 15)
file.write(nombre)
actual = file.tell()
print('El archivo fue copiado')
bandera = True
file.seek(file.tell() + 49)
file.close()
file = open('fiunamfs.img', 'r+')
pa = clusterVacio()
inde = 2048 * pa
tamano = tamaArchivo(path)
file.seek(inde)
file.write(new.read(tamano))
file.close()
file = open('fiunamfs.img', 'r+')
file.seek(actual)
file.write(str(pa))
file.close()
except:
print('Este archivo no existe')
def deSistemaAPc(archivo, nombre):
tam = 0
clu = 0
file = open('fiunamfs.img', 'r')
file.seek(2048)
new = open(archivo, 'r+')
for i in range(64):
name = file.read(15)
aux = name.strip()
if aux == nombre:
tam = file.read(8)
clu = file.read(5)
file.close()
aux2 = 2048 * clu
file = open('fiunamfs.img', 'r')
file.seek(aux2)
new.write(file.read(tam))
def nombreArchivo(path):
tam = len(path)
slash = 0
name = ''
name2 = ''
for i in range(tam):
if path[i] == '/':
slash = i
for i in range(slash + 1, tam):
name = name + path[i]
espaces = 15 - len(name)
for i in range(espaces):
name2 = name2 + ' '
return name2 + name
if nombre == 'FiUnamFS' and version == '0.7':
correcto = True
while correcto:
tablaArchivos()
print('Sistema de Archivos FI Unam FS')
print('1: Listar')
print('2: Copiar archivo')
print('3: Copiar archivo a la computadora')
print('4: Eliminar archivo')
print('5: Desgramentar')
print('6: Mostar informacion del sistema de archivos')
print('7: Salir')
opcion = input('Opcion: ')
if opcion == 6:
info()
elif opcion == 1:
listar()
elif opcion == 4:
archivo = raw_input('Nombre del archivo a borrar: ')
if borrar(archivo):
print('El archivo fue borrado')
else:
print('No se encontro el archivo')
elif opcion == 3:
archivo = raw_input('Nombre del archivo a copiar: ')
nombre = nombreArchivo(archivo)
deSistemaAPc(archivo, nombre)
elif opcion == 2:
archivo = raw_input('Nombre del archivo a copiar: ')
nombre = nombreArchivo(archivo)
dePcASistema(archivo, nombre)
elif opcion == 9:
print(archivos)
print(clusters)
print(tams)
elif opcion == 8:
va = clusterVacio()
print(va)
elif opcion == 7:
print('Sistema desmontado')
correcto = False
elif opcion == 5:
print('No se implemento')
else:
print(
'No se puede abrir el sistema de archivos debido a que no es el archivo correcto o la version correcta. Revise nuevamente que tenga la imagen correcta.'
)
exit()
| from sys import exit
from os import stat
file = open('fiunamfs.img', 'r')
nombre = file.read(8)
file.seek(10)
version = file.read(3)
file.seek(20)
etiqueta = file.read(15)
file.seek(40)
cluster = file.read(5)
file.seek(47)
numero = file.read(2)
file.seek(52)
numeroCompleto = file.read(8)
file.close()
archivos = []
tams = []
clusters = []
def clusterVacio():
arreAux = []
busca = 1
bandera = True
for i in range(len(clusters)):
clu = clusters[i]
arreAux.append(int(clu[0]))
print(arreAux)
while bandera:
if busca in arreAux:
busca = busca + 1
else:
bandera = False
return busca
def tablaArchivos():
global archivos
global tams
global clusters
archivos = []
tams = []
clusters = []
file = open('fiunamfs.img', 'r+')
file.seek(2048)
for i in range(64):
archivos.append(file.read(15))
tams.append(file.read(8))
clusters.append(file.read(5))
file.seek(file.tell() + 36)
file.close()
def info():
print('Nombre del Sistema: ' + nombre)
print('Version: ' + version)
print('Etiqueta del Volumen: ' + etiqueta)
print('Tamano del cluster en bytes: ' + cluster)
print('Numero de clusters que mide el directorio: ' + numero)
print('Numero de cluster que mide la unidad completa: ' + numeroCompleto)
def listar():
file = open('fiunamfs.img', 'r')
file.seek(2048)
for i in range(64):
name = file.read(15)
if name != 'Xx.xXx.xXx.xXx.':
print(name)
file.seek(file.tell() + 49)
file.close()
def borrar(archivo):
borrado = False
file = open('fiunamfs.img', 'r+')
file.seek(2048)
for i in range(64):
name = file.read(15)
aux = name.strip()
if aux == archivo:
file.seek(file.tell() - 15)
file.write('Xx.xXx.xXx.xXx.')
borrado = True
file.seek(file.tell() + 49)
file.close()
return borrado
def tamaArchivo(path):
si = stat(path).st_size
return si
def dePcASistema(path, nombre):
posicion = 0
actual = 0
try:
new = open(path, 'r+')
file = open('fiunamfs.img', 'r+')
file.seek(2048)
bandera = False
tam = stat(path).st_size
while bandera == False:
name = file.read(15)
if name == 'Xx.xXx.xXx.xXx.':
file.seek(file.tell() - 15)
file.write(nombre)
actual = file.tell()
print('El archivo fue copiado')
bandera = True
file.seek(file.tell() + 49)
file.close()
file = open('fiunamfs.img', 'r+')
pa = clusterVacio()
inde = 2048 * pa
tamano = tamaArchivo(path)
file.seek(inde)
file.write(new.read(tamano))
file.close()
file = open('fiunamfs.img', 'r+')
file.seek(actual)
file.write(str(pa))
file.close()
except:
print('Este archivo no existe')
def deSistemaAPc(archivo, nombre):
tam = 0
clu = 0
file = open('fiunamfs.img', 'r')
file.seek(2048)
new = open(archivo, 'r+')
for i in range(64):
name = file.read(15)
aux = name.strip()
if aux == nombre:
tam = file.read(8)
clu = file.read(5)
file.close()
aux2 = 2048 * clu
file = open('fiunamfs.img', 'r')
file.seek(aux2)
new.write(file.read(tam))
def nombreArchivo(path):
tam = len(path)
slash = 0
name = ''
name2 = ''
for i in range(tam):
if path[i] == '/':
slash = i
for i in range(slash + 1, tam):
name = name + path[i]
espaces = 15 - len(name)
for i in range(espaces):
name2 = name2 + ' '
return name2 + name
if nombre == 'FiUnamFS' and version == '0.7':
correcto = True
while correcto:
tablaArchivos()
print('Sistema de Archivos FI Unam FS')
print('1: Listar')
print('2: Copiar archivo')
print('3: Copiar archivo a la computadora')
print('4: Eliminar archivo')
print('5: Desgramentar')
print('6: Mostar informacion del sistema de archivos')
print('7: Salir')
opcion = input('Opcion: ')
if opcion == 6:
info()
elif opcion == 1:
listar()
elif opcion == 4:
archivo = raw_input('Nombre del archivo a borrar: ')
if borrar(archivo):
print('El archivo fue borrado')
else:
print('No se encontro el archivo')
elif opcion == 3:
archivo = raw_input('Nombre del archivo a copiar: ')
nombre = nombreArchivo(archivo)
deSistemaAPc(archivo, nombre)
elif opcion == 2:
archivo = raw_input('Nombre del archivo a copiar: ')
nombre = nombreArchivo(archivo)
dePcASistema(archivo, nombre)
elif opcion == 9:
print(archivos)
print(clusters)
print(tams)
elif opcion == 8:
va = clusterVacio()
print(va)
elif opcion == 7:
print('Sistema desmontado')
correcto = False
elif opcion == 5:
print('No se implemento')
else:
print(
'No se puede abrir el sistema de archivos debido a que no es el archivo correcto o la version correcta. Revise nuevamente que tenga la imagen correcta.'
)
exit()
| from sys import exit
from os import stat
file = open("fiunamfs.img","r")
nombre = file.read(8)
file.seek(10)
version = file.read(3)
file.seek(20)
etiqueta = file.read(15)
file.seek(40)
cluster = file.read(5)
file.seek(47)
numero = file.read(2)
file.seek(52)
numeroCompleto = file.read(8)
file.close()
archivos = []
tams = []
clusters = []
def clusterVacio():
arreAux = []
busca = 1
bandera = True
for i in range(len(clusters)):
clu=clusters[i]
arreAux.append(int(clu[0]))
print(arreAux)
while bandera:
if busca in arreAux:
busca = busca + 1
else:
bandera = False
return busca
def tablaArchivos():
global archivos
global tams
global clusters
archivos = []
tams = []
clusters = []
file = open("fiunamfs.img","r+")
file.seek(2048)
for i in range(64):
archivos.append(file.read(15))
tams.append(file.read(8))
clusters.append(file.read(5))
file.seek(file.tell()+36)
file.close()
def info():
print("Nombre del Sistema: " + nombre)
print("Version: " + version)
print("Etiqueta del Volumen: " + etiqueta)
print("Tamano del cluster en bytes: " + cluster)
print("Numero de clusters que mide el directorio: " + numero)
print("Numero de cluster que mide la unidad completa: " + numeroCompleto)
def listar():
file = open("fiunamfs.img","r")
file.seek(2048)
for i in range(64):
name = file.read(15)
if name != 'Xx.xXx.xXx.xXx.':
print(name)
file.seek(file.tell()+49)
file.close()
def borrar(archivo):
borrado = False
file = open("fiunamfs.img","r+")
file.seek(2048)
for i in range(64):
name = file.read(15)
aux = name.strip()
if aux == archivo:
file.seek(file.tell()-15)
file.write('Xx.xXx.xXx.xXx.')
borrado = True
file.seek(file.tell()+49)
file.close()
return borrado
def tamaArchivo(path):
si = stat(path).st_size
return si
def dePcASistema(path, nombre):
posicion =0
actual =0
try:
new = open(path,"r+")
file = open("fiunamfs.img","r+")
file.seek(2048)
bandera = False
tam = stat(path).st_size
while(bandera == False):
name = file.read(15)
if (name == 'Xx.xXx.xXx.xXx.'):
file.seek(file.tell()-15)
file.write(nombre)
actual = file.tell()
print("El archivo fue copiado")
bandera = True
file.seek(file.tell()+49)
file.close()
file = open("fiunamfs.img","r+")
pa = clusterVacio()
inde = 2048*pa
tamano = tamaArchivo(path)
file.seek(inde)
file.write(new.read(tamano))
file.close()
file = open("fiunamfs.img","r+")
file.seek(actual)
file.write(str(pa))
file.close()
except:
print("Este archivo no existe")
def deSistemaAPc(archivo,nombre):
tam = 0
clu = 0
file = open("fiunamfs.img","r") #Se abre el archivo en modo solo lectura
file.seek(2048) #Se salta el superbloque
new = open(archivo,"r+")
for i in range(64):
name = file.read(15)
aux = name.strip()
if (aux == nombre):
tam = file.read(8)
clu = file.read(5)
file.close()
aux2 = 2048*clu
file = open("fiunamfs.img","r")
file.seek(aux2)
new.write(file.read(tam))
def nombreArchivo(path):
tam = len(path)
slash = 0
name = ''
name2 = ''
for i in range(tam):
if (path[i] == '/'):
slash = i
for i in range(slash+1,tam):
name = name + path[i]
##Agregar funcion de limiar nombres de los archivos a 15 caracteres
espaces = 15 - len(name)
for i in range (espaces):
name2 = name2 + " "
return name2 + name
if (nombre == "FiUnamFS" and version == "0.7"):
correcto = True
while(correcto):
tablaArchivos()
print("Sistema de Archivos FI Unam FS")
print("1: Listar")
print("2: Copiar archivo")
print("3: Copiar archivo a la computadora")
print("4: Eliminar archivo")
print("5: Desgramentar")
print("6: Mostar informacion del sistema de archivos")
print("7: Salir")
opcion = input("Opcion: ")
if opcion == 6:
info()
elif opcion == 1:
listar()
elif opcion == 4:
archivo = raw_input("Nombre del archivo a borrar: ")
if(borrar(archivo)):
print('El archivo fue borrado')
else:
print('No se encontro el archivo')
elif opcion == 3:
archivo = raw_input("Nombre del archivo a copiar: ")
nombre = nombreArchivo(archivo)
deSistemaAPc(archivo, nombre)
elif opcion == 2:
archivo = raw_input("Nombre del archivo a copiar: ")
nombre = nombreArchivo(archivo)
dePcASistema(archivo, nombre)
elif opcion == 9:
print(archivos)
print(clusters)
print(tams)
elif opcion == 8:
va = clusterVacio()
print (va)
elif opcion == 7:
print("Sistema desmontado")
correcto = False
elif opcion == 5:
print("No se implemento")
else:
print("No se puede abrir el sistema de archivos debido a que no es el archivo correcto o la version correcta. Revise nuevamente que tenga la imagen correcta.")
exit()
| [
8, 9, 10, 12, 13 ] |
1,879 | b28ae19f31ae746f901dea645dfeaa211a15cd31 | <mask token>
| <mask token>
while True:
sleep(0.5)
r = random.choice(mineral)
x, y, z = mc.entity.getTilePos(myID)
mc.setBlocks(x + 1, y + 3, z + 1, x - 1, y - 3, z - 1, r)
| <mask token>
mc = Minecraft.create()
myID = mc.getPlayerEntityId('Baymax1112')
mineral = [14, 15, 16, 56, 73, 129, 57]
while True:
sleep(0.5)
r = random.choice(mineral)
x, y, z = mc.entity.getTilePos(myID)
mc.setBlocks(x + 1, y + 3, z + 1, x - 1, y - 3, z - 1, r)
| from mcpi.minecraft import Minecraft
from time import sleep
import random
mc = Minecraft.create()
myID = mc.getPlayerEntityId('Baymax1112')
mineral = [14, 15, 16, 56, 73, 129, 57]
while True:
sleep(0.5)
r = random.choice(mineral)
x, y, z = mc.entity.getTilePos(myID)
mc.setBlocks(x + 1, y + 3, z + 1, x - 1, y - 3, z - 1, r)
| from mcpi.minecraft import Minecraft
from time import sleep
import random
mc = Minecraft.create()
myID=mc.getPlayerEntityId("Baymax1112")
mineral = [14,15,16,56,73,129,57]
while True:
sleep(0.5)
r=random.choice(mineral)
x,y,z = mc.entity.getTilePos(myID)
mc.setBlocks(x+1,y+3,z+1,x-1,y-3,z-1,r) | [
0, 1, 2, 3, 4 ] |
1,880 | 5c0ee6e8a0d80dbb77a7a376c411b85bf1405272 | <mask token>
def write_head(file):
with open('head.tex', 'r') as head:
for line in head:
f.write(line)
def write_foot(file):
with open('foot.tex', 'r') as head:
for line in head:
f.write(line)
<mask token>
| <mask token>
def write_head(file):
with open('head.tex', 'r') as head:
for line in head:
f.write(line)
def write_foot(file):
with open('foot.tex', 'r') as head:
for line in head:
f.write(line)
<mask token>
parser.add_argument('-l', type=float, nargs='+', default=[0.0, 0.0, 0.15,
0.0, 0.35, 0.02, 0.42, 0.04, 0.6, 0.12, 0.85, 0.3, 0.92, 0.45, 1.0, 1.0
], help=
'coordinates of the lower line, in the format x1 y1 x2 y2 ... x_t y_t')
parser.add_argument('-u', type=float, nargs='+', default=[0, 0, 0.38, 0.17,
0.74, 0.45, 1.0, 1.0], help=
'coordinates of the upper line, in the format x1 y1 x2 y2 ... x_t y_t')
parser.add_argument('-n', type=str, default='normal.tex', help=
'output path for normal line')
parser.add_argument('-o', type=str, default='onestep.tex', help=
'output path for one-step visualization')
parser.add_argument('-a', type=str, default='allsteps.tex', help=
'output path for allsteps visualization')
parser.add_argument('-p', type=float, default=0.005, help=
'minimum precision for drawing the rainbow gradient')
parser.add_argument('--compile', default=False, action='store_true', help=
'compile pdfs with pdflatex')
<mask token>
if len(lower) % 2 == 1:
print('Coordinate list for lower line must have even number.')
exit(1)
if len(upper) < 2:
print('Coordinate list for upper line must have at least two elements.')
exit(2)
if len(upper) % 2 == 1:
print('Coordinate list for upper line must have even number.')
exit(3)
<mask token>
for i in range(0, len(lower), 2):
points.append((lower[i], lower[i + 1]))
<mask token>
for i in range(2, len(upper), 2):
upper_changes.append((upper[i] - last[0], upper[i + 1] - last[1]))
last = upper[i], upper[i + 1]
<mask token>
for i in range(len(points) - 1):
x1, y1 = points[i]
x2, y2 = points[i + 1]
length += sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
<mask token>
for i in range(len(points) - 1):
x1, y1 = points[i]
x2, y2 = points[i + 1]
segment_length = sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
curve.append(LineSegment(x2 - x1, y2 - y1, (length_so_far / length, (
length_so_far + segment_length) / length)))
length_so_far += segment_length
with open(normal_path, 'w') as f:
write_head(f)
f.write('\\draw (axis cs:0, 0) coordinate (x);')
for e in curve:
f.write(e.to_tikz(precision))
write_foot(f)
with open(onestep_path, 'w') as f:
write_head(f)
a1, rest = reorder_step(upper_changes[0][0], upper_changes[0][1], curve)
f.write('\\draw (axis cs:0, 0) coordinate (x);')
for e in a1:
f.write(e.to_tikz(precision))
for e in rest:
f.write(e.to_tikz(precision))
write_foot(f)
with open(allsteps_path, 'w') as f:
write_head(f)
rest = curve
f.write('\\draw (axis cs:0, 0) coordinate (x);')
for x, y in upper_changes:
a, rest = reorder_step(x, y, rest)
for e in a:
f.write(e.to_tikz(precision))
write_foot(f)
if do_compile:
Popen(['pdflatex', normal_path], stdout=DEVNULL)
Popen(['pdflatex', onestep_path], stdout=DEVNULL)
Popen(['pdflatex', allsteps_path], stdout=DEVNULL)
| <mask token>
def write_head(file):
with open('head.tex', 'r') as head:
for line in head:
f.write(line)
def write_foot(file):
with open('foot.tex', 'r') as head:
for line in head:
f.write(line)
parser = ArgumentParser(description=
'Generate LATEX code exemplifying Lemma 5, analogous to Figure 3.',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-l', type=float, nargs='+', default=[0.0, 0.0, 0.15,
0.0, 0.35, 0.02, 0.42, 0.04, 0.6, 0.12, 0.85, 0.3, 0.92, 0.45, 1.0, 1.0
], help=
'coordinates of the lower line, in the format x1 y1 x2 y2 ... x_t y_t')
parser.add_argument('-u', type=float, nargs='+', default=[0, 0, 0.38, 0.17,
0.74, 0.45, 1.0, 1.0], help=
'coordinates of the upper line, in the format x1 y1 x2 y2 ... x_t y_t')
parser.add_argument('-n', type=str, default='normal.tex', help=
'output path for normal line')
parser.add_argument('-o', type=str, default='onestep.tex', help=
'output path for one-step visualization')
parser.add_argument('-a', type=str, default='allsteps.tex', help=
'output path for allsteps visualization')
parser.add_argument('-p', type=float, default=0.005, help=
'minimum precision for drawing the rainbow gradient')
parser.add_argument('--compile', default=False, action='store_true', help=
'compile pdfs with pdflatex')
args = parser.parse_args()
precision = args.p
lower = args.l
upper = args.u
normal_path = args.n
onestep_path = args.o
allsteps_path = args.a
do_compile = args.compile
if len(lower) % 2 == 1:
print('Coordinate list for lower line must have even number.')
exit(1)
if len(upper) < 2:
print('Coordinate list for upper line must have at least two elements.')
exit(2)
if len(upper) % 2 == 1:
print('Coordinate list for upper line must have even number.')
exit(3)
points = []
for i in range(0, len(lower), 2):
points.append((lower[i], lower[i + 1]))
upper_changes = []
last = upper[:2]
for i in range(2, len(upper), 2):
upper_changes.append((upper[i] - last[0], upper[i + 1] - last[1]))
last = upper[i], upper[i + 1]
length = 0
for i in range(len(points) - 1):
x1, y1 = points[i]
x2, y2 = points[i + 1]
length += sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
curve = []
length_so_far = 0
for i in range(len(points) - 1):
x1, y1 = points[i]
x2, y2 = points[i + 1]
segment_length = sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
curve.append(LineSegment(x2 - x1, y2 - y1, (length_so_far / length, (
length_so_far + segment_length) / length)))
length_so_far += segment_length
with open(normal_path, 'w') as f:
write_head(f)
f.write('\\draw (axis cs:0, 0) coordinate (x);')
for e in curve:
f.write(e.to_tikz(precision))
write_foot(f)
with open(onestep_path, 'w') as f:
write_head(f)
a1, rest = reorder_step(upper_changes[0][0], upper_changes[0][1], curve)
f.write('\\draw (axis cs:0, 0) coordinate (x);')
for e in a1:
f.write(e.to_tikz(precision))
for e in rest:
f.write(e.to_tikz(precision))
write_foot(f)
with open(allsteps_path, 'w') as f:
write_head(f)
rest = curve
f.write('\\draw (axis cs:0, 0) coordinate (x);')
for x, y in upper_changes:
a, rest = reorder_step(x, y, rest)
for e in a:
f.write(e.to_tikz(precision))
write_foot(f)
if do_compile:
Popen(['pdflatex', normal_path], stdout=DEVNULL)
Popen(['pdflatex', onestep_path], stdout=DEVNULL)
Popen(['pdflatex', allsteps_path], stdout=DEVNULL)
| from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from math import sqrt
from sys import exit
from subprocess import Popen, DEVNULL
from resmon import LineSegment, reorder_step
def write_head(file):
with open('head.tex', 'r') as head:
for line in head:
f.write(line)
def write_foot(file):
with open('foot.tex', 'r') as head:
for line in head:
f.write(line)
parser = ArgumentParser(description=
'Generate LATEX code exemplifying Lemma 5, analogous to Figure 3.',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-l', type=float, nargs='+', default=[0.0, 0.0, 0.15,
0.0, 0.35, 0.02, 0.42, 0.04, 0.6, 0.12, 0.85, 0.3, 0.92, 0.45, 1.0, 1.0
], help=
'coordinates of the lower line, in the format x1 y1 x2 y2 ... x_t y_t')
parser.add_argument('-u', type=float, nargs='+', default=[0, 0, 0.38, 0.17,
0.74, 0.45, 1.0, 1.0], help=
'coordinates of the upper line, in the format x1 y1 x2 y2 ... x_t y_t')
parser.add_argument('-n', type=str, default='normal.tex', help=
'output path for normal line')
parser.add_argument('-o', type=str, default='onestep.tex', help=
'output path for one-step visualization')
parser.add_argument('-a', type=str, default='allsteps.tex', help=
'output path for allsteps visualization')
parser.add_argument('-p', type=float, default=0.005, help=
'minimum precision for drawing the rainbow gradient')
parser.add_argument('--compile', default=False, action='store_true', help=
'compile pdfs with pdflatex')
args = parser.parse_args()
precision = args.p
lower = args.l
upper = args.u
normal_path = args.n
onestep_path = args.o
allsteps_path = args.a
do_compile = args.compile
if len(lower) % 2 == 1:
print('Coordinate list for lower line must have even number.')
exit(1)
if len(upper) < 2:
print('Coordinate list for upper line must have at least two elements.')
exit(2)
if len(upper) % 2 == 1:
print('Coordinate list for upper line must have even number.')
exit(3)
points = []
for i in range(0, len(lower), 2):
points.append((lower[i], lower[i + 1]))
upper_changes = []
last = upper[:2]
for i in range(2, len(upper), 2):
upper_changes.append((upper[i] - last[0], upper[i + 1] - last[1]))
last = upper[i], upper[i + 1]
length = 0
for i in range(len(points) - 1):
x1, y1 = points[i]
x2, y2 = points[i + 1]
length += sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
curve = []
length_so_far = 0
for i in range(len(points) - 1):
x1, y1 = points[i]
x2, y2 = points[i + 1]
segment_length = sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
curve.append(LineSegment(x2 - x1, y2 - y1, (length_so_far / length, (
length_so_far + segment_length) / length)))
length_so_far += segment_length
with open(normal_path, 'w') as f:
write_head(f)
f.write('\\draw (axis cs:0, 0) coordinate (x);')
for e in curve:
f.write(e.to_tikz(precision))
write_foot(f)
with open(onestep_path, 'w') as f:
write_head(f)
a1, rest = reorder_step(upper_changes[0][0], upper_changes[0][1], curve)
f.write('\\draw (axis cs:0, 0) coordinate (x);')
for e in a1:
f.write(e.to_tikz(precision))
for e in rest:
f.write(e.to_tikz(precision))
write_foot(f)
with open(allsteps_path, 'w') as f:
write_head(f)
rest = curve
f.write('\\draw (axis cs:0, 0) coordinate (x);')
for x, y in upper_changes:
a, rest = reorder_step(x, y, rest)
for e in a:
f.write(e.to_tikz(precision))
write_foot(f)
if do_compile:
Popen(['pdflatex', normal_path], stdout=DEVNULL)
Popen(['pdflatex', onestep_path], stdout=DEVNULL)
Popen(['pdflatex', allsteps_path], stdout=DEVNULL)
| from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from math import sqrt
from sys import exit
from subprocess import Popen, DEVNULL
from resmon import LineSegment, reorder_step
def write_head(file):
with open("head.tex", "r") as head:
for line in head:
f.write(line)
def write_foot(file):
with open("foot.tex", "r") as head:
for line in head:
f.write(line)
parser = ArgumentParser(description="Generate LATEX code exemplifying Lemma 5, analogous to Figure 3.",
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-l", type=float, nargs="+",
default=[0., 0., .15, .0, .35, .02, .42, .04, .6, .12, .85, .3, .92, .45, 1., 1.],
help="coordinates of the lower line, in the format x1 y1 x2 y2 ... x_t y_t")
parser.add_argument("-u", type=float, nargs="+",
default=[0, 0, .38, .17, .74, .45, 1., 1.],
help="coordinates of the upper line, in the format x1 y1 x2 y2 ... x_t y_t")
parser.add_argument("-n", type=str, default="normal.tex", help="output path for normal line")
parser.add_argument("-o", type=str, default="onestep.tex", help="output path for one-step visualization")
parser.add_argument("-a", type=str, default="allsteps.tex", help="output path for allsteps visualization")
parser.add_argument("-p", type=float, default=0.005,
help="minimum precision for drawing the rainbow gradient")
parser.add_argument('--compile', default=False, action='store_true', help="compile pdfs with pdflatex")
args = parser.parse_args()
precision = args.p
lower = args.l
upper = args.u
normal_path = args.n
onestep_path = args.o
allsteps_path = args.a
do_compile = args.compile
if len(lower) % 2 == 1:
print("Coordinate list for lower line must have even number.")
exit(1)
if len(upper) < 2:
print("Coordinate list for upper line must have at least two elements.")
exit(2)
if len(upper) % 2 == 1:
print("Coordinate list for upper line must have even number.")
exit(3)
points = []
for i in range(0, len(lower), 2):
points.append((lower[i], lower[i+1]))
upper_changes = []
last = upper[:2]
for i in range(2, len(upper), 2):
upper_changes.append((upper[i] - last[0], upper[i+1] - last[1]))
last = (upper[i], upper[i+1])
length = 0
for i in range(len(points) - 1):
x1, y1 = points[i]
x2, y2 = points[i + 1]
length += sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
curve = []
length_so_far = 0
for i in range(len(points) - 1):
x1, y1 = points[i]
x2, y2 = points[i + 1]
segment_length = sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
curve.append(LineSegment(x2 - x1, y2 - y1, (length_so_far / length, (length_so_far + segment_length) / length)))
length_so_far += segment_length
with open(normal_path, "w") as f:
write_head(f)
f.write("\draw (axis cs:0, 0) coordinate (x);")
for e in curve:
f.write(e.to_tikz(precision))
write_foot(f)
with open(onestep_path, "w") as f:
write_head(f)
a1, rest = reorder_step(upper_changes[0][0], upper_changes[0][1], curve)
f.write("\draw (axis cs:0, 0) coordinate (x);")
for e in a1:
f.write(e.to_tikz(precision))
for e in rest:
f.write(e.to_tikz(precision))
write_foot(f)
with open(allsteps_path, "w") as f:
write_head(f)
rest = curve
f.write("\draw (axis cs:0, 0) coordinate (x);")
for x, y in upper_changes:
a, rest = reorder_step(x, y, rest)
for e in a:
f.write(e.to_tikz(precision))
write_foot(f)
if do_compile:
Popen(["pdflatex", normal_path], stdout=DEVNULL)
Popen(["pdflatex", onestep_path], stdout=DEVNULL)
Popen(["pdflatex", allsteps_path], stdout=DEVNULL) | [
2, 3, 4, 5, 6 ] |
1,881 | 016c004fd95d901a6d55b6f7460397223a6baa3b | <mask token>
| <mask token>
@app.route('/comments/<article_id>', methods=['POST'])
def get_comments(article_id):
comments_range = request.form.get('comments_for_single')
try:
temp_list = json.loads(comments_range)
if isinstance(temp_list, list) and len(temp_list) == 2:
target_article = session.query(Article_list).filter_by(id=
article_id).one_or_none()
if target_article:
target_comments = target_article.relate_comments
comments_in_range = target_comments[-1 - temp_list[0]:-1 -
temp_list[1]:-1]
comments_count = len(target_comments)
comments_list = list(map(lambda x: {'comment': x.content,
'time': x.time, 'user_name': session.query(user).
filter_by(id=x.user_id).one().nickname, 'user_avatar':
session.query(user).filter_by(id=x.user_id).one().
avatar}, comments_in_range))
resp = {'status': 200, 'result': {'count': comments_count,
'commentsList': comments_list}}
session.close()
return jsonify(resp)
else:
abort(400)
else:
abort(400)
except Exception as e:
current_app.logger.info(e)
abort(400)
| <mask token>
orm = config_orm_initial.initialize_orm()
session = orm['dict_session']
Article_list = orm['dict_Articlelist']
user = orm['dict_user']
app = Blueprint('api_get_comments', __name__)
@app.route('/comments/<article_id>', methods=['POST'])
def get_comments(article_id):
comments_range = request.form.get('comments_for_single')
try:
temp_list = json.loads(comments_range)
if isinstance(temp_list, list) and len(temp_list) == 2:
target_article = session.query(Article_list).filter_by(id=
article_id).one_or_none()
if target_article:
target_comments = target_article.relate_comments
comments_in_range = target_comments[-1 - temp_list[0]:-1 -
temp_list[1]:-1]
comments_count = len(target_comments)
comments_list = list(map(lambda x: {'comment': x.content,
'time': x.time, 'user_name': session.query(user).
filter_by(id=x.user_id).one().nickname, 'user_avatar':
session.query(user).filter_by(id=x.user_id).one().
avatar}, comments_in_range))
resp = {'status': 200, 'result': {'count': comments_count,
'commentsList': comments_list}}
session.close()
return jsonify(resp)
else:
abort(400)
else:
abort(400)
except Exception as e:
current_app.logger.info(e)
abort(400)
| from flask import Blueprint, jsonify, request, abort, current_app
import json
from config import config_orm_initial
orm = config_orm_initial.initialize_orm()
session = orm['dict_session']
Article_list = orm['dict_Articlelist']
user = orm['dict_user']
app = Blueprint('api_get_comments', __name__)
@app.route('/comments/<article_id>', methods=['POST'])
def get_comments(article_id):
comments_range = request.form.get('comments_for_single')
try:
temp_list = json.loads(comments_range)
if isinstance(temp_list, list) and len(temp_list) == 2:
target_article = session.query(Article_list).filter_by(id=
article_id).one_or_none()
if target_article:
target_comments = target_article.relate_comments
comments_in_range = target_comments[-1 - temp_list[0]:-1 -
temp_list[1]:-1]
comments_count = len(target_comments)
comments_list = list(map(lambda x: {'comment': x.content,
'time': x.time, 'user_name': session.query(user).
filter_by(id=x.user_id).one().nickname, 'user_avatar':
session.query(user).filter_by(id=x.user_id).one().
avatar}, comments_in_range))
resp = {'status': 200, 'result': {'count': comments_count,
'commentsList': comments_list}}
session.close()
return jsonify(resp)
else:
abort(400)
else:
abort(400)
except Exception as e:
current_app.logger.info(e)
abort(400)
| # -*- coding: UTF-8 -*-
from flask import Blueprint, jsonify, request, abort, current_app
import json
from config import config_orm_initial
orm = config_orm_initial.initialize_orm()
session = orm['dict_session']
Article_list = orm['dict_Articlelist']
user = orm['dict_user']
app = Blueprint('api_get_comments', __name__)
@app.route('/comments/<article_id>', methods = ['POST'])
def get_comments(article_id):
comments_range = request.form.get('comments_for_single')
# 尝试把前端传来的参数解析成list
try:
temp_list = json.loads(comments_range)
# 判断参数是否是list,并且只有2个元素
if isinstance(temp_list, list) and len(temp_list) == 2:
# 先找到对应的article
target_article = session.query(Article_list).filter_by(id = article_id).one_or_none()
# 如果能找到这篇文章
if target_article:
# 然后调用一对多方法,拿到这篇article对应的comments和comments总数
target_comments = target_article.relate_comments
# 拿到的结果和list差不多,所以取倒数排序
comments_in_range = target_comments[-1-temp_list[0] : -1-temp_list[1]: -1]
comments_count = len(target_comments)
comments_list = list(map(
lambda x:{
'comment':x.content,
'time':x.time,
'user_name':session.query(user).filter_by(id=x.user_id).one().nickname,
'user_avatar':session.query(user).filter_by(id=x.user_id).one().avatar
},
comments_in_range)
)
resp = {'status': 200, 'result': {'count': comments_count, 'commentsList': comments_list}}
session.close()
return jsonify(resp)
# 如果不能找到这篇文章
else:
abort(400)
else:
abort(400)
except Exception as e:
current_app.logger.info(e)
abort(400) | [
0, 1, 2, 3, 4 ] |
1,882 | 4e6401672d4762b444bb679e4cc39ada04193a26 | <mask token>
class PageOne(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
frame_left = Frame(self)
self.frame_left = frame_left
frame_left.pack(fill=BOTH, side=LEFT)
self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),
fg='red')
self.label.pack()
self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
self.bagniere_bleu.pack(side='top', anchor='c')
self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')
self.Nombre_1 = Entry(frame_left)
self.Nombre_1.pack(side='top', anchor='w')
self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
self.bagniere_bleu.pack(side='top', anchor='c')
self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')
self.Nombre_2 = Entry(frame_left)
self.Nombre_2.pack(side='top', anchor='w')
tk.Button(frame_left, text='Go back to start page', command=lambda :
master.switch_frame(StartPage)).pack(side='bottom')
self.frame1 = Frame(self)
self.frame1.pack(fill='x')
self.rectangle = tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.master = master
self.commencer_un_jeu()
<mask token>
<mask token>
def update_clock(self):
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.
temps_de_rect))
self.label.configure(text=self.temps_de_rect)
if self.fin:
self.master.after(1000, self.update_clock)
def commencer_un_jeu(self):
self.fin = True
try:
self.rejouer.destroy()
self.label.config(text='')
self.Nombre_2.delete(0, END)
self.Nombre_1.delete(0, END)
except:
pass
self.bt_valider = tk.Button(self.frame_left, text='valider',
command=lambda : self.fin_du_jeu())
self.bt_valider.pack(side='top', anchor='w')
self.debut = time.time()
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.
temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.update_clock()
self.rectangle.destroy()
self.rectangle = tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.nombre_j1 = random.randint(1, 10)
self.nombre_j2 = random.randint(1, 10)
for _ in range(self.nombre_j2):
self.create_circle(20, self.rectangle, 'red')
for _ in range(self.nombre_j1):
self.create_circle(20, self.rectangle, 'blue')
def fin_du_jeu(self):
self.fin = False
if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2
.get()) == self.nombre_j2:
self.bt_valider.destroy()
self.rejouer = Button(self.frame_left, text='Rejouer', command=
lambda : self.commencer_un_jeu())
self.rejouer.pack(side='top', fill='x')
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self
.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200, 150, fill='darkblue', font=
'Times 20 italic bold', text='Victoire')
else:
self.bt_valider.destroy()
self.rejouer = Button(self.frame_left, text='Rejouer', command=
lambda : self.commencer_un_jeu())
self.rejouer.pack(side='top', fill='x')
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self
.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200, 150, fill='darkblue', font=
'Times 20 italic bold', text='Defaite')
class SampleApp(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self._frame = None
self.switch_frame(StartPage)
def timer(self, frame_game):
self.after(1000, frame_game.update_clock)
def switch_frame(self, frame_class, num=False):
new_frame = frame_class(self)
if self._frame is not None:
self._frame.destroy()
self._frame = new_frame
self._frame.pack()
class PageTwo(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self, bg='red')
tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(
side='top', fill='x', pady=5)
tk.Button(self, text='Go back to start page', command=lambda :
master.switch_frame(StartPage)).pack()
<mask token>
| <mask token>
class PageOne(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
frame_left = Frame(self)
self.frame_left = frame_left
frame_left.pack(fill=BOTH, side=LEFT)
self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),
fg='red')
self.label.pack()
self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
self.bagniere_bleu.pack(side='top', anchor='c')
self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')
self.Nombre_1 = Entry(frame_left)
self.Nombre_1.pack(side='top', anchor='w')
self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
self.bagniere_bleu.pack(side='top', anchor='c')
self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')
self.Nombre_2 = Entry(frame_left)
self.Nombre_2.pack(side='top', anchor='w')
tk.Button(frame_left, text='Go back to start page', command=lambda :
master.switch_frame(StartPage)).pack(side='bottom')
self.frame1 = Frame(self)
self.frame1.pack(fill='x')
self.rectangle = tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.master = master
self.commencer_un_jeu()
<mask token>
def create_ret(self, canvas):
return canvas.create_rectangle(0, 500, 500, 0, fill='#fdffdb')
def update_clock(self):
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.
temps_de_rect))
self.label.configure(text=self.temps_de_rect)
if self.fin:
self.master.after(1000, self.update_clock)
def commencer_un_jeu(self):
self.fin = True
try:
self.rejouer.destroy()
self.label.config(text='')
self.Nombre_2.delete(0, END)
self.Nombre_1.delete(0, END)
except:
pass
self.bt_valider = tk.Button(self.frame_left, text='valider',
command=lambda : self.fin_du_jeu())
self.bt_valider.pack(side='top', anchor='w')
self.debut = time.time()
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.
temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.update_clock()
self.rectangle.destroy()
self.rectangle = tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.nombre_j1 = random.randint(1, 10)
self.nombre_j2 = random.randint(1, 10)
for _ in range(self.nombre_j2):
self.create_circle(20, self.rectangle, 'red')
for _ in range(self.nombre_j1):
self.create_circle(20, self.rectangle, 'blue')
def fin_du_jeu(self):
self.fin = False
if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2
.get()) == self.nombre_j2:
self.bt_valider.destroy()
self.rejouer = Button(self.frame_left, text='Rejouer', command=
lambda : self.commencer_un_jeu())
self.rejouer.pack(side='top', fill='x')
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self
.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200, 150, fill='darkblue', font=
'Times 20 italic bold', text='Victoire')
else:
self.bt_valider.destroy()
self.rejouer = Button(self.frame_left, text='Rejouer', command=
lambda : self.commencer_un_jeu())
self.rejouer.pack(side='top', fill='x')
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self
.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200, 150, fill='darkblue', font=
'Times 20 italic bold', text='Defaite')
class SampleApp(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self._frame = None
self.switch_frame(StartPage)
def timer(self, frame_game):
self.after(1000, frame_game.update_clock)
def switch_frame(self, frame_class, num=False):
new_frame = frame_class(self)
if self._frame is not None:
self._frame.destroy()
self._frame = new_frame
self._frame.pack()
class PageTwo(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self, bg='red')
tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(
side='top', fill='x', pady=5)
tk.Button(self, text='Go back to start page', command=lambda :
master.switch_frame(StartPage)).pack()
<mask token>
| <mask token>
class StartPage(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self, bg='#d0a3d8', height=200, width=200)
tk.Label(self, text='Mini Jeu: \n P-0', font=('Helvetica', 18, 'bold')
).pack(side='top', fill='x', pady=5)
bt = Button(self, text='Jouer', command=lambda : master.
switch_frame(PageOne, num=True))
bt.pack(fill=BOTH, expand=True)
class PageOne(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
frame_left = Frame(self)
self.frame_left = frame_left
frame_left.pack(fill=BOTH, side=LEFT)
self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),
fg='red')
self.label.pack()
self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
self.bagniere_bleu.pack(side='top', anchor='c')
self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')
self.Nombre_1 = Entry(frame_left)
self.Nombre_1.pack(side='top', anchor='w')
self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
self.bagniere_bleu.pack(side='top', anchor='c')
self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')
self.Nombre_2 = Entry(frame_left)
self.Nombre_2.pack(side='top', anchor='w')
tk.Button(frame_left, text='Go back to start page', command=lambda :
master.switch_frame(StartPage)).pack(side='bottom')
self.frame1 = Frame(self)
self.frame1.pack(fill='x')
self.rectangle = tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.master = master
self.commencer_un_jeu()
def create_circle(self, r, canvasName, color):
x = random.randint(20, 300)
y = random.randint(20, 250)
x0 = x - r
y0 = y - r
x1 = x + r
y1 = y + r
return canvasName.create_oval(x0, y0, x1, y1, fill=color)
def create_ret(self, canvas):
return canvas.create_rectangle(0, 500, 500, 0, fill='#fdffdb')
def update_clock(self):
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.
temps_de_rect))
self.label.configure(text=self.temps_de_rect)
if self.fin:
self.master.after(1000, self.update_clock)
def commencer_un_jeu(self):
self.fin = True
try:
self.rejouer.destroy()
self.label.config(text='')
self.Nombre_2.delete(0, END)
self.Nombre_1.delete(0, END)
except:
pass
self.bt_valider = tk.Button(self.frame_left, text='valider',
command=lambda : self.fin_du_jeu())
self.bt_valider.pack(side='top', anchor='w')
self.debut = time.time()
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.
temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.update_clock()
self.rectangle.destroy()
self.rectangle = tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.nombre_j1 = random.randint(1, 10)
self.nombre_j2 = random.randint(1, 10)
for _ in range(self.nombre_j2):
self.create_circle(20, self.rectangle, 'red')
for _ in range(self.nombre_j1):
self.create_circle(20, self.rectangle, 'blue')
def fin_du_jeu(self):
self.fin = False
if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2
.get()) == self.nombre_j2:
self.bt_valider.destroy()
self.rejouer = Button(self.frame_left, text='Rejouer', command=
lambda : self.commencer_un_jeu())
self.rejouer.pack(side='top', fill='x')
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self
.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200, 150, fill='darkblue', font=
'Times 20 italic bold', text='Victoire')
else:
self.bt_valider.destroy()
self.rejouer = Button(self.frame_left, text='Rejouer', command=
lambda : self.commencer_un_jeu())
self.rejouer.pack(side='top', fill='x')
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self
.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200, 150, fill='darkblue', font=
'Times 20 italic bold', text='Defaite')
class SampleApp(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self._frame = None
self.switch_frame(StartPage)
def timer(self, frame_game):
self.after(1000, frame_game.update_clock)
def switch_frame(self, frame_class, num=False):
new_frame = frame_class(self)
if self._frame is not None:
self._frame.destroy()
self._frame = new_frame
self._frame.pack()
class PageTwo(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self, bg='red')
tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(
side='top', fill='x', pady=5)
tk.Button(self, text='Go back to start page', command=lambda :
master.switch_frame(StartPage)).pack()
<mask token>
| import tkinter as tk
from tkinter import Tk, BOTH, RIGHT, LEFT, END
from tkinter.ttk import Frame, Label, Style, Entry
from tkinter.ttk import Frame, Button, Style
import random
import time
class StartPage(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self, bg='#d0a3d8', height=200, width=200)
tk.Label(self, text='Mini Jeu: \n P-0', font=('Helvetica', 18, 'bold')
).pack(side='top', fill='x', pady=5)
bt = Button(self, text='Jouer', command=lambda : master.
switch_frame(PageOne, num=True))
bt.pack(fill=BOTH, expand=True)
class PageOne(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
frame_left = Frame(self)
self.frame_left = frame_left
frame_left.pack(fill=BOTH, side=LEFT)
self.label = tk.Label(frame_left, text='', font=('Helvetica', 10),
fg='red')
self.label.pack()
self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
self.bagniere_bleu.pack(side='top', anchor='c')
self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='blue')
self.Nombre_1 = Entry(frame_left)
self.Nombre_1.pack(side='top', anchor='w')
self.bagniere_bleu = tk.Canvas(frame_left, width=50, height=3)
self.bagniere_bleu.pack(side='top', anchor='c')
self.bagniere_bleu.create_rectangle(0, 3, 50, 0, fill='red')
self.Nombre_2 = Entry(frame_left)
self.Nombre_2.pack(side='top', anchor='w')
tk.Button(frame_left, text='Go back to start page', command=lambda :
master.switch_frame(StartPage)).pack(side='bottom')
self.frame1 = Frame(self)
self.frame1.pack(fill='x')
self.rectangle = tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.master = master
self.commencer_un_jeu()
def create_circle(self, r, canvasName, color):
x = random.randint(20, 300)
y = random.randint(20, 250)
x0 = x - r
y0 = y - r
x1 = x + r
y1 = y + r
return canvasName.create_oval(x0, y0, x1, y1, fill=color)
def create_ret(self, canvas):
return canvas.create_rectangle(0, 500, 500, 0, fill='#fdffdb')
def update_clock(self):
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.
temps_de_rect))
self.label.configure(text=self.temps_de_rect)
if self.fin:
self.master.after(1000, self.update_clock)
def commencer_un_jeu(self):
self.fin = True
try:
self.rejouer.destroy()
self.label.config(text='')
self.Nombre_2.delete(0, END)
self.Nombre_1.delete(0, END)
except:
pass
self.bt_valider = tk.Button(self.frame_left, text='valider',
command=lambda : self.fin_du_jeu())
self.bt_valider.pack(side='top', anchor='w')
self.debut = time.time()
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self.
temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.update_clock()
self.rectangle.destroy()
self.rectangle = tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.nombre_j1 = random.randint(1, 10)
self.nombre_j2 = random.randint(1, 10)
for _ in range(self.nombre_j2):
self.create_circle(20, self.rectangle, 'red')
for _ in range(self.nombre_j1):
self.create_circle(20, self.rectangle, 'blue')
def fin_du_jeu(self):
self.fin = False
if int(self.Nombre_1.get()) == self.nombre_j1 and int(self.Nombre_2
.get()) == self.nombre_j2:
self.bt_valider.destroy()
self.rejouer = Button(self.frame_left, text='Rejouer', command=
lambda : self.commencer_un_jeu())
self.rejouer.pack(side='top', fill='x')
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self
.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200, 150, fill='darkblue', font=
'Times 20 italic bold', text='Victoire')
else:
self.bt_valider.destroy()
self.rejouer = Button(self.frame_left, text='Rejouer', command=
lambda : self.commencer_un_jeu())
self.rejouer.pack(side='top', fill='x')
self.temps_de_rect = time.time() - self.debut
self.temps_de_rect = time.strftime('%H:%M:%S', time.gmtime(self
.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200, 150, fill='darkblue', font=
'Times 20 italic bold', text='Defaite')
class SampleApp(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self._frame = None
self.switch_frame(StartPage)
def timer(self, frame_game):
self.after(1000, frame_game.update_clock)
def switch_frame(self, frame_class, num=False):
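        # Replace the currently displayed frame (if any) with a fresh instance of frame_class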
new_frame = frame_class(self)
if self._frame is not None:
self._frame.destroy()
self._frame = new_frame
self._frame.pack()
class PageTwo(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self, bg='red')
tk.Label(self, text='Page two', font=('Helvetica', 18, 'bold')).pack(
side='top', fill='x', pady=5)
tk.Button(self, text='Go back to start page', command=lambda :
master.switch_frame(StartPage)).pack()
if __name__ == '__main__':
app = SampleApp()
app.geometry('800x800')
app.mainloop()
|
import tkinter as tk
from tkinter import Tk, BOTH,RIGHT,LEFT,END
from tkinter.ttk import Frame, Label, Style,Entry
from tkinter.ttk import Frame, Button, Style
import random
import time
class StartPage(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self,bg="#d0a3d8",height=200,width=200)
tk.Label(self, text="Mini Jeu: \n P-0", font=('Helvetica', 18, "bold")).pack(side="top", fill="x", pady=5)
bt=Button(self, text="Jouer",
command=lambda: master.switch_frame(PageOne,num=True))
bt.pack(fill=BOTH,expand=True)
# tk.Button(self, text="Go to page two",
# command=lambda: master.switch_frame(PageTwo)).pack()
class PageOne(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
# tk.Frame.configure(self,bg='blue')
# tk.Label(self, text="Page de jeu", font=('Helvetica', 18, "bold")).pack(side="top", fill=BOTH, pady=5)
frame_left=Frame(self)
self.frame_left=frame_left
frame_left.pack(fill=BOTH,side=LEFT)
# add entry to this frame
self.label=tk.Label(frame_left , text="", font=('Helvetica', 10), fg='red')
self.label.pack()
self.bagniere_bleu=tk.Canvas(frame_left,width=50,height=3)
self.bagniere_bleu.pack(side='top',anchor='c')
self.bagniere_bleu.create_rectangle(0,3,50,0,fill='blue')
self.Nombre_1=Entry(frame_left)
self.Nombre_1.pack(side='top',anchor='w')
        # banner to differentiate the two colours
self.bagniere_bleu=tk.Canvas(frame_left,width=50,height=3)
self.bagniere_bleu.pack(side='top',anchor='c')
self.bagniere_bleu.create_rectangle(0,3,50,0,fill='red')
self.Nombre_2=Entry(frame_left)
self.Nombre_2.pack(side='top',anchor='w')
tk.Button(frame_left, text="Go back to start page",
command=lambda: master.switch_frame(StartPage)).pack(side='bottom')
self.frame1 = Frame(self)
self.frame1.pack(fill='x')
self.rectangle=tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
# self.update_clock()
self.master=master
self.commencer_un_jeu()
def create_circle(self,r, canvasName,color): #center coordinates, radius
x=random.randint(20,300)
y=random.randint(20,250)
x0 = x - r
y0 = y - r
x1 = x + r
y1 = y + r
return canvasName.create_oval(x0, y0, x1, y1,fill=color)
def create_ret(self,canvas):
return canvas.create_rectangle(0,500,500,0,fill="#fdffdb")
def update_clock(self):
self.temps_de_rect=(time.time()-self.debut)
self.temps_de_rect=time.strftime("%H:%M:%S", time.gmtime(self.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
if self.fin:
self.master.after(1000,self.update_clock)
def commencer_un_jeu(self):
self.fin=True
try :
self.rejouer.destroy()
self.label.config(text='')
self.Nombre_2.delete(0,END)
self.Nombre_1.delete(0,END)
except:
pass
self.bt_valider=tk.Button(self.frame_left,text='valider', command=lambda: self.fin_du_jeu())
self. bt_valider.pack(side='top',anchor='w')
self.debut=time.time()
self.temps_de_rect=(time.time()-self.debut)
self.temps_de_rect=time.strftime("%H:%M:%S", time.gmtime(self.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.update_clock()
self.rectangle.destroy()
self.rectangle=tk.Canvas(self.frame1)
self.rectangle.pack()
self.create_ret(self.rectangle)
self.nombre_j1=random.randint(1,10)
self.nombre_j2=random.randint(1,10)
for _ in range(self.nombre_j2):
self.create_circle(20,self.rectangle,'red')
for _ in range(self.nombre_j1):
self.create_circle(20,self.rectangle,'blue')
def fin_du_jeu(self):
self.fin=False
if(int(self.Nombre_1.get())==self.nombre_j1 ) and (int(self.Nombre_2.get())==self.nombre_j2):
            # game won
self.bt_valider.destroy()
self.rejouer=Button(self.frame_left, text="Rejouer",
command=lambda: self.commencer_un_jeu())
self.rejouer.pack(side='top',fill='x')
self.temps_de_rect=(time.time()-self.debut)
self.temps_de_rect=time.strftime("%H:%M:%S", time.gmtime(self.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200,150,fill="darkblue",font="Times 20 italic bold",
text="Victoire")
else:
self.bt_valider.destroy()
self.rejouer=Button(self.frame_left, text="Rejouer",
command=lambda: self.commencer_un_jeu())
self.rejouer.pack(side='top',fill='x')
self.temps_de_rect=(time.time()-self.debut)
self.temps_de_rect=time.strftime("%H:%M:%S", time.gmtime(self.temps_de_rect))
self.label.configure(text=self.temps_de_rect)
self.rectangle.create_text(200,150,fill="darkblue",font="Times 20 italic bold",
text="Defaite")
class SampleApp(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self._frame = None
self.switch_frame(StartPage)
def timer(self,frame_game):
self.after(1000,frame_game.update_clock)
def switch_frame(self, frame_class,num=False):
new_frame = frame_class(self)
if self._frame is not None:
self._frame.destroy()
self._frame = new_frame
self._frame.pack()
# try:
# if num:
# print(frame_class)
# self.timer(frame_class)
# except:
# print("le frame n'est pas le bon")
class PageTwo(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
tk.Frame.configure(self,bg='red')
tk.Label(self, text="Page two", font=('Helvetica', 18, "bold")).pack(side="top", fill="x", pady=5)
tk.Button(self, text="Go back to start page",
command=lambda: master.switch_frame(StartPage)).pack()
if __name__ == "__main__":
app = SampleApp()
app.geometry('800x800')
app.mainloop() | [
  11, 12, 15, 17, 18 ] |
1,883 | a2e4e4a0c49c319df2adb073b11107d3f520aa6e | <mask token>
def evens(upper_limit):
return [i for i in range(0, upper_limit, 2)]
<mask token>
| <mask token>
def odds(upper_limit):
return [i for i in range(1, upper_limit, 2)]
def evens(upper_limit):
return [i for i in range(0, upper_limit, 2)]
<mask token>
if __name__ == '__main__':
print('odds', odds(12))
print('evens', evens(11))
print('nested', nested)
print('consonants', consonants)
print('ord of vowels', [ord(char) for char in vowels])
| <mask token>
def odds(upper_limit):
return [i for i in range(1, upper_limit, 2)]
def evens(upper_limit):
return [i for i in range(0, upper_limit, 2)]
nested = [(i ** j) for i in range(1, 10) for j in range(1, 4)]
vowels = ['a', 'e', 'i', 'o', 'u']
consonants = [chr(i) for i in range(97, 123) if chr(i) not in vowels]
ascii_table = {i: chr(i) for i in itertools.chain(range(65, 91), range(97,
123))}
ascii_lowercase = {i: chr(i) for i in ascii_table.keys() if chr(i) == chr(i
).lower()}
if __name__ == '__main__':
print('odds', odds(12))
print('evens', evens(11))
print('nested', nested)
print('consonants', consonants)
print('ord of vowels', [ord(char) for char in vowels])
| import itertools
def odds(upper_limit):
return [i for i in range(1, upper_limit, 2)]
def evens(upper_limit):
return [i for i in range(0, upper_limit, 2)]
nested = [(i ** j) for i in range(1, 10) for j in range(1, 4)]
vowels = ['a', 'e', 'i', 'o', 'u']
consonants = [chr(i) for i in range(97, 123) if chr(i) not in vowels]
ascii_table = {i: chr(i) for i in itertools.chain(range(65, 91), range(97,
123))}
ascii_lowercase = {i: chr(i) for i in ascii_table.keys() if chr(i) == chr(i
).lower()}
if __name__ == '__main__':
print('odds', odds(12))
print('evens', evens(11))
print('nested', nested)
print('consonants', consonants)
print('ord of vowels', [ord(char) for char in vowels])
| import itertools
def odds(upper_limit):
return [i for i in range(1,upper_limit,2)]
def evens(upper_limit):
return [i for i in range(0,upper_limit,2)]
nested = [i**j for i in range(1,10) for j in range(1,4)]
vowels = ['a', 'e', 'i', 'o', 'u']
consonants = [chr(i) for i in range(97,123) if chr(i) not in vowels]
ascii_table = {i:chr(i) for i in itertools.chain(range(65,91), range(97,123))}
ascii_lowercase = {i:chr(i) for i in ascii_table.keys() if chr(i) == chr(i).lower()}
if __name__ == "__main__":
print('odds', odds(12))
print('evens', evens(11))
print('nested', nested)
print('consonants', consonants)
print('ord of vowels', [ord(char) for char in vowels])
| [
  1, 3, 4, 5, 6 ] |
1,884 | 7611a57705939ce456e34d5ae379d6ca748b13c3 | <mask token>
class Datafunction(object):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def get_first_time(self):
return self.first_time
def get_last_time(self):
return self.last_time
<mask token>
| <mask token>
class Datafunction(object):
def __init__(self):
self.series = []
self.ref_point = RefObject()
self.clAss = None
self.first_time = None
self.last_time = None
self.id = None
def set_series(self, series):
self.series = series
def set_time(self, first, last):
self.first_time = first
self.last_time = last
def set_id(self, Id):
self.id = Id
def set_class(self, clAss):
self.clAss = clAss
def set_ref_object(self, pose, name, time, Id):
self.ref_point.set_data(pose, name, time, Id)
def get_series(self):
return self.series
def get_class(self):
return self.clAss
<mask token>
<mask token>
def get_first_time(self):
return self.first_time
def get_last_time(self):
return self.last_time
<mask token>
| <mask token>
class Datafunction(object):
def __init__(self):
self.series = []
self.ref_point = RefObject()
self.clAss = None
self.first_time = None
self.last_time = None
self.id = None
def set_series(self, series):
self.series = series
def set_time(self, first, last):
self.first_time = first
self.last_time = last
def set_id(self, Id):
self.id = Id
def set_class(self, clAss):
self.clAss = clAss
def set_ref_object(self, pose, name, time, Id):
self.ref_point.set_data(pose, name, time, Id)
def get_series(self):
return self.series
def get_class(self):
return self.clAss
def get_id(self):
return self.id
<mask token>
def get_first_time(self):
return self.first_time
def get_last_time(self):
return self.last_time
<mask token>
| class RefObject(object):
def __init__(self):
self.pose = []
self.name = []
self.time = None
self.id = None
def set_data(self, pose, name, time, Id):
self.pose = pose
self.name = name
self.time = time
self.id = Id
class Datafunction(object):
def __init__(self):
self.series = []
self.ref_point = RefObject()
self.clAss = None
self.first_time = None
self.last_time = None
self.id = None
def set_series(self, series):
self.series = series
def set_time(self, first, last):
self.first_time = first
self.last_time = last
def set_id(self, Id):
self.id = Id
def set_class(self, clAss):
self.clAss = clAss
def set_ref_object(self, pose, name, time, Id):
self.ref_point.set_data(pose, name, time, Id)
def get_series(self):
return self.series
def get_class(self):
return self.clAss
def get_id(self):
return self.id
def get_ref_point(self):
return self.ref_point
def get_first_time(self):
return self.first_time
def get_last_time(self):
return self.last_time
<mask token>
| #!/usr/bin/env python
# encoding: utf8
#from __future__ import unicode_literals
class RefObject(object):
def __init__(self,):
self.pose = []
self.name = []
self.time = None
self.id = None
def set_data(self,pose, name, time, Id):
self.pose = pose
self.name = name
self.time = time
self.id = Id
class Datafunction(object):
def __init__(self,):
self.series = []
self.ref_point = RefObject()
self.clAss = None
self.first_time = None
self.last_time = None
self.id = None
def set_series(self,series):
self.series = series
def set_time(self, first, last):
self.first_time = first
self.last_time = last
def set_id(self,Id):
self.id = Id
def set_class(self, clAss):
self.clAss = clAss
def set_ref_object(self,pose, name, time, Id):
self.ref_point.set_data(pose, name, time, Id)
def get_series(self,):
return self.series
def get_class(self,):
return self.clAss
def get_id(self,):
return self.id
def get_ref_point(self,):
return self.ref_point
def get_first_time(self,):
return self.first_time
def get_last_time(self):
return self.last_time
if __name__ == '__main__':
print("Hello")
| [
  3, 11, 12, 16, 18 ] |
1,885 | 7bcdd6c5c6e41b076e476e1db35b663e34d74a67 | <mask token>
def saveImg(imgurl, downLoadType):
fileLocation = directoryLocation + '\\' + downLoadType + '\\' + title
if not os.path.exists(fileLocation):
os.makedirs(fileLocation)
file = fileLocation + '\\' + str(count + 1) + '.png'
urlretrieve(imgurl, filename=file)
return file
def getTitle(content):
soup = BeautifulSoup(content, 'html.parser')
title = soup.find('p', 'mdCMN38Item01Ttl').text
return title
def downloadImageList(imgurl):
animationUrl = imgurl[:-7] + '[email protected]'
try:
file = saveImg(animationUrl, '動圖')
apng2gif(file)
except error.URLError as err:
saveImg(imgurl, downLoadType)
<mask token>
| <mask token>
config.read('crawler.config')
<mask token>
request.install_opener(opener)
def saveImg(imgurl, downLoadType):
fileLocation = directoryLocation + '\\' + downLoadType + '\\' + title
if not os.path.exists(fileLocation):
os.makedirs(fileLocation)
file = fileLocation + '\\' + str(count + 1) + '.png'
urlretrieve(imgurl, filename=file)
return file
def getTitle(content):
soup = BeautifulSoup(content, 'html.parser')
title = soup.find('p', 'mdCMN38Item01Ttl').text
return title
def downloadImageList(imgurl):
animationUrl = imgurl[:-7] + '[email protected]'
try:
file = saveImg(animationUrl, '動圖')
apng2gif(file)
except error.URLError as err:
saveImg(imgurl, downLoadType)
for i in range(0, len(urlList)):
downLoadType = '貼圖'
content = request.urlopen(urlList[i]).read().decode('utf-8', 'ignore')
rule = '(https.*sticker@2x\\.png)'
ruleEmoji = '(https.*/\\d{3}\\.png)'
title = getTitle(content)
title = re.sub('\\s', '', title)
title = re.sub('[\\W_]+', '', title)
print('開始下載 ' + title)
imglist = re.compile(rule).findall(content)
if len(imglist) == 0:
imglist = re.compile(ruleEmoji).findall(content)
downLoadType = '小表情'
for count in range(0, len(imglist)):
imgurl = downloadImageList(imglist[count])
print('第', count + 1, '張下載完成!')
print('已全部下載完成')
| <mask token>
config = configparser.ConfigParser()
config.read('crawler.config')
directoryLocation = os.getcwd() + '\\img'
urlList = config['lineStoreUrl']['url'].split(',')
downLoadType = '貼圖'
headers = ('User_Agent',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'
)
opener = request.build_opener()
opener.addheaders = [headers]
request.install_opener(opener)
def saveImg(imgurl, downLoadType):
fileLocation = directoryLocation + '\\' + downLoadType + '\\' + title
if not os.path.exists(fileLocation):
os.makedirs(fileLocation)
file = fileLocation + '\\' + str(count + 1) + '.png'
urlretrieve(imgurl, filename=file)
return file
def getTitle(content):
soup = BeautifulSoup(content, 'html.parser')
title = soup.find('p', 'mdCMN38Item01Ttl').text
return title
def downloadImageList(imgurl):
animationUrl = imgurl[:-7] + '[email protected]'
try:
file = saveImg(animationUrl, '動圖')
apng2gif(file)
except error.URLError as err:
saveImg(imgurl, downLoadType)
for i in range(0, len(urlList)):
downLoadType = '貼圖'
content = request.urlopen(urlList[i]).read().decode('utf-8', 'ignore')
rule = '(https.*sticker@2x\\.png)'
ruleEmoji = '(https.*/\\d{3}\\.png)'
title = getTitle(content)
title = re.sub('\\s', '', title)
title = re.sub('[\\W_]+', '', title)
print('開始下載 ' + title)
imglist = re.compile(rule).findall(content)
if len(imglist) == 0:
imglist = re.compile(ruleEmoji).findall(content)
downLoadType = '小表情'
for count in range(0, len(imglist)):
imgurl = downloadImageList(imglist[count])
print('第', count + 1, '張下載完成!')
print('已全部下載完成')
| from urllib import request
from urllib import error
from urllib.request import urlretrieve
import os, re
from bs4 import BeautifulSoup
import configparser
from apng2gif import apng2gif
config = configparser.ConfigParser()
config.read('crawler.config')
directoryLocation = os.getcwd() + '\\img'
urlList = config['lineStoreUrl']['url'].split(',')
downLoadType = '貼圖'
headers = ('User_Agent',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'
)
opener = request.build_opener()
opener.addheaders = [headers]
request.install_opener(opener)
def saveImg(imgurl, downLoadType):
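    # Save imgurl as <cwd>\img\<downLoadType>\<title>\<count+1>.png (title and count come from the main loop) and return the file path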
fileLocation = directoryLocation + '\\' + downLoadType + '\\' + title
if not os.path.exists(fileLocation):
os.makedirs(fileLocation)
file = fileLocation + '\\' + str(count + 1) + '.png'
urlretrieve(imgurl, filename=file)
return file
def getTitle(content):
soup = BeautifulSoup(content, 'html.parser')
title = soup.find('p', 'mdCMN38Item01Ttl').text
return title
def downloadImageList(imgurl):
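    # Try the animated '[email protected]' variant first and convert it to a gif; fall back to the static image on a URL error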
animationUrl = imgurl[:-7] + '[email protected]'
try:
file = saveImg(animationUrl, '動圖')
apng2gif(file)
except error.URLError as err:
saveImg(imgurl, downLoadType)
for i in range(0, len(urlList)):
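    # For each configured sticker-store page: fetch the HTML, extract the pack title and image URLs, then download every image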
downLoadType = '貼圖'
content = request.urlopen(urlList[i]).read().decode('utf-8', 'ignore')
rule = '(https.*sticker@2x\\.png)'
ruleEmoji = '(https.*/\\d{3}\\.png)'
title = getTitle(content)
title = re.sub('\\s', '', title)
title = re.sub('[\\W_]+', '', title)
print('開始下載 ' + title)
imglist = re.compile(rule).findall(content)
if len(imglist) == 0:
imglist = re.compile(ruleEmoji).findall(content)
downLoadType = '小表情'
for count in range(0, len(imglist)):
imgurl = downloadImageList(imglist[count])
print('第', count + 1, '張下載完成!')
print('已全部下載完成')
| from urllib import request
from urllib import error
from urllib.request import urlretrieve
import os, re
from bs4 import BeautifulSoup
import configparser
from apng2gif import apng2gif
config = configparser.ConfigParser()
config.read('crawler.config')
# 下載儲存位置
directoryLocation = os.getcwd() + '\\img'
# 設置要爬的頁面
urlList = config['lineStoreUrl']['url'].split(',')
downLoadType = '貼圖'
# 設置User-Agent
headers = ("User_Agent",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0")
# 自定義opener
opener = request.build_opener()
opener.addheaders = [headers]
request.install_opener(opener)
def saveImg(imgurl, downLoadType):
fileLocation = directoryLocation + "\\" + downLoadType + "\\" + title
if not os.path.exists(fileLocation):
os.makedirs(fileLocation)
file = fileLocation + "\\" + str(count + 1) + ".png"
urlretrieve(imgurl, filename=file)
return file
def getTitle(content):
soup = BeautifulSoup(content, 'html.parser')
title = soup.find('p', 'mdCMN38Item01Ttl').text
return title
def downloadImageList(imgurl):
# if animationUrl download animation png ,else download imageurl
animationUrl = imgurl[:-7] + '[email protected]'
try:
file = saveImg(animationUrl, '動圖')
apng2gif(file)
except error.URLError as err:
saveImg(imgurl, downLoadType)
for i in range(0, len(urlList)):
downLoadType = '貼圖'
content = request.urlopen(urlList[i]).read().decode("utf-8", "ignore")
rule = '(https.*sticker@2x\.png)' # 正則匹配
ruleEmoji = '(https.*/\d{3}\.png)'
title = getTitle(content)
title = re.sub('\s', '', title)
title = re.sub('[\W_]+', '', title)
print('開始下載 ' + title)
imglist = re.compile(rule).findall(content) # 獲取圖片列表
if len(imglist) == 0:
imglist = re.compile(ruleEmoji).findall(content) # 小表情規則
downLoadType = '小表情'
for count in range(0, len(imglist)):
imgurl = downloadImageList(imglist[count])
print('第', count + 1, '張下載完成!')
print("已全部下載完成")
| [
  3, 4, 5, 6, 7 ] |
1,886 | d423b0bc6cd9ea9795317750141ad5f5eab01636 | <mask token>
| <mask token>
def lower_upper_confidence_intervals(avg, SD):
lower = avg - 2 * SD
upper = avg + 2 * SD
return lower, upper
<mask token>
| <mask token>
sys.path.append(
'C:/Users/Laptop/Documents/Repos/udacity_stats_functions/descriptive')
<mask token>
def lower_upper_confidence_intervals(avg, SD):
lower = avg - 2 * SD
upper = avg + 2 * SD
return lower, upper
print(lower_upper_confidence_intervals(40, 2.71))
print(get_z_from_p(0.975))
| import sys
import os
sys.path.append(
'C:/Users/Laptop/Documents/Repos/udacity_stats_functions/descriptive')
import normal_distribution_06
def lower_upper_confidence_intervals(avg, SD):
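    # avg is the sample mean and SD the standard error; mean +/- 2*SE approximates a 95% confidence interval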
lower = avg - 2 * SD
upper = avg + 2 * SD
return lower, upper
print(lower_upper_confidence_intervals(40, 2.71))
print(get_z_from_p(0.975))
| import sys
import os
sys.path.append("C:/Users/Laptop/Documents/Repos/udacity_stats_functions/descriptive")
import normal_distribution_06
#import sampling_distributions_07
def lower_upper_confidence_intervals(avg, SD):
#avg is x bar. The mean value at the "would be" point. ie Bieber Tweeter
#SD is standard error (standard deviation of population dataset dvided by sqrt(number_in_sample)
lower = avg-2*SD
upper = avg+2*SD
return((lower, upper))
#7. Quiz: Confidence Interval Bounds
print(lower_upper_confidence_intervals(40, 2.71))
#8. Quiz: Exact Z-Scores
print(get_z_from_p(0.975)) | [
  0, 1, 2, 3, 4 ] |
1,887 | f69351474fb3eb48eeb65eaf1aa46d2f4a390471 | <mask token>
class MendeleyViewsTestCase(OsfTestCase):
def setUp(self):
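        # Build a user with a linked Mendeley account, a project whose addon is authorised
        # against that account, and patch the OAuth client id/secret used by the provider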
super(MendeleyViewsTestCase, self).setUp()
self.account = MendeleyAccountFactory()
self.user = AuthUserFactory(external_accounts=[self.account])
self.account.display_name = self.user.fullname
self.account.save()
self.user_addon = MendeleyUserSettingsFactory(owner=self.user,
external_account=self.account)
self.project = ProjectFactory(creator=self.user)
self.node_addon = MendeleyNodeSettingsFactory(owner=self.project)
self.node_addon.set_auth(external_account=self.account, user=self.user)
self.provider = MendeleyCitationsProvider()
self.node = MockNode()
self.node.addon = self.node_addon
self.id_patcher = mock.patch(
'website.addons.mendeley.model.Mendeley.client_id')
self.secret_patcher = mock.patch(
'website.addons.mendeley.model.Mendeley.client_secret')
self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf')
self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf')
self.id_patcher.start()
self.secret_patcher.start()
def tearDown(self):
self.id_patcher.stop()
self.secret_patcher.stop()
@mock.patch('website.addons.mendeley.model.Mendeley.client',
new_callable=mock.PropertyMock)
def test_check_mendeley_credentials(self, mock_client):
mock_client.side_effect = HTTPError(403)
assert_false(self.provider.check_credentials(self.node_addon))
mock_client.side_effect = HTTPError(402)
with assert_raises(HTTPError):
self.provider.check_credentials(self.node_addon)
@mock.patch(
'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials'
)
def test_serialize_settings_authorizer(self, mock_credentials):
mock_credentials.return_value = True
res = self.app.get(self.project.api_url_for('mendeley_get_config'),
auth=self.user.auth)
result = res.json['result']
assert_true(result['nodeHasAuth'])
assert_true(result['userHasAuth'])
assert_true(result['userIsOwner'])
assert_true(result['validCredentials'])
assert_equal(result['folder'], {'name': ''})
assert_equal(result['ownerName'], self.user.fullname)
assert_true(result['urls']['auth'])
assert_true(result['urls']['config'])
assert_true(result['urls']['deauthorize'])
assert_true(result['urls']['folders'])
assert_true(result['urls']['importAuth'])
assert_true(result['urls']['settings'])
@mock.patch(
'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials'
)
def test_serialize_settings_non_authorizer(self, mock_credentials):
mock_credentials.return_value = True
non_authorizing_user = AuthUserFactory()
self.project.add_contributor(non_authorizing_user, save=True)
res = self.app.get(self.project.api_url_for('mendeley_get_config'),
auth=non_authorizing_user.auth)
result = res.json['result']
assert_true(result['nodeHasAuth'])
assert_false(result['userHasAuth'])
assert_false(result['userIsOwner'])
assert_true(result['validCredentials'])
assert_equal(result['folder'], {'name': ''})
assert_equal(result['ownerName'], self.user.fullname)
assert_true(result['urls']['auth'])
assert_true(result['urls']['config'])
assert_true(result['urls']['deauthorize'])
assert_true(result['urls']['folders'])
assert_true(result['urls']['importAuth'])
assert_true(result['urls']['settings'])
@mock.patch(
'website.addons.mendeley.provider.MendeleyCitationsProvider.check_credentials'
)
def test_set_auth(self, mock_credentials):
mock_credentials.return_value = True
res = self.app.put_json(self.project.api_url_for(
'mendeley_add_user_auth'), {'external_account_id': self.account
._id}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_true(res.json['result']['userHasAuth'])
assert_equal(self.node_addon.user_settings, self.user_addon)
assert_equal(self.node_addon.external_account, self.account)
def test_remove_user_auth(self):
self.node_addon.set_auth(self.account, self.user)
res = self.app.delete_json(self.project.api_url_for(
'mendeley_remove_user_auth'), {'external_account_id': self.
account._id}, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.node_addon.reload()
assert_is_none(self.node_addon.user_settings)
assert_is_none(self.node_addon.external_account)
<mask token>
@mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')
def test_set_config_not_owner(self, mock_metadata):
mock_metadata.return_value = MockFolder(name='Fake Folder')
user = AuthUserFactory()
user.add_addon('mendeley')
self.project.add_contributor(user)
self.project.save()
res = self.app.put_json(self.project.api_url_for(
'mendeley_set_config'), {'external_account_id': self.account.
_id, 'external_list_id': 'list'}, auth=user.auth)
self.node_addon.reload()
assert_equal(self.user_addon, self.node_addon.user_settings)
serializer = MendeleySerializer(node_settings=self.node_addon,
user_settings=None)
expected = {'result': serializer.serialized_node_settings}
assert_equal(res.json, expected)
<mask token>
<mask token>
@httpretty.activate
def test_mendeley_citation_list_root(self):
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'folders'), body=mock_responses['folders'], content_type=
'application/json')
res = self.app.get(self.project.api_url_for(
'mendeley_citation_list'), auth=self.user.auth)
root = res.json['contents'][0]
assert_equal(root['kind'], 'folder')
assert_equal(root['id'], 'ROOT')
assert_equal(root['parent_list_id'], '__')
<mask token>
@httpretty.activate
def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self):
non_authorizing_user = AuthUserFactory()
self.project.add_contributor(non_authorizing_user, save=True)
self.node_addon.mendeley_list_id = (
'e843da05-8818-47c2-8c37-41eebfc4fe3f')
self.node_addon.save()
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'folders'), body=mock_responses['folders'], content_type=
'application/json')
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'documents'), body=mock_responses['documents'], content_type=
'application/json')
res = self.app.get(self.project.api_url_for(
'mendeley_citation_list', mendeley_list_id='ROOT'), auth=
non_authorizing_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
| <mask token>
class MendeleyViewsTestCase(OsfTestCase):
def setUp(self):
super(MendeleyViewsTestCase, self).setUp()
self.account = MendeleyAccountFactory()
self.user = AuthUserFactory(external_accounts=[self.account])
self.account.display_name = self.user.fullname
self.account.save()
self.user_addon = MendeleyUserSettingsFactory(owner=self.user,
external_account=self.account)
self.project = ProjectFactory(creator=self.user)
self.node_addon = MendeleyNodeSettingsFactory(owner=self.project)
self.node_addon.set_auth(external_account=self.account, user=self.user)
self.provider = MendeleyCitationsProvider()
self.node = MockNode()
self.node.addon = self.node_addon
self.id_patcher = mock.patch(
'website.addons.mendeley.model.Mendeley.client_id')
self.secret_patcher = mock.patch(
'website.addons.mendeley.model.Mendeley.client_secret')
self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf')
self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf')
self.id_patcher.start()
self.secret_patcher.start()
def tearDown(self):
self.id_patcher.stop()
self.secret_patcher.stop()
@mock.patch('website.addons.mendeley.model.Mendeley.client',
new_callable=mock.PropertyMock)
def test_check_mendeley_credentials(self, mock_client):
mock_client.side_effect = HTTPError(403)
assert_false(self.provider.check_credentials(self.node_addon))
mock_client.side_effect = HTTPError(402)
with assert_raises(HTTPError):
self.provider.check_credentials(self.node_addon)
@mock.patch(
'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials'
)
def test_serialize_settings_authorizer(self, mock_credentials):
mock_credentials.return_value = True
res = self.app.get(self.project.api_url_for('mendeley_get_config'),
auth=self.user.auth)
result = res.json['result']
assert_true(result['nodeHasAuth'])
assert_true(result['userHasAuth'])
assert_true(result['userIsOwner'])
assert_true(result['validCredentials'])
assert_equal(result['folder'], {'name': ''})
assert_equal(result['ownerName'], self.user.fullname)
assert_true(result['urls']['auth'])
assert_true(result['urls']['config'])
assert_true(result['urls']['deauthorize'])
assert_true(result['urls']['folders'])
assert_true(result['urls']['importAuth'])
assert_true(result['urls']['settings'])
@mock.patch(
'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials'
)
def test_serialize_settings_non_authorizer(self, mock_credentials):
mock_credentials.return_value = True
non_authorizing_user = AuthUserFactory()
self.project.add_contributor(non_authorizing_user, save=True)
res = self.app.get(self.project.api_url_for('mendeley_get_config'),
auth=non_authorizing_user.auth)
result = res.json['result']
assert_true(result['nodeHasAuth'])
assert_false(result['userHasAuth'])
assert_false(result['userIsOwner'])
assert_true(result['validCredentials'])
assert_equal(result['folder'], {'name': ''})
assert_equal(result['ownerName'], self.user.fullname)
assert_true(result['urls']['auth'])
assert_true(result['urls']['config'])
assert_true(result['urls']['deauthorize'])
assert_true(result['urls']['folders'])
assert_true(result['urls']['importAuth'])
assert_true(result['urls']['settings'])
@mock.patch(
'website.addons.mendeley.provider.MendeleyCitationsProvider.check_credentials'
)
def test_set_auth(self, mock_credentials):
mock_credentials.return_value = True
res = self.app.put_json(self.project.api_url_for(
'mendeley_add_user_auth'), {'external_account_id': self.account
._id}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_true(res.json['result']['userHasAuth'])
assert_equal(self.node_addon.user_settings, self.user_addon)
assert_equal(self.node_addon.external_account, self.account)
def test_remove_user_auth(self):
self.node_addon.set_auth(self.account, self.user)
res = self.app.delete_json(self.project.api_url_for(
'mendeley_remove_user_auth'), {'external_account_id': self.
account._id}, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.node_addon.reload()
assert_is_none(self.node_addon.user_settings)
assert_is_none(self.node_addon.external_account)
@mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')
def test_set_config_owner(self, mock_metadata):
mock_metadata.return_value = MockFolder(name='Fake Folder')
self.node_addon.associated_user_settings = []
self.node_addon.save()
res = self.app.put_json(self.project.api_url_for(
'mendeley_set_config'), {'external_account_id': self.account.
_id, 'external_list_id': 'list'}, auth=self.user.auth)
self.node_addon.reload()
assert_equal(self.user_addon, self.node_addon.user_settings)
serializer = MendeleySerializer(node_settings=self.node_addon,
user_settings=self.user_addon)
expected = {'result': serializer.serialized_node_settings}
assert_equal(res.json, expected)
@mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')
def test_set_config_not_owner(self, mock_metadata):
mock_metadata.return_value = MockFolder(name='Fake Folder')
user = AuthUserFactory()
user.add_addon('mendeley')
self.project.add_contributor(user)
self.project.save()
res = self.app.put_json(self.project.api_url_for(
'mendeley_set_config'), {'external_account_id': self.account.
_id, 'external_list_id': 'list'}, auth=user.auth)
self.node_addon.reload()
assert_equal(self.user_addon, self.node_addon.user_settings)
serializer = MendeleySerializer(node_settings=self.node_addon,
user_settings=None)
expected = {'result': serializer.serialized_node_settings}
assert_equal(res.json, expected)
<mask token>
def test_widget_view_incomplete(self):
assert_false(self.node_addon.complete)
assert_equal(self.node_addon.mendeley_list_id, None)
url = self.project.api_url_for('mendeley_widget')
res = self.app.get(url, auth=self.user.auth).json
assert_false(res['complete'])
assert_is_none(res['list_id'])
@httpretty.activate
def test_mendeley_citation_list_root(self):
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'folders'), body=mock_responses['folders'], content_type=
'application/json')
res = self.app.get(self.project.api_url_for(
'mendeley_citation_list'), auth=self.user.auth)
root = res.json['contents'][0]
assert_equal(root['kind'], 'folder')
assert_equal(root['id'], 'ROOT')
assert_equal(root['parent_list_id'], '__')
@httpretty.activate
def test_mendeley_citation_list_non_root(self):
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'folders'), body=mock_responses['folders'], content_type=
'application/json')
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'documents'), body=mock_responses['documents'], content_type=
'application/json')
res = self.app.get(self.project.api_url_for(
'mendeley_citation_list', mendeley_list_id='ROOT'), auth=self.
user.auth)
children = res.json['contents']
assert_equal(len(children), 7)
assert_equal(children[0]['kind'], 'folder')
assert_equal(children[1]['kind'], 'file')
assert_true(children[1].get('csl') is not None)
@httpretty.activate
def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self):
non_authorizing_user = AuthUserFactory()
self.project.add_contributor(non_authorizing_user, save=True)
self.node_addon.mendeley_list_id = (
'e843da05-8818-47c2-8c37-41eebfc4fe3f')
self.node_addon.save()
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'folders'), body=mock_responses['folders'], content_type=
'application/json')
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'documents'), body=mock_responses['documents'], content_type=
'application/json')
res = self.app.get(self.project.api_url_for(
'mendeley_citation_list', mendeley_list_id='ROOT'), auth=
non_authorizing_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
| <mask token>
class MockFolder(object):
<mask token>
class MendeleyViewsTestCase(OsfTestCase):
def setUp(self):
super(MendeleyViewsTestCase, self).setUp()
self.account = MendeleyAccountFactory()
self.user = AuthUserFactory(external_accounts=[self.account])
self.account.display_name = self.user.fullname
self.account.save()
self.user_addon = MendeleyUserSettingsFactory(owner=self.user,
external_account=self.account)
self.project = ProjectFactory(creator=self.user)
self.node_addon = MendeleyNodeSettingsFactory(owner=self.project)
self.node_addon.set_auth(external_account=self.account, user=self.user)
self.provider = MendeleyCitationsProvider()
self.node = MockNode()
self.node.addon = self.node_addon
self.id_patcher = mock.patch(
'website.addons.mendeley.model.Mendeley.client_id')
self.secret_patcher = mock.patch(
'website.addons.mendeley.model.Mendeley.client_secret')
self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf')
self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf')
self.id_patcher.start()
self.secret_patcher.start()
def tearDown(self):
self.id_patcher.stop()
self.secret_patcher.stop()
@mock.patch('website.addons.mendeley.model.Mendeley.client',
new_callable=mock.PropertyMock)
def test_check_mendeley_credentials(self, mock_client):
mock_client.side_effect = HTTPError(403)
assert_false(self.provider.check_credentials(self.node_addon))
mock_client.side_effect = HTTPError(402)
with assert_raises(HTTPError):
self.provider.check_credentials(self.node_addon)
@mock.patch(
'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials'
)
def test_serialize_settings_authorizer(self, mock_credentials):
mock_credentials.return_value = True
res = self.app.get(self.project.api_url_for('mendeley_get_config'),
auth=self.user.auth)
result = res.json['result']
assert_true(result['nodeHasAuth'])
assert_true(result['userHasAuth'])
assert_true(result['userIsOwner'])
assert_true(result['validCredentials'])
assert_equal(result['folder'], {'name': ''})
assert_equal(result['ownerName'], self.user.fullname)
assert_true(result['urls']['auth'])
assert_true(result['urls']['config'])
assert_true(result['urls']['deauthorize'])
assert_true(result['urls']['folders'])
assert_true(result['urls']['importAuth'])
assert_true(result['urls']['settings'])
@mock.patch(
'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials'
)
def test_serialize_settings_non_authorizer(self, mock_credentials):
mock_credentials.return_value = True
non_authorizing_user = AuthUserFactory()
self.project.add_contributor(non_authorizing_user, save=True)
res = self.app.get(self.project.api_url_for('mendeley_get_config'),
auth=non_authorizing_user.auth)
result = res.json['result']
assert_true(result['nodeHasAuth'])
assert_false(result['userHasAuth'])
assert_false(result['userIsOwner'])
assert_true(result['validCredentials'])
assert_equal(result['folder'], {'name': ''})
assert_equal(result['ownerName'], self.user.fullname)
assert_true(result['urls']['auth'])
assert_true(result['urls']['config'])
assert_true(result['urls']['deauthorize'])
assert_true(result['urls']['folders'])
assert_true(result['urls']['importAuth'])
assert_true(result['urls']['settings'])
@mock.patch(
'website.addons.mendeley.provider.MendeleyCitationsProvider.check_credentials'
)
def test_set_auth(self, mock_credentials):
mock_credentials.return_value = True
res = self.app.put_json(self.project.api_url_for(
'mendeley_add_user_auth'), {'external_account_id': self.account
._id}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_true(res.json['result']['userHasAuth'])
assert_equal(self.node_addon.user_settings, self.user_addon)
assert_equal(self.node_addon.external_account, self.account)
def test_remove_user_auth(self):
self.node_addon.set_auth(self.account, self.user)
res = self.app.delete_json(self.project.api_url_for(
'mendeley_remove_user_auth'), {'external_account_id': self.
account._id}, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.node_addon.reload()
assert_is_none(self.node_addon.user_settings)
assert_is_none(self.node_addon.external_account)
@mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')
def test_set_config_owner(self, mock_metadata):
mock_metadata.return_value = MockFolder(name='Fake Folder')
self.node_addon.associated_user_settings = []
self.node_addon.save()
res = self.app.put_json(self.project.api_url_for(
'mendeley_set_config'), {'external_account_id': self.account.
_id, 'external_list_id': 'list'}, auth=self.user.auth)
self.node_addon.reload()
assert_equal(self.user_addon, self.node_addon.user_settings)
serializer = MendeleySerializer(node_settings=self.node_addon,
user_settings=self.user_addon)
expected = {'result': serializer.serialized_node_settings}
assert_equal(res.json, expected)
@mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')
def test_set_config_not_owner(self, mock_metadata):
mock_metadata.return_value = MockFolder(name='Fake Folder')
user = AuthUserFactory()
user.add_addon('mendeley')
self.project.add_contributor(user)
self.project.save()
res = self.app.put_json(self.project.api_url_for(
'mendeley_set_config'), {'external_account_id': self.account.
_id, 'external_list_id': 'list'}, auth=user.auth)
self.node_addon.reload()
assert_equal(self.user_addon, self.node_addon.user_settings)
serializer = MendeleySerializer(node_settings=self.node_addon,
user_settings=None)
expected = {'result': serializer.serialized_node_settings}
assert_equal(res.json, expected)
def test_mendeley_widget_view_complete(self):
assert_false(self.node_addon.complete)
assert_equal(self.node_addon.mendeley_list_id, None)
self.node_addon.set_target_folder('ROOT-ID', 'ROOT', auth=Auth(user
=self.user))
url = self.project.api_url_for('mendeley_widget')
res = self.app.get(url, auth=self.user.auth).json
assert_true(res['complete'])
assert_equal(res['list_id'], 'ROOT-ID')
def test_widget_view_incomplete(self):
assert_false(self.node_addon.complete)
assert_equal(self.node_addon.mendeley_list_id, None)
url = self.project.api_url_for('mendeley_widget')
res = self.app.get(url, auth=self.user.auth).json
assert_false(res['complete'])
assert_is_none(res['list_id'])
@httpretty.activate
def test_mendeley_citation_list_root(self):
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'folders'), body=mock_responses['folders'], content_type=
'application/json')
res = self.app.get(self.project.api_url_for(
'mendeley_citation_list'), auth=self.user.auth)
root = res.json['contents'][0]
assert_equal(root['kind'], 'folder')
assert_equal(root['id'], 'ROOT')
assert_equal(root['parent_list_id'], '__')
@httpretty.activate
def test_mendeley_citation_list_non_root(self):
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'folders'), body=mock_responses['folders'], content_type=
'application/json')
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'documents'), body=mock_responses['documents'], content_type=
'application/json')
res = self.app.get(self.project.api_url_for(
'mendeley_citation_list', mendeley_list_id='ROOT'), auth=self.
user.auth)
children = res.json['contents']
assert_equal(len(children), 7)
assert_equal(children[0]['kind'], 'folder')
assert_equal(children[1]['kind'], 'file')
assert_true(children[1].get('csl') is not None)
@httpretty.activate
def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self):
non_authorizing_user = AuthUserFactory()
self.project.add_contributor(non_authorizing_user, save=True)
self.node_addon.mendeley_list_id = (
'e843da05-8818-47c2-8c37-41eebfc4fe3f')
self.node_addon.save()
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'folders'), body=mock_responses['folders'], content_type=
'application/json')
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'documents'), body=mock_responses['documents'], content_type=
'application/json')
res = self.app.get(self.project.api_url_for(
'mendeley_citation_list', mendeley_list_id='ROOT'), auth=
non_authorizing_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
| <mask token>
class MockNode(object):
<mask token>
@property
def is_deleted(self):
return False
<mask token>
def get_addon(self, name):
if name == 'mendeley':
return self.addon
return None
class MockFolder(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
class MendeleyViewsTestCase(OsfTestCase):
def setUp(self):
super(MendeleyViewsTestCase, self).setUp()
self.account = MendeleyAccountFactory()
self.user = AuthUserFactory(external_accounts=[self.account])
self.account.display_name = self.user.fullname
self.account.save()
self.user_addon = MendeleyUserSettingsFactory(owner=self.user,
external_account=self.account)
self.project = ProjectFactory(creator=self.user)
self.node_addon = MendeleyNodeSettingsFactory(owner=self.project)
self.node_addon.set_auth(external_account=self.account, user=self.user)
self.provider = MendeleyCitationsProvider()
self.node = MockNode()
self.node.addon = self.node_addon
self.id_patcher = mock.patch(
'website.addons.mendeley.model.Mendeley.client_id')
self.secret_patcher = mock.patch(
'website.addons.mendeley.model.Mendeley.client_secret')
self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf')
self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf')
self.id_patcher.start()
self.secret_patcher.start()
def tearDown(self):
self.id_patcher.stop()
self.secret_patcher.stop()
@mock.patch('website.addons.mendeley.model.Mendeley.client',
new_callable=mock.PropertyMock)
def test_check_mendeley_credentials(self, mock_client):
mock_client.side_effect = HTTPError(403)
assert_false(self.provider.check_credentials(self.node_addon))
mock_client.side_effect = HTTPError(402)
with assert_raises(HTTPError):
self.provider.check_credentials(self.node_addon)
@mock.patch(
'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials'
)
def test_serialize_settings_authorizer(self, mock_credentials):
mock_credentials.return_value = True
res = self.app.get(self.project.api_url_for('mendeley_get_config'),
auth=self.user.auth)
result = res.json['result']
assert_true(result['nodeHasAuth'])
assert_true(result['userHasAuth'])
assert_true(result['userIsOwner'])
assert_true(result['validCredentials'])
assert_equal(result['folder'], {'name': ''})
assert_equal(result['ownerName'], self.user.fullname)
assert_true(result['urls']['auth'])
assert_true(result['urls']['config'])
assert_true(result['urls']['deauthorize'])
assert_true(result['urls']['folders'])
assert_true(result['urls']['importAuth'])
assert_true(result['urls']['settings'])
@mock.patch(
'website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials'
)
def test_serialize_settings_non_authorizer(self, mock_credentials):
mock_credentials.return_value = True
non_authorizing_user = AuthUserFactory()
self.project.add_contributor(non_authorizing_user, save=True)
res = self.app.get(self.project.api_url_for('mendeley_get_config'),
auth=non_authorizing_user.auth)
result = res.json['result']
assert_true(result['nodeHasAuth'])
assert_false(result['userHasAuth'])
assert_false(result['userIsOwner'])
assert_true(result['validCredentials'])
assert_equal(result['folder'], {'name': ''})
assert_equal(result['ownerName'], self.user.fullname)
assert_true(result['urls']['auth'])
assert_true(result['urls']['config'])
assert_true(result['urls']['deauthorize'])
assert_true(result['urls']['folders'])
assert_true(result['urls']['importAuth'])
assert_true(result['urls']['settings'])
@mock.patch(
'website.addons.mendeley.provider.MendeleyCitationsProvider.check_credentials'
)
def test_set_auth(self, mock_credentials):
mock_credentials.return_value = True
res = self.app.put_json(self.project.api_url_for(
'mendeley_add_user_auth'), {'external_account_id': self.account
._id}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_true(res.json['result']['userHasAuth'])
assert_equal(self.node_addon.user_settings, self.user_addon)
assert_equal(self.node_addon.external_account, self.account)
def test_remove_user_auth(self):
self.node_addon.set_auth(self.account, self.user)
res = self.app.delete_json(self.project.api_url_for(
'mendeley_remove_user_auth'), {'external_account_id': self.
account._id}, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.node_addon.reload()
assert_is_none(self.node_addon.user_settings)
assert_is_none(self.node_addon.external_account)
@mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')
def test_set_config_owner(self, mock_metadata):
mock_metadata.return_value = MockFolder(name='Fake Folder')
self.node_addon.associated_user_settings = []
self.node_addon.save()
res = self.app.put_json(self.project.api_url_for(
'mendeley_set_config'), {'external_account_id': self.account.
_id, 'external_list_id': 'list'}, auth=self.user.auth)
self.node_addon.reload()
assert_equal(self.user_addon, self.node_addon.user_settings)
serializer = MendeleySerializer(node_settings=self.node_addon,
user_settings=self.user_addon)
expected = {'result': serializer.serialized_node_settings}
assert_equal(res.json, expected)
@mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')
def test_set_config_not_owner(self, mock_metadata):
mock_metadata.return_value = MockFolder(name='Fake Folder')
user = AuthUserFactory()
user.add_addon('mendeley')
self.project.add_contributor(user)
self.project.save()
res = self.app.put_json(self.project.api_url_for(
'mendeley_set_config'), {'external_account_id': self.account.
_id, 'external_list_id': 'list'}, auth=user.auth)
self.node_addon.reload()
assert_equal(self.user_addon, self.node_addon.user_settings)
serializer = MendeleySerializer(node_settings=self.node_addon,
user_settings=None)
expected = {'result': serializer.serialized_node_settings}
assert_equal(res.json, expected)
def test_mendeley_widget_view_complete(self):
assert_false(self.node_addon.complete)
assert_equal(self.node_addon.mendeley_list_id, None)
self.node_addon.set_target_folder('ROOT-ID', 'ROOT', auth=Auth(user
=self.user))
url = self.project.api_url_for('mendeley_widget')
res = self.app.get(url, auth=self.user.auth).json
assert_true(res['complete'])
assert_equal(res['list_id'], 'ROOT-ID')
def test_widget_view_incomplete(self):
assert_false(self.node_addon.complete)
assert_equal(self.node_addon.mendeley_list_id, None)
url = self.project.api_url_for('mendeley_widget')
res = self.app.get(url, auth=self.user.auth).json
assert_false(res['complete'])
assert_is_none(res['list_id'])
@httpretty.activate
def test_mendeley_citation_list_root(self):
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'folders'), body=mock_responses['folders'], content_type=
'application/json')
res = self.app.get(self.project.api_url_for(
'mendeley_citation_list'), auth=self.user.auth)
root = res.json['contents'][0]
assert_equal(root['kind'], 'folder')
assert_equal(root['id'], 'ROOT')
assert_equal(root['parent_list_id'], '__')
@httpretty.activate
def test_mendeley_citation_list_non_root(self):
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'folders'), body=mock_responses['folders'], content_type=
'application/json')
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'documents'), body=mock_responses['documents'], content_type=
'application/json')
res = self.app.get(self.project.api_url_for(
'mendeley_citation_list', mendeley_list_id='ROOT'), auth=self.
user.auth)
children = res.json['contents']
assert_equal(len(children), 7)
assert_equal(children[0]['kind'], 'folder')
assert_equal(children[1]['kind'], 'file')
assert_true(children[1].get('csl') is not None)
@httpretty.activate
def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self):
non_authorizing_user = AuthUserFactory()
self.project.add_contributor(non_authorizing_user, save=True)
self.node_addon.mendeley_list_id = (
'e843da05-8818-47c2-8c37-41eebfc4fe3f')
self.node_addon.save()
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'folders'), body=mock_responses['folders'], content_type=
'application/json')
httpretty.register_uri(httpretty.GET, urlparse.urljoin(API_URL,
'documents'), body=mock_responses['documents'], content_type=
'application/json')
res = self.app.get(self.project.api_url_for(
'mendeley_citation_list', mendeley_list_id='ROOT'), auth=
non_authorizing_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
| # -*- coding: utf-8 -*-
from nose.tools import * # noqa
import mock
import httpretty
from tests.base import OsfTestCase
from tests.factories import AuthUserFactory, ProjectFactory
import urlparse
from framework.auth import Auth
from website.addons.mendeley.tests.factories import (
MendeleyAccountFactory,
MendeleyUserSettingsFactory,
MendeleyNodeSettingsFactory
)
from framework.exceptions import HTTPError
from website.addons.mendeley.provider import MendeleyCitationsProvider
from website.addons.mendeley.serializer import MendeleySerializer
from utils import mock_responses
API_URL = 'https://api.mendeley.com'
class MockNode(object):
addon = None
@property
def is_deleted(self):
return False
@property
def is_public(self):
return True
def get_addon(self, name):
if name == 'mendeley':
return self.addon
return None
class MockFolder(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
class MendeleyViewsTestCase(OsfTestCase):
def setUp(self):
super(MendeleyViewsTestCase, self).setUp()
self.account = MendeleyAccountFactory()
self.user = AuthUserFactory(external_accounts=[self.account])
self.account.display_name = self.user.fullname
self.account.save()
self.user_addon = MendeleyUserSettingsFactory(owner=self.user, external_account=self.account)
self.project = ProjectFactory(creator=self.user)
self.node_addon = MendeleyNodeSettingsFactory(owner=self.project)
self.node_addon.set_auth(external_account=self.account, user=self.user)
self.provider = MendeleyCitationsProvider()
#self.user_addon.grant_oauth_access(self.node_addon, self.account, metadata={'lists': 'list'})
self.node = MockNode()
self.node.addon = self.node_addon
self.id_patcher = mock.patch('website.addons.mendeley.model.Mendeley.client_id')
self.secret_patcher = mock.patch('website.addons.mendeley.model.Mendeley.client_secret')
self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf')
self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf')
self.id_patcher.start()
self.secret_patcher.start()
def tearDown(self):
self.id_patcher.stop()
self.secret_patcher.stop()
@mock.patch('website.addons.mendeley.model.Mendeley.client', new_callable=mock.PropertyMock)
def test_check_mendeley_credentials(self, mock_client):
mock_client.side_effect = HTTPError(403)
assert_false(self.provider.check_credentials(self.node_addon))
mock_client.side_effect = HTTPError(402)
with assert_raises(HTTPError):
self.provider.check_credentials(self.node_addon)
@mock.patch('website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials')
def test_serialize_settings_authorizer(self, mock_credentials):
#"""dict: a serialized version of user-specific addon settings"""
mock_credentials.return_value = True
res = self.app.get(
self.project.api_url_for('mendeley_get_config'),
auth=self.user.auth,
)
result = res.json['result']
assert_true(result['nodeHasAuth'])
assert_true(result['userHasAuth'])
assert_true(result['userIsOwner'])
assert_true(result['validCredentials'])
assert_equal(result['folder'], {'name': ''})
assert_equal(result['ownerName'], self.user.fullname)
assert_true(result['urls']['auth'])
assert_true(result['urls']['config'])
assert_true(result['urls']['deauthorize'])
assert_true(result['urls']['folders'])
assert_true(result['urls']['importAuth'])
assert_true(result['urls']['settings'])
@mock.patch('website.addons.mendeley.views.MendeleyCitationsProvider.check_credentials')
def test_serialize_settings_non_authorizer(self, mock_credentials):
#"""dict: a serialized version of user-specific addon settings"""
mock_credentials.return_value = True
non_authorizing_user = AuthUserFactory()
self.project.add_contributor(non_authorizing_user, save=True)
res = self.app.get(
self.project.api_url_for('mendeley_get_config'),
auth=non_authorizing_user.auth,
)
result = res.json['result']
assert_true(result['nodeHasAuth'])
assert_false(result['userHasAuth'])
assert_false(result['userIsOwner'])
assert_true(result['validCredentials'])
assert_equal(result['folder'], {'name': ''})
assert_equal(result['ownerName'], self.user.fullname)
assert_true(result['urls']['auth'])
assert_true(result['urls']['config'])
assert_true(result['urls']['deauthorize'])
assert_true(result['urls']['folders'])
assert_true(result['urls']['importAuth'])
assert_true(result['urls']['settings'])
@mock.patch('website.addons.mendeley.provider.MendeleyCitationsProvider.check_credentials')
def test_set_auth(self, mock_credentials):
mock_credentials.return_value = True
res = self.app.put_json(
self.project.api_url_for('mendeley_add_user_auth'),
{
'external_account_id': self.account._id,
},
auth=self.user.auth,
)
assert_equal(
res.status_code,
200
)
assert_true(res.json['result']['userHasAuth'])
assert_equal(
self.node_addon.user_settings,
self.user_addon
)
assert_equal(
self.node_addon.external_account,
self.account
)
def test_remove_user_auth(self):
self.node_addon.set_auth(self.account, self.user)
res = self.app.delete_json(
self.project.api_url_for('mendeley_remove_user_auth'),
{
'external_account_id': self.account._id,
},
auth=self.user.auth,
)
assert_equal(
res.status_code,
200
)
self.node_addon.reload()
assert_is_none(self.node_addon.user_settings)
assert_is_none(self.node_addon.external_account)
@mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')
def test_set_config_owner(self, mock_metadata):
mock_metadata.return_value = MockFolder(name='Fake Folder')
# Settings config updates node settings
self.node_addon.associated_user_settings = []
self.node_addon.save()
res = self.app.put_json(
self.project.api_url_for('mendeley_set_config'),
{
'external_account_id': self.account._id,
'external_list_id': 'list',
},
auth=self.user.auth,
)
self.node_addon.reload()
assert_equal(self.user_addon, self.node_addon.user_settings)
serializer = MendeleySerializer(node_settings=self.node_addon, user_settings=self.user_addon)
expected = {
'result': serializer.serialized_node_settings
}
assert_equal(res.json, expected)
@mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')
def test_set_config_not_owner(self, mock_metadata):
mock_metadata.return_value = MockFolder(name='Fake Folder')
user = AuthUserFactory()
user.add_addon('mendeley')
self.project.add_contributor(user)
self.project.save()
res = self.app.put_json(
self.project.api_url_for('mendeley_set_config'),
{
'external_account_id': self.account._id,
'external_list_id': 'list',
},
auth=user.auth,
)
self.node_addon.reload()
assert_equal(self.user_addon, self.node_addon.user_settings)
serializer = MendeleySerializer(node_settings=self.node_addon, user_settings=None)
expected = {
'result': serializer.serialized_node_settings
}
assert_equal(res.json, expected)
def test_mendeley_widget_view_complete(self):
# JSON: everything a widget needs
assert_false(self.node_addon.complete)
assert_equal(self.node_addon.mendeley_list_id, None)
self.node_addon.set_target_folder('ROOT-ID', 'ROOT', auth=Auth(user=self.user))
url = self.project.api_url_for('mendeley_widget')
res = self.app.get(url, auth=self.user.auth).json
assert_true(res['complete'])
assert_equal(res['list_id'], 'ROOT-ID')
def test_widget_view_incomplete(self):
# JSON: tell the widget when it hasn't been configured
assert_false(self.node_addon.complete)
assert_equal(self.node_addon.mendeley_list_id, None)
url = self.project.api_url_for('mendeley_widget')
res = self.app.get(url, auth=self.user.auth).json
assert_false(res['complete'])
assert_is_none(res['list_id'])
@httpretty.activate
def test_mendeley_citation_list_root(self):
httpretty.register_uri(
httpretty.GET,
urlparse.urljoin(API_URL, 'folders'),
body=mock_responses['folders'],
content_type='application/json'
)
res = self.app.get(
self.project.api_url_for('mendeley_citation_list'),
auth=self.user.auth
)
root = res.json['contents'][0]
assert_equal(root['kind'], 'folder')
assert_equal(root['id'], 'ROOT')
assert_equal(root['parent_list_id'], '__')
@httpretty.activate
def test_mendeley_citation_list_non_root(self):
httpretty.register_uri(
httpretty.GET,
urlparse.urljoin(API_URL, 'folders'),
body=mock_responses['folders'],
content_type='application/json'
)
httpretty.register_uri(
httpretty.GET,
urlparse.urljoin(API_URL, 'documents'),
body=mock_responses['documents'],
content_type='application/json'
)
res = self.app.get(
self.project.api_url_for('mendeley_citation_list', mendeley_list_id='ROOT'),
auth=self.user.auth
)
children = res.json['contents']
assert_equal(len(children), 7)
assert_equal(children[0]['kind'], 'folder')
assert_equal(children[1]['kind'], 'file')
assert_true(children[1].get('csl') is not None)
@httpretty.activate
def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self):
non_authorizing_user = AuthUserFactory()
self.project.add_contributor(non_authorizing_user, save=True)
self.node_addon.mendeley_list_id = 'e843da05-8818-47c2-8c37-41eebfc4fe3f'
self.node_addon.save()
httpretty.register_uri(
httpretty.GET,
urlparse.urljoin(API_URL, 'folders'),
body=mock_responses['folders'],
content_type='application/json'
)
httpretty.register_uri(
httpretty.GET,
urlparse.urljoin(API_URL, 'documents'),
body=mock_responses['documents'],
content_type='application/json'
)
res = self.app.get(
self.project.api_url_for('mendeley_citation_list', mendeley_list_id='ROOT'),
auth=non_authorizing_user.auth,
expect_errors=True
)
assert_equal(res.status_code, 403)
| [
11,
14,
16,
20,
25
] |
1,888 | 08ccc58fe139db3f4712aa551b80f6ea57e0ad76 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 23 10:16:40 2014
@author: Yusuke
"""
import math
result = []
for i in range(6 * 9**5):
sum_num = 0
for j_digit in str(i):
sum_num += int(j_digit) ** 5
if sum_num == i:
print i
result.append(i)
print math.fsum(result)
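# Why the 6 * 9**5 bound above is enough: a number with seven or more digits is at
# least 1,000,000, but the largest possible sum of fifth powers of seven digits is
# only 7 * 9**5 = 413,343, so no such number can equal its digit fifth-power sum.
# Scanning up to 6 * 9**5 = 354,294 therefore covers every candidate.
assert 7 * 9 ** 5 == 413343 < 10 ** 6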
| null | null | null | null | [
0
] |
1,889 | 4989d01f31ca034aacdda28eff56adb2e0bb15da | <mask token>
| <mask token>
print(f'resultado: {resultado} || seu tipo: {type(resultado)}')
print('--------------\n')
print(f"""Nasca de bacana:
{Counter('Nasca de bacana')}""")
print('--------------\n')
<mask token>
print(f'ocorrencias de palavras: {resultado2}')
print(f'as "5" mais comuns: {resultado2.most_common(5)}')
print('----------------------')
print(dir(Counter))
| <mask token>
lista_1 = [3, 7, 40, 3, 7, 3, 7, 16, 3, 40, 7, 21, 7]
resultado = Counter(lista_1)
print(f'resultado: {resultado} || seu tipo: {type(resultado)}')
print('--------------\n')
print(f"""Nasca de bacana:
{Counter('Nasca de bacana')}""")
print('--------------\n')
texto = """Minha terra tem palmeiras, Onde canta o Sabiá;
As aves, que aqui gorjeiam, Não gorjeiam como lá.
Nosso céu tem mais estrelas, Nossas várzeas têm mais flores,
Nossos bosques têm mais vida, Nossa vida mais amores. """
palavras = texto.split()
resultado2 = Counter(palavras)
print(f'ocorrencias de palavras: {resultado2}')
print(f'as "5" mais comuns: {resultado2.most_common(5)}')
print('----------------------')
print(dir(Counter))
| <mask token>
from collections import Counter
lista_1 = [3, 7, 40, 3, 7, 3, 7, 16, 3, 40, 7, 21, 7]
resultado = Counter(lista_1)
print(f'resultado: {resultado} || seu tipo: {type(resultado)}')
print('--------------\n')
print(f"""Nasca de bacana:
{Counter('Nasca de bacana')}""")
print('--------------\n')
texto = """Minha terra tem palmeiras, Onde canta o Sabiá;
As aves, que aqui gorjeiam, Não gorjeiam como lá.
Nosso céu tem mais estrelas, Nossas várzeas têm mais flores,
Nossos bosques têm mais vida, Nossa vida mais amores. """
palavras = texto.split()
resultado2 = Counter(palavras)
print(f'ocorrencias de palavras: {resultado2}')
print(f'as "5" mais comuns: {resultado2.most_common(5)}')
print('----------------------')
print(dir(Counter))
| """
collections module - Counter
Collections -> High-performance Container Datatypes
Counter -> receives an iterable as a parameter and creates a collections.Counter object,
which behaves like a dictionary: each element of the iterable becomes a key and the
number of occurrences of that element becomes the value.
"""
# Using Counter
from collections import Counter
# Example 1
# Any iterable can be used; here we use a list
lista_1 = [3, 7, 40, 3, 7, 3, 7, 16, 3, 40, 7, 21, 7]
resultado = Counter(lista_1) # key/value
print(f'resultado: {resultado} || seu tipo: {type(resultado)}')
# note: each list element becomes a key and its value is the number of occurrences
print('--------------\n')
# Example 2
print(f'Nasca de bacana: \n {Counter("Nasca de bacana")}')
print('--------------\n')
texto = """Minha terra tem palmeiras, Onde canta o Sabiá;
As aves, que aqui gorjeiam, Não gorjeiam como lá.
Nosso céu tem mais estrelas, Nossas várzeas têm mais flores,
Nossos bosques têm mais vida, Nossa vida mais amores. """
palavras = texto.split()
#print(palavras)
resultado2 = Counter(palavras)
print(f'ocorrencias de palavras: {resultado2}')
# the 'n' most common
print(f'as "5" mais comuns: {resultado2.most_common(5)}')
print("----------------------")
print(dir(Counter))
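# A few extra Counter features that follow from the dict-like behaviour described in the
# docstring above (illustrative sketch):
extras = Counter('abracadabra')
extras.update('cada')                  # add counts from another iterable
print(extras.most_common(1))           # [('a', 7)]
print(extras - Counter('aaaa'))        # counter subtraction keeps only positive counts
print(sorted(extras.elements()))       # expand the counts back into individual elements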
| [
0,
1,
2,
3,
4
] |
1,890 | 57d6b9e7f48d32e5d10bfd6a340ea56281f5d82d | <mask token>
| <mask token>
def binarySearchR(array, target, leftPointer, rightPointer):
if leftPointer > rightPointer:
return -1
else:
midPointer = (leftPointer + rightPointer) // 2
if target == array[midPointer]:
return midPointer
elif target < array[midPointer]:
return binarySearchR(array, target, leftPointer, midPointer - 1)
else:
return binarySearchR(array, target, midPointer + 1, rightPointer)
| def binarySearch(array, target):
if len(array) == 0:
return -1
else:
return binarySearchR(array, target, 0, len(array) - 1)
def binarySearchR(array, target, leftPointer, rightPointer):
if leftPointer > rightPointer:
return -1
else:
midPointer = (leftPointer + rightPointer) // 2
if target == array[midPointer]:
return midPointer
elif target < array[midPointer]:
return binarySearchR(array, target, leftPointer, midPointer - 1)
else:
return binarySearchR(array, target, midPointer + 1, rightPointer)
| # O(log n) time; O(log n) space here because of the recursion stack (the iterative variant below runs in O(1) space)
def binarySearch(array, target):
if len(array) == 0:
return -1
else:
return binarySearchR(array, target, 0, len(array) - 1)
def binarySearchR(array, target, leftPointer, rightPointer):
if leftPointer > rightPointer:
return -1
else:
midPointer = (leftPointer + rightPointer) // 2
if target == array[midPointer]:
return midPointer
elif target < array[midPointer]:
return binarySearchR(array, target, leftPointer, midPointer - 1)
else:
return binarySearchR(array, target, midPointer + 1, rightPointer)
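# For comparison, an iterative version of the same search; this is the variant that
# actually uses O(1) auxiliary space (illustrative sketch):
def binarySearchIterative(array, target):
    left, right = 0, len(array) - 1
    while left <= right:
        mid = (left + right) // 2
        if array[mid] == target:
            return mid
        elif target < array[mid]:
            right = mid - 1
        else:
            left = mid + 1
    return -1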
| null | [
0,
1,
2,
3
] |
1,891 | 9b7ffa2bb62a8decbec51c6bdea38b4338726816 | <mask token>
| <mask token>
@pytest.fixture(scope='session')
def me(api):
return api.people.me()
| <mask token>
import pytest
import ciscosparkapi
from tests.utils import create_string
@pytest.fixture(scope='session')
def me(api):
return api.people.me()
| # -*- coding: utf-8 -*-
"""pytest People functions, fixtures and tests."""
import pytest
import ciscosparkapi
from tests.utils import create_string
# Helper Functions
# pytest Fixtures
@pytest.fixture(scope="session")
def me(api):
return api.people.me()
| null | [
0,
1,
2,
3
] |
1,892 | d22ebe24605065452ae35c44367ee21a726ae7a1 | <mask token>
def loadDataFrame(fileName, fileSchema):
return spark.read.format('csv').schema(fileSchema).option('header', 'true'
).option('mode', 'DROPMALFORMED').csv('/FileStore/tables/%s' % fileName
)
<mask token>
def top_movies(user_id, n):
"""
This function returns the top 'n' movies that user has not seen yet but might like
"""
a = unique_movies.alias('a')
watched_movies = indexed.filter(indexed['userId'] == user_id).select(
'title_new')
b = watched_movies.alias('b')
total_movies = a.join(b, a.title_new == b.title_new, how='left')
remaining_movies = total_movies.where(col('b.title_new').isNull()).select(a
.title_new).distinct()
remaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))
recommendations = rec_model.transform(remaining_movies).orderBy(
'prediction', ascending=False).limit(n)
movie_title = IndexToString(inputCol='title_new', outputCol='title',
labels=model.labels)
final_recommendations = movie_title.transform(recommendations)
return final_recommendations.show(n, False)
<mask token>
| <mask token>
def loadDataFrame(fileName, fileSchema):
return spark.read.format('csv').schema(fileSchema).option('header', 'true'
).option('mode', 'DROPMALFORMED').csv('/FileStore/tables/%s' % fileName
)
<mask token>
print((df.count(), len(df.columns)))
df.printSchema()
df.orderBy(rand()).show(10, False)
df.groupBy('userId').count().orderBy('count', ascending=False).show(10, False)
df.groupBy('userId').count().orderBy('count', ascending=True).show(10, False)
df.groupBy('title').count().orderBy('count', ascending=False).show(10, False)
df.groupBy('title').count().orderBy('count', ascending=True).show(10, False)
<mask token>
indexed.show(10)
indexed.groupBy('title_new').count().orderBy('count', ascending=False).show(
10, False)
<mask token>
train.count()
test.count()
<mask token>
predicted_ratings.printSchema()
predicted_ratings.orderBy(rand()).show(10)
<mask token>
print(rmse)
<mask token>
unique_movies.count()
<mask token>
watched_movies.count()
<mask token>
total_movies.show(10, False)
<mask token>
remaining_movies.count()
<mask token>
remaining_movies.show(10, False)
<mask token>
recommendations.show(5, False)
<mask token>
final_recommendations.show(10, False)
def top_movies(user_id, n):
"""
This function returns the top 'n' movies that user has not seen yet but might like
"""
a = unique_movies.alias('a')
watched_movies = indexed.filter(indexed['userId'] == user_id).select(
'title_new')
b = watched_movies.alias('b')
total_movies = a.join(b, a.title_new == b.title_new, how='left')
remaining_movies = total_movies.where(col('b.title_new').isNull()).select(a
.title_new).distinct()
remaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))
recommendations = rec_model.transform(remaining_movies).orderBy(
'prediction', ascending=False).limit(n)
movie_title = IndexToString(inputCol='title_new', outputCol='title',
labels=model.labels)
final_recommendations = movie_title.transform(recommendations)
return final_recommendations.show(n, False)
top_movies(85, 10)
| <mask token>
spark = SparkSession.builder.appName('rc').getOrCreate()
<mask token>
def loadDataFrame(fileName, fileSchema):
return spark.read.format('csv').schema(fileSchema).option('header', 'true'
).option('mode', 'DROPMALFORMED').csv('/FileStore/tables/%s' % fileName
)
<mask token>
movieRatingSchema = StructType([StructField('userId', IntegerType(), True),
StructField('movieId', IntegerType(), True), StructField('rating',
FloatType(), True), StructField('timestamp', StringType(), True)])
movieSchema = StructType([StructField('movieId', IntegerType(), True),
StructField('title', StringType(), True), StructField('genres',
StringType(), True)])
MovieRatingsDF = loadDataFrame('ratings.csv', movieRatingSchema).cache()
MoviesDF = loadDataFrame('movies.csv', movieSchema).cache()
df = MovieRatingsDF.join(MoviesDF, 'movieId').select(['userId', 'title',
'rating'])
print((df.count(), len(df.columns)))
df.printSchema()
df.orderBy(rand()).show(10, False)
df.groupBy('userId').count().orderBy('count', ascending=False).show(10, False)
df.groupBy('userId').count().orderBy('count', ascending=True).show(10, False)
df.groupBy('title').count().orderBy('count', ascending=False).show(10, False)
df.groupBy('title').count().orderBy('count', ascending=True).show(10, False)
<mask token>
stringIndexer = StringIndexer(inputCol='title', outputCol='title_new')
model = stringIndexer.fit(df)
indexed = model.transform(df)
indexed.show(10)
indexed.groupBy('title_new').count().orderBy('count', ascending=False).show(
10, False)
train, test = indexed.randomSplit([0.75, 0.25])
train.count()
test.count()
<mask token>
rec = ALS(maxIter=10, regParam=0.01, userCol='userId', itemCol='title_new',
ratingCol='rating', nonnegative=True, coldStartStrategy='drop')
rec_model = rec.fit(train)
predicted_ratings = rec_model.transform(test)
predicted_ratings.printSchema()
predicted_ratings.orderBy(rand()).show(10)
<mask token>
evaluator = RegressionEvaluator(metricName='rmse', predictionCol=
'prediction', labelCol='rating')
rmse = evaluator.evaluate(predicted_ratings)
print(rmse)
unique_movies = indexed.select('title_new').distinct()
unique_movies.count()
a = unique_movies.alias('a')
user_id = 85
watched_movies = indexed.filter(indexed['userId'] == user_id).select(
'title_new').distinct()
watched_movies.count()
b = watched_movies.alias('b')
total_movies = a.join(b, a.title_new == b.title_new, how='left')
total_movies.show(10, False)
remaining_movies = total_movies.where(col('b.title_new').isNull()).select(a
.title_new).distinct()
remaining_movies.count()
remaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))
remaining_movies.show(10, False)
recommendations = rec_model.transform(remaining_movies).orderBy('prediction',
ascending=False)
recommendations.show(5, False)
movie_title = IndexToString(inputCol='title_new', outputCol='title', labels
=model.labels)
final_recommendations = movie_title.transform(recommendations)
final_recommendations.show(10, False)
def top_movies(user_id, n):
"""
This function returns the top 'n' movies that user has not seen yet but might like
"""
a = unique_movies.alias('a')
watched_movies = indexed.filter(indexed['userId'] == user_id).select(
'title_new')
b = watched_movies.alias('b')
total_movies = a.join(b, a.title_new == b.title_new, how='left')
remaining_movies = total_movies.where(col('b.title_new').isNull()).select(a
.title_new).distinct()
remaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))
recommendations = rec_model.transform(remaining_movies).orderBy(
'prediction', ascending=False).limit(n)
movie_title = IndexToString(inputCol='title_new', outputCol='title',
labels=model.labels)
final_recommendations = movie_title.transform(recommendations)
return final_recommendations.show(n, False)
top_movies(85, 10)
| from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('rc').getOrCreate()
from pyspark.sql.functions import *
def loadDataFrame(fileName, fileSchema):
return spark.read.format('csv').schema(fileSchema).option('header', 'true'
).option('mode', 'DROPMALFORMED').csv('/FileStore/tables/%s' % fileName
)
from pyspark.sql.types import *
movieRatingSchema = StructType([StructField('userId', IntegerType(), True),
StructField('movieId', IntegerType(), True), StructField('rating',
FloatType(), True), StructField('timestamp', StringType(), True)])
movieSchema = StructType([StructField('movieId', IntegerType(), True),
StructField('title', StringType(), True), StructField('genres',
StringType(), True)])
MovieRatingsDF = loadDataFrame('ratings.csv', movieRatingSchema).cache()
MoviesDF = loadDataFrame('movies.csv', movieSchema).cache()
df = MovieRatingsDF.join(MoviesDF, 'movieId').select(['userId', 'title',
'rating'])
print((df.count(), len(df.columns)))
df.printSchema()
df.orderBy(rand()).show(10, False)
df.groupBy('userId').count().orderBy('count', ascending=False).show(10, False)
df.groupBy('userId').count().orderBy('count', ascending=True).show(10, False)
df.groupBy('title').count().orderBy('count', ascending=False).show(10, False)
df.groupBy('title').count().orderBy('count', ascending=True).show(10, False)
from pyspark.ml.feature import StringIndexer, IndexToString
stringIndexer = StringIndexer(inputCol='title', outputCol='title_new')
model = stringIndexer.fit(df)
indexed = model.transform(df)
indexed.show(10)
indexed.groupBy('title_new').count().orderBy('count', ascending=False).show(
10, False)
train, test = indexed.randomSplit([0.75, 0.25])
train.count()
test.count()
from pyspark.ml.recommendation import ALS
rec = ALS(maxIter=10, regParam=0.01, userCol='userId', itemCol='title_new',
ratingCol='rating', nonnegative=True, coldStartStrategy='drop')
rec_model = rec.fit(train)
predicted_ratings = rec_model.transform(test)
predicted_ratings.printSchema()
predicted_ratings.orderBy(rand()).show(10)
from pyspark.ml.evaluation import RegressionEvaluator
evaluator = RegressionEvaluator(metricName='rmse', predictionCol=
'prediction', labelCol='rating')
rmse = evaluator.evaluate(predicted_ratings)
print(rmse)
unique_movies = indexed.select('title_new').distinct()
unique_movies.count()
a = unique_movies.alias('a')
user_id = 85
watched_movies = indexed.filter(indexed['userId'] == user_id).select(
'title_new').distinct()
watched_movies.count()
b = watched_movies.alias('b')
total_movies = a.join(b, a.title_new == b.title_new, how='left')
total_movies.show(10, False)
remaining_movies = total_movies.where(col('b.title_new').isNull()).select(a
.title_new).distinct()
remaining_movies.count()
remaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))
remaining_movies.show(10, False)
recommendations = rec_model.transform(remaining_movies).orderBy('prediction',
ascending=False)
recommendations.show(5, False)
movie_title = IndexToString(inputCol='title_new', outputCol='title', labels
=model.labels)
final_recommendations = movie_title.transform(recommendations)
final_recommendations.show(10, False)
def top_movies(user_id, n):
"""
This function returns the top 'n' movies that user has not seen yet but might like
"""
a = unique_movies.alias('a')
watched_movies = indexed.filter(indexed['userId'] == user_id).select(
'title_new')
b = watched_movies.alias('b')
total_movies = a.join(b, a.title_new == b.title_new, how='left')
remaining_movies = total_movies.where(col('b.title_new').isNull()).select(a
.title_new).distinct()
remaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))
recommendations = rec_model.transform(remaining_movies).orderBy(
'prediction', ascending=False).limit(n)
movie_title = IndexToString(inputCol='title_new', outputCol='title',
labels=model.labels)
final_recommendations = movie_title.transform(recommendations)
return final_recommendations.show(n, False)
top_movies(85, 10)
| # Databricks notebook source
#import and create sparksession object
from pyspark.sql import SparkSession
spark=SparkSession.builder.appName('rc').getOrCreate()
# COMMAND ----------
#import the required functions and libraries
from pyspark.sql.functions import *
# COMMAND ----------
# Convert csv file to Spark DataFrame (Databricks version)
def loadDataFrame(fileName, fileSchema):
return (spark.read.format("csv")
.schema(fileSchema)
.option("header", "true")
.option("mode", "DROPMALFORMED")
.csv("/FileStore/tables/%s" % (fileName)))
# COMMAND ----------
from pyspark.sql.types import *
movieRatingSchema = StructType([
StructField("userId", IntegerType(), True),
StructField("movieId", IntegerType(), True),
StructField("rating", FloatType(), True),
StructField("timestamp", StringType(), True)])
movieSchema = StructType([
StructField("movieId", IntegerType(), True),
StructField("title", StringType(), True),
StructField("genres", StringType(), True)])
MovieRatingsDF = loadDataFrame("ratings.csv", movieRatingSchema).cache()
MoviesDF = loadDataFrame("movies.csv", movieSchema).cache()
# COMMAND ----------
#load the dataset and create sprk dataframe
df = MovieRatingsDF.join(MoviesDF, 'movieId').select(['userId', 'title', 'rating'])
#df=spark.read.csv('movie_ratings_df.csv',inferSchema=True,header=True)
# COMMAND ----------
#validate the shape of the data
print((df.count(),len(df.columns)))
# COMMAND ----------
#check columns in dataframe
df.printSchema()
# COMMAND ----------
#validate few rows of dataframe in random order
df.orderBy(rand()).show(10,False)
# COMMAND ----------
#check number of ratings by each user
df.groupBy('userId').count().orderBy('count',ascending=False).show(10,False)
# COMMAND ----------
#check number of ratings by each user
df.groupBy('userId').count().orderBy('count',ascending=True).show(10,False)
# COMMAND ----------
#number of times movie been rated
df.groupBy('title').count().orderBy('count',ascending=False).show(10,False)
# COMMAND ----------
df.groupBy('title').count().orderBy('count',ascending=True).show(10,False)
# COMMAND ----------
#import String indexer to convert string values to numeric values
from pyspark.ml.feature import StringIndexer,IndexToString
# COMMAND ----------
#creating string indexer to convert the movie title column values into numerical values
stringIndexer = StringIndexer(inputCol="title", outputCol="title_new")
# COMMAND ----------
#applying stringindexer object on dataframe movie title column
model = stringIndexer.fit(df)
# COMMAND ----------
#creating new dataframe with transformed values
indexed = model.transform(df)
# COMMAND ----------
#validate the numerical title values
indexed.show(10)
# COMMAND ----------
#number of times each numerical movie title has been rated
indexed.groupBy('title_new').count().orderBy('count',ascending=False).show(10,False)
# COMMAND ----------
#split the data into training and test datatset
train,test=indexed.randomSplit([0.75,0.25])
# COMMAND ----------
#count number of records in train set
train.count()
# COMMAND ----------
#count number of records in test set
test.count()
# COMMAND ----------
#import ALS recommender function from pyspark ml library
from pyspark.ml.recommendation import ALS
# COMMAND ----------
#Training the recommender model using train datatset
rec=ALS(maxIter=10,regParam=0.01,userCol='userId',itemCol='title_new',ratingCol='rating',nonnegative=True,coldStartStrategy="drop")
# COMMAND ----------
#fit the model on train set
rec_model=rec.fit(train)
# COMMAND ----------
#making predictions on test set
predicted_ratings=rec_model.transform(test)
# COMMAND ----------
#columns in predicted ratings dataframe
predicted_ratings.printSchema()
# COMMAND ----------
#predicted vs actual ratings for test set
predicted_ratings.orderBy(rand()).show(10)
# COMMAND ----------
#importing Regression Evaluator to measure RMSE
from pyspark.ml.evaluation import RegressionEvaluator
# COMMAND ----------
#create Regressor evaluator object for measuring accuracy
evaluator=RegressionEvaluator(metricName='rmse',predictionCol='prediction',labelCol='rating')
# COMMAND ----------
#apply the RE on predictions dataframe to calculate RMSE
rmse=evaluator.evaluate(predicted_ratings)
# COMMAND ----------
#print RMSE error
print(rmse)
# COMMAND ----------
#Recommend top movies which user might like
# COMMAND ----------
#create dataset of all distinct movies
unique_movies=indexed.select('title_new').distinct()
# COMMAND ----------
#number of unique movies
unique_movies.count()
# COMMAND ----------
#assigning alias name 'a' to unique movies df
a = unique_movies.alias('a')
# COMMAND ----------
user_id=85
# COMMAND ----------
#creating another dataframe which contains already watched movie by active user
watched_movies=indexed.filter(indexed['userId'] == user_id).select('title_new').distinct()
# COMMAND ----------
#number of movies already rated
watched_movies.count()
# COMMAND ----------
#assigning alias name 'b' to watched movies df
b=watched_movies.alias('b')
# COMMAND ----------
#joining both tables on left join
total_movies = a.join(b, a.title_new == b.title_new,how='left')
# COMMAND ----------
total_movies.show(10,False)
# COMMAND ----------
#selecting movies which active user is yet to rate or watch
remaining_movies=total_movies.where(col("b.title_new").isNull()).select(a.title_new).distinct()
# COMMAND ----------
#number of movies user is yet to rate
remaining_movies.count()
# COMMAND ----------
#adding new column of user_Id of active useer to remaining movies df
remaining_movies=remaining_movies.withColumn("userId",lit(int(user_id)))
# COMMAND ----------
remaining_movies.show(10,False)
# COMMAND ----------
#making recommendations using ALS recommender model and selecting only top 'n' movies
recommendations=rec_model.transform(remaining_movies).orderBy('prediction',ascending=False)
# COMMAND ----------
recommendations.show(5,False)
# COMMAND ----------
#converting title_new values back to movie titles
movie_title = IndexToString(inputCol="title_new", outputCol="title",labels=model.labels)
final_recommendations=movie_title.transform(recommendations)
# COMMAND ----------
final_recommendations.show(10,False)
# COMMAND ----------
#create function to recommend top 'n' movies to any particular user
def top_movies(user_id,n):
"""
This function returns the top 'n' movies that user has not seen yet but might like
"""
#assigning alias name 'a' to unique movies df
a = unique_movies.alias('a')
#creating another dataframe which contains already watched movie by active user
watched_movies=indexed.filter(indexed['userId'] == user_id).select('title_new')
#assigning alias name 'b' to watched movies df
b=watched_movies.alias('b')
#joining both tables on left join
total_movies = a.join(b, a.title_new == b.title_new,how='left')
#selecting movies which active user is yet to rate or watch
remaining_movies=total_movies.where(col("b.title_new").isNull()).select(a.title_new).distinct()
#adding new column of user_Id of active useer to remaining movies df
remaining_movies=remaining_movies.withColumn("userId",lit(int(user_id)))
#making recommendations using ALS recommender model and selecting only top 'n' movies
recommendations=rec_model.transform(remaining_movies).orderBy('prediction',ascending=False).limit(n)
#adding columns of movie titles in recommendations
movie_title = IndexToString(inputCol="title_new", outputCol="title",labels=model.labels)
final_recommendations=movie_title.transform(recommendations)
#return the recommendations to active user
return final_recommendations.show(n,False)
# COMMAND ----------
top_movies(85,10)
# COMMAND ----------
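# COMMAND ----------

# Side note (illustrative sketch, Spark 2.2+): ALSModel also ships a built-in helper that
# returns top 'n' recommendations for every user in one call. Unlike top_movies it does
# not filter out titles the user has already rated, but it avoids the manual joins.
all_user_recs = rec_model.recommendForAllUsers(10)
all_user_recs.show(5, False)

# COMMAND ----------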
| [
2,
3,
4,
5,
6
] |
1,893 | d6cfe7132855d832d8fd1ea9ca9760bd22109a92 | <mask token>
class Bar(models.Model):
b = models.CharField(max_length=10)
a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)
class DTModel(models.Model):
name = models.CharField(max_length=32)
start_datetime = models.DateTimeField(null=True, blank=True)
end_datetime = models.DateTimeField(null=True, blank=True)
start_date = models.DateField(null=True, blank=True)
end_date = models.DateField(null=True, blank=True)
start_time = models.TimeField(null=True, blank=True)
end_time = models.TimeField(null=True, blank=True)
duration = models.DurationField(null=True, blank=True)
def __str__(self):
return 'DTModel({0})'.format(self.name)
| <mask token>
class Foo(models.Model):
<mask token>
<mask token>
class Bar(models.Model):
b = models.CharField(max_length=10)
a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)
class DTModel(models.Model):
name = models.CharField(max_length=32)
start_datetime = models.DateTimeField(null=True, blank=True)
end_datetime = models.DateTimeField(null=True, blank=True)
start_date = models.DateField(null=True, blank=True)
end_date = models.DateField(null=True, blank=True)
start_time = models.TimeField(null=True, blank=True)
end_time = models.TimeField(null=True, blank=True)
duration = models.DurationField(null=True, blank=True)
def __str__(self):
return 'DTModel({0})'.format(self.name)
| <mask token>
class BigS(models.Model):
<mask token>
class Foo(models.Model):
a = models.CharField(max_length=10)
d = models.DecimalField(max_digits=5, decimal_places=3)
class Bar(models.Model):
b = models.CharField(max_length=10)
a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)
class DTModel(models.Model):
name = models.CharField(max_length=32)
start_datetime = models.DateTimeField(null=True, blank=True)
end_datetime = models.DateTimeField(null=True, blank=True)
start_date = models.DateField(null=True, blank=True)
end_date = models.DateField(null=True, blank=True)
start_time = models.TimeField(null=True, blank=True)
end_time = models.TimeField(null=True, blank=True)
duration = models.DurationField(null=True, blank=True)
def __str__(self):
return 'DTModel({0})'.format(self.name)
| <mask token>
class FieldsTest(models.Model):
<mask token>
<mask token>
class BigS(models.Model):
s = models.SlugField(max_length=255)
class Foo(models.Model):
a = models.CharField(max_length=10)
d = models.DecimalField(max_digits=5, decimal_places=3)
class Bar(models.Model):
b = models.CharField(max_length=10)
a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)
class DTModel(models.Model):
name = models.CharField(max_length=32)
start_datetime = models.DateTimeField(null=True, blank=True)
end_datetime = models.DateTimeField(null=True, blank=True)
start_date = models.DateField(null=True, blank=True)
end_date = models.DateField(null=True, blank=True)
start_time = models.TimeField(null=True, blank=True)
end_time = models.TimeField(null=True, blank=True)
duration = models.DurationField(null=True, blank=True)
def __str__(self):
return 'DTModel({0})'.format(self.name)
| # -*- coding: utf-8 -*-
from django.db import models
class FieldsTest(models.Model):
pub_date = models.DateTimeField()
mod_date = models.DateTimeField()
class BigS(models.Model):
s = models.SlugField(max_length=255)
class Foo(models.Model):
a = models.CharField(max_length=10)
d = models.DecimalField(max_digits=5, decimal_places=3)
class Bar(models.Model):
b = models.CharField(max_length=10)
a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)
class DTModel(models.Model):
name = models.CharField(max_length=32)
start_datetime = models.DateTimeField(null=True, blank=True)
end_datetime = models.DateTimeField(null=True, blank=True)
start_date = models.DateField(null=True, blank=True)
end_date = models.DateField(null=True, blank=True)
start_time = models.TimeField(null=True, blank=True)
end_time = models.TimeField(null=True, blank=True)
duration = models.DurationField(null=True, blank=True)
def __str__(self):
return 'DTModel({0})'.format(self.name)
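# Illustrative usage sketch (assumes these models live in a migrated Django app; the
# helper below is not used by the models themselves). The related_name='bars' on Bar.a
# exposes a reverse manager on each Foo instance.
from decimal import Decimal

def demo_reverse_relation():
    foo = Foo.objects.create(a='ten', d=Decimal('1.234'))
    Bar.objects.create(b='child', a=foo)
    return list(foo.bars.values_list('b', flat=True))  # ['child']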
| [
5,
6,
8,
10,
13
] |
1,894 | 3dcca85c8003b57ad37734bbbe171ab8cef0f56c | # This package includes different measures to evaluate topics
| null | null | null | null | [
1
] |
1,895 | b360ba7412bd10e2818511cee81302d407f88fd1 | <mask token>
| for x in range(0, 10, 3):
print('★', end=' ')
print()
print('------------------------')
for y in range(0, 10):
for x in range(0, 10):
print('★', end=' ')
print()
| # For when you want to repeat 3 times
# 10 stars in one line
for x in range(0, 10, 3): # the third number sets how much to step by.
# print(x)
print("★", end=" ")
print()
print("------------------------")
# nested for loop
for y in range(0, 10):
for x in range(0, 10):
# print(x)
print("★", end=" ")
print() | null | null | [
0,
1,
2
] |
1,896 | a79c9799ed237a943ae3d249a4d66eb2f8693e83 | <mask token>
| class Solution:
<mask token>
| class Solution:
def rotateRight(self, head: ListNode, k: int) ->ListNode:
if head is None or head.next is None or k == 0:
return head
tmp, length = head, 1
while tmp.next:
tmp = tmp.next
length += 1
k = k % length
if k == 0:
return head
fast = slow = head
for _ in range(k):
fast = fast.next
while fast.next:
fast = fast.next
slow = slow.next
res = slow.next
slow.next = None
fast.next = head
return res
| # Runtime: 44 ms, faster than 62.95% of Python3 online submissions for Rotate List.
# Memory Usage: 13.9 MB, less than 6.05% of Python3 online submissions for Rotate List.
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def rotateRight(self, head: ListNode, k: int) -> ListNode:
if head is None or head.next is None or k == 0:
return head
tmp, length = head, 1
while tmp.next:
tmp = tmp.next
length += 1
k = k % length
if k == 0: # don't need rotate
return head
fast = slow = head # fast and slow point
for _ in range(k):
fast = fast.next
while fast.next:
fast = fast.next
slow = slow.next
res = slow.next # ready result
slow.next = None
fast.next = head
return res
| null | [
0,
1,
2,
3
] |
1,897 | 0f0ea6f07f9a082042ed9aff7a95d372c32b5a13 | <mask token>
class ReshapedDistribution(TorchDistribution):
<mask token>
<mask token>
def __init__(self, base_dist, sample_shape=torch.Size(),
reinterpreted_batch_ndims=0):
sample_shape = torch.Size(sample_shape)
if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape
):
raise ValueError(
'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'
.format(reinterpreted_batch_ndims, len(sample_shape +
base_dist.batch_shape)))
self.base_dist = base_dist
self.sample_shape = sample_shape
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
shape = sample_shape + base_dist.batch_shape + base_dist.event_shape
batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.
event_shape)
batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]
super(ReshapedDistribution, self).__init__(batch_shape, event_shape)
def expand_by(self, sample_shape):
base_dist = self.base_dist
sample_shape = torch.Size(sample_shape) + self.sample_shape
reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
return ReshapedDistribution(base_dist, sample_shape,
reinterpreted_batch_ndims)
def independent(self, reinterpreted_batch_ndims=None):
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = len(self.batch_shape)
base_dist = self.base_dist
sample_shape = self.sample_shape
reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +
reinterpreted_batch_ndims)
return ReshapedDistribution(base_dist, sample_shape,
reinterpreted_batch_ndims)
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
return self.base_dist.support
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape + self.sample_shape)
<mask token>
def log_prob(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -
self.event_dim])
return sum_rightmost(self.base_dist.log_prob(value), self.
reinterpreted_batch_ndims).expand(shape)
def score_parts(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -
self.event_dim])
log_prob, score_function, entropy_term = self.base_dist.score_parts(
value)
log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims
).expand(shape)
if not isinstance(score_function, numbers.Number):
score_function = sum_rightmost(score_function, self.
reinterpreted_batch_ndims).expand(shape)
if not isinstance(entropy_term, numbers.Number):
entropy_term = sum_rightmost(entropy_term, self.
reinterpreted_batch_ndims).expand(shape)
return ScoreParts(log_prob, score_function, entropy_term)
def enumerate_support(self):
if self.reinterpreted_batch_ndims:
raise NotImplementedError(
'Pyro does not enumerate over cartesian products')
samples = self.base_dist.enumerate_support()
if not self.sample_shape:
return samples
enum_shape, base_shape = samples.shape[:1], samples.shape[1:]
samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape
) + base_shape)
samples = samples.expand(enum_shape + self.sample_shape + base_shape)
return samples
@property
def mean(self):
return self.base_dist.mean.expand(self.batch_shape + self.event_shape)
@property
def variance(self):
return self.base_dist.variance.expand(self.batch_shape + self.
event_shape)
class MaskedDistribution(TorchDistribution):
"""
Masks a distribution by a zero-one tensor that is broadcastable to the
distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
:param torch.Tensor mask: A zero-one valued float tensor.
"""
arg_constraints = {}
def __init__(self, base_dist, mask):
if broadcast_shape(mask.shape, base_dist.batch_shape
) != base_dist.batch_shape:
raise ValueError(
'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'
.format(mask.shape, base_dist.batch_shape))
self.base_dist = base_dist
self._mask = mask
super(MaskedDistribution, self).__init__(base_dist.batch_shape,
base_dist.event_shape)
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
return self.base_dist.support
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape)
def rsample(self, sample_shape=torch.Size()):
return self.base_dist.rsample(sample_shape)
def log_prob(self, value):
return self.base_dist.log_prob(value) * self._mask
def score_parts(self, value):
return self.base_dist.score_parts(value) * self._mask
def enumerate_support(self):
return self.base_dist.enumerate_support()
@property
def mean(self):
return self.base_dist.mean
@property
def variance(self):
return self.base_dist.variance
| <mask token>
class TorchDistributionMixin(Distribution):
<mask token>
<mask token>
<mask token>
def shape(self, sample_shape=torch.Size()):
"""
The tensor shape of samples from this distribution.
Samples are of shape::
d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
:param sample_shape: the size of the iid batch to be drawn from the
distribution.
:type sample_shape: torch.Size
:return: Tensor shape of samples.
:rtype: torch.Size
"""
return sample_shape + self.batch_shape + self.event_shape
<mask token>
<mask token>
def reshape(self, sample_shape=None, extra_event_dims=None):
raise Exception(
"""
.reshape(sample_shape=s, extra_event_dims=n) was renamed and split into
.expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n)."""
)
<mask token>
<mask token>
class TorchDistribution(torch.distributions.Distribution,
TorchDistributionMixin):
"""
Base class for PyTorch-compatible distributions with Pyro support.
This should be the base class for almost all new Pyro distributions.
.. note::
Parameters and data should be of type :class:`~torch.Tensor`
and all methods return type :class:`~torch.Tensor` unless
otherwise noted.
**Tensor Shapes**:
TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::
x = d.sample(sample_shape)
assert x.shape == d.shape(sample_shape)
Pyro follows the same distribution shape semantics as PyTorch. It distinguishes
between three different roles for tensor shapes of samples:
- *sample shape* corresponds to the shape of the iid samples drawn from the distribution.
This is taken as an argument by the distribution's `sample` method.
- *batch shape* corresponds to non-identical (independent) parameterizations of
the distribution, inferred from the distribution's parameter shapes. This is
fixed for a distribution instance.
- *event shape* corresponds to the event dimensions of the distribution, which
is fixed for a distribution class. These are collapsed when we try to score
a sample from the distribution via `d.log_prob(x)`.
These shapes are related by the equation::
assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
Distributions provide a vectorized
:meth`~torch.distributions.distribution.Distribution.log_prob` method that
evaluates the log probability density of each event in a batch
independently, returning a tensor of shape
``sample_shape + d.batch_shape``::
x = d.sample(sample_shape)
assert x.shape == d.shape(sample_shape)
log_p = d.log_prob(x)
assert log_p.shape == sample_shape + d.batch_shape
**Implementing New Distributions**:
Derived classes must implement the methods
:meth:`~torch.distributions.distribution.Distribution.sample`
(or :meth:`~torch.distributions.distribution.Distribution.rsample` if
``.has_rsample == True``) and
:meth:`~torch.distributions.distribution.Distribution.log_prob`, and must
implement the properties
:attr:`~torch.distributions.distribution.Distribution.batch_shape`,
and :attr:`~torch.distributions.distribution.Distribution.event_shape`.
Discrete classes may also implement the
:meth:`~torch.distributions.distribution.Distribution.enumerate_support`
method to improve gradient estimates and set
``.has_enumerate_support = True``.
"""
pass
class ReshapedDistribution(TorchDistribution):
"""
Reshapes a distribution by adding ``sample_shape`` to its total shape
and adding ``reinterpreted_batch_ndims`` to its
:attr:`~torch.distributions.distribution.Distribution.event_shape`.
:param torch.Size sample_shape: The size of the iid batch to be drawn from
the distribution.
:param int reinterpreted_batch_ndims: The number of extra event dimensions that will
be considered dependent.
"""
arg_constraints = {}
def __init__(self, base_dist, sample_shape=torch.Size(),
reinterpreted_batch_ndims=0):
sample_shape = torch.Size(sample_shape)
if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape
):
raise ValueError(
'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'
.format(reinterpreted_batch_ndims, len(sample_shape +
base_dist.batch_shape)))
self.base_dist = base_dist
self.sample_shape = sample_shape
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
shape = sample_shape + base_dist.batch_shape + base_dist.event_shape
batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.
event_shape)
batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]
super(ReshapedDistribution, self).__init__(batch_shape, event_shape)
def expand_by(self, sample_shape):
base_dist = self.base_dist
sample_shape = torch.Size(sample_shape) + self.sample_shape
reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
return ReshapedDistribution(base_dist, sample_shape,
reinterpreted_batch_ndims)
def independent(self, reinterpreted_batch_ndims=None):
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = len(self.batch_shape)
base_dist = self.base_dist
sample_shape = self.sample_shape
reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +
reinterpreted_batch_ndims)
return ReshapedDistribution(base_dist, sample_shape,
reinterpreted_batch_ndims)
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
return self.base_dist.support
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape + self.sample_shape)
def rsample(self, sample_shape=torch.Size()):
return self.base_dist.rsample(sample_shape + self.sample_shape)
def log_prob(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -
self.event_dim])
return sum_rightmost(self.base_dist.log_prob(value), self.
reinterpreted_batch_ndims).expand(shape)
def score_parts(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -
self.event_dim])
log_prob, score_function, entropy_term = self.base_dist.score_parts(
value)
log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims
).expand(shape)
if not isinstance(score_function, numbers.Number):
score_function = sum_rightmost(score_function, self.
reinterpreted_batch_ndims).expand(shape)
if not isinstance(entropy_term, numbers.Number):
entropy_term = sum_rightmost(entropy_term, self.
reinterpreted_batch_ndims).expand(shape)
return ScoreParts(log_prob, score_function, entropy_term)
def enumerate_support(self):
if self.reinterpreted_batch_ndims:
raise NotImplementedError(
'Pyro does not enumerate over cartesian products')
samples = self.base_dist.enumerate_support()
if not self.sample_shape:
return samples
enum_shape, base_shape = samples.shape[:1], samples.shape[1:]
samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape
) + base_shape)
samples = samples.expand(enum_shape + self.sample_shape + base_shape)
return samples
@property
def mean(self):
return self.base_dist.mean.expand(self.batch_shape + self.event_shape)
@property
def variance(self):
return self.base_dist.variance.expand(self.batch_shape + self.
event_shape)
class MaskedDistribution(TorchDistribution):
"""
Masks a distribution by a zero-one tensor that is broadcastable to the
distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
:param torch.Tensor mask: A zero-one valued float tensor.
"""
arg_constraints = {}
def __init__(self, base_dist, mask):
if broadcast_shape(mask.shape, base_dist.batch_shape
) != base_dist.batch_shape:
raise ValueError(
'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'
.format(mask.shape, base_dist.batch_shape))
self.base_dist = base_dist
self._mask = mask
super(MaskedDistribution, self).__init__(base_dist.batch_shape,
base_dist.event_shape)
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
return self.base_dist.support
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape)
def rsample(self, sample_shape=torch.Size()):
return self.base_dist.rsample(sample_shape)
def log_prob(self, value):
return self.base_dist.log_prob(value) * self._mask
def score_parts(self, value):
return self.base_dist.score_parts(value) * self._mask
def enumerate_support(self):
return self.base_dist.enumerate_support()
@property
def mean(self):
return self.base_dist.mean
@property
def variance(self):
return self.base_dist.variance
| <mask token>
class TorchDistributionMixin(Distribution):
<mask token>
def __call__(self, sample_shape=torch.Size()):
"""
Samples a random value.
This is reparameterized whenever possible, calling
:meth:`~torch.distributions.distribution.Distribution.rsample` for
reparameterized distributions and
:meth:`~torch.distributions.distribution.Distribution.sample` for
non-reparameterized distributions.
:param sample_shape: the size of the iid batch to be drawn from the
distribution.
:type sample_shape: torch.Size
:return: A random value or batch of random values (if parameters are
batched). The shape of the result should be `self.shape()`.
:rtype: torch.Tensor
"""
return self.rsample(sample_shape) if self.has_rsample else self.sample(
sample_shape)
<mask token>
def shape(self, sample_shape=torch.Size()):
"""
The tensor shape of samples from this distribution.
Samples are of shape::
d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
:param sample_shape: the size of the iid batch to be drawn from the
distribution.
:type sample_shape: torch.Size
:return: Tensor shape of samples.
:rtype: torch.Size
"""
return sample_shape + self.batch_shape + self.event_shape
<mask token>
<mask token>
def reshape(self, sample_shape=None, extra_event_dims=None):
raise Exception(
"""
.reshape(sample_shape=s, extra_event_dims=n) was renamed and split into
.expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n)."""
)
<mask token>
<mask token>
class TorchDistribution(torch.distributions.Distribution,
TorchDistributionMixin):
"""
Base class for PyTorch-compatible distributions with Pyro support.
This should be the base class for almost all new Pyro distributions.
.. note::
Parameters and data should be of type :class:`~torch.Tensor`
and all methods return type :class:`~torch.Tensor` unless
otherwise noted.
**Tensor Shapes**:
TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::
x = d.sample(sample_shape)
assert x.shape == d.shape(sample_shape)
Pyro follows the same distribution shape semantics as PyTorch. It distinguishes
between three different roles for tensor shapes of samples:
- *sample shape* corresponds to the shape of the iid samples drawn from the distribution.
This is taken as an argument by the distribution's `sample` method.
- *batch shape* corresponds to non-identical (independent) parameterizations of
the distribution, inferred from the distribution's parameter shapes. This is
fixed for a distribution instance.
- *event shape* corresponds to the event dimensions of the distribution, which
is fixed for a distribution class. These are collapsed when we try to score
a sample from the distribution via `d.log_prob(x)`.
These shapes are related by the equation::
assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
Distributions provide a vectorized
:meth`~torch.distributions.distribution.Distribution.log_prob` method that
evaluates the log probability density of each event in a batch
independently, returning a tensor of shape
``sample_shape + d.batch_shape``::
x = d.sample(sample_shape)
assert x.shape == d.shape(sample_shape)
log_p = d.log_prob(x)
assert log_p.shape == sample_shape + d.batch_shape
**Implementing New Distributions**:
Derived classes must implement the methods
:meth:`~torch.distributions.distribution.Distribution.sample`
(or :meth:`~torch.distributions.distribution.Distribution.rsample` if
``.has_rsample == True``) and
:meth:`~torch.distributions.distribution.Distribution.log_prob`, and must
implement the properties
:attr:`~torch.distributions.distribution.Distribution.batch_shape`,
and :attr:`~torch.distributions.distribution.Distribution.event_shape`.
Discrete classes may also implement the
:meth:`~torch.distributions.distribution.Distribution.enumerate_support`
method to improve gradient estimates and set
``.has_enumerate_support = True``.
"""
pass
class ReshapedDistribution(TorchDistribution):
"""
Reshapes a distribution by adding ``sample_shape`` to its total shape
and adding ``reinterpreted_batch_ndims`` to its
:attr:`~torch.distributions.distribution.Distribution.event_shape`.
:param torch.Size sample_shape: The size of the iid batch to be drawn from
the distribution.
:param int reinterpreted_batch_ndims: The number of extra event dimensions that will
be considered dependent.
"""
arg_constraints = {}
def __init__(self, base_dist, sample_shape=torch.Size(),
reinterpreted_batch_ndims=0):
sample_shape = torch.Size(sample_shape)
if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape
):
raise ValueError(
'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'
.format(reinterpreted_batch_ndims, len(sample_shape +
base_dist.batch_shape)))
self.base_dist = base_dist
self.sample_shape = sample_shape
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
shape = sample_shape + base_dist.batch_shape + base_dist.event_shape
batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.
event_shape)
batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]
super(ReshapedDistribution, self).__init__(batch_shape, event_shape)
def expand_by(self, sample_shape):
base_dist = self.base_dist
sample_shape = torch.Size(sample_shape) + self.sample_shape
reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
return ReshapedDistribution(base_dist, sample_shape,
reinterpreted_batch_ndims)
def independent(self, reinterpreted_batch_ndims=None):
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = len(self.batch_shape)
base_dist = self.base_dist
sample_shape = self.sample_shape
reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +
reinterpreted_batch_ndims)
return ReshapedDistribution(base_dist, sample_shape,
reinterpreted_batch_ndims)
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
return self.base_dist.support
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape + self.sample_shape)
def rsample(self, sample_shape=torch.Size()):
return self.base_dist.rsample(sample_shape + self.sample_shape)
def log_prob(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -
self.event_dim])
return sum_rightmost(self.base_dist.log_prob(value), self.
reinterpreted_batch_ndims).expand(shape)
def score_parts(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -
self.event_dim])
log_prob, score_function, entropy_term = self.base_dist.score_parts(
value)
log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims
).expand(shape)
if not isinstance(score_function, numbers.Number):
score_function = sum_rightmost(score_function, self.
reinterpreted_batch_ndims).expand(shape)
if not isinstance(entropy_term, numbers.Number):
entropy_term = sum_rightmost(entropy_term, self.
reinterpreted_batch_ndims).expand(shape)
return ScoreParts(log_prob, score_function, entropy_term)
def enumerate_support(self):
if self.reinterpreted_batch_ndims:
raise NotImplementedError(
'Pyro does not enumerate over cartesian products')
samples = self.base_dist.enumerate_support()
if not self.sample_shape:
return samples
enum_shape, base_shape = samples.shape[:1], samples.shape[1:]
samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape
) + base_shape)
samples = samples.expand(enum_shape + self.sample_shape + base_shape)
return samples
@property
def mean(self):
return self.base_dist.mean.expand(self.batch_shape + self.event_shape)
@property
def variance(self):
return self.base_dist.variance.expand(self.batch_shape + self.
event_shape)
class MaskedDistribution(TorchDistribution):
"""
Masks a distribution by a zero-one tensor that is broadcastable to the
distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
:param torch.Tensor mask: A zero-one valued float tensor.
"""
arg_constraints = {}
def __init__(self, base_dist, mask):
if broadcast_shape(mask.shape, base_dist.batch_shape
) != base_dist.batch_shape:
raise ValueError(
'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'
.format(mask.shape, base_dist.batch_shape))
self.base_dist = base_dist
self._mask = mask
super(MaskedDistribution, self).__init__(base_dist.batch_shape,
base_dist.event_shape)
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
return self.base_dist.support
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape)
def rsample(self, sample_shape=torch.Size()):
return self.base_dist.rsample(sample_shape)
def log_prob(self, value):
return self.base_dist.log_prob(value) * self._mask
def score_parts(self, value):
return self.base_dist.score_parts(value) * self._mask
def enumerate_support(self):
return self.base_dist.enumerate_support()
@property
def mean(self):
return self.base_dist.mean
@property
def variance(self):
return self.base_dist.variance
| <mask token>
class TorchDistributionMixin(Distribution):
<mask token>
def __call__(self, sample_shape=torch.Size()):
"""
Samples a random value.
This is reparameterized whenever possible, calling
:meth:`~torch.distributions.distribution.Distribution.rsample` for
reparameterized distributions and
:meth:`~torch.distributions.distribution.Distribution.sample` for
non-reparameterized distributions.
:param sample_shape: the size of the iid batch to be drawn from the
distribution.
:type sample_shape: torch.Size
:return: A random value or batch of random values (if parameters are
batched). The shape of the result should be `self.shape()`.
:rtype: torch.Tensor
"""
return self.rsample(sample_shape) if self.has_rsample else self.sample(
sample_shape)
<mask token>
def shape(self, sample_shape=torch.Size()):
"""
The tensor shape of samples from this distribution.
Samples are of shape::
d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
:param sample_shape: the size of the iid batch to be drawn from the
distribution.
:type sample_shape: torch.Size
:return: Tensor shape of samples.
:rtype: torch.Size
"""
return sample_shape + self.batch_shape + self.event_shape
def expand(self, batch_shape):
"""
Expands a distribution to a desired
:attr:`~torch.distributions.distribution.Distribution.batch_shape`.
Note that this is more general than :meth:`expand_by` because
``d.expand_by(sample_shape)`` can be reduced to
``d.expand(sample_shape + d.batch_shape)``.
:param torch.Size batch_shape: The target ``batch_shape``. This must
be compatible with ``self.batch_shape`` similar to the requirements
of :func:`torch.Tensor.expand`: the target ``batch_shape`` must
be at least as long as ``self.batch_shape``, and for each
non-singleton dim of ``self.batch_shape``, ``batch_shape`` must
either agree or be set to ``-1``.
:return: An expanded version of this distribution.
:rtype: :class:`ReshapedDistribution`
"""
batch_shape = list(batch_shape)
if len(batch_shape) < len(self.batch_shape):
raise ValueError(
'Expected len(batch_shape) >= len(self.batch_shape), actual {} vs {}'
.format(len(batch_shape), len(self.batch_shape)))
for dim in range(-1, -1 - len(self.batch_shape), -1):
if batch_shape[dim] == -1:
batch_shape[dim] = self.batch_shape[dim]
elif batch_shape[dim] != self.batch_shape[dim]:
if self.batch_shape[dim] != 1:
raise ValueError(
'Cannot broadcast dim {} of size {} to size {}'.
format(dim, self.batch_shape[dim], batch_shape[dim]))
else:
raise NotImplementedError(
'https://github.com/uber/pyro/issues/1119')
sample_shape = batch_shape[:len(batch_shape) - len(self.batch_shape)]
return self.expand_by(sample_shape)
<mask token>
def reshape(self, sample_shape=None, extra_event_dims=None):
raise Exception(
"""
.reshape(sample_shape=s, extra_event_dims=n) was renamed and split into
.expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n)."""
)
def independent(self, reinterpreted_batch_ndims=None):
"""
Reinterprets the ``n`` rightmost dimensions of this distribution's
:attr:`~torch.distributions.distribution.Distribution.batch_shape`
as event dims, adding them to the left side of
:attr:`~torch.distributions.distribution.Distribution.event_shape`.
Example::
>>> [d1.batch_shape, d1.event_shape]
[torch.Size((2, 3)), torch.Size((4, 5))]
>>> d2 = d1.independent(1)
>>> [d2.batch_shape, d2.event_shape]
[torch.Size((2,)), torch.Size((3, 4, 5))]
>>> d3 = d1.independent(2)
>>> [d3.batch_shape, d3.event_shape]
[torch.Size(()), torch.Size((2, 3, 4, 5))]
:param int reinterpreted_batch_ndims: The number of batch dimensions
to reinterpret as event dimensions.
:return: A reshaped version of this distribution.
:rtype: :class:`ReshapedDistribution`
"""
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = len(self.batch_shape)
return ReshapedDistribution(self, reinterpreted_batch_ndims=
reinterpreted_batch_ndims)
def mask(self, mask):
"""
Masks a distribution by a zero-one tensor that is broadcastable to the
distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
:param torch.Tensor mask: A zero-one valued float tensor.
:return: A masked copy of this distribution.
:rtype: :class:`MaskedDistribution`
"""
return MaskedDistribution(self, mask)
class TorchDistribution(torch.distributions.Distribution,
TorchDistributionMixin):
"""
Base class for PyTorch-compatible distributions with Pyro support.
This should be the base class for almost all new Pyro distributions.
.. note::
Parameters and data should be of type :class:`~torch.Tensor`
and all methods return type :class:`~torch.Tensor` unless
otherwise noted.
**Tensor Shapes**:
TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::
x = d.sample(sample_shape)
assert x.shape == d.shape(sample_shape)
Pyro follows the same distribution shape semantics as PyTorch. It distinguishes
between three different roles for tensor shapes of samples:
- *sample shape* corresponds to the shape of the iid samples drawn from the distribution.
This is taken as an argument by the distribution's `sample` method.
- *batch shape* corresponds to non-identical (independent) parameterizations of
the distribution, inferred from the distribution's parameter shapes. This is
fixed for a distribution instance.
- *event shape* corresponds to the event dimensions of the distribution, which
is fixed for a distribution class. These are collapsed when we try to score
a sample from the distribution via `d.log_prob(x)`.
These shapes are related by the equation::
assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
Distributions provide a vectorized
:meth:`~torch.distributions.distribution.Distribution.log_prob` method that
evaluates the log probability density of each event in a batch
independently, returning a tensor of shape
``sample_shape + d.batch_shape``::
x = d.sample(sample_shape)
assert x.shape == d.shape(sample_shape)
log_p = d.log_prob(x)
assert log_p.shape == sample_shape + d.batch_shape
**Implementing New Distributions**:
Derived classes must implement the methods
:meth:`~torch.distributions.distribution.Distribution.sample`
(or :meth:`~torch.distributions.distribution.Distribution.rsample` if
``.has_rsample == True``) and
:meth:`~torch.distributions.distribution.Distribution.log_prob`, and must
implement the properties
:attr:`~torch.distributions.distribution.Distribution.batch_shape`,
and :attr:`~torch.distributions.distribution.Distribution.event_shape`.
Discrete classes may also implement the
:meth:`~torch.distributions.distribution.Distribution.enumerate_support`
method to improve gradient estimates and set
``.has_enumerate_support = True``.
"""
pass
class ReshapedDistribution(TorchDistribution):
"""
Reshapes a distribution by adding ``sample_shape`` to its total shape
and adding ``reinterpreted_batch_ndims`` to its
:attr:`~torch.distributions.distribution.Distribution.event_shape`.
:param torch.Size sample_shape: The size of the iid batch to be drawn from
the distribution.
:param int reinterpreted_batch_ndims: The number of extra event dimensions that will
be considered dependent.
"""
arg_constraints = {}
def __init__(self, base_dist, sample_shape=torch.Size(),
reinterpreted_batch_ndims=0):
sample_shape = torch.Size(sample_shape)
if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape
):
raise ValueError(
'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'
.format(reinterpreted_batch_ndims, len(sample_shape +
base_dist.batch_shape)))
self.base_dist = base_dist
self.sample_shape = sample_shape
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
shape = sample_shape + base_dist.batch_shape + base_dist.event_shape
batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.
event_shape)
batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]
super(ReshapedDistribution, self).__init__(batch_shape, event_shape)
def expand_by(self, sample_shape):
base_dist = self.base_dist
sample_shape = torch.Size(sample_shape) + self.sample_shape
reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
return ReshapedDistribution(base_dist, sample_shape,
reinterpreted_batch_ndims)
def independent(self, reinterpreted_batch_ndims=None):
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = len(self.batch_shape)
base_dist = self.base_dist
sample_shape = self.sample_shape
reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +
reinterpreted_batch_ndims)
return ReshapedDistribution(base_dist, sample_shape,
reinterpreted_batch_ndims)
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
return self.base_dist.support
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape + self.sample_shape)
def rsample(self, sample_shape=torch.Size()):
return self.base_dist.rsample(sample_shape + self.sample_shape)
def log_prob(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -
self.event_dim])
return sum_rightmost(self.base_dist.log_prob(value), self.
reinterpreted_batch_ndims).expand(shape)
def score_parts(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -
self.event_dim])
log_prob, score_function, entropy_term = self.base_dist.score_parts(
value)
log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims
).expand(shape)
if not isinstance(score_function, numbers.Number):
score_function = sum_rightmost(score_function, self.
reinterpreted_batch_ndims).expand(shape)
if not isinstance(entropy_term, numbers.Number):
entropy_term = sum_rightmost(entropy_term, self.
reinterpreted_batch_ndims).expand(shape)
return ScoreParts(log_prob, score_function, entropy_term)
def enumerate_support(self):
if self.reinterpreted_batch_ndims:
raise NotImplementedError(
'Pyro does not enumerate over cartesian products')
samples = self.base_dist.enumerate_support()
if not self.sample_shape:
return samples
enum_shape, base_shape = samples.shape[:1], samples.shape[1:]
samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape
) + base_shape)
samples = samples.expand(enum_shape + self.sample_shape + base_shape)
return samples
@property
def mean(self):
return self.base_dist.mean.expand(self.batch_shape + self.event_shape)
@property
def variance(self):
return self.base_dist.variance.expand(self.batch_shape + self.
event_shape)
class MaskedDistribution(TorchDistribution):
"""
Masks a distribution by a zero-one tensor that is broadcastable to the
distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
:param torch.Tensor mask: A zero-one valued float tensor.
"""
arg_constraints = {}
def __init__(self, base_dist, mask):
if broadcast_shape(mask.shape, base_dist.batch_shape
) != base_dist.batch_shape:
raise ValueError(
'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'
.format(mask.shape, base_dist.batch_shape))
self.base_dist = base_dist
self._mask = mask
super(MaskedDistribution, self).__init__(base_dist.batch_shape,
base_dist.event_shape)
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
return self.base_dist.support
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape)
def rsample(self, sample_shape=torch.Size()):
return self.base_dist.rsample(sample_shape)
def log_prob(self, value):
return self.base_dist.log_prob(value) * self._mask
def score_parts(self, value):
return self.base_dist.score_parts(value) * self._mask
def enumerate_support(self):
return self.base_dist.enumerate_support()
@property
def mean(self):
return self.base_dist.mean
@property
def variance(self):
return self.base_dist.variance
| from __future__ import absolute_import, division, print_function
import numbers
import torch
from torch.distributions import constraints
from pyro.distributions.distribution import Distribution
from pyro.distributions.score_parts import ScoreParts
from pyro.distributions.util import broadcast_shape, sum_rightmost
class TorchDistributionMixin(Distribution):
"""
Mixin to provide Pyro compatibility for PyTorch distributions.
You should instead use `TorchDistribution` for new distribution classes.
This is mainly useful for wrapping existing PyTorch distributions for
use in Pyro. Derived classes must first inherit from
:class:`torch.distributions.distribution.Distribution` and then inherit
from :class:`TorchDistributionMixin`.
"""
def __call__(self, sample_shape=torch.Size()):
"""
Samples a random value.
This is reparameterized whenever possible, calling
:meth:`~torch.distributions.distribution.Distribution.rsample` for
reparameterized distributions and
:meth:`~torch.distributions.distribution.Distribution.sample` for
non-reparameterized distributions.
:param sample_shape: the size of the iid batch to be drawn from the
distribution.
:type sample_shape: torch.Size
:return: A random value or batch of random values (if parameters are
batched). The shape of the result should be `self.shape()`.
:rtype: torch.Tensor
"""
return self.rsample(sample_shape) if self.has_rsample else self.sample(sample_shape)
@property
def event_dim(self):
"""
:return: Number of dimensions of individual events.
:rtype: int
"""
return len(self.event_shape)
def shape(self, sample_shape=torch.Size()):
"""
The tensor shape of samples from this distribution.
Samples are of shape::
d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
:param sample_shape: the size of the iid batch to be drawn from the
distribution.
:type sample_shape: torch.Size
:return: Tensor shape of samples.
:rtype: torch.Size
"""
return sample_shape + self.batch_shape + self.event_shape
def expand(self, batch_shape):
"""
Expands a distribution to a desired
:attr:`~torch.distributions.distribution.Distribution.batch_shape`.
Note that this is more general than :meth:`expand_by` because
``d.expand_by(sample_shape)`` can be reduced to
``d.expand(sample_shape + d.batch_shape)``.
:param torch.Size batch_shape: The target ``batch_shape``. This must
be compatible with ``self.batch_shape`` similar to the requirements
of :func:`torch.Tensor.expand`: the target ``batch_shape`` must
be at least as long as ``self.batch_shape``, and for each
non-singleton dim of ``self.batch_shape``, ``batch_shape`` must
either agree or be set to ``-1``.
:return: An expanded version of this distribution.
:rtype: :class:`ReshapedDistribution`
"""
batch_shape = list(batch_shape)
if len(batch_shape) < len(self.batch_shape):
raise ValueError("Expected len(batch_shape) >= len(self.batch_shape), "
"actual {} vs {}".format(len(batch_shape), len(self.batch_shape)))
# check sizes of existing dims
for dim in range(-1, -1 - len(self.batch_shape), -1):
if batch_shape[dim] == -1:
batch_shape[dim] = self.batch_shape[dim]
elif batch_shape[dim] != self.batch_shape[dim]:
if self.batch_shape[dim] != 1:
raise ValueError("Cannot broadcast dim {} of size {} to size {}".format(
dim, self.batch_shape[dim], batch_shape[dim]))
else:
raise NotImplementedError("https://github.com/uber/pyro/issues/1119")
sample_shape = batch_shape[:len(batch_shape) - len(self.batch_shape)]
return self.expand_by(sample_shape)
def expand_by(self, sample_shape):
"""
Expands a distribution by adding ``sample_shape`` to the left side of
its :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
To expand internal dims of ``self.batch_shape`` from 1 to something
larger, use :meth:`expand` instead.
:param torch.Size sample_shape: The size of the iid batch to be drawn
from the distribution.
:return: An expanded version of this distribution.
:rtype: :class:`ReshapedDistribution`
"""
return ReshapedDistribution(self, sample_shape=sample_shape)
def reshape(self, sample_shape=None, extra_event_dims=None):
raise Exception('''
.reshape(sample_shape=s, extra_event_dims=n) was renamed and split into
.expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n).''')
def independent(self, reinterpreted_batch_ndims=None):
"""
Reinterprets the ``n`` rightmost dimensions of this distribution's
:attr:`~torch.distributions.distribution.Distribution.batch_shape`
as event dims, adding them to the left side of
:attr:`~torch.distributions.distribution.Distribution.event_shape`.
Example::
>>> [d1.batch_shape, d1.event_shape]
[torch.Size((2, 3)), torch.Size((4, 5))]
>>> d2 = d1.independent(1)
>>> [d2.batch_shape, d2.event_shape]
[torch.Size((2,)), torch.Size((3, 4, 5))]
>>> d3 = d1.independent(2)
>>> [d3.batch_shape, d3.event_shape]
[torch.Size(()), torch.Size((2, 3, 4, 5))]
:param int reinterpreted_batch_ndims: The number of batch dimensions
to reinterpret as event dimensions.
:return: A reshaped version of this distribution.
:rtype: :class:`ReshapedDistribution`
"""
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = len(self.batch_shape)
# TODO return pyro.distributions.torch.Independent(self, reinterpreted_batch_ndims)
return ReshapedDistribution(self, reinterpreted_batch_ndims=reinterpreted_batch_ndims)
def mask(self, mask):
"""
Masks a distribution by a zero-one tensor that is broadcastable to the
distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
:param torch.Tensor mask: A zero-one valued float tensor.
:return: A masked copy of this distribution.
:rtype: :class:`MaskedDistribution`
"""
return MaskedDistribution(self, mask)
class TorchDistribution(torch.distributions.Distribution, TorchDistributionMixin):
"""
Base class for PyTorch-compatible distributions with Pyro support.
This should be the base class for almost all new Pyro distributions.
.. note::
Parameters and data should be of type :class:`~torch.Tensor`
and all methods return type :class:`~torch.Tensor` unless
otherwise noted.
**Tensor Shapes**:
TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::
x = d.sample(sample_shape)
assert x.shape == d.shape(sample_shape)
Pyro follows the same distribution shape semantics as PyTorch. It distinguishes
between three different roles for tensor shapes of samples:
- *sample shape* corresponds to the shape of the iid samples drawn from the distribution.
This is taken as an argument by the distribution's `sample` method.
- *batch shape* corresponds to non-identical (independent) parameterizations of
the distribution, inferred from the distribution's parameter shapes. This is
fixed for a distribution instance.
- *event shape* corresponds to the event dimensions of the distribution, which
is fixed for a distribution class. These are collapsed when we try to score
a sample from the distribution via `d.log_prob(x)`.
These shapes are related by the equation::
assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
Distributions provide a vectorized
:meth:`~torch.distributions.distribution.Distribution.log_prob` method that
evaluates the log probability density of each event in a batch
independently, returning a tensor of shape
``sample_shape + d.batch_shape``::
x = d.sample(sample_shape)
assert x.shape == d.shape(sample_shape)
log_p = d.log_prob(x)
assert log_p.shape == sample_shape + d.batch_shape
**Implementing New Distributions**:
Derived classes must implement the methods
:meth:`~torch.distributions.distribution.Distribution.sample`
(or :meth:`~torch.distributions.distribution.Distribution.rsample` if
``.has_rsample == True``) and
:meth:`~torch.distributions.distribution.Distribution.log_prob`, and must
implement the properties
:attr:`~torch.distributions.distribution.Distribution.batch_shape`,
and :attr:`~torch.distributions.distribution.Distribution.event_shape`.
Discrete classes may also implement the
:meth:`~torch.distributions.distribution.Distribution.enumerate_support`
method to improve gradient estimates and set
``.has_enumerate_support = True``.
"""
pass
class ReshapedDistribution(TorchDistribution):
"""
Reshapes a distribution by adding ``sample_shape`` to its total shape
and adding ``reinterpreted_batch_ndims`` to its
:attr:`~torch.distributions.distribution.Distribution.event_shape`.
:param torch.Size sample_shape: The size of the iid batch to be drawn from
the distribution.
:param int reinterpreted_batch_ndims: The number of extra event dimensions that will
be considered dependent.
"""
arg_constraints = {}
def __init__(self, base_dist, sample_shape=torch.Size(), reinterpreted_batch_ndims=0):
sample_shape = torch.Size(sample_shape)
if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape):
raise ValueError('Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), '
'actual {} vs {}'.format(reinterpreted_batch_ndims,
len(sample_shape + base_dist.batch_shape)))
self.base_dist = base_dist
self.sample_shape = sample_shape
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
shape = sample_shape + base_dist.batch_shape + base_dist.event_shape
batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.event_shape)
batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]
super(ReshapedDistribution, self).__init__(batch_shape, event_shape)
def expand_by(self, sample_shape):
base_dist = self.base_dist
sample_shape = torch.Size(sample_shape) + self.sample_shape
reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
return ReshapedDistribution(base_dist, sample_shape, reinterpreted_batch_ndims)
def independent(self, reinterpreted_batch_ndims=None):
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = len(self.batch_shape)
base_dist = self.base_dist
sample_shape = self.sample_shape
reinterpreted_batch_ndims = self.reinterpreted_batch_ndims + reinterpreted_batch_ndims
return ReshapedDistribution(base_dist, sample_shape, reinterpreted_batch_ndims)
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
return self.base_dist.support
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape + self.sample_shape)
def rsample(self, sample_shape=torch.Size()):
return self.base_dist.rsample(sample_shape + self.sample_shape)
def log_prob(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() - self.event_dim])
return sum_rightmost(self.base_dist.log_prob(value), self.reinterpreted_batch_ndims).expand(shape)
def score_parts(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() - self.event_dim])
log_prob, score_function, entropy_term = self.base_dist.score_parts(value)
log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims).expand(shape)
if not isinstance(score_function, numbers.Number):
score_function = sum_rightmost(score_function, self.reinterpreted_batch_ndims).expand(shape)
if not isinstance(entropy_term, numbers.Number):
entropy_term = sum_rightmost(entropy_term, self.reinterpreted_batch_ndims).expand(shape)
return ScoreParts(log_prob, score_function, entropy_term)
def enumerate_support(self):
if self.reinterpreted_batch_ndims:
raise NotImplementedError("Pyro does not enumerate over cartesian products")
samples = self.base_dist.enumerate_support()
if not self.sample_shape:
return samples
# Shift enumeration dim to correct location.
enum_shape, base_shape = samples.shape[:1], samples.shape[1:]
samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape) + base_shape)
samples = samples.expand(enum_shape + self.sample_shape + base_shape)
return samples
@property
def mean(self):
return self.base_dist.mean.expand(self.batch_shape + self.event_shape)
@property
def variance(self):
return self.base_dist.variance.expand(self.batch_shape + self.event_shape)
class MaskedDistribution(TorchDistribution):
"""
Masks a distribution by a zero-one tensor that is broadcastable to the
distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
:param torch.Tensor mask: A zero-one valued float tensor.
"""
arg_constraints = {}
def __init__(self, base_dist, mask):
if broadcast_shape(mask.shape, base_dist.batch_shape) != base_dist.batch_shape:
raise ValueError("Expected mask.shape to be broadcastable to base_dist.batch_shape, "
"actual {} vs {}".format(mask.shape, base_dist.batch_shape))
self.base_dist = base_dist
self._mask = mask
super(MaskedDistribution, self).__init__(base_dist.batch_shape, base_dist.event_shape)
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
return self.base_dist.support
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape)
def rsample(self, sample_shape=torch.Size()):
return self.base_dist.rsample(sample_shape)
def log_prob(self, value):
return self.base_dist.log_prob(value) * self._mask
def score_parts(self, value):
return self.base_dist.score_parts(value) * self._mask
def enumerate_support(self):
return self.base_dist.enumerate_support()
@property
def mean(self):
return self.base_dist.mean
@property
def variance(self):
return self.base_dist.variance
| [
27,
35,
36,
39,
44
] |
1,898 | 9bc955def6250908050a1f3046dd78480f25e0a1 | <mask token>
def resizeXY(X, Y, occurrency, dx, dz):
"""This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values
contained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed"""
sumY = sum(Y)
sumX = sum(X)
visitedY = [False] * len(Y)
for y_index in range(len(Y)):
update = True
for x_index in range(len(X)):
if occurrency[x_index][y_index] == False:
update = False
if update:
sumY = sumY - Y[y_index]
sumX = sumX - X[y_index]
dx = dx - X[y_index]
dz = dz - Y[y_index]
for x_index in range(len(X)):
modifyX = False
for y_index in range(len(Y)):
if occurrency[x_index][y_index] == False and visitedY[y_index
] == False:
Y[y_index] = dz * Y[y_index] / sumY
visitedY[y_index] = True
modifyX = True
if occurrency[x_index][y_index] == False and visitedY[y_index
] == True and not modifyX:
modifyX = True
if modifyX:
X[x_index] = dx * X[x_index] / sumX
def window(windowX, windowY, occurrency):
"""This function, given three array, X, Y and occurrency, return the HPC model of the window
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for 'scaling'"""
def window0(dx, dy, dz):
resizeXY(windowX, windowY, occurrency, dx, dz)
model = []
for xIndex in range(len(windowX)):
yQuotes = []
xSum = sum(windowX[:xIndex])
for yIndex in range(len(windowY)):
if occurrency[xIndex][yIndex] == False:
yQuotes.append(-windowY[yIndex])
else:
yQuotes.append(windowY[yIndex])
model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(
yQuotes)]))
result = STRUCT(model)
result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))
windowFrame = STRUCT([result])
windowFrame = TEXTURE(['iron.jpg'])(windowFrame)
glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(
result)[0] * 0.95])
glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)
glass = TEXTURE(['glass2.jpg'])(glass)
window = STRUCT([windowFrame, glass])
window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(
window)[0], dz / SIZE([3])(window)[0]])(window)
return window
return window0
def door(doorX, doorY, occurrency):
"""This function takes in input three array, X, Y and occurrency and returns the HPC model of the door
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for scaling the resulting door by the three parameters dx, dy, dz."""
def door0(dx, dy, dz):
model = []
for xIndex in range(len(doorX)):
yQuotes = []
xSum = sum(doorX[:xIndex])
for yIndex in range(len(doorY)):
if occurrency[xIndex][yIndex] == False:
yQuotes.append(-doorY[yIndex])
else:
yQuotes.append(doorY[yIndex])
model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))
res = PROD([STRUCT(model), Q(dy)])
res = MAP([S2, S3, S1])(res)
res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],
dz / SIZE([3])(res)[0]])(res)
door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))
glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *
0.94])
glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)
glass = TEXTURE(['glass.jpg'])(glass)
refiner = CUBOID([0.03, 0.01, dz])
refiner = T([1, 2])([dx / 2, dy])(refiner)
refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)
handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))
handler2 = CUBOID([0.05, 0.02, 0.05])
handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))
handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))
handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy,
dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)
finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(
res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,
handler]))
return finalDoor
return door0
<mask token>
| <mask token>
def resizeXY(X, Y, occurrency, dx, dz):
"""This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values
contained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed"""
sumY = sum(Y)
sumX = sum(X)
visitedY = [False] * len(Y)
for y_index in range(len(Y)):
update = True
for x_index in range(len(X)):
if occurrency[x_index][y_index] == False:
update = False
if update:
sumY = sumY - Y[y_index]
sumX = sumX - X[y_index]
dx = dx - X[y_index]
dz = dz - Y[y_index]
for x_index in range(len(X)):
modifyX = False
for y_index in range(len(Y)):
if occurrency[x_index][y_index] == False and visitedY[y_index
] == False:
Y[y_index] = dz * Y[y_index] / sumY
visitedY[y_index] = True
modifyX = True
if occurrency[x_index][y_index] == False and visitedY[y_index
] == True and not modifyX:
modifyX = True
if modifyX:
X[x_index] = dx * X[x_index] / sumX
def window(windowX, windowY, occurrency):
"""This function, given three array, X, Y and occurrency, return the HPC model of the window
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for 'scaling'"""
def window0(dx, dy, dz):
resizeXY(windowX, windowY, occurrency, dx, dz)
model = []
for xIndex in range(len(windowX)):
yQuotes = []
xSum = sum(windowX[:xIndex])
for yIndex in range(len(windowY)):
if occurrency[xIndex][yIndex] == False:
yQuotes.append(-windowY[yIndex])
else:
yQuotes.append(windowY[yIndex])
model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(
yQuotes)]))
result = STRUCT(model)
result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))
windowFrame = STRUCT([result])
windowFrame = TEXTURE(['iron.jpg'])(windowFrame)
glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(
result)[0] * 0.95])
glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)
glass = TEXTURE(['glass2.jpg'])(glass)
window = STRUCT([windowFrame, glass])
window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(
window)[0], dz / SIZE([3])(window)[0]])(window)
return window
return window0
def door(doorX, doorY, occurrency):
"""This function takes in input three array, X, Y and occurrency and returns the HPC model of the door
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for scaling the resulting door by the three parameters dx, dy, dz."""
def door0(dx, dy, dz):
model = []
for xIndex in range(len(doorX)):
yQuotes = []
xSum = sum(doorX[:xIndex])
for yIndex in range(len(doorY)):
if occurrency[xIndex][yIndex] == False:
yQuotes.append(-doorY[yIndex])
else:
yQuotes.append(doorY[yIndex])
model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))
res = PROD([STRUCT(model), Q(dy)])
res = MAP([S2, S3, S1])(res)
res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],
dz / SIZE([3])(res)[0]])(res)
door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))
glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *
0.94])
glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)
glass = TEXTURE(['glass.jpg'])(glass)
refiner = CUBOID([0.03, 0.01, dz])
refiner = T([1, 2])([dx / 2, dy])(refiner)
refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)
handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))
handler2 = CUBOID([0.05, 0.02, 0.05])
handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))
handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))
handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy,
dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)
finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(
res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,
handler]))
return finalDoor
return door0
VIEW(door(doorX, doorY, doorOccurrency)(2.2, 0.4, 2.8))
VIEW(window(windowX, windowY, windowOccurrency)(0.6, 0.1, 1.2))
| <mask token>
doorY = [0.2, 0.18, 0.08, 0.18, 0.08, 0.18, 0.4, 0.18, 0.08, 0.18, 0.08,
0.18, 0.2]
doorX = [0.2, 0.5, 0.2, 1.8, 0.08, 0.18, 0.08, 0.18, 0.2]
doorOccurrency = [[True] * 13, [True, False, True, False, True, False, True,
False, True, False, True, False, True], [True] * 13, [True, False, True,
False, True, False, True, False, True, False, True, False, True], [True,
False, True, False, True, True, True, True, True, False, True, False,
True], [True, False, True, False, False, False, True, False, False,
False, True, False, True], [True, False, True, True, True, True, True,
True, True, True, True, False, True], [True, False, False, False, False,
False, True, False, False, False, False, False, True], [True] * 13]
windowY = [0.04, 0.04, 0.2, 0.02, 0.16, 0.02, 0.2, 0.04, 0.04]
windowX = [0.02, 0.8, 0.05, 0.02, 0.4, 0.02, 0.4, 0.05, 0.04]
windowOccurrency = [[True] * 9, [True, False, False, False, False, False,
False, False, True], [True] * 9, [True] * 9, [True, True, False, True,
False, True, False, True, True], [True] * 9, [True, True, False, True,
False, True, False, True, True], [True] * 9, [True] * 9]
def resizeXY(X, Y, occurrency, dx, dz):
"""This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values
contained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed"""
sumY = sum(Y)
sumX = sum(X)
visitedY = [False] * len(Y)
for y_index in range(len(Y)):
update = True
for x_index in range(len(X)):
if occurrency[x_index][y_index] == False:
update = False
if update:
sumY = sumY - Y[y_index]
sumX = sumX - X[y_index]
dx = dx - X[y_index]
dz = dz - Y[y_index]
for x_index in range(len(X)):
modifyX = False
for y_index in range(len(Y)):
if occurrency[x_index][y_index] == False and visitedY[y_index
] == False:
Y[y_index] = dz * Y[y_index] / sumY
visitedY[y_index] = True
modifyX = True
if occurrency[x_index][y_index] == False and visitedY[y_index
] == True and not modifyX:
modifyX = True
if modifyX:
X[x_index] = dx * X[x_index] / sumX
def window(windowX, windowY, occurrency):
"""This function, given three array, X, Y and occurrency, return the HPC model of the window
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for 'scaling'"""
def window0(dx, dy, dz):
resizeXY(windowX, windowY, occurrency, dx, dz)
model = []
for xIndex in range(len(windowX)):
yQuotes = []
xSum = sum(windowX[:xIndex])
for yIndex in range(len(windowY)):
if occurrency[xIndex][yIndex] == False:
yQuotes.append(-windowY[yIndex])
else:
yQuotes.append(windowY[yIndex])
model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(
yQuotes)]))
result = STRUCT(model)
result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))
windowFrame = STRUCT([result])
windowFrame = TEXTURE(['iron.jpg'])(windowFrame)
glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(
result)[0] * 0.95])
glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)
glass = TEXTURE(['glass2.jpg'])(glass)
window = STRUCT([windowFrame, glass])
window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(
window)[0], dz / SIZE([3])(window)[0]])(window)
return window
return window0
def door(doorX, doorY, occurrency):
"""This function takes in input three array, X, Y and occurrency and returns the HPC model of the door
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for scaling the resulting door by the three parameters dx, dy, dz."""
def door0(dx, dy, dz):
model = []
for xIndex in range(len(doorX)):
yQuotes = []
xSum = sum(doorX[:xIndex])
for yIndex in range(len(doorY)):
if occurrency[xIndex][yIndex] == False:
yQuotes.append(-doorY[yIndex])
else:
yQuotes.append(doorY[yIndex])
model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))
res = PROD([STRUCT(model), Q(dy)])
res = MAP([S2, S3, S1])(res)
res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],
dz / SIZE([3])(res)[0]])(res)
door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))
glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *
0.94])
glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)
glass = TEXTURE(['glass.jpg'])(glass)
refiner = CUBOID([0.03, 0.01, dz])
refiner = T([1, 2])([dx / 2, dy])(refiner)
refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)
handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))
handler2 = CUBOID([0.05, 0.02, 0.05])
handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))
handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))
handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy,
dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)
finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(
res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,
handler]))
return finalDoor
return door0
VIEW(door(doorX, doorY, doorOccurrency)(2.2, 0.4, 2.8))
VIEW(window(windowX, windowY, windowOccurrency)(0.6, 0.1, 1.2))
| from pyplasm import *
doorY = [0.2, 0.18, 0.08, 0.18, 0.08, 0.18, 0.4, 0.18, 0.08, 0.18, 0.08,
0.18, 0.2]
doorX = [0.2, 0.5, 0.2, 1.8, 0.08, 0.18, 0.08, 0.18, 0.2]
doorOccurrency = [[True] * 13, [True, False, True, False, True, False, True,
False, True, False, True, False, True], [True] * 13, [True, False, True,
False, True, False, True, False, True, False, True, False, True], [True,
False, True, False, True, True, True, True, True, False, True, False,
True], [True, False, True, False, False, False, True, False, False,
False, True, False, True], [True, False, True, True, True, True, True,
True, True, True, True, False, True], [True, False, False, False, False,
False, True, False, False, False, False, False, True], [True] * 13]
windowY = [0.04, 0.04, 0.2, 0.02, 0.16, 0.02, 0.2, 0.04, 0.04]
windowX = [0.02, 0.8, 0.05, 0.02, 0.4, 0.02, 0.4, 0.05, 0.04]
windowOccurrency = [[True] * 9, [True, False, False, False, False, False,
False, False, True], [True] * 9, [True] * 9, [True, True, False, True,
False, True, False, True, True], [True] * 9, [True, True, False, True,
False, True, False, True, True], [True] * 9, [True] * 9]
def resizeXY(X, Y, occurrency, dx, dz):
"""This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values
contained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed"""
sumY = sum(Y)
sumX = sum(X)
visitedY = [False] * len(Y)
for y_index in range(len(Y)):
update = True
for x_index in range(len(X)):
if occurrency[x_index][y_index] == False:
update = False
if update:
sumY = sumY - Y[y_index]
sumX = sumX - X[y_index]
dx = dx - X[y_index]
dz = dz - Y[y_index]
for x_index in range(len(X)):
modifyX = False
for y_index in range(len(Y)):
if occurrency[x_index][y_index] == False and visitedY[y_index
] == False:
Y[y_index] = dz * Y[y_index] / sumY
visitedY[y_index] = True
modifyX = True
if occurrency[x_index][y_index] == False and visitedY[y_index
] == True and not modifyX:
modifyX = True
if modifyX:
X[x_index] = dx * X[x_index] / sumX
def window(windowX, windowY, occurrency):
"""This function, given three array, X, Y and occurrency, return the HPC model of the window
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for 'scaling'"""
def window0(dx, dy, dz):
resizeXY(windowX, windowY, occurrency, dx, dz)
model = []
for xIndex in range(len(windowX)):
yQuotes = []
xSum = sum(windowX[:xIndex])
for yIndex in range(len(windowY)):
if occurrency[xIndex][yIndex] == False:
yQuotes.append(-windowY[yIndex])
else:
yQuotes.append(windowY[yIndex])
model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(
yQuotes)]))
result = STRUCT(model)
result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))
windowFrame = STRUCT([result])
windowFrame = TEXTURE(['iron.jpg'])(windowFrame)
glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(
result)[0] * 0.95])
glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)
glass = TEXTURE(['glass2.jpg'])(glass)
window = STRUCT([windowFrame, glass])
window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(
window)[0], dz / SIZE([3])(window)[0]])(window)
return window
return window0
def door(doorX, doorY, occurrency):
"""This function takes in input three array, X, Y and occurrency and returns the HPC model of the door
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for scaling the resulting door by the three parameters dx, dy, dz."""
def door0(dx, dy, dz):
model = []
for xIndex in range(len(doorX)):
yQuotes = []
xSum = sum(doorX[:xIndex])
for yIndex in range(len(doorY)):
if occurrency[xIndex][yIndex] == False:
yQuotes.append(-doorY[yIndex])
else:
yQuotes.append(doorY[yIndex])
model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))
res = PROD([STRUCT(model), Q(dy)])
res = MAP([S2, S3, S1])(res)
res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],
dz / SIZE([3])(res)[0]])(res)
door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))
glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *
0.94])
glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)
glass = TEXTURE(['glass.jpg'])(glass)
refiner = CUBOID([0.03, 0.01, dz])
refiner = T([1, 2])([dx / 2, dy])(refiner)
refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)
handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))
handler2 = CUBOID([0.05, 0.02, 0.05])
handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))
handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))
handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy,
dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)
finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(
res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,
handler]))
return finalDoor
return door0
VIEW(door(doorX, doorY, doorOccurrency)(2.2, 0.4, 2.8))
VIEW(window(windowX, windowY, windowOccurrency)(0.6, 0.1, 1.2))
| from pyplasm import *
doorY = [.2,.18,.08,.18,.08,.18,.4,.18,.08,.18,.08,.18,.2]
doorX = [.2,.5,.2,1.8,.08,.18,.08,.18,.2]
doorOccurrency = [[True]*13,
[True, False, True, False, True, False, True, False, True, False, True, False, True],
[True]*13,
[True, False, True, False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, True, True, True, True, False, True, False, True],
[True, False, True, False, False, False, True, False, False, False, True, False, True],
[True, False, True, True, True, True, True, True, True, True, True, False, True],
[True, False, False, False, False, False, True, False, False, False, False, False, True],
[True]*13]
windowY = [0.04,0.04,0.2,0.02,0.16,0.02,0.2,0.04,0.04]
windowX = [0.02,0.8,0.05,0.02,0.4,0.02,0.4,0.05,0.04]
windowOccurrency = [[True]*9,
[True, False, False, False, False, False, False, False, True],
[True]*9,
[True]*9,
[True, True, False, True, False, True, False, True, True],
[True]*9,
[True, True, False, True, False, True, False, True, True],
[True]*9,
[True]*9]
def resizeXY(X, Y, occurrency, dx, dz):
"""This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values
contained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed"""
sumY = sum(Y)
sumX = sum(X)
visitedY = [False]*len(Y)
for y_index in range(len(Y)):
update = True
for x_index in range(len(X)):
if(occurrency[x_index][y_index] == False):
update = False
if(update):
sumY = sumY - Y[y_index]
sumX = sumX - X[y_index]
dx = dx - X[y_index]
dz = dz - Y[y_index]
for x_index in range(len(X)):
modifyX = False
for y_index in range(len(Y)):
if(occurrency[x_index][y_index] == False and visitedY[y_index] == False):
Y[y_index] = (dz * Y[y_index])/sumY
visitedY[y_index] = True
modifyX = True
if(occurrency[x_index][y_index] == False and visitedY[y_index] == True and not modifyX):
modifyX = True
if(modifyX):
X[x_index] = (dx * X[x_index])/sumX
def window(windowX, windowY, occurrency):
"""This function, given three array, X, Y and occurrency, return the HPC model of the window
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for 'scaling'"""
def window0(dx, dy, dz):
resizeXY(windowX,windowY,occurrency, dx, dz)
model = []
for xIndex in range(len(windowX)):
yQuotes = []
xSum = sum(windowX[:xIndex])
for yIndex in range(len(windowY)):
if(occurrency[xIndex][yIndex] == False):
yQuotes.append(-windowY[yIndex])
else:
yQuotes.append(windowY[yIndex])
model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(yQuotes)]))
result = STRUCT(model)
result = MAP([S2,S3,S1])(PROD([result, Q(dy)]))
windowFrame = STRUCT([result])
windowFrame = TEXTURE(["iron.jpg"])(windowFrame)
glass = CUBOID([SIZE([1])(result)[0]*0.98,0.001,SIZE([3])(result)[0]*0.95])
glass = T([1,2,3])([dx*0.005, dy/2, 0.01])(glass)
glass = TEXTURE(["glass2.jpg"])(glass)
window = STRUCT([windowFrame, glass])
window = S([1,2,3])([dx/SIZE([1])(window)[0], dy/SIZE([2])(window)[0], dz/SIZE([3])(window)[0]])(window)
return window
return window0
def door(doorX, doorY, occurrency):
"""This function takes in input three array, X, Y and occurrency and returns the HPC model of the door
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for scaling the resulting door by the three parameters dx, dy, dz."""
def door0(dx, dy, dz):
model = []
for xIndex in range(len(doorX)):
yQuotes = []
xSum = sum(doorX[:xIndex])
for yIndex in range(len(doorY)):
if(occurrency[xIndex][yIndex] == False):
yQuotes.append(-doorY[yIndex])
else:
yQuotes.append(doorY[yIndex])
model.append(PROD([ QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))
res = PROD([STRUCT(model), Q(dy)])
res = MAP([S2,S3,S1])(res)
res = S([1,2,3])([dx/SIZE([1])(res)[0], dy/SIZE([2])(res)[0], dz/SIZE([3])(res)[0]]) (res)
door = TEXTURE(["wood.jpg", True, False, 1, 1, 0, 1, 1])(STRUCT([res]))
glass = CUBOID([SIZE([1])(res)[0]*0.94, 0.01, SIZE([3])(res)[0]*0.94])
glass = T([1,2,3])([dx*0.003, dy/2, dz*0.005])(glass)
glass = TEXTURE(["glass.jpg"])(glass)
refiner = CUBOID([0.03, 0.01,dz])
refiner = T([1,2])([dx/2,dy])(refiner)
refiner = TEXTURE(["wood.jpg", True, False, 1, 1, 0, 1, 1])(refiner)
handler1 = T(3)(.15)(CUBOID([.05,.02,.2]))
handler2 = CUBOID([.05,.02,.05])
handler3 = T([1,2])([.01,.02])(CUBOID([.03,.02,.2]))
handler = TEXTURE("bronze.jpg")(STRUCT([handler3, handler2, handler1]))
handler = T([1,2,3])([dx/2.-2*SIZE([1])(handler)[0],dy, dz/2.-1.5*SIZE([3])(handler)[0]])(handler)
finalDoor = S([1,2,3])([dx/SIZE([1])(res)[0], dy/SIZE([2])(res)[0], dz/SIZE([3])(res)[0]]) (STRUCT([door, glass, refiner, handler]))
return finalDoor
return door0
VIEW(door(doorX, doorY, doorOccurrency)(2.2, .4, 2.8))
VIEW(window(windowX,windowY,windowOccurrency)(.6,.1,1.2)) | [
3,
4,
5,
6,
7
] |
1,899 | 8126af930ec75e2818455d959f00285bdc08c044 | <mask token>
class TestLempelZivWelchDecoder(unittest.TestCase):
<mask token>
<mask token>
| <mask token>
class TestLempelZivWelchDecoder(unittest.TestCase):
def test_decode(self):
test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']
run_length_decoder = LempelZivWelchDecoder()
self.assertRaises(ValueError, lambda : run_length_decoder.decode())
self.assertTrue(run_length_decoder.input is None)
run_length_decoder.input = test_value
self.assertEqual(run_length_decoder.input, test_value)
self.assertEqual(run_length_decoder.decode(), 'ttttttessst1')
<mask token>
| <mask token>
class TestLempelZivWelchDecoder(unittest.TestCase):
def test_decode(self):
test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']
run_length_decoder = LempelZivWelchDecoder()
self.assertRaises(ValueError, lambda : run_length_decoder.decode())
self.assertTrue(run_length_decoder.input is None)
run_length_decoder.input = test_value
self.assertEqual(run_length_decoder.input, test_value)
self.assertEqual(run_length_decoder.decode(), 'ttttttessst1')
if __name__ == '__main__':
unittest.main()
| import unittest
from LempelZivWelchDecoder import LempelZivWelchDecoder
class TestLempelZivWelchDecoder(unittest.TestCase):
def test_decode(self):
test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']
run_length_decoder = LempelZivWelchDecoder()
self.assertRaises(ValueError, lambda : run_length_decoder.decode())
self.assertTrue(run_length_decoder.input is None)
run_length_decoder.input = test_value
self.assertEqual(run_length_decoder.input, test_value)
self.assertEqual(run_length_decoder.decode(), 'ttttttessst1')
if __name__ == '__main__':
unittest.main()
| import unittest
from LempelZivWelchDecoder import LempelZivWelchDecoder
class TestLempelZivWelchDecoder(unittest.TestCase):
def test_decode(self):
test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']
run_length_decoder = LempelZivWelchDecoder()
self.assertRaises(ValueError,
lambda: run_length_decoder.decode()) # assert if method raises error when there is no input
self.assertTrue(run_length_decoder.input is None) # assert if input is none when it's not set
run_length_decoder.input = test_value
self.assertEqual(run_length_decoder.input, test_value) # assert that input is initialized with proper value
self.assertEqual(run_length_decoder.decode(),
"ttttttessst1") # assert that result is correct
if __name__ == '__main__':
unittest.main()
| [
1,
2,
3,
4,
5
] |