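"""Rank resumes against a selected job category using a pre-trained Keras model.

The script loads a saved model (deeprank_model.h5), rebuilds the tokenizer and
label encoder from the training CSV (UpdatedResumeDataSet.csv), extracts text
from the supplied PDF resumes (falling back to OCR when a PDF has no text
layer), and prints the resumes ranked by their predicted probability for the
chosen category.
"""
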
import pandas as pd
import re
from pdfminer.high_level import extract_text
import pytesseract
from pdf2image import convert_from_path
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import LabelEncoder

def cleanResume(resumeText):
    """Strip URLs, handles, hashtags, punctuation, and non-ASCII noise from resume text."""
    resumeText = re.sub(r'http\S+\s*', ' ', resumeText)   # remove URLs
    resumeText = re.sub(r'RT|cc', ' ', resumeText)         # remove RT and cc
    resumeText = re.sub(r'#\S+', '', resumeText)           # remove hashtags
    resumeText = re.sub(r'@\S+', ' ', resumeText)          # remove mentions
    resumeText = re.sub('[%s]' % re.escape(r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""), ' ', resumeText)  # remove punctuation
    resumeText = re.sub(r'[^\x00-\x7f]', ' ', resumeText)  # remove non-ASCII characters
    resumeText = re.sub(r'\s+', ' ', resumeText)           # collapse extra whitespace
    return resumeText

def pdf_to_text(file):
    """Extract text from a PDF, falling back to OCR for scanned documents."""
    text = extract_text(file)
    if not text.strip():  # no text layer, so rasterize the pages and run OCR
        images = convert_from_path(file)
        text = "\n".join([pytesseract.image_to_string(img) for img in images])
    return text

def load_deeprank_model():
    """Load the pre-trained ranking model from disk."""
    return load_model('deeprank_model.h5')

def predict_category(resumes_data, selected_category, max_sequence_length, model, tokenizer, label):
    """Score each resume against every category and rank them by the selected one."""
    resumes_df = pd.DataFrame(resumes_data)
    resumes_text = resumes_df['ResumeText'].values

    # Tokenize and pad the resumes the same way the training data was prepared
    tokenized_text = tokenizer.texts_to_sequences(resumes_text)
    padded_text = pad_sequences(tokenized_text, maxlen=max_sequence_length)

    # Add one probability column per category
    predicted_probs = model.predict(padded_text)
    for i, category in enumerate(label.classes_):
        resumes_df[category] = predicted_probs[:, i]

    # Sort by the selected category's probability, highest first
    resumes_df_sorted = resumes_df.sort_values(by=selected_category, ascending=False)
    ranks = [{'Rank': rank + 1, 'FileName': row['FileName']}
             for rank, (_, row) in enumerate(resumes_df_sorted.iterrows())]
    return ranks

def main():
    model = load_deeprank_model()

    # Rebuild the label encoder and tokenizer from the training data so that
    # preprocessing matches what the model saw during training.
    df = pd.read_csv('UpdatedResumeDataSet.csv')
    df['cleaned'] = df['Resume'].apply(cleanResume)
    label = LabelEncoder()
    df['Category'] = label.fit_transform(df['Category'])

    text = df['cleaned'].values
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(text)
    vocab_size = len(tokenizer.word_index) + 1  # size of the fitted vocabulary (kept for reference)
    num_classes = len(label.classes_)           # number of resume categories (kept for reference)
    max_sequence_length = 500                   # padding length; should match the model's training setup

    # Read and clean each resume supplied by the user
    resumes_data = []
    files = input("Enter the paths of resumes (comma-separated): ").split(',')
    for file in files:
        text = cleanResume(pdf_to_text(file.strip()))
        resumes_data.append({'ResumeText': text, 'FileName': file.strip()})

    print("Available categories:", list(label.classes_))
    selected_category = input("Select a category to rank by: ")

    if not resumes_data or selected_category not in label.classes_:
        print("Error: Invalid input. Please provide valid resumes and select a valid category.")
    else:
        ranks = predict_category(resumes_data, selected_category, max_sequence_length, model, tokenizer, label)
        print(pd.DataFrame(ranks))


if __name__ == '__main__':
    main()