ashimdahal committed
Commit 5ee111e · verified · 1 Parent(s): d4d9732

Add/Update generated README.md

Files changed (1)
  1. README.md +21 -21
README.md CHANGED
@@ -24,7 +24,7 @@ https://github.com/ashimdahal/captioning_image/blob/main
 
 **⚠️ Important:** The `base_model` tag in the metadata above is initially empty. The models listed here are *heuristic guesses* based on the training directory name (`microsoft-git-base_microsoft-git-base`). Please verify these against your training configuration and update the `base_model:` list in the YAML metadata block at the top of this README with the correct Hugging Face model identifiers.
 
-## How to Use (Example with PEFT)
+## How to Use (Example with PEFT)::: This is generated by script and not verified manually so proceed with caution
 
 ```python
 from transformers import AutoProcessor, AutoModelForVision2Seq, Blip2ForConditionalGeneration # Or other relevant classes
@@ -34,41 +34,41 @@ import torch
 # --- Configuration ---
 # 1. Specify the EXACT base model identifiers used during training
 # base_processor_id = "microsoft/git-base" # <-- Replace with correct HF ID
-# base_model_id = "microsoft/git-base" # <-- Replace with correct HF ID (e.g., Salesforce/blip2-opt-2.7b)
+base_model_id = "microsoft/git-base" # <-- Replace with correct HF ID (e.g., Salesforce/blip2-opt-2.7b)
 
 # 2. Specify the PEFT adapter repository ID (this repo)
-# adapter_repo_id = "ashimdahal/microsoft-git-base_microsoft-git-base"
+adapter_repo_id = "ashimdahal/microsoft-git-base_microsoft-git-base"
 
 # --- Load Base Model and Processor ---
-# processor = AutoProcessor.from_pretrained(base_processor_id)
+processor = AutoProcessor.from_pretrained(base_processor_id)
 
 # Load the base model (ensure it matches the type used for training)
 # Example for BLIP-2 OPT:
-# base_model = Blip2ForConditionalGeneration.from_pretrained(
-#     base_model_id,
-#     torch_dtype=torch.float16 # Or torch.bfloat16 or float32, match training/inference needs
-# )
+base_model = Blip2ForConditionalGeneration.from_pretrained(
+    base_model_id,
+    torch_dtype=torch.float16 # Or torch.bfloat16 or float32, match training/inference needs
+)
 # Or for other model types:
-# base_model = AutoModelForVision2Seq.from_pretrained(base_model_id, torch_dtype=torch.float16)
+base_model = AutoModelForVision2Seq.from_pretrained(base_model_id, torch_dtype=torch.float16)
 
 # --- Load PEFT Adapter ---
 # Load the adapter config and merge the adapter weights into the base model
-# model = PeftModel.from_pretrained(base_model, adapter_repo_id)
-# model = model.merge_and_unload() # Merge weights for inference (optional but often recommended)
-# model.eval() # Set model to evaluation mode
+model = PeftModel.from_pretrained(base_model, adapter_repo_id)
+model = model.merge_and_unload() # Merge weights for inference (optional but often recommended)
+model.eval() # Set model to evaluation mode
 
 # --- Inference Example ---
-# device = "cuda" if torch.cuda.is_available() else "cpu"
-# model.to(device)
+device = "cuda" if torch.cuda.is_available() else "cpu"
+model.to(device)
 #
-# image = ... # Load your image (e.g., using PIL)
-# text = "a photo of" # Optional prompt start
+image = ... # Load your image (e.g., using PIL)
+text = "a photo of" # Optional prompt start
 #
-# inputs = processor(images=image, text=text, return_tensors="pt").to(device, torch.float16) # Match model dtype
-#
-# generated_ids = model.generate(**inputs, max_new_tokens=50)
-# generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
-# print(f"Generated Caption: {{generated_text}}")
+inputs = processor(images=image, text=text, return_tensors="pt").to(device, torch.float16) # Match model dtype
+
+generated_ids = model.generate(**inputs, max_new_tokens=50)
+generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
+print(f"Generated Caption: {{generated_text}}")
 
 ```
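
The committed snippet still has two rough edges: `base_processor_id` is used while its definition stays commented out, and the doubled braces in the final f-string print the literal text `{generated_text}` instead of the caption. Below is a minimal, self-contained sketch of the intended flow, written against the heuristic `microsoft/git-base` guess from the README itself; the adapter type (assumed LoRA-style so that `merge_and_unload()` applies), the use of `AutoModelForCausalLM` for a GIT base, and the `example.jpg` path are assumptions, not verified details of the training run.

```python
# Minimal sketch (not verified against the actual training run): load the base
# GIT model, attach the PEFT adapter from this repo, and caption one image.
# Assumptions: base model/processor are microsoft/git-base (heuristic guess from
# the README), the adapter is LoRA-style, and "example.jpg" is a placeholder path.
import torch
from PIL import Image
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoProcessor

base_id = "microsoft/git-base"  # heuristic guess; replace with the real base model ID
adapter_repo_id = "ashimdahal/microsoft-git-base_microsoft-git-base"

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if torch.cuda.is_available() else torch.float32

processor = AutoProcessor.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=dtype)

# Attach the adapter; merge_and_unload() folds LoRA weights into the base model
# so inference runs without the PEFT wrapper.
model = PeftModel.from_pretrained(base_model, adapter_repo_id)
model = model.merge_and_unload()
model.eval()
model.to(device)

image = Image.open("example.jpg").convert("RGB")  # placeholder image path
inputs = processor(images=image, return_tensors="pt").to(device, dtype)

with torch.no_grad():
    generated_ids = model.generate(pixel_values=inputs.pixel_values, max_new_tokens=50)
caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
print(f"Generated Caption: {caption}")
```

If the adapter turns out not to be LoRA-style, skip `merge_and_unload()` and call `generate()` on the `PeftModel` wrapper directly; merging only removes the adapter indirection at inference time.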