ColorfulAI · nielsr (HF Staff) committed
Commit da16492 · verified · 1 Parent(s): 332ec02

Add pipeline tag, library name, paper link and Github link (#1)


- Add pipeline tag, library name, paper link and Github link (307c3d88840ba9ca65f1856a6d009a68c52a4162)


Co-authored-by: Niels Rogge <[email protected]>

Files changed (1): README.md (+142, -122)
README.md CHANGED
---
license: mit
pipeline_tag: video-text-to-text
library_name: transformers
---

# M4-Audio-LongVA-7B-Qwen2

Enhancing Omni Interactive Capabilities in MLLM

This repository contains the model described in [OmniMMI: A Comprehensive Multi-modal Interaction Benchmark in Streaming Video Contexts](https://huggingface.co/papers/2503.22952).
The code can be found at https://github.com/patrick-tssn/M4.

![images](./assets/framework.png)

M4-Audio-7B is an extension of [LongVA-7B](https://github.com/EvolvingLMMs-Lab/LongVA), further trained using the [M4-IT](https://huggingface.co/datasets/ColorfulAI/M4-IT) dataset, which comprises 9,963 visual-audio instruction tuning instances. This training was conducted without any special modifications to the existing training pipeline.
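
To take a quick look at the instruction data, the sketch below loads M4-IT with the `datasets` library. This is only a sketch: it assumes the `ColorfulAI/M4-IT` repository can be loaded directly via `load_dataset`; if the data is distributed as raw JSON plus media files, download the repo with `huggingface_hub.snapshot_download` and read the files manually instead.

```python
# Minimal sketch — assumes M4-IT loads through the standard `datasets` API.
from datasets import load_dataset

m4_it = load_dataset("ColorfulAI/M4-IT")
print(m4_it)  # available splits and their sizes (about 9,963 instances in total)

first_split = list(m4_it.keys())[0]
print(m4_it[first_split][0])  # peek at one visual-audio instruction tuning instance
```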

## Usage

*Please refer to [M4](https://github.com/patrick-tssn/M4) to install the relevant packages.*
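
The demo below loads the checkpoint from a local `checkpoints/` directory. One way to put it there is a snapshot download; the repo id `ColorfulAI/M4-Audio-LongVA-7B-Qwen2` used here is an assumption — adjust it if the weights are hosted under a different name.

```python
# Hypothetical setup step: fetch the weights into the path expected by the demo.
from huggingface_hub import snapshot_download

snapshot_download(
    repo_id="ColorfulAI/M4-Audio-LongVA-7B-Qwen2",  # assumed repo id
    local_dir="checkpoints/M4-Audio-LongVA-7B-Qwen2",
)
```

With the checkpoint (and the `local_demo` assets from the M4 repository) in place, the interactive inference example is: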
```python
import os
from PIL import Image
import numpy as np
import torchaudio
import torch
from decord import VideoReader, cpu
import whisper

# fix seed
torch.manual_seed(0)

from intersuit.model.builder import load_pretrained_model
from intersuit.mm_utils import tokenizer_image_speech_tokens, process_images
from intersuit.constants import IMAGE_TOKEN_INDEX, SPEECH_TOKEN_INDEX

import ChatTTS
chat = ChatTTS.Chat()
chat.load(source='local', compile=True)

import warnings
warnings.filterwarnings("ignore")

model_path = "checkpoints/M4-Audio-LongVA-7B-Qwen2"
video_path = "local_demo/assets/water.mp4"
audio_path = "local_demo/wav/infer.wav"
new_audio_path = "local_demo/wav/new_infer.wav"
max_frames_num = 16  # you can increase this to several thousand frames, as long as your GPU memory can handle it :)
gen_kwargs = {"do_sample": True, "temperature": 0.5, "top_p": None, "num_beams": 1, "use_cache": True, "max_new_tokens": 1024}
tokenizer, model, image_processor, _ = load_pretrained_model(model_path, None, "llava_qwen", device_map="cuda:0", attn_implementation="eager")

# original query
query = "Give a detailed caption of the video as if I am blind."
query = None  # comment this line out to let ChatTTS convert the text query above to audio
prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<image><|im_end|>\n<|im_start|>user\n<speech>\n<|im_end|>\n<|im_start|>assistant\n"
input_ids = tokenizer_image_speech_tokens(prompt, tokenizer, IMAGE_TOKEN_INDEX, SPEECH_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(model.device)
pad_token_ids = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
attention_masks = input_ids.ne(pad_token_ids).to(input_ids.device)
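# Note: <image> and <speech> in the prompt are placeholder tokens. They are mapped to
# IMAGE_TOKEN_INDEX / SPEECH_TOKEN_INDEX by tokenizer_image_speech_tokens, while the actual
# video and audio features are supplied separately to generate_parallel below.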
# audio input
if query is not None:
    audio_path = "./local_demo/wav/" + "infer.wav"
    if os.path.exists(audio_path): os.remove(audio_path)  # refresh
    if not os.path.exists(audio_path):
        wav = chat.infer(query)
        try:
            torchaudio.save(audio_path, torch.from_numpy(wav).unsqueeze(0), 24000)
        except Exception:
            torchaudio.save(audio_path, torch.from_numpy(wav), 24000)
speech = whisper.load_audio(audio_path)
speech = whisper.pad_or_trim(speech)
speech = whisper.log_mel_spectrogram(speech, n_mels=128).permute(1, 0).to(device=model.device, dtype=torch.float16)
speech_length = torch.LongTensor([speech.shape[0]]).to(model.device)

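# The "new query" below simulates a user interruption: a second spoken query that arrives
# while the model is still answering the original one. new_query_pos controls after how
# many generated tokens the interruption is injected.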
# new query (three example interruptions; the last assignment takes effect)
new_query = "How many people in the video?"
new_query = "Okay, I see."
new_query = "Sorry to interrupt."
new_query_pos = 10  # number of generated tokens after which the new query comes in
new_query = None  # comment this line out to let ChatTTS convert the text query above to audio
new_prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<speech>\n<|im_end|>\n<|im_start|>assistant\n"
new_input_ids = tokenizer_image_speech_tokens(new_prompt, tokenizer, IMAGE_TOKEN_INDEX, SPEECH_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(model.device)
# audio input
if new_query is not None:
    new_audio_path = "./local_demo/wav/" + "new_infer.wav"
    if os.path.exists(new_audio_path): os.remove(new_audio_path)  # refresh
    if not os.path.exists(new_audio_path):
        wav = chat.infer(new_query)
        try:
            torchaudio.save(new_audio_path, torch.from_numpy(wav).unsqueeze(0), 24000)
        except Exception:
            torchaudio.save(new_audio_path, torch.from_numpy(wav), 24000)
new_speech = whisper.load_audio(new_audio_path)
new_speech = whisper.pad_or_trim(new_speech)
new_speech = whisper.log_mel_spectrogram(new_speech, n_mels=128).permute(1, 0).to(device=model.device, dtype=torch.float16)
new_speech_length = torch.LongTensor([new_speech.shape[0]]).to(model.device)

# video input
vr = VideoReader(video_path, ctx=cpu(0))
total_frame_num = len(vr)
uniform_sampled_frames = np.linspace(0, total_frame_num - 1, max_frames_num, dtype=int)
frame_idx = uniform_sampled_frames.tolist()
frames = vr.get_batch(frame_idx).asnumpy()
video_tensor = image_processor.preprocess(frames, return_tensors="pt")["pixel_values"].to(model.device, dtype=torch.bfloat16)

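# generate_parallel runs the interaction inference pipeline: it decodes the answer to the
# original audio-visual query and injects the new (interrupting) query once new_query_pos
# tokens have been generated; see the M4 repository for implementation details.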
with torch.inference_mode():
    output_ids = model.generate_parallel(
        input_ids,
        attention_mask=attention_masks,
        images=[video_tensor],
        modalities=["video"],
        speeches=speech.unsqueeze(0),
        speech_lengths=speech_length,
        new_query=new_input_ids,
        new_query_pos=new_query_pos,
        new_speeches=new_speech.unsqueeze(0),
        new_speech_lengths=new_speech_length,
        query_str=query,
        new_query_str=new_query,
        tokenizer=tokenizer,
        **gen_kwargs,
    )
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
print(outputs)
```

For more information about the interaction inference pipeline, please visit the [M4 GitHub repository](https://github.com/patrick-tssn/M4).