import warnings
from typing import Any, List, Optional, Tuple, Union

import torchvision.transforms as T
from torchvision.transforms.functional import InterpolationMode

import torch
import torch.utils.checkpoint
import transformers

from .modeling_internlm2 import InternLM2ForCausalLM
from .modeling_phi3 import Phi3ForCausalLM
from peft import LoraConfig, get_peft_model
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
                          LlamaTokenizer, Qwen2ForCausalLM)
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import ModelOutput, logging
from transformers import StoppingCriteriaList, StoppingCriteria, AutoProcessor
from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VisionTransformerPretrainedModel

from .configuration_sa2va_chat import Sa2VAChatConfig
from .modeling_intern_vit import InternVisionModel

from .sam2 import SAM2
from .templates import PROMPT_TEMPLATE

import numpy as np
from torchvision.transforms.functional import resize, to_pil_image

from types import MethodType
import torch.nn.functional as F

try:
    from .flash_attention import FlashAttention
    has_flash_attn = True
except Exception:
    print('FlashAttention is not installed.')
    has_flash_attn = False

logger = logging.get_logger(__name__)


def version_cmp(v1, v2, op='eq'):
    import operator

    from packaging import version
    op_func = getattr(operator, op)
    return op_func(version.parse(v1), version.parse(v2))
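
# Example (illustrative only): version_cmp('4.50.0', '4.49.0', 'ge') evaluates
# packaging.version.parse('4.50.0') >= packaging.version.parse('4.49.0') and returns True.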


class StopWordStoppingCriteria(StoppingCriteria):
    """Stop generation once the decoded text ends with a given stop word."""

    def __init__(self, tokenizer, stop_word):
        self.tokenizer = tokenizer
        self.stop_word = stop_word
        self.length = len(self.stop_word)

    def __call__(self, input_ids, *args, **kwargs) -> bool:
        cur_text = self.tokenizer.decode(input_ids[0])
        cur_text = cur_text.replace('\r', '').replace('\n', '')
        return cur_text[-self.length:] == self.stop_word


def get_stop_criteria(
    tokenizer,
    stop_words=[],
):
    stop_criteria = StoppingCriteriaList()
    for word in stop_words:
        stop_criteria.append(StopWordStoppingCriteria(tokenizer, word))
    return stop_criteria
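
# Usage sketch (assumes any HF tokenizer): get_stop_criteria(tokenizer, stop_words=['<|im_end|>'])
# returns a StoppingCriteriaList that halts generation as soon as the decoded text ends
# with '<|im_end|>'.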


class DirectResize:
    """Resize an image directly to a `target_length` x `target_length` square."""

    def __init__(self, target_length: int) -> None:
        self.target_length = target_length

    def apply_image(self, image: np.ndarray) -> np.ndarray:
        """Expects a numpy array with shape HxWxC in uint8 format."""
        img = to_pil_image(image, mode='RGB')
        return np.array(img.resize((self.target_length, self.target_length)))
|
| | class Sa2VAChatModelQwen(PreTrainedModel): |
| | config_class = Sa2VAChatConfig |
| | main_input_name = 'pixel_values' |
| | base_model_prefix = 'language_model' |
| | _no_split_modules = ['Qwen2_5_VisionTransformerPretrainedModel', 'Qwen2_5_VLDecoderLayer', 'SAM2'] |
| | _supports_flash_attn_2 = True |
| | supports_gradient_checkpointing = True |
| |
|
| | def __init__(self, config: Sa2VAChatConfig, visual=None, language_model=None, use_flash_attn=True): |
| | super().__init__(config) |
| |
|
| | assert version_cmp(transformers.__version__, '4.49.0', 'ge') |
| | |
| | |
| | |
| | self.select_layer = config.select_layer |
| | self.template = config.template |
| | self.template = self.template.replace('-', '_') |
| | |
| | |
| | |
| | self.llm_arch_name = config.llm_config.architectures[0] |
| |
|
| | self.min_pixels = 512*28*28 |
| | self.max_pixels = 2048*28*28 |
| |
|
| | use_flash_attn = use_flash_attn if has_flash_attn else False |
| | config.vision_config.use_flash_attn = True if use_flash_attn else False |
| | config.llm_config._attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager' |
| |
|
| | print(config.vision_config.keys()) |
| | print('-'*33) |
| | print(config.keys()) |
| | config.vision_config.spatial_merge_size=2 |
| |
|
| | self.merge_length = config.vision_config.spatial_merge_size ** 2 |

        if visual is not None:
            self.visual = visual
        else:
            self.visual = Qwen2_5_VisionTransformerPretrainedModel._from_config(config.vision_config)
        if language_model is not None:
            self.language_model = language_model
        else:
            if config.llm_config.architectures[0] == 'Qwen2ForCausalLM':
                self.language_model = Qwen2ForCausalLM(config.llm_config)
            elif config.llm_config.architectures[0] == 'Qwen3ForCausalLM':
                assert version_cmp(transformers.__version__, '4.56.0', 'ge'), \
                    'Please upgrade transformers to >=4.56.0 for Qwen3 support.'
                from transformers import Qwen3ForCausalLM
                self.language_model = Qwen3ForCausalLM(config.llm_config)
            else:
                raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')

        llm_hidden_size = config.llm_config.hidden_size

        # NOTE: attribute access to `self.lm_head` resolves to the read-only property defined
        # further below (the language model's output embeddings); this module is registered
        # here but shadowed on access.
        self.lm_head = nn.Linear(llm_hidden_size, config.llm_config.vocab_size, bias=False)

        self.img_context_token_id = None
        self.conv_template = PROMPT_TEMPLATE[self.template]
        self.template = self.conv_template
        if hasattr(config, 'system_message'):
            self.system_message = config.system_message
        self.num_samples = 0
        self._count = 0

        if config.use_backbone_lora:
            self.wrap_backbone_lora(r=config.use_backbone_lora, lora_alpha=2 * config.use_backbone_lora)

        if config.use_llm_lora:
            self.wrap_llm_lora(r=config.use_llm_lora, lora_alpha=2 * config.use_llm_lora)

        # SAM2 mask decoder plus a small MLP that projects LLM hidden states of [SEG]
        # tokens into SAM2's prompt-embedding space.
        self.grounding_encoder = SAM2()
        out_dim = self.grounding_encoder.hidden_dim
        in_dim = llm_hidden_size
        self.text_hidden_fcs = nn.Sequential(
            nn.Linear(in_dim, in_dim), nn.ReLU(inplace=True),
            nn.Linear(in_dim, out_dim), nn.Dropout(0.0)
        )

        self.init_prediction_config = False

    def wrap_backbone_lora(self, r=128, lora_alpha=256, lora_dropout=0.05):
        lora_config = LoraConfig(
            r=r,
            target_modules=['attn.qkv', 'attn.proj', 'mlp.fc1', 'mlp.fc2'],
            lora_alpha=lora_alpha,
            lora_dropout=lora_dropout,
        )
        self.visual = get_peft_model(self.visual, lora_config)
        self.visual.print_trainable_parameters()

    def wrap_llm_lora(self, r=128, lora_alpha=256, lora_dropout=0.05):
        # Pick LoRA target modules according to the LLM architecture.
        if self.llm_arch_name == 'InternLM2ForCausalLM':
            target_modules = ['attention.wqkv', 'attention.wo', 'feed_forward.w1', 'feed_forward.w2', 'feed_forward.w3']
        elif self.llm_arch_name == 'Phi3ForCausalLM':
            target_modules = ['mlp.down_proj', 'mlp.gate_up_proj', 'self_attn.o_proj', 'self_attn.qkv_proj']
        elif self.llm_arch_name in ['Qwen2ForCausalLM', 'LlamaForCausalLM']:
            target_modules = ['self_attn.q_proj', 'self_attn.k_proj', 'self_attn.v_proj', 'self_attn.o_proj',
                              'mlp.gate_proj', 'mlp.down_proj', 'mlp.up_proj']
        else:
            raise NotImplementedError(f'LoRA target modules are not defined for {self.llm_arch_name}.')
        lora_config = LoraConfig(
            r=r,
            target_modules=target_modules,
            lora_alpha=lora_alpha,
            lora_dropout=lora_dropout,
            task_type='CAUSAL_LM'
        )
        self.language_model = get_peft_model(self.language_model, lora_config)
        self.language_model.enable_input_require_grads()
        self.language_model.print_trainable_parameters()

    def extract_feature(self, pixel_values, image_grid_thw):
        if self.select_layer == -1:
            vit_embeds = self.visual(
                pixel_values,
                grid_thw=image_grid_thw)

        # Split the flat sequence of merged vision tokens back into one chunk per image:
        # each image contributes t * h * w patches, reduced by spatial_merge_size ** 2 after merging.
        split_sizes = (image_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist()
        vit_embeds = torch.split(vit_embeds, split_sizes)

        return vit_embeds

    @property
    def lm_head(self):
        return self.language_model.get_output_embeddings()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def get_output_embeddings(self):
        return self.language_model.get_output_embeddings()

    def forward(self, data, data_samples=None, mode='loss'):
        pixel_values = data['pixel_values']
        image_grid_thw = data['image_grid_thw']

        if type(pixel_values) is list or pixel_values.ndim == 5:
            if type(pixel_values) is list:
                pixel_values = [
                    x.unsqueeze(0) if x.ndim == 3 else x for x in pixel_values
                ]
            concat_images = torch.cat(
                [image.to(self.visual.dtype) for image in pixel_values], dim=0)
        else:
            raise NotImplementedError()

        input_ids = data['input_ids']
        position_ids = data['position_ids']
        attention_mask = data['attention_mask']
        # Mark padded (all-zero) images so their embeddings can be dropped downstream.
        image_flags = torch.sum(concat_images, dim=(1, 2, 3)) != 0
        image_flags = image_flags.long()

        labels = data['labels']
        use_cache = False

        if 'vp_overall_mask' not in data.keys():
            vp_overall_mask = None
        else:
            vp_overall_mask = data['vp_overall_mask']

        if 'prompt_masks' in data.keys():
            prompt_masks = data['prompt_masks']
        else:
            prompt_masks = None

        outputs = self._llm_forward(
            input_ids=input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            image_flags=image_flags,
            pixel_values=concat_images,
            labels=labels,
            use_cache=use_cache,
            output_hidden_states=True,
            vp_overall_mask=vp_overall_mask,
            prompt_masks=prompt_masks,
            image_grid_thw=image_grid_thw,
        )

        return outputs

    def _llm_forward(
            self,
            pixel_values: torch.FloatTensor,
            input_ids: torch.LongTensor = None,
            attention_mask: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.LongTensor] = None,
            image_flags: Optional[torch.LongTensor] = None,
            past_key_values: Optional[List[torch.FloatTensor]] = None,
            labels: Optional[torch.LongTensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            vp_overall_mask=None,
            prompt_masks=None,
            image_grid_thw=None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        return_dict = return_dict if return_dict is not None \
            else self.config.use_return_dict

        image_flags = image_flags.squeeze(-1)
        input_embeds = self.language_model.get_input_embeddings()(
            input_ids).clone()

        # `extract_feature` returns one chunk of merged vision tokens per image.
        vit_embeds = self.extract_feature(pixel_values, image_grid_thw)
        vit_embeds = [item.to(input_embeds.dtype) for item in vit_embeds]
        fast_vit_embeds = None

        # Drop embeddings of padded (all-zero) images.
        vit_embeds = [item for item, flag in zip(vit_embeds, image_flags) if flag == 1]
        vit_batch_size = pixel_values.shape[0]

        B, N, C = input_embeds.shape
        input_embeds = input_embeds.reshape(B * N, C)

        self._count += 1

        if vp_overall_mask is not None and prompt_masks is not None:
            vp_embeds = []
            vp_overall_mask = vp_overall_mask.to(input_embeds.device).bool()
            prompt_masks = [item.to(input_embeds.device).bool() for item in prompt_masks]

            vp_overall_mask = vp_overall_mask[image_flags == 1]
            # Images that carry visual prompts (one entry per kept image).
            overall_tile_vit_embeds = [item for item, flag in zip(vit_embeds, vp_overall_mask) if flag]

            i_vp_img = 0
            for i_img in range(len(vit_embeds)):
                vp_embeds.append(vit_embeds[i_img].reshape(-1, C))
                if vp_overall_mask[i_img]:
                    tile_vit_embeds = overall_tile_vit_embeds[i_vp_img].reshape(-1, C)
                    objects_prompt_masks = prompt_masks[i_vp_img]
                    n_obj = len(objects_prompt_masks)
                    tile_vit_embeds = tile_vit_embeds.unsqueeze(0).repeat(n_obj, 1, 1)
                    objects_prompt_masks = objects_prompt_masks.reshape(n_obj, -1)
                    vp_embeds.append(tile_vit_embeds[objects_prompt_masks])
                    i_vp_img += 1
            vp_embeds = torch.cat(vp_embeds, dim=0)
        else:
            vp_embeds = None

        input_ids = input_ids.reshape(B * N)
        selected = (input_ids == self.img_context_token_id)

        if vp_embeds is None:
            vit_embeds = torch.cat([item.reshape(-1, C) for item in vit_embeds], dim=0)
            try:
                input_embeds[selected] = vit_embeds
            except Exception as e:
                print(f'warning: {e}, input_embeds[selected].shape='
                      f'{input_embeds[selected].shape}, '
                      f'vit_embeds.shape={vit_embeds.shape}')
                n_token = selected.sum()
                if n_token > len(vit_embeds):
                    print(f"Wrong !!! {n_token} image tokens in text but only {len(vit_embeds)} vit embeds !!!")
                    expand_ratio = n_token // len(vit_embeds) + 1
                    vit_embeds = torch.cat([vit_embeds] * expand_ratio, dim=0)

                input_embeds[selected] = vit_embeds[:n_token]
        else:
            try:
                input_embeds[selected] = vp_embeds.reshape(-1, C)
            except Exception as e:
                vp_embeds = vp_embeds.reshape(-1, C)
                print(f'warning: {e}, input_embeds[selected].shape='
                      f'{input_embeds[selected].shape}, '
                      f'vp_embeds.shape={vp_embeds.shape}')
                n_token = selected.sum()
                if n_token > len(vp_embeds):
                    print(f"Wrong !!! {n_token} image tokens in text but only {len(vp_embeds)} vit embeds !!!")
                    expand_ratio = n_token // len(vp_embeds) + 1
                    vp_embeds = torch.cat([vp_embeds] * expand_ratio, dim=0)

                input_embeds[selected] = vp_embeds[:n_token]

        input_embeds = input_embeds.reshape(B, N, C)

        outputs = self.language_model(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict token n.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens.
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(
                -1, self.language_model.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Move labels to the logits device (model parallelism).
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    @torch.no_grad()
    def generate(
            self,
            pixel_values: Optional[torch.FloatTensor] = None,
            input_ids: Optional[torch.FloatTensor] = None,
            attention_mask: Optional[torch.LongTensor] = None,
            visual_features: Optional[torch.FloatTensor] = None,
            generation_config: Optional[GenerationConfig] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            prompt_masks=None,
            vp_overall_mask=None,
            image_grid_thw=None,
            **generate_kwargs,
    ) -> torch.LongTensor:
        device = self.device
        assert self.img_context_token_id is not None

        if pixel_values is not None:
            if visual_features is not None:
                vit_embeds = visual_features
            else:
                if type(pixel_values) is list or pixel_values.ndim == 5:
                    if type(pixel_values) is list:
                        pixel_values = [
                            x.unsqueeze(0) if x.ndim == 3 else x for x in pixel_values
                        ]
                    pixel_values = torch.cat(
                        [image.to(self.visual.dtype) for image in pixel_values], dim=0)

                # One chunk of merged vision tokens per image.
                vit_embeds = self.extract_feature(pixel_values.to(device), image_grid_thw)
                # Flag real (non-zero) images; pixel_values is a flat patch sequence, so the
                # check is done per image chunk using the grid sizes.
                split_sizes = image_grid_thw.prod(-1).tolist()
                image_flags = torch.tensor(
                    [bool(chunk.abs().sum() != 0) for chunk in torch.split(pixel_values, split_sizes)]).long()
                vit_embeds = [item for item, flag in zip(vit_embeds, image_flags) if flag == 1]

            input_embeds = self.language_model.get_input_embeddings()(input_ids.to(device))
            B, N, C = input_embeds.shape
            input_embeds = input_embeds.reshape(B * N, C)

            if vp_overall_mask is not None and prompt_masks is not None:
                vp_embeds = []
                vp_overall_mask = vp_overall_mask.to(device).bool()
                prompt_masks = [item.to(device).bool() for item in prompt_masks]

                vp_overall_mask = vp_overall_mask[image_flags == 1]
                overall_tile_vit_embeds = [item for item, flag in zip(vit_embeds, vp_overall_mask) if flag]

                i_vp_img = 0
                for i_img in range(len(vit_embeds)):
                    vp_embeds.append(vit_embeds[i_img].reshape(-1, C))
                    if vp_overall_mask[i_img]:
                        tile_vit_embeds = overall_tile_vit_embeds[i_vp_img].reshape(-1, C)
                        objects_prompt_masks = prompt_masks[i_vp_img]
                        n_obj = len(objects_prompt_masks)
                        tile_vit_embeds = tile_vit_embeds.unsqueeze(0).repeat(n_obj, 1, 1)
                        objects_prompt_masks = objects_prompt_masks.reshape(n_obj, -1)
                        vp_embeds.append(tile_vit_embeds[objects_prompt_masks])
                        i_vp_img += 1

                vp_embeds = torch.cat(vp_embeds, dim=0)
            else:
                vp_embeds = None

            input_ids = input_ids.reshape(B * N)
            selected = (input_ids == self.img_context_token_id)
            assert selected.sum() != 0
            if vp_embeds is None:
                vit_embeds = torch.cat([item.reshape(-1, C) for item in vit_embeds], dim=0)
                input_embeds[selected] = vit_embeds.to(input_embeds.device)
            else:
                if len(input_embeds[selected]) != len(vp_embeds.reshape(-1, C)):
                    print("Shape mismatch, selected is {}, vp embeds is {} !!!"
                          .format(len(input_embeds[selected]), len(vp_embeds.reshape(-1, C))))
                    min_tokens = min(len(input_embeds[selected]), len(vp_embeds.reshape(-1, C)))
                    # Index the selected positions directly; chained boolean indexing would only
                    # write into a temporary copy.
                    selected_idx = selected.nonzero(as_tuple=True)[0][:min_tokens]
                    input_embeds[selected_idx] = vp_embeds.reshape(-1, C)[:min_tokens].to(input_embeds.device)
                else:
                    input_embeds[selected] = vp_embeds.reshape(-1, C).to(input_embeds.device)

            input_embeds = input_embeds.reshape(B, N, C)
        else:
            input_embeds = self.language_model.get_input_embeddings()(input_ids)

        outputs = self.language_model.generate(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask.to(device),
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            use_cache=True,
            **generate_kwargs,
        )

        return outputs

    def preparing_for_generation(self, tokenizer, max_new_tokens=2048, torch_dtype=torch.bfloat16):
        # Set up everything `predict_forward` needs: tokenizer, stop criteria,
        # generation config, special tokens and vision-grid constants.
        if not hasattr(self, 'tokenizer'):
            self.tokenizer = tokenizer
        self.bot_name = 'BOT'
        stop_words = []
        stop_words += self.template.get('STOP_WORDS', [])
        stop_criteria = get_stop_criteria(
            tokenizer=self.tokenizer, stop_words=stop_words)
        self.stop_criteria = stop_criteria

        default_generation_kwargs = dict(
            max_new_tokens=max_new_tokens,
            do_sample=False,
            eos_token_id=self.tokenizer.eos_token_id,
            pad_token_id=(
                self.tokenizer.pad_token_id
                if self.tokenizer.pad_token_id is not None
                else self.tokenizer.eos_token_id
            ),
        )

        self.gen_config = GenerationConfig(**default_generation_kwargs)
        self.gen_config.transformers_version = '4.56.1'
        self.init_prediction_config = True
        self.torch_dtype = torch_dtype
        # SAM2 receives a fixed 1024 x 1024 resized copy of every frame.
        self.extra_image_processor = DirectResize(target_length=1024, )

        self.use_thumbnail = True

        # Normalization constants and special tokens used to build the prompt.
        self.IMAGENET_MEAN = (0.485, 0.456, 0.406)
        self.IMAGENET_STD = (0.229, 0.224, 0.225)
        self.IMG_PAD_TOKEN = '<|image_pad|>'
        self.IMG_START_TOKEN = '<|vision_start|>'
        self.IMG_END_TOKEN = '<|vision_end|>'

        self.VP_START_TOKEN = '<vp>'
        self.VP_END_TOKEN = '</vp>'

        # Token ids for the image-context and [SEG] tokens.
        img_context_token_id = tokenizer.convert_tokens_to_ids('<|image_pad|>')
        self.img_context_token_id = img_context_token_id
        self.seg_token_idx = tokenizer.convert_tokens_to_ids('[SEG]')

        self.image_token_id = self.config.image_token_id
        self.video_token_id = self.config.video_token_id
        self.vision_start_token_id = self.config.vision_start_token_id

        self.spatial_merge_size = self.config.vision_config.spatial_merge_size
        self.temporal_patch_size = self.config.vision_config.temporal_patch_size
        self.patch_size = self.config.vision_config.patch_size

        return

    def predict_forward(
            self,
            image=None,
            video=None,
            text=None,
            past_text='',
            mask_prompts=None,
            tokenizer=None,
            processor=None,
    ):
        if not self.init_prediction_config:
            assert tokenizer
            self.preparing_for_generation(tokenizer=tokenizer)
            self.processor = processor

        if image is None and video is None and '<image>' not in past_text:
            # Pure-text conversation: no vision inputs are built.
            text = text.replace('<image>', "")
            input_text = ''
            input_text += self.template['INSTRUCTION'].format(
                input=text, round=1, bot_name=self.bot_name)
            input_text = past_text + input_text
            ids = self.tokenizer.encode(input_text)
            ids = torch.tensor(ids).cuda().unsqueeze(0)

            attention_mask = torch.ones_like(ids, dtype=torch.bool)

            mm_inputs = {
                'pixel_values': None,
                'input_ids': ids,
                'attention_mask': attention_mask,
                'position_ids': None,
                'past_key_values': None,
                'labels': None,
                'prompt_masks': None,
                'vp_overall_mask': None,
                'image_grid_thw': None,
            }
            ret_masks = []
        else:
            input_dict = {}
            if video is not None:
                pixel_values = []
                extra_pixel_values = []
                images = []
                ori_image_size = video[0].size
                for frame_idx, frame_image in enumerate(video):
                    # Full-resolution (1024 x 1024) copy of every frame for SAM2.
                    g_image = np.array(frame_image)
                    g_image = self.extra_image_processor.apply_image(g_image)
                    g_image = torch.from_numpy(g_image).permute(2, 0, 1).contiguous()
                    extra_pixel_values.append(g_image)
                    if frame_idx < 5:
                        # Only the first five frames are fed to the LLM;
                        # SAM2 still receives every frame.
                        images.append(frame_image)

                _data_dict = self.processor(
                    images=images, min_pixels=self.min_pixels, max_pixels=self.max_pixels
                )
                # Number of LLM tokens per frame after the spatial merge.
                num_image_tokens = int(_data_dict['image_grid_thw'][0].prod()) // self.merge_length
                pixel_values = _data_dict['pixel_values']

                g_pixel_values = torch.stack([
                    self.grounding_encoder.preprocess_image(pixel) for pixel in extra_pixel_values
                ]).to(self.torch_dtype)
                # Number of frames shown to the LLM (not the number of flattened patches).
                num_frames = len(images)

                input_dict['g_pixel_values'] = g_pixel_values
                input_dict['pixel_values'] = pixel_values.to(self.torch_dtype)
                input_dict['vp_overall_mask'] = None
            else:
                ori_image_size = image.size

                # Full-resolution (1024 x 1024) copy of the image for SAM2.
                g_image = np.array(image)
                g_image = self.extra_image_processor.apply_image(g_image)
                g_pixel_values = torch.from_numpy(g_image).permute(2, 0, 1).contiguous().to(self.torch_dtype)
                extra_pixel_values = [g_pixel_values]
                g_pixel_values = torch.stack([
                    self.grounding_encoder.preprocess_image(pixel) for pixel in extra_pixel_values
                ]).to(self.torch_dtype)

                if mask_prompts is not None:
                    # A single image is processed, and it carries the visual prompts.
                    vp_overall_mask = torch.Tensor([True])
                    input_dict['vp_overall_mask'] = vp_overall_mask
                else:
                    input_dict['vp_overall_mask'] = None

                _data_dict = self.processor(
                    images=[image], min_pixels=self.min_pixels, max_pixels=self.max_pixels
                )

                num_image_tokens = int(_data_dict['image_grid_thw'][0].prod()) // self.merge_length
                num_frames = 1
                input_dict['g_pixel_values'] = g_pixel_values
                input_dict['pixel_values'] = _data_dict['pixel_values'].to(self.torch_dtype)

            if mask_prompts is not None:
                # Resize each visual-prompt mask to the merged vision-token grid so it can
                # index the per-token image embeddings.
                grid_t, grid_h, grid_w = [int(x) for x in _data_dict['image_grid_thw'][0]]
                token_grid = (grid_h // self.spatial_merge_size, grid_w // self.spatial_merge_size)
                mask_prompts = [torch.Tensor(item).to(self.device) for item in mask_prompts]
                mask_prompts = [F.interpolate(
                    item.unsqueeze(0),
                    size=token_grid,
                    mode='nearest').squeeze(0) for item in mask_prompts]
                region_pixels = []
                for mask_prompt in mask_prompts[0]:
                    # Number of vision tokens covered by this region.
                    region_pixels.append(int(mask_prompt.bool().sum()))

                vp_token_str = '\nThere are {} part regions in the picture: '.format(len(mask_prompts[0]))
                for i in range(len(mask_prompts[0])):
                    vp_token_str = vp_token_str + \
                        f"region{i + 1}" + self.VP_START_TOKEN + \
                        self.IMG_PAD_TOKEN * region_pixels[i] + \
                        self.VP_END_TOKEN
                    if i == len(mask_prompts[0]) - 1:
                        vp_token_str = vp_token_str + '.\n'
                    else:
                        vp_token_str = vp_token_str + ', '
            else:
                vp_token_str = ''

            image_token_str = f'{self.IMG_START_TOKEN}' \
                              f'{self.IMG_PAD_TOKEN * num_image_tokens}' \
                              f'{self.IMG_END_TOKEN}'
            image_token_str = image_token_str + '\n'
            image_token_str = image_token_str * num_frames
            image_token_str = image_token_str.strip()

            ret_masks = []

            if '<image>' in text or mask_prompts is not None:
                assert past_text is None or len(past_text) == 0
            text = text.replace('<image>', image_token_str + vp_token_str)
            input_text = ''
            input_text += self.template['INSTRUCTION'].format(
                input=text, round=1, bot_name=self.bot_name)
            input_text = past_text + input_text
            ids = self.tokenizer.encode(input_text)
            ids = torch.tensor(ids).cuda().unsqueeze(0)

            attention_mask = torch.ones_like(ids, dtype=torch.bool)

            mm_inputs = {
                'pixel_values': input_dict['pixel_values'],
                'input_ids': ids,
                'attention_mask': attention_mask,
                'position_ids': None,
                'past_key_values': None,
                'labels': None,
                'prompt_masks': mask_prompts,
                'vp_overall_mask': input_dict['vp_overall_mask'],
                'image_grid_thw': _data_dict['image_grid_thw'],
            }

        generate_output = self.generate(
            **mm_inputs,
            generation_config=self.gen_config,
            streamer=None,
            bos_token_id=self.tokenizer.bos_token_id,
            stopping_criteria=self.stop_criteria,
            output_hidden_states=True,
            return_dict_in_generate=True
        )
        predict = self.tokenizer.decode(
            generate_output.sequences[0], skip_special_tokens=False).strip()

        if image is None and video is None and '<image>' not in past_text:
            return {'prediction': predict, 'prediction_masks': ret_masks, }

        # Collect the hidden states of generated [SEG] tokens and decode them into masks with SAM2.
        hidden_states = generate_output.hidden_states
        last_hidden_states = [item[-1][0] for item in hidden_states]
        last_hidden_states = torch.cat(last_hidden_states, dim=0)
        seg_hidden_states = get_seg_hidden_states(
            last_hidden_states, generate_output.sequences[0][:-1],
            seg_id=self.seg_token_idx
        )
        all_seg_hidden_states = self.text_hidden_fcs(seg_hidden_states)

        for seg_hidden_states in all_seg_hidden_states:
            seg_hidden_states = seg_hidden_states.unsqueeze(0)
            g_pixel_values = input_dict['g_pixel_values']
            sam_states = self.grounding_encoder.get_sam2_embeddings(g_pixel_values)
            pred_masks = self.grounding_encoder.language_embd_inference(sam_states, [seg_hidden_states] * num_frames)
            w, h = ori_image_size
            masks = F.interpolate(pred_masks, size=(h, w), mode='bilinear', align_corners=False)
            masks = masks[:, 0]
            masks = masks.sigmoid() > 0.5
            masks = masks.cpu().numpy()
            ret_masks.append(masks)

        return {'prediction': predict, 'prediction_masks': ret_masks, }


def get_seg_hidden_states(hidden_states, output_ids, seg_id):
    seg_mask = output_ids == seg_id
    n_out = len(seg_mask)
    if n_out == 0:
        return hidden_states[0:0]
    return hidden_states[-n_out:][seg_mask]
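

# Minimal usage sketch (illustrative only, not part of the model definition). It assumes a
# local checkpoint directory that exports this class via `trust_remote_code`; the path
# './sa2va_qwen_checkpoint', the image file, and the choice of processor object are
# placeholders/assumptions, not guaranteed by this module.
#
#     from PIL import Image
#     from transformers import AutoModel, AutoProcessor, AutoTokenizer
#
#     path = './sa2va_qwen_checkpoint'
#     model = AutoModel.from_pretrained(
#         path, torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval()
#     tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
#     processor = AutoProcessor.from_pretrained(path, trust_remote_code=True)
#
#     image = Image.open('example.jpg').convert('RGB')
#     result = model.predict_forward(
#         image=image,
#         text='<image>Please segment the person in the image.',
#         tokenizer=tokenizer,
#         processor=processor,
#     )
#     print(result['prediction'])         # text answer, containing [SEG] tokens
#     masks = result['prediction_masks']  # one boolean mask array per [SEG] token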