{ "architectures": [ "Blip2ForConditionalGeneration" ], "image_text_hidden_size": 256, "image_token_index": null, "initializer_factor": 1.0, "initializer_range": 0.02, "model_type": "blip-2", "num_query_tokens": 32, "qformer_config": { "_attn_implementation_autoset": true, "encoder_hidden_size": 1024, "hidden_size": 1024, "model_type": "blip_2_qformer", "num_attention_heads": 16 }, "text_config": { "model_type": "opt" }, "torch_dtype": "float32", "transformers_version": "4.47.0", "use_decoder_only_language_model": true, "vision_config": { "_attn_implementation_autoset": true, "hidden_size": 1024, "image_size": 112, "model_type": "blip_2_vision_model", "num_hidden_layers": 12, "patch_size": 16 } }