{
  "architectures": [
    "MultiLUARs"
  ],
  "embedding_size": 512,
  "hidden_size": 512,
  "k_bucket_size": 1024,
  "model_type": "MultiLUARs",
  "q_bucket_size": 512,
  "sentence_transformer_support": false,
  "torch_dtype": "float32",
  "transformers_version": "4.46.3",
  "use_memory_efficient_attention": false
}