Overview of Multimodal Embeddings
Multimodal embeddings represent data of different modalities (text, images, audio, video, and so on) in a shared vector space. With this technique, Weaviate can turn inputs of various modalities, such as text and images, into a unified vector representation for efficient similarity search and other machine learning tasks. Weaviate's integration with Google Vertex AI makes this capability both easy to use and powerful: it lets you index and search multimodal data of different types, which is especially useful when a large database must handle text, images, and video together. The integrated Vertex AI model is not only performant but also supports complex semantic and multimodal search, raising the level of intelligence in data management and querying.
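To make the idea of a shared vector space concrete, here is a toy sketch: once a text query and an image are embedded into vectors of the same dimensionality, cross-modal similarity is just a vector comparison. The three-dimensional vectors below are made-up placeholders, not the output of any real model; the actual model used later produces 1408-dimensional vectors.

import numpy as np

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    # Cosine similarity between two embedding vectors
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

# Hypothetical embeddings; in a real setup both would come from the
# same multimodal model and share one vector space.
text_vector = np.array([0.1, 0.8, 0.3])   # embedding of a text query
image_vector = np.array([0.2, 0.7, 0.4])  # embedding of an image

print(cosine_similarity(text_vector, image_vector))  # higher = more similar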
Deploying a Weaviate Instance with Docker
services:
  weaviate:
    command:
      - --host
      - 0.0.0.0
      - --port
      - '8080'
      - --scheme
      - http
    image: cr.weaviate.io/semitechnologies/weaviate:1.26.4
    ports:
      - 8080:8080
      - 50051:50051
    volumes:
      - ./weaviate_data:/var/lib/weaviate
      - /root/.config/gcloud/application_default_credentials.json:/etc/weaviate/gcp-credentials.json
    restart: on-failure:0
    environment:
      QUERY_DEFAULTS_LIMIT: 25
      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'true'
      PERSISTENCE_DATA_PATH: '/var/lib/weaviate'
      DEFAULT_VECTORIZER_MODULE: 'multi2vec-palm'
      ENABLE_MODULES: 'multi2vec-palm,ref2vec-centroid'
      CLUSTER_HOSTNAME: 'node1'
      GOOGLE_APPLICATION_CREDENTIALS: '/etc/weaviate/gcp-credentials.json'
      USE_GOOGLE_AUTH: 'true'
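Save this as docker-compose.yml and start the instance in the background:

docker compose up -d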
You must provide valid API credentials for the Vertex AI integration to work. See the API credentials documentation for the available configuration options.
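The compose file above mounts Application Default Credentials from the host. Assuming you use the gcloud CLI, these can be generated with:

gcloud auth application-default login

which writes the token file to ~/.config/gcloud/application_default_credentials.json, the host path referenced in the volumes section.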
Writing the Code
Connecting to Weaviate and Checking the Connection
import weaviate

# Connect to the local instance (HTTP on 8080, gRPC on 50051 by default)
client = weaviate.connect_to_local()
client.is_ready()
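connect_to_local() keeps both an HTTP and a gRPC connection open; when you are finished with the client (for example, at the end of the notebook), it should be closed explicitly:

client.close()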
Creating the Collection
from weaviate.classes.config import Configure

# Drop any existing collection so the example starts from a clean slate
if client.collections.exists("AnimeGirls"):
    client.collections.delete("AnimeGirls")

client.collections.create(
    name="AnimeGirls",
    vectorizer_config=Configure.Vectorizer.multi2vec_palm(
        image_fields=["image"],
        text_fields=["text"],
        video_fields=["video"],
        project_id="neurosearch-436306",
        location="europe-west1",
        model_id="multimodalembedding@001",
        dimensions=1408,
    ),
)
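The collection above declares no properties explicitly, so Weaviate's auto-schema infers them on first import. For reference, a sketch of an equivalent explicit schema that could be passed as properties=[...] to client.collections.create (the property names match the objects imported below; image data must use the BLOB type, which expects base64-encoded content):

from weaviate.classes.config import Property, DataType

# Hypothetical explicit property list; auto-schema infers the same shape
properties = [
    Property(name="name", data_type=DataType.TEXT),
    Property(name="image", data_type=DataType.BLOB),  # base64-encoded image bytes
    Property(name="path", data_type=DataType.TEXT),
    Property(name="mediaType", data_type=DataType.TEXT),
]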
Creating a Utility Function
import base64

# Weaviate stores BLOB properties as base64-encoded strings,
# so media files are encoded before import
def to_base64(file_path: str) -> str:
    with open(file_path, "rb") as file:
        return base64.b64encode(file.read()).decode("utf-8")
Importing Data
import os
from weaviate.util import generate_uuid5

anime_girls = client.collections.get("AnimeGirls")
sources = os.listdir("./images/")

with anime_girls.batch.dynamic() as batch:
    for name in sources:
        print(f"Adding {name}")
        path = "./images/" + name
        batch.add_object(
            {
                "name": name,
                "image": to_base64(path),
                "path": path,
                "mediaType": "image",
            },
            # generate_uuid5 derives a deterministic UUID from the file name,
            # so re-running the import updates objects instead of duplicating them
            uuid=generate_uuid5(name),
        )
Verifying That All Data Was Imported
if len(anime_girls.batch.failed_objects) > 0:
    print(f"Failed to import {len(anime_girls.batch.failed_objects)} objects")
    for failed_object in anime_girls.batch.failed_objects:
        print(f"e.g. Failed to import object with error: {failed_object.message}")
else:
    print("All objects imported successfully")
Searching by Text
import json

response = anime_girls.query.near_text(
    query="Seeing a girl through glasses",
    return_properties=["name", "path", "mediaType"],
    limit=2,
)

for obj in response.objects:
    print(json.dumps(obj.properties, indent=2))
from IPython.display import Image, display

def display_image(item: dict):
    path = item["path"]
    display(Image(path, width=300))

display_image(response.objects[0].properties)
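If you also want to see how close each hit is to the query, the v4 client can return the vector distance alongside the properties; a small sketch:

from weaviate.classes.query import MetadataQuery

response = anime_girls.query.near_text(
    query="Seeing a girl through glasses",
    return_properties=["name", "path", "mediaType"],
    return_metadata=MetadataQuery(distance=True),  # include the distance of each hit
    limit=2,
)
for obj in response.objects:
    print(obj.properties["name"], obj.metadata.distance)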
Searching by Image
response = anime_girls.query.near_image(
    near_image=to_base64("./images/121955436_p0_master1200.jpg"),
    return_properties=["name", "path", "mediaType"],
    limit=2,
)
# for obj in response.objects:
# print(json.dumps(obj.properties, indent=2))
display_image(response.objects[0].properties)
Hybrid Search
response = anime_girls.query.hybrid(
    query="Seeing a girl through glasses",
    return_properties=["name", "path", "mediaType"],
    limit=2,
)
# for obj in response.objects:
# print(json.dumps(obj.properties, indent=2))
display_image(response.objects[0].properties)
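Hybrid search fuses BM25 keyword scoring with vector similarity over the same query string. The weighting between the two is controlled by the alpha parameter (alpha=0 is pure keyword search, alpha=1 is pure vector search); a sketch leaning toward keyword matching:

response = anime_girls.query.hybrid(
    query="Seeing a girl through glasses",
    alpha=0.25,  # weight BM25 keyword scoring more heavily than vector similarity
    return_properties=["name", "path", "mediaType"],
    limit=2,
)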
Retrieving and Visualizing All Vectors
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA

# Collect the 1408-dimensional vector of every object in the collection
embedding = np.array([item.vector["default"] for item in anime_girls.iterator(include_vector=True)])

# Reduce the 1408-dimensional vectors to 2 dimensions with PCA
pca = PCA(n_components=2)
reduced_embedding = pca.fit_transform(embedding)

# Plot the reduced vectors
plt.figure(figsize=(10, 7))
plt.scatter(reduced_embedding[:, 0], reduced_embedding[:, 1], alpha=0.5)
plt.title('PCA of AnimeGirls Embeddings')
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.show()