Supabase Vector - Vector Search with PostgreSQL

2025.12.04

What is Supabase Vector

Supabase Vector is a vector search feature built on the PostgreSQL extension pgvector. It brings the semantic search and RAG (Retrieval-Augmented Generation) capabilities that AI/ML applications need directly to your existing PostgreSQL database.

Setup

Enabling pgvector

-- Enable pgvector extension (provides the VECTOR type and distance operators)
CREATE EXTENSION IF NOT EXISTS vector;

-- Create table with vector column
-- Each row holds one document plus its embedding for similarity search.
CREATE TABLE documents (
  id BIGSERIAL PRIMARY KEY,
  content TEXT, -- raw document text that was embedded
  embedding VECTOR(1536), -- OpenAI ada-002 dimensions
  metadata JSONB, -- arbitrary per-document attributes (tags, source, etc.)
  created_at TIMESTAMPTZ DEFAULT NOW()
);

Creating Index

-- IVFFlat index (fast, approximate search)
-- vector_cosine_ops matches the <=> cosine-distance operator used in queries.
-- lists: number of clusters; a common starting point is sqrt(row count).
CREATE INDEX ON documents
USING ivfflat (embedding vector_cosine_ops)
WITH (lists = 100);

-- HNSW index (higher accuracy)
-- m: max connections per node; ef_construction: build-time search breadth.
-- Higher values improve recall at the cost of build time and memory.
CREATE INDEX ON documents
USING hnsw (embedding vector_cosine_ops)
WITH (m = 16, ef_construction = 64);

Generating and Storing Vectors

Using OpenAI Embeddings

import { createClient } from '@supabase/supabase-js';
import OpenAI from 'openai';

// NOTE(review): SUPABASE_URL / SUPABASE_KEY are assumed to be defined
// elsewhere (e.g. loaded from environment variables) — confirm in context.
const supabase = createClient(SUPABASE_URL, SUPABASE_KEY);
// Reads OPENAI_API_KEY from the environment by default.
const openai = new OpenAI();

/**
 * Embeds the given text with OpenAI and inserts it into the `documents`
 * table together with its metadata.
 *
 * @param content  Document text to embed and store.
 * @param metadata Arbitrary attributes saved to the JSONB column.
 * @throws The Supabase error object when the insert fails.
 */
async function addDocument(content: string, metadata: object) {
  // Ask OpenAI for the embedding vector of the document text.
  const { data: embeddingData } = await openai.embeddings.create({
    model: 'text-embedding-ada-002',
    input: content
  });

  // Persist content, vector, and metadata in a single insert;
  // the VECTOR column accepts a plain number[].
  const { error } = await supabase.from('documents').insert({
    content,
    embedding: embeddingData[0].embedding,
    metadata
  });

  if (error) throw error;
}
/**
 * Finds the documents most similar to `query` via the `match_documents` RPC.
 *
 * @param query Natural-language search text.
 * @param limit Maximum number of matches to return (default 5).
 * @returns Matching rows (id, content, metadata, similarity), best first.
 * @throws The Supabase error object when the RPC fails.
 */
async function searchSimilar(query: string, limit = 5) {
  // Embed the query with the same model used for stored documents,
  // so the vectors live in the same embedding space.
  const embeddingResponse = await openai.embeddings.create({
    model: 'text-embedding-ada-002',
    input: query
  });

  const queryEmbedding = embeddingResponse.data[0].embedding;

  // Cosine-similarity search through the match_documents SQL function.
  const { data, error } = await supabase.rpc('match_documents', {
    query_embedding: queryEmbedding,
    match_threshold: 0.7,
    match_count: limit
  });

  // BUG FIX: `error` was previously destructured but never checked, so a
  // failed RPC surfaced as a silent `null` result instead of an exception.
  if (error) throw error;

  // Supabase types `data` as nullable; normalize to an array for callers.
  return data ?? [];
}
-- Returns up to match_count documents whose cosine similarity to
-- query_embedding exceeds match_threshold, most similar first.
-- <=> is pgvector's cosine DISTANCE operator, so similarity = 1 - distance.
CREATE OR REPLACE FUNCTION match_documents(
  query_embedding VECTOR(1536),
  match_threshold FLOAT,  -- minimum similarity (0..1) a row must exceed
  match_count INT         -- maximum number of rows to return
)
RETURNS TABLE (
  id BIGINT,
  content TEXT,
  metadata JSONB,
  similarity FLOAT
)
LANGUAGE plpgsql
AS $$
BEGIN
  RETURN QUERY
  SELECT
    documents.id,
    documents.content,
    documents.metadata,
    1 - (documents.embedding <=> query_embedding) AS similarity
  FROM documents
  WHERE 1 - (documents.embedding <=> query_embedding) > match_threshold
  -- Ordering by raw distance (ascending) lets the vector index be used.
  ORDER BY documents.embedding <=> query_embedding
  LIMIT match_count;
END;
$$;

Building RAG Applications

/**
 * Answers a question with RAG: retrieves the top matching documents, then
 * asks GPT-4 to answer using only that retrieved context.
 *
 * @param question User question in natural language.
 * @returns The model's answer plus the source documents used as context.
 */
async function askQuestion(question: string) {
  // 1. Search relevant documents.
  // BUG FIX: Supabase RPC data is nullable — guard with `?? []` so an
  // empty/failed search no longer throws a TypeError on `.map`.
  const relevantDocs = (await searchSimilar(question, 3)) ?? [];

  // 2. Build context: concatenate document contents, blank-line separated.
  const context = relevantDocs
    .map((doc: { content: string }) => doc.content)
    .join('\n\n');

  // 3. Generate answer with LLM, instructing it to stay within the context.
  const response = await openai.chat.completions.create({
    model: 'gpt-4',
    messages: [
      {
        role: 'system',
        content: `Answer the question using the following context.
        If the information is not in the context, say "I don't know".

        Context:
        ${context}`
      },
      { role: 'user', content: question }
    ]
  });

  return {
    answer: response.choices[0].message.content,
    sources: relevantDocs
  };
}

Hybrid search improves relevance by combining vector similarity with PostgreSQL full-text search, weighting the two scores together.

-- Hybrid search: blends cosine similarity and full-text rank 50/50.
-- Only rows matching the full-text query are considered (see WHERE below),
-- so purely semantic matches with no keyword overlap are excluded.
CREATE OR REPLACE FUNCTION hybrid_search(
  query_text TEXT,              -- keyword query for full-text search
  query_embedding VECTOR(1536), -- embedding of the same query text
  match_count INT               -- maximum number of rows to return
)
RETURNS TABLE (
  id BIGINT,
  content TEXT,
  similarity FLOAT
)
LANGUAGE plpgsql
AS $$
BEGIN
  RETURN QUERY
  SELECT
    d.id,
    d.content,
    (
      -- Equal weights; tune the 0.5 factors to favor either signal.
      0.5 * (1 - (d.embedding <=> query_embedding)) +
      0.5 * ts_rank(to_tsvector('english', d.content), plainto_tsquery('english', query_text))
    ) AS similarity
  FROM documents d
  WHERE to_tsvector('english', d.content) @@ plainto_tsquery('english', query_text)
  -- PostgreSQL allows ordering by the output-column alias here.
  ORDER BY similarity DESC
  LIMIT match_count;
END;
$$;

Edge Functions Integration

// supabase/functions/embed-and-search/index.ts
// Edge Function: embeds the incoming query with the OpenAI REST API, then
// runs a vector similarity search through the match_documents RPC.
import { serve } from 'https://deno.land/std@0.168.0/http/server.ts';
import { createClient } from 'https://esm.sh/@supabase/supabase-js@2';

serve(async (req) => {
  const { query } = await req.json();

  // Generate embedding with OpenAI API
  const embeddingRes = await fetch('https://api.openai.com/v1/embeddings', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${Deno.env.get('OPENAI_API_KEY')}`,
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      model: 'text-embedding-ada-002',
      input: query
    })
  });

  // BUG FIX: a failed OpenAI call previously crashed on `data[0]`;
  // surface it to the client as a 502 instead.
  if (!embeddingRes.ok) {
    return new Response(
      JSON.stringify({ error: 'embedding request failed' }),
      { status: 502, headers: { 'Content-Type': 'application/json' } }
    );
  }

  const { data } = await embeddingRes.json();
  const embedding = data[0].embedding;

  // Service-role client: server-side only; bypasses row-level security.
  const supabase = createClient(
    Deno.env.get('SUPABASE_URL')!,
    Deno.env.get('SUPABASE_SERVICE_ROLE_KEY')!
  );

  const { data: results, error } = await supabase.rpc('match_documents', {
    query_embedding: embedding,
    match_threshold: 0.7,
    match_count: 5
  });

  // BUG FIX: the RPC error was previously ignored, so failures returned
  // a 200 response with a `null` body.
  if (error) {
    return new Response(JSON.stringify({ error: error.message }), {
      status: 500,
      headers: { 'Content-Type': 'application/json' }
    });
  }

  return new Response(JSON.stringify(results), {
    headers: { 'Content-Type': 'application/json' }
  });
});

Performance Tips

| Setting | Recommended Value |
| --- | --- |
| IVFFlat `lists` | sqrt(row count) |
| HNSW `m` | 16-32 |
| HNSW `ef_construction` | 64-128 |

Summary

Supabase Vector enables vector search while leveraging PostgreSQL’s strengths. You can add AI capabilities to your existing database, making RAG application development easier.

← Back to list