Gemini API#

In this notebook, we use the Gemini 1.5 Flash model.

def prompt_gemini(request, model="gemini-1.5-flash-001"):
    """Send a prompt to Google Gemini and return the response text."""
    from google import generativeai as genai
    import os

    # The API key is read from the GOOGLE_API_KEY environment variable
    genai.configure(api_key=os.environ['GOOGLE_API_KEY'])

    # Create a handle to the requested model and generate a single response
    client = genai.GenerativeModel(model)
    result = client.generate_content(request)
    return result.text
prompt_gemini("Hello, what is the capital of France?")
'The capital of France is **Paris**. \n'

Exercise#

List the models available from Google and try them out!

from google.generativeai import list_models

[m.name for m in list_models()]
['models/chat-bison-001',
 'models/text-bison-001',
 'models/embedding-gecko-001',
 'models/gemini-1.0-pro-latest',
 'models/gemini-1.0-pro',
 'models/gemini-pro',
 'models/gemini-1.0-pro-001',
 'models/gemini-1.0-pro-vision-latest',
 'models/gemini-pro-vision',
 'models/gemini-1.5-pro-latest',
 'models/gemini-1.5-pro-001',
 'models/gemini-1.5-pro-002',
 'models/gemini-1.5-pro',
 'models/gemini-1.5-pro-exp-0801',
 'models/gemini-1.5-pro-exp-0827',
 'models/gemini-1.5-flash-latest',
 'models/gemini-1.5-flash-001',
 'models/gemini-1.5-flash-001-tuning',
 'models/gemini-1.5-flash',
 'models/gemini-1.5-flash-exp-0827',
 'models/gemini-1.5-flash-002',
 'models/gemini-1.5-flash-8b',
 'models/gemini-1.5-flash-8b-001',
 'models/gemini-1.5-flash-8b-latest',
 'models/gemini-1.5-flash-8b-exp-0827',
 'models/gemini-1.5-flash-8b-exp-0924',
 'models/gemini-2.0-flash-exp',
 'models/gemini-exp-1206',
 'models/gemini-exp-1121',
 'models/gemini-exp-1114',
 'models/gemini-2.0-flash-thinking-exp',
 'models/gemini-2.0-flash-thinking-exp-1219',
 'models/learnlm-1.5-pro-experimental',
 'models/embedding-001',
 'models/text-embedding-004',
 'models/aqa']
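
For the second part of the exercise, you can pass any of the listed model names to the prompt_gemini function defined above. A minimal sketch, assuming gemini-1.5-pro-latest (picked arbitrarily from the list above) is still accessible with your API key:

# Try a different model by passing its name to prompt_gemini.
# "gemini-1.5-pro-latest" is an arbitrary pick from the list above;
# the set of available models may change over time.
prompt_gemini("In one sentence, what is a large language model?",
              model="gemini-1.5-pro-latest")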