This is the first assigned codelab on day four of the intensive. Download it from GitHub to run locally, or run it in this Kaggle notebook.
"""Use Google Search in Generation
Google Gen AI 5-Day Intensive Course
Host: Kaggle
Day: 4
Codelab: https://www.kaggle.com/code/markishere/day-4-google-search-grounding
"""
import io
import os
from pprint import pprint
from google import genai
from google.api_core import retry
from google.genai import types
from IPython.display import HTML, Image, Markdown, display
client = genai.Client(api_key=os.environ["GOOGLE_API_KEY"])
# Define a retry policy. The model might make multiple consecutive calls automatically
# for a complex query, so this ensures the client retries if it hits quota limits.
is_retriable = lambda e: (
isinstance(e, genai.errors.APIError) and e.code in {429, 503}
)
if not hasattr(genai.models.Models.generate_content, "__wrapped__"):
genai.models.Models.generate_content = retry.Retry(predicate=is_retriable)(
genai.models.Models.generate_content
)
# To enable search grounding, specify the 'google_search' tool in the
# `GenerateContentConfig` passed to `generate_content`.
# Ask for information without search grounding
response = client.models.generate_content(
model="gemini-2.0-flash",
contents="When and where is Billie Eilish's next concert?",
)
Markdown(response.text)
# And now rerun the same query with search grounding enabled.
config_with_search = types.GenerateContentConfig(
tools=[types.Tool(google_search=types.GoogleSearch())]
)
def query_with_grounding():
response = client.models.generate_content(
model="gemini-2.0-flash",
contents="When and where is Billie Eilish's next concert?",
config=config_with_search,
)
    return response.candidates[0]
rc = query_with_grounding()
Markdown(rc.content.parts[0].text)
# Response metadata
# Get links to search suggestions, supporting documents and information
# on how they were used.
while (
not rc.grounding_metadata.grounding_supports
or not rc.grounding_metadata.grounding_chunks
):
    # If incomplete grounding data was returned, retry.
rc = query_with_grounding()
chunks = rc.grounding_metadata.grounding_chunks
for chunk in chunks:
print(f"{chunk.web.title}: {chunk.web.url}")
HTML(rc.grounding_metadata.search_entry_point.rendered_content)
supports = rc.grounding_metadata.grounding_supports
for support in supports:
pprint(support.to_json_dict())
markdown_buffer = io.StringIO()
# Print the text with footnote markers.
markdown_buffer.write("Supported text:\n\n")
for support in supports:
markdown_buffer.write(" * ")
markdown_buffer.write(
rc.content.parts[0].text[
support.segment.start_index : support.segment.end_index
]
)
for i in support.grounding_chunk_indices:
chunk = chunks[i].web
markdown_buffer.write(f"<sup>[{i + 1}]</sup>")
markdown_buffer.write("\n\n")
# Print footnotes.
markdown_buffer.write("Citations:\n\n")
for i, chunk in enumerate(chunks, start=1):
markdown_buffer.write(f"{i}. [{chunk.web.title}]({chunk.web.url})\n")
Markdown(markdown_buffer.getvalue())
# Search with tools
# Use Google search grounding and code generation tools
def show_response(response):
for p in response.candidates[0].content.parts:
if p.text:
display(Markdown(p.text))
elif p.inline_data:
display(Image(p.inline_data.data))
else:
print(p.to_json_dict())
display(Markdown('----'))
config_with_search = types.GenerateContentConfig(
tools=[types.Tool(google_search=types.GoogleSearch())],
temperature=0.0
)
chat = client.chats.create(model='gemini-2.0-flash')
response = chat.send_message(
message="What were the medal tallies, by top-10 countries, for the 2024 Olympics?",
config=config_with_search
)
show_response(response)
config_with_code = types.GenerateContentConfig(
tools=[types.Tool(code_execution=types.ToolCodeExecution())],
temperature=0.0
)
response = chat.send_message(
message="Now plot this as a Seaborn chart. Break out the medals too.",
config=config_with_code
)
show_response(response)
This is the second assigned codelab on day four of the intensive. Download it from GitHub to run locally, or run it in this Kaggle notebook.
"""Tune Gemini Model for Custom Function
Google Gen AI 5-Day Intensive Course
Host: Kaggle
Day: 4
Codelab: https://www.kaggle.com/code/markishere/day-4-fine-tuning-a-custom-model
"""
import datetime
import email
import os
import re
import time
import warnings
from collections.abc import Iterable
import pandas as pd
import tqdm
from google import genai
from google.api_core import retry
from google.genai import types
from sklearn.datasets import fetch_20newsgroups
from tqdm.rich import tqdm as tqdmr
client = genai.Client(api_key=os.environ["GOOGLE_API_KEY"])
for model in client.models.list():
if "createTunedModel" in model.supported_actions:
print(model.name)
newsgroups_train = fetch_20newsgroups(subset='train')
newsgroups_test = fetch_20newsgroups(subset='test')
# View list of class names for dataset
newsgroups_train.target_names
print(newsgroups_train.data[0])
def preprocess_newsgroup_row(data):
# Extract only the subject and body.
msg = email.message_from_string(data)
text = f'{msg["Subject"]}\n\n{msg.get_payload()}'
# Strip any remaining email addresses
text = re.sub(r"[\w\.-]+@[\w\.-]+", "", text)
# Truncate the text to fit within the input limits
text = text[:40000]
return text
def preprocess_newsgroup_data(newsgroup_dataset):
# Put the points into a DataFrame
df = pd.DataFrame(
{
'Text': newsgroup_dataset.data,
'Label': newsgroup_dataset.target
}
)
# Clean up the text
df['Text'] = df['Text'].apply(preprocess_newsgroup_row)
# Match label to target name index
df['Class Name'] = df['Label'].map(lambda l: newsgroup_dataset.target_names[l])
return df
# Apply preprocessing to training and test datasets
df_train = preprocess_newsgroup_data(newsgroups_train)
df_test = preprocess_newsgroup_data(newsgroups_test)
df_train.head()
def sample_data(df, num_samples, classes_to_keep):
# Sample rows, selecting num_samples of each label.
df = (
df.groupby('Label')[df.columns]
.apply(lambda x: x.sample(num_samples))
.reset_index(drop=True)
)
df = df[df['Class Name'].str.contains(classes_to_keep)]
df['Class Name'] = df['Class Name'].astype('category')
return df
TRAIN_NUM_SAMPLES = 50
TEST_NUM_SAMPLES = 10
# Keep rec.* and sci.*
CLASSES_TO_KEEP = '^rec|^sci'
df_train = sample_data(df_train, TRAIN_NUM_SAMPLES, CLASSES_TO_KEEP)
df_test = sample_data(df_test, TEST_NUM_SAMPLES, CLASSES_TO_KEEP)
# Evaluate baseline performance
sample_idx = 0
sample_row = preprocess_newsgroup_row(newsgroups_test.data[sample_idx])
sample_label = newsgroups_test.target_names[newsgroups_test.target[sample_idx]]
print(sample_row)
print('---')
print('Label:', sample_label)
response = client.models.generate_content(
model='gemini-1.5-flash-001',
contents=sample_row
)
print(response.text)
# Ask the model directly in a zero-shot prompt.
prompt = "From what newsgroup does the following message originate?"
baseline_response = client.models.generate_content(
model="gemini-1.5-flash-001",
contents=[prompt, sample_row])
print(baseline_response.text)
# You can use a system instruction to do more direct prompting, and get a
# more succinct answer.
system_instruct = """
You are a classification service. You will be passed input that represents
a newsgroup post and you must respond with the newsgroup from which the post
originates.
"""
# Define a helper to retry when per-minute quota is reached.
is_retriable = lambda e: (isinstance(e, genai.errors.APIError) and e.code in {429, 503})
# If you want to evaluate your own technique, replace the body of this function
# with your own model, prompt, and other code, and return the predicted answer.
@retry.Retry(predicate=is_retriable)
def predict_label(post: str) -> str:
response = client.models.generate_content(
model="gemini-1.5-flash-001",
config=types.GenerateContentConfig(
system_instruction=system_instruct),
contents=post)
rc = response.candidates[0]
    # Any errors, filters, recitation, etc. can be marked as a general error.
if rc.finish_reason.name != "STOP":
return "(error)"
else:
# Clean up the response.
return response.text.strip()
prediction = predict_label(sample_row)
print(prediction)
print()
print("Correct!" if prediction == sample_label else "Incorrect.")
# Enable tqdm features on Pandas.
tqdmr.pandas()
# But suppress the experimental warning
warnings.filterwarnings("ignore", category=tqdm.TqdmExperimentalWarning)
# Further sample the test data to be mindful of the free-tier quota.
df_baseline_eval = sample_data(df_test, 2, '.*')
# Make predictions using the sampled data.
df_baseline_eval['Prediction'] = df_baseline_eval['Text'].progress_apply(predict_label)
# And calculate the accuracy.
accuracy = (df_baseline_eval["Class Name"] == df_baseline_eval["Prediction"]).sum() / len(df_baseline_eval)
print(f"Accuracy: {accuracy:.2%}")
# Tune a custom model
# Convert the data frame into a dataset suitable for tuning.
input_data = {'examples':
df_train[['Text', 'Class Name']]
.rename(columns={'Text': 'textInput', 'Class Name': 'output'})
.to_dict(orient='records')
}
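# Optional peek (not part of the codelab): each record should look like
# {'textInput': <post text>, 'output': <class name>} before it is uploaded.
example = input_data['examples'][0]
print(example['output'], '|', example['textInput'][:80])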
# If you are re-running this lab, add your model_id here.
model_id = None
# Or try and find a recent tuning job.
if not model_id:
queued_model = None
# Newest models first.
for m in reversed(client.tunings.list()):
# Only look at newsgroup classification models.
if m.name.startswith('tunedModels/newsgroup-classification-model'):
# If there is a completed model, use the first (newest) one.
if m.state.name == 'JOB_STATE_SUCCEEDED':
model_id = m.name
print('Found existing tuned model to reuse.')
break
elif m.state.name == 'JOB_STATE_RUNNING' and not queued_model:
# If there's a model still queued, remember the most recent one.
queued_model = m.name
else:
if queued_model:
model_id = queued_model
print('Found queued model, still waiting.')
# Upload the training data and queue the tuning job.
if not model_id:
tuning_op = client.tunings.tune(
base_model="models/gemini-1.5-flash-001-tuning",
training_dataset=input_data,
config=types.CreateTuningJobConfig(
tuned_model_display_name="Newsgroup classification model",
batch_size=16,
epoch_count=2,
),
)
print(tuning_op.state)
model_id = tuning_op.name
print(model_id)
MAX_WAIT = datetime.timedelta(minutes=10)
while not (tuned_model := client.tunings.get(name=model_id)).has_ended:
print(tuned_model.state)
time.sleep(60)
# Don't wait too long. Use a public model if this is going to take a while.
if datetime.datetime.now(datetime.timezone.utc) - tuned_model.create_time > MAX_WAIT:
print("Taking a shortcut, using a previously prepared model.")
model_id = "tunedModels/newsgroup-classification-model-ltenbi1b"
tuned_model = client.tunings.get(name=model_id)
break
print(f"Done! The model state is: {tuned_model.state.name}")
if not tuned_model.has_succeeded and tuned_model.error:
print("Error:", tuned_model.error)
# Use the new model
new_text = """
First-timer looking to get out of here.
Hi, I'm writing about my interest in travelling to the outer limits!
What kind of craft can I buy? What is easiest to access from this 3rd rock?
Let me know how to do that please.
"""
response = client.models.generate_content(
model=model_id, contents=new_text)
print(response.text)
@retry.Retry(predicate=is_retriable)
def classify_text(text: str) -> str:
"""Classify the provided text into a known newsgroup."""
response = client.models.generate_content(
model=model_id,
contents=text)
rc = response.candidates[0]
    # Any errors, filters, recitation, etc. can be marked as a general error.
if rc.finish_reason.name != "STOP":
return "(error)"
else:
return rc.content.parts[0].text
# The sampling here is just to minimise your quota usage. If you can, you should
# evaluate the whole test set with `df_model_eval = df_test.copy()`.
df_model_eval = sample_data(df_test, 4, '.*')
df_model_eval["Prediction"] = df_model_eval["Text"].progress_apply(classify_text)
accuracy = (df_model_eval["Class Name"] == df_model_eval["Prediction"]).sum() / len(df_model_eval)
print(f"Accuracy: {accuracy:.2%}")
# Compare token usage
# Calculate the *input* cost of the baseline model with system instructions.
sysint_tokens = client.models.count_tokens(
model='gemini-1.5-flash-001', contents=[system_instruct, sample_row]
).total_tokens
print(f'System instructed baseline model: {sysint_tokens} (input)')
# Calculate the input cost of the tuned model.
tuned_tokens = client.models.count_tokens(model=tuned_model.base_model, contents=sample_row).total_tokens
print(f'Tuned model: {tuned_tokens} (input)')
savings = (sysint_tokens - tuned_tokens) / tuned_tokens
print(f'Token savings: {savings:.2%}') # Note that this is only n=1.
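# A rough extension (an assumption, not from the codelab): average the input-token
# savings over a handful of sampled posts to reduce the n=1 noise above.
sampled_texts = df_model_eval['Text'].head(5)
avg_sysint = sum(
    client.models.count_tokens(
        model='gemini-1.5-flash-001', contents=[system_instruct, t]
    ).total_tokens
    for t in sampled_texts
) / len(sampled_texts)
avg_tuned = sum(
    client.models.count_tokens(model=tuned_model.base_model, contents=t).total_tokens
    for t in sampled_texts
) / len(sampled_texts)
print(f'Average token savings over {len(sampled_texts)} posts: {(avg_sysint - avg_tuned) / avg_tuned:.2%}')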
# Tweak output token quantity
baseline_token_output = baseline_response.usage_metadata.candidates_token_count
print('Baseline (verbose) output tokens:', baseline_token_output)
tuned_model_output = client.models.generate_content(
model=model_id, contents=sample_row)
tuned_tokens_output = tuned_model_output.usage_metadata.candidates_token_count
print('Tuned output tokens:', tuned_tokens_output)
Because of the effort and time required to create and scale up a new website, it sometimes makes sense to start small. There are many reasons for this, including budget, testing a concept, or simply wanting to keep things manageable at first. Then, as the blogs grow and take on their own identities, you can re-evaluate the strategy or migrate them to their own websites.
Whatever the case, a WordPress CMS blog gives you the flexibility to host multiple blogs on one site. It simplifies blogging by providing a preconfigured framework for adding content, which means anyone can quickly launch a website without knowing HTML or CSS.
I’m using this strategy for this website because I was unsure what type of content, or how much, I would write on each blog: Coding or French Fluency.
So it made sense to start both blogs on the same site, and then, as they evolve and grow, evaluate them and decide on mid- and long-term hosting. This is easy enough to do, but it does require some planning.
The Big Picture
By default, each WordPress site is configured to host one blog and one home page. But because of the platform’s flexibility, multiple blogs can be hosted with a few tweaks. Since WordPress preconfigures a site’s framework, hosting multiple blogs only requires you to consider three key WordPress features to get started: widgets, menus, and pages.
To share two blogs on one site, you must define these three areas for each blog:
Widgets, such as sidebar widgets, contain information relevant to the site, such as recent posts, pages, and search.
Menus are used for navigation around your site. It’s how visitors find your content.
Pages are used to showcase your content, including posts, which are a special type of page.
Pages
Let’s look at what WordPress features help you show your blog to visitors.
The first is the blog posts page: any blank page, once published and configured as your blog posts page, will display all posts published on your site. The name of the page doesn’t matter; you can use any name, but it’s commonly named Blog.
When you specify this page as your blog posts page in Settings > Reading, anytime this page loads it will display all of your posts. As you create posts, they will appear on this page in reverse chronological order (newest to oldest).
This default option can no longer be used if your site hosts multiple blogs.
Menus
Depending on your theme, WordPress allows you to create custom menus, so there should be a set of menus per blog. The menu on the home page can be inclusive, to let new visitors know what’s available. Ideally, though, once they navigate into the content of either blog, the menus should be blog-specific.
Widgets
Widgets are units of code that allow WordPress site owners to quickly display lists of pages, tags, categories, and more. Each widget offers customizations that let you control what content is visible. For example, a categories widget can be filtered to display specific categories or subcategories.
Keeping our focus on these key areas, we’ll explore a strategy for hosting multiple blogs on one WordPress site.
A Simple Strategy
To configure a multi-blog WordPress website, the default configuration has to be bypassed so that your posts from each blog will appear separately. The simplest strategy is to use categories, pages, menus, and widgets. For this example, we’ll use a scenario where you want to host two blogs on one WordPress CMS site.
Categories
Thinking about your content, consider what master categories you will use for each blog. There can be subcategories under each master, but having two master categories, one per blog, is key. On my site, the two master categories I created are ‘Coding’ and ‘Français’. When I publish a new post, it’s assigned to one of these master categories.
You can create subcategories under each master category, and posts in them will also appear in the list of posts for the master category they belong to. As you write posts, assign each one to at least a master category. You don’t need subcategories to separate the content of the two blogs.
Then use these categories in blog-specific widgets for all relevant pages. To maintain the separation, all pages and links for each blog should be focused. That means creating a sidebar widget and menus for each blog.
Menus
Header Menu
Starting with the header menu, create custom header menu items, one for each blog. These are in addition to the menu items you already have, such as links to your home and about pages. Starting with a unified header menu is the simplest option, as creating multiple header templates may require editing PHP code.
The goal is to ensure that once a visitor engages with one of the blogs, the header menu will allow them to navigate to your other blog(s) or the home page. Once a user navigates away from the home page, they will have this global header menu available.
For example, on my home page, the header menu bar contains a link to posts from my coding and French-language blogs. Clicking on each menu item takes you to a page that only contains posts for that category.
Widgets
The ease with which you can create new sidebars depends on your theme. Some themes are configured to allow multiple widget sidebars, while others are not. For my site, I’m using the Twenty Seventeen theme because I like its clean, simple design. However, it only allows one sidebar. To create multiple ones, I had to modify the theme’s PHP code. If this is the case with your site, contact me, and I’ll try to provide assistance for your theme.
Pages
The third component in our multi-blog strategy is Pages, which requires some planning. Without the default Blog page, you will have to create two new pages, one for each blog, each with a Latest Posts block to display posts from the master categories you established earlier. For each page, specify one master category to use, even if it has subcategories.
Any links you share for each blog should direct visitors to only the content you want them to see. With this strategy, there will be one home page, but since you can make it static, you can design the page to introduce both of your blogs. This strategy will eliminate the use of the default blog page configuration that WordPress expects in settings.
The home page should be configured as static in Settings > Reading, which allows you to add any content you desire. For example, you could make the home page showcase your primary business, or make it an introduction to both blogs.
Each new page you create should be specific to one blog and include the relevant widgets and links. Because pages can’t be assigned to a category, you will have to populate them with relevant content manually. Keep in mind that WordPress lets you build pages with blocks, just like posts, so you don’t have to learn HTML or CSS.
On this site, I added a posts page for each blog and used the Latest Posts block to show recent posts. For my French blog, I created this page which contains my French language posts in reverse chronological order. For the coding blog, I created this one.
Posts
When I create a new post, I add links to recent posts, filtered by category, at the end of the post. My site is fairly new, so I expect to add more related-content blocks to posts over time. This way, when a visitor wants to see more content from one of the blogs, they can use either the sidebar widgets or the Latest Posts content block. Of course, you can also include links to related content anywhere within the post, not just at the bottom or in the sidebar.
Specifying Sidebar Content
Each page and post for both blogs should contain topic-specific sidebar widgets, menus, and content. Depending on your theme, you may need additional plugins such as Content Aware Sidebars. Content Aware Sidebars lets you specify the content of existing sidebars. It doesn’t create them for you though, so if your theme doesn’t contain a sidebar, you’ll have to edit the theme.
Here you can see the sidebar content specified for posts and pages for each blog.
Then, within the Coding Page Sidebar, a condition and an action have been created.
Content Aware Sidebars configuration can be a bit confusing at first, so the included screenshots should help you understand how to create a sidebar configuration. The plugin comes with many more options, but it’s best to start simple and then customize as needed.
A Note of Caution
The amount of content that you can add to either blog depends on your hosting plan. If your plan allows a small amount of space but your blogs will contain video-rich content, then you may have to either upgrade your hosting plan or split your blogs into different sites sooner rather than later. If it’s a text and image-based site, you’ll have more time to develop your blogs before deciding to split them up.
Conclusion
You’ve seen one idea for how to separate multiple blogs on one WordPress site. The solution you create will most likely look different from mine because it depends on your theme and how far you push your customizations.
Whether or not you’re technically savvy, the main goal is to control the sidebars, pages, posts, and content that visitors will see. If you can devise a strategy for managing them, you will be able to host multiple blogs with only a reasonable increase in administrative overhead. Once you have these basic features configured, you can dig deeper into other parts of your blog, such as your home page and header menus.
I don’t know why, but the French lessons are a bit difficult today. I had trouble listening and thinking in French. So I left early. Maybe I’ll try again later today. I just couldn’t concentrate.
It took me a while to understand today’s expressions, such as <l’examen de demain>, <c’est plus facile de + infinitif>, and <Je la lui donne>. These constructions are unique to French, and I found them a bit hard to grasp. But now I understand them much better.
Expressions | Notes
Tu es prête pour <l’examen de demain>? | <l’examen de demain> is the same as <l’examen demain>.
 | One says ‘à quelque distance (km, m, etc)’ instead of just ‘quelque distance’.
J’ai conduit <partout> dans cette ville. |
Je pouvais rarement sortir | The same as ‘je sortais rarement’.
Avant, il n’y avait pas l’électricité chez nous | ‘Pas l’électricité’ means there has never been electricity, while ‘pas d’électricité’ is used when electricity exists but isn’t working right now.
Mais maintenant, c’est plus facile de conduire une voiture ou de rester à la maison avec un ordinateur. | The preposition ‘de’ is used before an infinitive followed by a complement. For example, ‘il est facile à lire’ versus ‘il est facile de lire ce livre’.
Oui, je vais <t’envoyer le lien> | Here, <le lien> is a URL.
Mon fils aime ce jeu alors <je le lui offre> | See below. The pronouns <lui> and <leur> follow the pronouns <le>, <la>, <les> before the verb.
Si tu veux ce magazine, <je te le donne> | The same as above.
Je lui donne <la pomme>. Je <la lui> donne. | The pronouns me, te, nous, and vous come before le, la, and les, but lui and leur come after le, la, and les. This difference matters to me!
Chaque dimanche, j’achète ce magazine hebdomadaire |
I continued studying the imparfait and the spelling of imperfect verbs. I compared the present, the passé composé, and the imparfait; for example, the difference between ‘elle va’, ‘elle est allée’, and ‘elle allait’ for the verb ‘aller’. I understand it better today than before, but I want to learn the spelling of all the forms.
Français | Notes
Avant, je ne lisais pas de livres numériques | E-books didn’t exist back then.
Nous n’avions pas de machine à laver quand j’étais petit. <Tu faisais comment> alors pour laver tes vêtements? | How people used to do anything.
Avant, je prenais rarement la voiture | The adverb ‘rarement’ comes after the verb.
À mon époque, <on prenait le temps> | ‘On prenait le temps’ is a good expression to know.
Vous étiez beaux à cette époque, et vous êtes toujours très beaux! |
Tu as raison. <Je devrais> apprécier la nature. |
<On devrait> manger moins de sucre. |
<Tout à coup>, Vikram entend un bruit dehors | Something that happens very suddenly.
Fais attention! |
Il fait vraiment nuit! | It is already night.
<Tu veux dire> … deux pizzas! | “I mean …”
d’à côté | near something
réduire | to make less
Elle voyageait beaucoup | Note the spelling of ‘voyageait’, with the ‘e’ between the ‘g’ and the ‘a’.
Avant, cet auteur écrivait ses livres avec un stylo et <cela prenait trop de temps>. Avec son nouvel ordinateur, il va vite et il est content. |
I started learning the imparfait. For example, ‘être’ in the imparfait is j’étais, tu étais, il/elle était, nous étions, vous étiez, ils/elles étaient. For ‘avoir’, it’s j’avais, tu avais, il/elle avait, nous avions, vous aviez, ils/elles avaient. With the -er verb ‘aller’, it’s j’allais, tu allais, il/elle allait, nous allions, vous alliez, ils/elles allaient.
French | Notes
<Ça te dit de> venir avec nous? | Means ‘you’d like to’ or ‘you want to’. One can also say ‘que tu dit?’ and ‘Comment environ’.
<Ça vous dit de> venir avec nous? |
Il trouve toujours de l’argent dans la rue. <Il a de la chance.> |
<Nous allons louer> une maison pendant une semaine | to pay to stay in the house
Je suis arrivé <en avance> à la gare. | before the scheduled time
Souhaiter quelque chose. | je souhaite, tu souhaites, il/elle souhaite, nous souhaitons, vous souhaitez, ils/elles souhaitent
Nous <y sommes allés> en train | The same as ‘Nous <sommes allés là> en train’.
Alors, avez-vous passé du temps <en Europe de l’Est>? |
Quand on <fait une randonnée>, on se promène longtemps, en général à pied. | ‘Faire une randonnée’, not ‘prendre une randonnée’; also ‘faire de la randonnée’: ‘Je vais faire de la randonnée’.
J’adore <skier> | ‘faire du ski’ vs ‘skier’: J’aime beaucoup skier. Je fais du ski bientôt.
Vous connaissez <les Pyrénées>? | Je veux aller skier en France cet hiver. D’accord. Ça te dit d’aller dans les Pyrénées?
Nous allons dans <les Alpes françaises> |
Nous avons <un pneu crevé>! Et nous n’avons pas de <roue de secours>. | a tire without air
Et tu <m’as ouvert> la porte. Tu <m’as surprise> avec des fleurs! | In the passé composé.
On y est! Entrons. | “That’s it; we’re here; let’s go in.”
sentiers | trails: ‘On fait de la randonnée sur des sentiers.’
Les vues sont incroyables du sommet |
Il faut lui souhaiter bonne chance | versus ‘Tu dois lui souhaiter bonne chance’.
Ça prend |
sourire | to smile
À mon époque, on faisait moins de vélo que maintenant | the imparfait
Nous étions jeunes et beaux |
Nous avions une voiture jaune quand nous étions plus jeunes |
Ils pouvaient danser <pendant des heures> quand ils avaient vingt ans. |
Avant, ils faisaient la cuisine chez eux |
La machine à laver et le micro-ondes sont <des appareils électriques> de la maison. |
Quand j’étais petit, mes parents avaient seulement un appareil électrique chez eux. |
How to build an agent using LangGraph. Try the code locally by downloading it from GitHub, or run it in a Kaggle notebook.
"""
Build an agent with LangGraph
Google Gen AI 5-Day Intensive Course
Host: Kaggle
Day: 3
Codelab: https://www.kaggle.com/code/markishere/day-3-building-an-agent-with-langgraph/#IMPORTANT!
"""
from pprint import pprint
from typing import Annotated, Literal
from google import genai
from IPython.display import Image
from langchain_core.messages.ai import AIMessage
from langchain_core.tools import tool
from langchain_google_genai import ChatGoogleGenerativeAI
from langgraph.graph import END, START, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode
from typing_extensions import TypedDict
from collections.abc import Iterable
from random import randint
from langchain_core.messages.tool import ToolMessage
class OrderState(TypedDict):
"""State representing the customer's order conversation."""
    # Preserves the conversation history between nodes.
    # The `add_messages` annotation indicates to LangGraph that state
    # is updated by appending returned messages, not replacing them.
messages: Annotated[list, add_messages]
# The customer's in-progress order.
order: list[str]
# Flag indicating that the order is placed and completed.
finished: bool
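# A quick illustration (not from the codelab) of why the reducer matters: calling
# add_messages directly shows that it appends and normalizes, rather than replaces.
existing = add_messages([], [("user", "hi")])
updated = add_messages(existing, [("ai", "hello!")])
# `updated` now holds both messages, each converted to a Message object with an id.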
# The system instruction defines how the chatbot is expected to behave
# and includes rules for when to call different functions,
# as well as rules for the conversation, such as tone and what is permitted
# for discussion.
BARISTABOT_SYSINT = (
# 'system' indicates the message is a system instruction.
"system",
"You are a BaristaBot, an interactive cafe ordering system. A human will talk to you about the "
"available products you have and you will answer any questions about menu items (and only about "
"menu items - no off-topic discussion, but you can chat about the products and their history). "
"The customer will place an order for 1 or more items from the menu, which you will structure "
"and send to the ordering system after confirming the order with the human. "
"\n\n"
"Add items to the customer's order with add_to_order, and reset the order with clear_order. "
"To see the contents of the order so far, call get_order (this is shown to you, not the user) "
"Always confirm_order with the user (double-check) before calling place_order. Calling confirm_order will "
"display the order items to the user and returns their response to seeing the list. Their response may contain modifications. "
"Always verify and respond with drink and modifier names from the MENU before adding them to the order. "
"If you are unsure a drink or modifier matches those on the MENU, ask a question to clarify or redirect. "
"You only have the modifiers listed on the menu. "
"Once the customer has finished ordering items, Call confirm_order to ensure it is correct then make "
"any necessary updates and then call place_order. Once place_order has returned, thank the user and "
"say goodbye!"
"\n\n"
"If any of the tools are unavailable, you can break the fourth wall and tell the user that "
"they have not implemented them yet and should keep reading to do so.",
)
# This is the message with which the system opens the conversation.
WELCOME_MSG = "Welcome to the BaristaBot cafe. Type `q` to quit. How may I serve you today?"
# Define chatbot node
# This node will represent a single turn in a chat conversation
#
# Try using different models. The Gemini 2.0 flash model is highly capable, great with tools,
# and has a generous free tier. If you try the older 1.5 models, note that the 'pro' models are
# better at complex multi-tool cases like this, but the 'flash' models are faster and have more
# free quota.
#
# Check out the features and quota differences here:
# - https://ai.google.dev/gemini-api/docs/models/gemini
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")
def chatbot(state: OrderState) -> OrderState:
"""The chatbot itself. A simple wrapper around the model's own chat interface."""
message_history = [BARISTABOT_SYSINT] + state["messages"]
return {"messages": [llm.invoke(message_history)]}
# Set up the initial graph based on our state definition.
graph_builder = StateGraph(OrderState)
# Add the chatbot function to the app graph as a node called 'chatbot'.
graph_builder.add_node("chatbot", chatbot)
# Define the chatbot node as the app entrypoint.
graph_builder.add_edge(START, "chatbot")
chat_graph = graph_builder.compile()
# Render the graph to visualize it.
Image(chat_graph.get_graph().draw_mermaid_png())
# The defined graph has only one node.
# So the chat will begin at __start__, execute the chatbot node and terminate.
user_msg = "Hello, what can you do?"
state = chat_graph.invoke({"messages": [user_msg]})
# The state object contains lots of information. Uncomment the pprint lines to see it all.
pprint(state)
# Note that the final state now has 2 messages. Our HumanMessage, and an additional AIMessage.
for msg in state["messages"]:
print(f"{type(msg).__name__}: {msg.content}")
# This conversation turn could be executed in a Python loop.
# Here it is invoked manually once.
user_msg2 = "Oh great, what kinds of latte can you make?"
state["messages"].append(user_msg2)
state = chat_graph.invoke(state)
# pprint(state)
for msg in state["messages"]:
print(f"{type(msg).__name__}: {msg.content}")
# Add a human node
# LangGraph can be looped between nodes
# This node will display the last message from the LLM to the user,
# then prompt them for their next input.
def human_node(state: OrderState) -> OrderState:
"""Display the last model message to the user and get the user's input."""
last_msg = state["message"][-1]
print("Model:", last_msg.content)
user_input = input("User: ")
    # If it looks like the user is trying to quit, flag the conversation as over.
if user_input in {"q", "quit", "exit", "goodbye"}:
state["finished"] = True
return state | {"messages": [("user", user_input)]}
def chatbot_with_welcome_msg(state: OrderState) -> OrderState:
"""The chatbot itself. A wrapper around the model's own chat interface."""
if state["messages"]:
# If there are messages, continue the conversation with the Gemini model.
        new_output = llm.invoke([BARISTABOT_SYSINT] + state["messages"])
else:
# If there are no messages, start with the welcome message.
new_output = AIMessage(content=WELCOME_MSG)
return state | {"messages": [new_output]}
# Start building a new graph.
graph_builder = StateGraph(OrderState)
# Add the chatbot and human nodes to the app graph.
graph_builder.add_node("chatbot", chatbot_with_welcome_msg)
graph_builder.add_node("human", human_node)
# Start with the chatbot again.
graph_builder.add_edge(START, "chatbot")
# The chatbot will always go to the human next.
graph_builder.add_edge("chatbot", "human")
# Create a conditional edge
def maybe_exit_human_node(state: OrderState) -> Literal["chatbot", "__end__"]:
"""Route to the chatbot, unless it looks like the user is exiting."""
if state.get("finished", False):
return END
else:
return "chatbot"
graph_builder.add_conditional_edges("human", maybe_exit_human_node)
chat_with_human_graph = graph_builder.compile()
Image(chat_with_human_graph.get_graph().draw_mermaid_png())
# The default recursion limit for traversing nodes is 25 - setting it higher means
# you can try a more complex order with multiple steps and round-trips and you can chat for longer!
config = {"recursion_limit": 100}
# Remember that this will loop forever, unless you input 'q', 'quit' or one of the other exit terms
# defined in 'human_node'.
# Uncomment this line to execute the graph:
# state = chat_with_human_graph.invoke({"messages": []}, config)
#
# Things to try:
# - Just chat! There's no ordering or menu yet.
# - 'q' to exit.
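# An optional defensive sketch (an assumption, not part of the codelab): if you do
# run the graph and exceed the recursion limit, LangGraph raises GraphRecursionError,
# which you can catch instead of crashing. Uncomment to try it:
# from langgraph.errors import GraphRecursionError
# try:
#     state = chat_with_human_graph.invoke({"messages": []}, config)
# except GraphRecursionError:
#     print("Hit the recursion limit - raise `recursion_limit` in `config` to chat longer.")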
pprint(state)
# Add a "live" menu
# To create a dynamic menu to respond to changing stock levels
# There are two types of tools: stateless and stateful
# Stateless tools run automatically: get current menu: it doesn't make changes
# Stateful tools modify the order
# In LangGraph Python functions can be annotated as tools by applying @tools annotation
@tool
def get_menu() -> str:
"""Provide the latest up-to-date menu."""
    # Note that this is just hard-coded text, but you could connect this to a live stock
    # database, or you could use Gemini's multi-modal capabilities and take live photos
    # of your cafe's chalk menu or the products on the counter, and assemble them into an input.
return """
MENU:
Coffee Drinks:
Espresso
Americano
Cold Brew
Coffee Drinks with Milk:
Latte
Cappuccino
Cortado
Macchiato
Mocha
Flat White
Tea Drinks:
English Breakfast Tea
Green Tea
Earl Grey
Tea Drinks with Milk:
Chai Latte
Matcha Latte
London Fog
Other Drinks:
Steamer
Hot Chocolate
Modifiers:
Milk options: Whole, 2%, Oat, Almond, 2% Lactose Free; Default option: whole
Espresso shots: Single, Double, Triple, Quadruple; default: Double
Caffeine: Decaf, Regular; default: Regular
Hot-Iced: Hot, Iced; Default: Hot
Sweeteners (option to add one or more): vanilla sweetener, hazelnut sweetener, caramel sauce, chocolate sauce, sugar free vanilla sweetener
Special requests: any reasonable modification that does not involve items not on the menu, for example: 'extra hot', 'one pump', 'half caff', 'extra foam', etc.
"dirty" means add a shot of espresso to a drink that doesn't usually have it, like "Dirty Chai Latte".
"Regular milk" is the same as 'whole milk'.
"Sweetened" means add some regular sugar, not a sweetener.
Soy milk has run out of stock today, so soy is not available.
"""
# Add the tool to the graph
# Define the tools and create a "tools" node.
tools = [get_menu]
tool_node = ToolNode(tools)
# Attach the tools to the model so that it knows what it can call.
llm_with_tools = llm.bind_tools(tools)
def maybe_route_to_tools(state: OrderState) -> Literal["tools", "human"]:
"""Route between human or tool nodes, depending if a tool call is made."""
if not (msgs := state.get("messages", [])):
raise ValueError(f"No messages found when parsing state: {state}")
# Only route based on the last message.
msg = msgs[-1]
    # When the chatbot returns tool_calls, route to the "tools" node.
if hasattr(msg, "tool_calls") and len(msg.tool_calls) > 0:
return "tools"
else:
return "human"
def chatbot_with_tools(state: OrderState) -> OrderState:
"""The chatbot with tools. A simple wrapper around the model's own chat interface."""
defaults = {"order": [], "finished": False}
if state["messages"]:
new_output = llm_with_tools.invoke(
[BARISTABOT_SYSINT] + state["messages"]
)
else:
new_output = AIMessage(content=WELCOME_MSG)
# Set up some defaults if not already set, then pass through the provided state,
# overriding only the "messages" field.
return defaults | state | {"messages": [new_output]}
graph_builder = StateGraph(OrderState)
# Add the nodes, including the new tool_node.
graph_builder.add_node("chatbot", chatbot_with_tools)
graph_builder.add_node("human", human_node)
graph_builder.add_node("tools", tool_node)
# Chatbot may go to tools, or human.
graph_builder.add_conditional_edges("chatbot", maybe_route_to_tools)
# Human may go back to chatbot, or exit.
graph_builder.add_conditional_edges("human", maybe_exit_human_node)
# Tools always route back to chat afterwards.
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge(START, "chatbot")
graph_with_menu = graph_builder.compile()
Image(graph_with_menu.get_graph().draw_mermaid_png())
# Remember that you have not implemented ordering yet, so this will loop forever,
# unless you input `q`, `quit` or one of the other exit terms defined in the
# `human_node`.
# Uncomment this line to execute the graph:
# state = graph_with_menu.invoke({"messages": []}, config)
# Things to try:
# - I'd love an espresso drink, what have you got?
# - What teas do you have?
# - Can you do a long black? (this is on the menu as an "Americano" - see if it can
# figure it out)
# - 'q' to exit.
pprint(state)
# Handle orders
# Update state to track an order and provide simple tools that update the state.
# You will need to be explicit here, as the model should not have direct access to
# the app's internal state.
#
# These functions have no body; LangGraph does not allow @tools to update the
# conversation state, so you will implement a separate node to handle state
# updates.
@tool
def add_to_order(drink: str, modifiers: Iterable[str]) -> str:
"""Adds the specified drink to the customer's order, including any modifiers.
Returns:
The updated order in progress.
"""
@tool
def confirm_order() -> str:
"""Asks customer if the order is correct.
Returns:
The user's free-text response.
"""
@tool
def get_order() -> str:
"""Returns the users order so far. One item per line."""
@tool
def clear_order():
"""Removes all items from the user's order."""
@tool
def place_order() -> int:
"""Sends the order to the barista for fulfillment.
Returns:
The estimated number of minutes until the order is ready.
"""
def order_node(state: OrderState) -> OrderState:
"""The ordering node. This is where the order state is manipulated."""
tool_msg = state.get("messages", [])[-1]
order = state.get("order", [])
outbound_msgs = []
order_placed = False
for tool_call in tool_msg.tool_calls:
if tool_call["name"] == "add_to_order":
            # Each order item is just a string. This is where it is assembled
            # as "drink (modifiers, ...)".
modifiers = tool_call["args"]["modifiers"]
modifier_str = ", ".join(modifiers) if modifiers else "no modifiers"
order.append(f"{tool_call["args"]["drink"]} ({modifier_str}))
response = "\n".join(order)
elif tool_call["name"] == "confirm_order":
# We could entrust the LLM to do order confirmation, but it is a good practice to
# show the user the exact data that comprises their order so that what they confirm
# precisely matches the order that goes to the kitchen - avoiding hallucination
# or reality skew.
# In a real scenario, this is where you would connect your POS screen to show the
# order to the user.
print("Your order:")
if not order:
print(" (no items)")
for drink in order:
print(f" {drink}")
response = input("Is this correct?")
elif tool_call["name"] == "get_order":
response = "\n".join(order) if order else "(no order)"
elif tool_call["name"] == "clear_order":
order.clear()
response = None
elif tool_call["name"] == "place_order":
order_text = "\n".join(order)
print("Sending order to kitchen!")
print(order_text)
# TODO: Implement cafe.
order_placed = True
response = randint(1, 5) # ETA in minutes
else:
            raise NotImplementedError(f'Unknown tool call: {tool_call["name"]}')
        # Record the tool results as a ToolMessage.
outbound_msgs.append(
ToolMessage(
content=response,
name=tool_call["name"],
tool_call_id=tool_call["id"]
)
)
return {"messages": outbound_msgs, "order": order, "finished": order_placed}
def maybe_route_to_tools(state: OrderState) -> str:
"""Route between chat and tool nodes if a tool call is made."""
if not (msgs := state.get("messages", [])):
raise ValueError(f"No messages found when parsing state: {state}")
msg = msgs[-1]
if state.get("finished", False):
# When an order is placed, exit the app. The system instruction indicates
# that the chatbot should say thanks and goodbye at this point, so we can exit
# cleanly.
return END
elif hasattr(msg, "tool_calls") and len(msg.tool_calls) > 0:
# Route to 'tools' node for any automated tool calls first.
if any(
tool["name"] in tool_node.tools_by_name,keys() for tool in msg.tool_calls
):
return "tools"
else:
return "ordering"
else:
return "human"
# Define the tools and the graph so the LLM knows which tools it can invoke.
# Auto-tools will be invoked automatically by the ToolNode.
auto_tools = [get_menu]
tool_node = ToolNode(auto_tools)
# Order-tools will be handled by the order node.
order_tools = [add_to_order, confirm_order, get_order, clear_order, place_order]
# The LLM needs to know about all of the tools, so specify everything here
llm_with_tools = llm.bind_tools(auto_tools + order_tools)
graph_builder = StateGraph(OrderState)
# Nodes
graph_builder.add_node("chatbot", chatbot_with_tools)
graph_builder.add_node("human", human_node)
graph_builder.add_node("tools", tool_node)
graph_builder.add_node("ordering", order_node)
# Chatbot -> (ordering, tools, human, END)
graph_builder.add_conditional_edges("chatbot", maybe_route_to_tools)
# Human -> (chatbot, END)
graph_builder.add_conditional_edges("human", maybe_exit_human_node)
# Tools (both kinds) always route back to chat afterwards.
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge("ordering", "chatbot")
graph_builder.add_edge(START, "chatbot")
graph_with_order_tools = graph_builder.compile()
Image(graph_with_order_tools.get_graph().draw_mermaid_png())
# Uncomment this line to execute the graph:
# state = graph_with_order_tools.invoke({"messages": []}, config)
# Things to try:
# - Order a drink!
# - Make a change to your order.
# - "Which teas are from England?"
# - Note that the graph should naturally exit after placing an order.
pprint(state)
# Uncomment this once you have run the graph from the previous cell.
pprint(state["order"])