DEV Community

Connie Leung
Connie Leung

Posted on

Multi-Turn Chat to Edit Images with NanoBanana, Angular and Firebase AI Logic

In this blog post, I want to demonstrate using Firebase AI Logic to edit an image in a multi-turn chat, which is the recommended approach for iterative image editing.

Chat is preferred over single-turn requests because single-turn requests do not retain context. When Nano Banana generates an image that does not meet expectations, users have to add new context to the original prompt and start over.

Using this approach, users can refine a text prompt using natural language to edit an existing image iteratively. The chat is powered by the multimodal model, Nano Banana, using Angular and Firebase AI Logic.


Firebase Configuration Object

// firebase-ai.json
{
  "app": {
    "apiKey": "<API key>",
    "authDomain": "<Firebase ID>.firebaseapp.com",
    "projectId": "<Firebase ID>",
    "storageBucket": "<Firebase ID>.firebasestorage.app",
    "messagingSenderId": "<Messaging ID>",
    "appId": "<App ID>"
  },
  "geminiModelName": "gemini-2.5-flash-image",
  "geminiAPIKey": "<Gemini API Key>",
  "geminiVideoModelName": "veo-3.1-fast-generate-preview",
  "pollingPeriod": 10000,
  "is_veo31_used": true
}
Enter fullscreen mode Exit fullscreen mode

The JSON object stores the configuration required to initialize a Firebase application. The Angular application uses the Firebase application to create the Nano Banana model.

Create the Nano Banana model

// angular-fire.provider.ts

// Pull the Firebase app options and the model name from the bundled config,
// falling back to the Nano Banana image model when no name is configured.
const { app, geminiModelName = 'gemini-2.5-flash-image' } = firebaseConfig;
// NOTE: initializeApp runs at module load time (import-time side effect).
const firebaseApp = initializeApp(app);
// AI entry point backed by the Gemini Developer API (GoogleAIBackend).
const ai = getAI(firebaseApp, { backend: new GoogleAIBackend() });

// Default model parameters: request exactly one image candidate per turn.
const DEFAULT_CONFIG: ModelParams = {
  model: geminiModelName,
  generationConfig: {
      responseModalities: [ResponseModality.IMAGE],
      candidateCount: 1,
  }
};

/**
 * Builds the environment providers for Firebase: registers the Firebase app
 * and binds the NANO_BANANA_MODEL token to a lazily created generative model.
 */
export function provideAngularFire() {
    return makeEnvironmentProviders([
        provideFirebaseApp(() => firebaseApp),
        {
            provide: NANO_BANANA_MODEL,
            // The model is only instantiated when the token is first injected.
            useFactory: () => getGenerativeModel(ai, DEFAULT_CONFIG),
        }
    ]);
}
Enter fullscreen mode Exit fullscreen mode

The provideAngularFire function initializes a Firebase application and registers the NANO_BANANA_MODEL injection token, which provides the Nano Banana model to the rest of the application.

// app.config.ts

// Application-wide providers: Firebase app + the Nano Banana model token.
export const appConfig = {
  providers: [
    provideAngularFire(),
  ]
};

// Bootstrap the root component; surface any startup failure on the console.
bootstrapApplication(AppComponent, appConfig)
  .catch((err) => console.error(err));
Enter fullscreen mode Exit fullscreen mode

The application bootstraps with appConfig, which provides the Firebase functionality and the Nano Banana model.

Firebase Chat Session

// firebase.service.ts

/**
 * Thin wrapper around the injected Nano Banana generative model that hands
 * out fresh multi-turn chat sessions for image editing.
 */
@Injectable({
  providedIn: 'root'
})
export class FirebaseService  {
    // Generative model registered under the NANO_BANANA_MODEL token.
    private readonly nanoBananaModel = inject(NANO_BANANA_MODEL);

    /**
     * Opens a new ChatSession on the model. Each session keeps its own
     * conversation history, so successive messages retain editing context.
     */
    createChat(): ChatSession {
      return this.nanoBananaModel.startChat();
    }
}
Enter fullscreen mode Exit fullscreen mode

The createChat method uses the Nano Banana model to create a new ChatSession. Messages will be sent to the chat to edit the old image to create a new one.

Multi-turn Chat to edit an image in Firebase AI Logic

/**
 * Drives the multi-turn image-editing conversation: owns the active chat
 * session and turns (prompt, previous image) pairs into newly edited images.
 */
@Injectable({
  providedIn: 'root'
})
export class ConversationEditService {
  // Active multi-turn chat session; undefined when no edit session is running.
  chat = signal<ChatSession | undefined>(undefined);

  firebaseService = inject(FirebaseService);

  /** Begins a new edit session by creating a fresh ChatSession. */
  startEdit(): void {
    const chatInstance = this.firebaseService.createChat();
    this.chat.set(chatInstance);
  }

  /**
   * Sends the prompt (plus the previous image, when present) to the chat and
   * returns the generated image as inline data and a base64 string.
   * @throws Error when no chat session is active, or when the model's
   *   response contains no usable image part.
   */
  async editImage(prompt: string, inlineData: GenerativeContentBlob): Promise<Base64InlineData> {
     const contentParts = await this.getGeneratedParts(inlineData, prompt);

      if (contentParts.length > 0) {
        const { data = '', mimeType = '' } = contentParts[0].inlineData;
        if (data && mimeType) {
          return {
            inlineData: { data, mimeType },
            base64: getBase64EncodedString({ data, mimeType })
          };
        }
      }
      throw new Error('Send message completed but image is not generated.');
  }

  // Builds the chat message and returns the inline-data parts of the reply.
  private async getGeneratedParts(inlineData: GenerativeContentBlob, prompt: string) {
    const currentChat = this.chat();
    if (!currentChat) {
      // Fail fast with a precise error. Previously this returned [], which
      // surfaced to callers as a misleading "image is not generated" error.
      throw new Error('Chat session has not been started. Call startEdit() first.');
    }
    // Attach the previous image only when it carries real data; otherwise
    // the turn is text-only and relies on the session's retained context.
    const inlineDataPart = inlineData.data && inlineData.mimeType ? { inlineData } : undefined;
    const message = inlineDataPart ? [prompt, inlineDataPart] : [prompt];
    const response = await currentChat.sendMessage(message);

    return response.response.inlineDataParts() ?? [];
  }

  /** Ends the current edit session and drops its conversation context. */
  endEdit(): void {
    this.chat.set(undefined);
  }
}
Enter fullscreen mode Exit fullscreen mode

The ConversationEditService has a chat signal to store a running ChatSession.

The getGeneratedParts method constructs a new message and sends it to the chat to generate a new image. When the inlineData has valid base64 image data and a valid MIME type, the message is composed of the prompt and the image. Otherwise, the message only has a text prompt.

The editImage method uses the Nano Banana model to create a new image and returns it to the component.

Multi-turn Chat Component

// conversation-edit.html

<!-- Chat panel: message history on top, prompt input pinned at the bottom. -->
<div class="w-full max-w-2xl mx-auto mb-8 h-[70vh] flex flex-col bg-gray-800 border border-gray-700 rounded-xl shadow-2xl">
    <!-- Scrollable conversation history; emits downloadImage per AI image. -->
    <app-conversation-messages
      class="flex-grow p-6 space-y-6 overflow-y-auto"
      [messages]="messages()"
      [isLoading]="isLoading()"
      (downloadImage)="handleDownloadImage($event)"
    />
    <!-- Prompt form; disabled while no session is active or a turn is in flight. -->
    <app-conversation-input-form
      [pauseSendPrompt]="!isEditing() || isLoading()"
      [isGeneratingImage]="isLoading()"
      (newEditRequest)="handleSendPrompt($event)"
    />
</div>
Enter fullscreen mode Exit fullscreen mode

When the send button is clicked, the newEditRequest custom event executes the handleSendPrompt method of the ConversationEditComponent.

// conversation-edit.component.ts
// Last image produced by the chat; seeded with a default blob and fed back
// into the next turn so the model edits the most recent result.
lastEditedImage = signal<GenerativeContentBlob>(DEFAULT_BASE64_INLINE_DATA.inlineData);

conversationEditService = inject(ConversationEditService);

/**
 * Handles a submitted prompt: appends the user message plus a placeholder AI
 * message, asks the service to edit the last image, then swaps the
 * placeholder for the generated result.
 */
async handleSendPrompt(prompt: string): Promise<void> {
    this.isLoading.set(true);

    // NOTE(review): indexing [length - 1] throws if messages() is empty —
    // presumably a greeting message is seeded elsewhere; confirm.
    const { aiMessageId, pair } = makeAIResponsePair(prompt, this.messages()[this.messages().length - 1].id);
    this.messages.update(messages => ([...messages, ...pair]));

    try {
      const { inlineData, base64 }
        = await this.conversationEditService.editImage(prompt, this.lastEditedImage());
      // Replace only the placeholder AI message with the generated image.
      this.messages.update(messages => {
        return messages.map(message => message.id !== aiMessageId  ?
          message : makeSuccessMessage(message, base64)
        );
      });

      // Carry the new image into the next editing turn.
      this.lastEditedImage.set(inlineData);
    } finally {
      // NOTE(review): there is no catch — on failure the placeholder AI
      // message is never updated and stays in its pending state; only the
      // loading flag is reset. Consider marking the message as failed.
      this.isLoading.set(false);
    }
 }
Enter fullscreen mode Exit fullscreen mode

The lastEditedImage signal stores the last image in the chat.

handleSendPrompt appends the user message and a placeholder AI message to the messages. The this.conversationEditService.editImage method prompts the Nano Banana model to edit the last image based on the submitted prompt. The placeholder AI image is replaced with the new image. The lastEditedImage signal is updated with the new image for the next turn of editing.

This loop of sending a prompt and the previous image data is what enables the powerful, contextual, multi-turn image editing experience.

Resources

Top comments (0)