Name your app (e.g., Bee) and select a workspace to develop your app in
Configure bot permissions
Navigate to OAuth & Permissions in the sidebar
Under “Bot Token Scopes”, add the chat:write scope
Click “Install to [Workspace]” and authorize the app
Gather credentials
Copy the “Bot User OAuth Token” and add it to your .env file as SLACK_BOT_TOKEN=xoxb-your-token
Get your Slack Team ID from your workspace URL (https://app.slack.com/client/TXXXXXXX/...)
Tip: Visit https://<your-workspace>.slack.com; after the redirect, your URL will change to https://app.slack.com/client/TXXXXXXX/CXXXXXXX — the Team ID is the segment starting with TXXXXXXX
Add the Team ID to your .env file as SLACK_TEAM_ID=TXXXXXXX
Create a channel
Create a public channel named bee-playground in your Slack workspace
Invite your bot to the channel by typing /invite @Bee in the channel
The framework doesn’t have any specialized tools for using the Slack API. However, it supports tools exposed via the Model Context Protocol (MCP) and performs automatic tool discovery. We will use that to give our agent the capability to post Slack messages. Now, copy and paste the following code into the slack_agent.py module, then follow along with the comments for an explanation.
Python
Copy
Ask AI
"""Agent that posts the current temperature in Prague to a Slack channel.

Slack access is provided by the reference Slack MCP server (spawned via npx);
weather data comes from the framework's OpenMeteo tool.
"""

import asyncio
import os
import sys
import traceback
from typing import Any

from dotenv import load_dotenv
from mcp import StdioServerParameters
from mcp.client.stdio import stdio_client

from beeai_framework.agents.tool_calling import ToolCallingAgent
from beeai_framework.backend import ChatModel, ChatModelParameters
from beeai_framework.emitter import EventMeta
from beeai_framework.errors import FrameworkError
from beeai_framework.memory import UnconstrainedMemory
from beeai_framework.tools.mcp import MCPClient, MCPTool
from beeai_framework.tools.weather import OpenMeteoTool

# Load SLACK_BOT_TOKEN / SLACK_TEAM_ID from the .env file into the process
# environment before they are read below.
load_dotenv()

# Stdio transport parameters for the reference Slack MCP server.
# Raises KeyError early (at import time) if either Slack variable is missing.
server_params = StdioServerParameters(
    command="npx",
    args=["-y", "@modelcontextprotocol/server-slack"],
    env={
        "SLACK_BOT_TOKEN": os.environ["SLACK_BOT_TOKEN"],
        "SLACK_TEAM_ID": os.environ["SLACK_TEAM_ID"],
        # npx needs PATH to locate the node binary; pass it through explicitly.
        "PATH": os.getenv("PATH", default=""),
    },
)


async def slack_tool(client: MCPClient) -> MCPTool:
    """Discover the tools exposed by the Slack MCP server and return the
    message-posting one.

    Args:
        client: An MCP client connected (or connectable) to the Slack server.

    Returns:
        The ``slack_post_message`` tool.

    Raises:
        StopIteration: If the server does not expose ``slack_post_message``.
    """
    slack_tools = await MCPTool.from_client(client)
    # Keep only the message-posting tool; the server exposes others we don't need.
    return next(tool for tool in slack_tools if tool.name == "slack_post_message")


async def create_agent() -> ToolCallingAgent:
    """Create and configure the agent with tools and LLM."""
    # Other models to try:
    # "llama3.1"
    # "deepseek-r1"
    # ensure the model is pulled before running
    llm = ChatModel.from_name(
        "ollama:llama3.1",
        ChatModelParameters(temperature=0),
    )

    # Configure tools: Slack posting (via MCP) plus current-weather lookup.
    slack = await slack_tool(stdio_client(server_params))
    weather = OpenMeteoTool()

    # Create agent with memory and tools, and a custom system-prompt
    # instruction that nudges smaller models to actually call the Slack tool.
    agent = ToolCallingAgent(
        llm=llm,
        tools=[slack, weather],
        memory=UnconstrainedMemory(),
        templates={
            "system": lambda template: template.update(
                defaults={
                    "instructions": """IMPORTANT: When the user mentions Slack, you must interact with the Slack tool before sending the final answer.""",
                }
            )
        },
    )

    return agent


def print_events(data: Any, event: EventMeta) -> None:
    """Print the interesting agent lifecycle events as they are emitted."""
    if event.name in ["start", "retry", "update", "success", "error"]:
        print(f"\n** Event ({event.name}): {event.path} **\n{data}")


async def main() -> None:
    """Main application loop: build the agent, run the prompt, print the answer."""
    agent = await create_agent()

    # Run the agent. The channel name must match the channel created earlier
    # in this tutorial (#bee-playground) and the bot must be invited to it.
    response = await agent.run(
        "Post the current temperature in Prague to the '#bee-playground' Slack channel.",
        max_retries_per_step=3,
        total_max_retries=10,
        max_iterations=20,
    ).on("*", print_events)

    print("Agent 🤖 : ", response.last_message.text)


if __name__ == "__main__":
    try:
        asyncio.run(main())
    except FrameworkError as e:
        # FrameworkError carries a structured explanation; surface it via exit.
        traceback.print_exc()
        sys.exit(e.explain())
Use the OpenMeteo tool to get the current temperature
Use the slack_post_message tool to post to the #bee-playground Slack channel
As you might have noticed, we made some restrictions to make the agent work with smaller models so that it can be executed locally. With larger LLMs, we could further simplify the code, use more tools, and create simpler prompts.
This tutorial can be easily generalized to any MCP server with tools capability. Just plug it into Bee and execute.