
API Reference

AutoGenerator

Source code in praisonai/auto.py
class AutoGenerator:
    def __init__(self, topic="Movie Story writing about AI", agent_file="test.yaml", framework="crewai", config_list: Optional[List[Dict]] = None):
        """
        Initialize the AutoGenerator class with the specified topic, agent file, and framework.
        Note: autogen framework is different from this AutoGenerator class.

        Args:
            topic (str, optional): The topic for the generated team structure. Defaults to "Movie Story writing about AI".
            agent_file (str, optional): The name of the YAML file to save the generated team structure. Defaults to "test.yaml".
            framework (str, optional): The framework for the generated team structure. Defaults to "crewai".
            config_list (Optional[List[Dict]], optional): A list containing the configuration details for the OpenAI API. 
                                                          If None, it defaults to using environment variables or hardcoded values.
        Attributes:
            config_list (list): A list containing the configuration details for the OpenAI API.
            topic (str): The specified topic for the generated team structure.
            agent_file (str): The specified name of the YAML file to save the generated team structure.
            framework (str): The specified framework for the generated team structure.
            client (instructor.Client): An instance of the instructor.Client class initialized with the specified OpenAI API configuration.
        """
        self.config_list = config_list or [
            {
                'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-4o"),
                'base_url': os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
                'api_key': os.environ.get("OPENAI_API_KEY")
            }
        ]
        self.topic = topic
        self.agent_file = agent_file
        self.framework = framework or "crewai"
        self.client = instructor.patch(
            OpenAI(
                base_url=self.config_list[0]['base_url'],
                api_key=os.getenv("OPENAI_API_KEY"),
            ),
            mode=instructor.Mode.JSON,
        )

    def generate(self):
        """
        Generates a team structure for the specified topic.

        Args:
            None

        Returns:
            str: The full path of the YAML file containing the generated team structure.

        Raises:
            Exception: If the generation process fails.

        Usage:
            generator = AutoGenerator(framework="crewai", topic="Create a movie script about Cat in Mars")
            path = generator.generate()
            print(path)
        """
        response = self.client.chat.completions.create(
            model=self.config_list[0]['model'],
            response_model=TeamStructure,
            max_retries=10,
            messages=[
                {"role": "system", "content": "You are a helpful assistant designed to output complex team structures."},
                {"role": "user", "content": self.get_user_content()}
            ]
        )
        json_data = json.loads(response.model_dump_json())
        self.convert_and_save(json_data)
        full_path = os.path.abspath(self.agent_file)
        return full_path

    def convert_and_save(self, json_data):
        """Converts the provided JSON data into the desired YAML format and saves it to a file.

        Args:
            json_data (dict): The JSON data representing the team structure.
            topic (str, optional): The topic to be inserted into the YAML. Defaults to "Artificial Intelligence".
            agent_file (str, optional): The name of the YAML file to save. Defaults to "test.yaml".
        """

        yaml_data = {
            "framework": self.framework,
            "topic": self.topic,
            "roles": {},
            "dependencies": []
        }

        for role_id, role_details in json_data['roles'].items():
            yaml_data['roles'][role_id] = {
                "backstory": "" + role_details['backstory'],
                "goal": role_details['goal'],
                "role": role_details['role'],
                "tasks": {},
                # "tools": role_details.get('tools', []),
                "tools": ['']
            }

            for task_id, task_details in role_details['tasks'].items():
                yaml_data['roles'][role_id]['tasks'][task_id] = {
                    "description": "" + task_details['description'],
                    "expected_output": "" + task_details['expected_output']
                }

        # Save to YAML file, maintaining the order
        with open(self.agent_file, 'w') as f:
            yaml.dump(yaml_data, f, allow_unicode=True, sort_keys=False)

    def get_user_content(self):
        """
        Generates a prompt for the OpenAI API to generate a team structure.

        Args:
            None

        Returns:
            str: The prompt for the OpenAI API.

        Usage:
            generator = AutoGenerator(framework="crewai", topic="Create a movie script about Cat in Mars")
            prompt = generator.get_user_content()
            print(prompt)
        """
        user_content = """Generate a team structure for  \"""" + self.topic + """\" task. 
No Input data will be provided to the team.
The team will work in sequence. First role will pass the output to the next role, and so on.
The last role will generate the final output.
Think step by step.
With maximum 3 roles, each with 1 task. Include role goals, backstories, task descriptions, and expected outputs.
List of Available Tools: CodeDocsSearchTool, CSVSearchTool, DirectorySearchTool, DOCXSearchTool, DirectoryReadTool, FileReadTool, TXTSearchTool, JSONSearchTool, MDXSearchTool, PDFSearchTool, RagTool, ScrapeElementFromWebsiteTool, ScrapeWebsiteTool, WebsiteSearchTool, XMLSearchTool, YoutubeChannelSearchTool, YoutubeVideoSearchTool.
Only use Available Tools. Do Not use any other tools. 
Example Below: 
Use below example to understand the structure of the output. 
The final role you create should satisfy the provided task: """ + self.topic + """.
{
"roles": {
"narrative_designer": {
"role": "Narrative Designer",
"goal": "Create AI storylines",
"backstory": "Skilled in narrative development for AI, with a focus on story resonance.",
"tools": ["ScrapeWebsiteTool"],
"tasks": {
"story_concept_development": {
"description": "Craft a unique AI story concept with depth and engagement using concept from this page the content https://www.asthebirdfliesblog.com/posts/how-to-write-book-story-development .",
"expected_output": "Document with narrative arcs, character bios, and settings."
}
}
},
"scriptwriter": {
"role": "Scriptwriter",
"goal": "Write scripts from AI concepts",
"backstory": "Expert in dialogue and script structure, translating concepts into scripts.",
"tasks": {
"scriptwriting_task": {
"description": "Turn narrative concepts into scripts, including dialogue and scenes.",
"expected_output": "Production-ready script with dialogue and scene details."
}
}
}
}
}
        """
        return user_content

__init__(topic='Movie Story writing about AI', agent_file='test.yaml', framework='crewai', config_list=None)

Initialize the AutoGenerator class with the specified topic, agent file, and framework. Note: autogen framework is different from this AutoGenerator class.

Parameters:

    topic (str, optional): The topic for the generated team structure. Defaults to "Movie Story writing about AI".
    agent_file (str, optional): The name of the YAML file to save the generated team structure. Defaults to "test.yaml".
    framework (str, optional): The framework for the generated team structure. Defaults to "crewai".
    config_list (Optional[List[Dict]], optional): A list containing the configuration details for the OpenAI API. If None, it defaults to using environment variables or hardcoded values. Defaults to None.

Attributes:

    config_list (list): A list containing the configuration details for the OpenAI API.
    topic (str): The specified topic for the generated team structure.
    agent_file (str): The specified name of the YAML file to save the generated team structure.
    framework (str): The specified framework for the generated team structure.
    client (instructor.Client): An instance of the instructor.Client class initialized with the specified OpenAI API configuration.
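A minimal construction sketch (not taken from the source above): it assumes the class is importable from praisonai.auto, matching the source location shown on this page, and that an OPENAI_API_KEY is available. The config_list entry mirrors the model/base_url/api_key keys built in __init__; the values are placeholders.

import os
from praisonai.auto import AutoGenerator

# Explicit config_list; if omitted, __init__ falls back to the
# OPENAI_MODEL_NAME / OPENAI_API_BASE / OPENAI_API_KEY environment variables.
config_list = [
    {
        "model": "gpt-4o",
        "base_url": "https://api.openai.com/v1",
        "api_key": os.environ.get("OPENAI_API_KEY"),
    }
]

generator = AutoGenerator(
    topic="Create a movie script about Cat in Mars",
    agent_file="agents.yaml",
    framework="crewai",
    config_list=config_list,
)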

Source code in praisonai/auto.py
def __init__(self, topic="Movie Story writing about AI", agent_file="test.yaml", framework="crewai", config_list: Optional[List[Dict]] = None):
    """
    Initialize the AutoGenerator class with the specified topic, agent file, and framework.
    Note: autogen framework is different from this AutoGenerator class.

    Args:
        topic (str, optional): The topic for the generated team structure. Defaults to "Movie Story writing about AI".
        agent_file (str, optional): The name of the YAML file to save the generated team structure. Defaults to "test.yaml".
        framework (str, optional): The framework for the generated team structure. Defaults to "crewai".
        config_list (Optional[List[Dict]], optional): A list containing the configuration details for the OpenAI API. 
                                                      If None, it defaults to using environment variables or hardcoded values.
    Attributes:
        config_list (list): A list containing the configuration details for the OpenAI API.
        topic (str): The specified topic for the generated team structure.
        agent_file (str): The specified name of the YAML file to save the generated team structure.
        framework (str): The specified framework for the generated team structure.
        client (instructor.Client): An instance of the instructor.Client class initialized with the specified OpenAI API configuration.
    """
    self.config_list = config_list or [
        {
            'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-4o"),
            'base_url': os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
            'api_key': os.environ.get("OPENAI_API_KEY")
        }
    ]
    self.topic = topic
    self.agent_file = agent_file
    self.framework = framework or "crewai"
    self.client = instructor.patch(
        OpenAI(
            base_url=self.config_list[0]['base_url'],
            api_key=os.getenv("OPENAI_API_KEY"),
        ),
        mode=instructor.Mode.JSON,
    )

convert_and_save(json_data)

Converts the provided JSON data into the desired YAML format and saves it to a file.

Parameters:

    json_data (dict): The JSON data representing the team structure.

Note: the topic and the output file name are not passed to this method; they are taken from the instance attributes self.topic and self.agent_file set in __init__.
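For reference, the file written by this method has roughly the shape below. This is an illustrative sketch reconstructed from the conversion code, not captured output; note that tools is currently written as a list containing a single empty string, and dependencies is always an empty list.

framework: crewai
topic: Create a movie script about Cat in Mars
roles:
  narrative_designer:
    backstory: Skilled in narrative development, with a focus on story resonance.
    goal: Create AI storylines
    role: Narrative Designer
    tasks:
      story_concept_development:
        description: Craft a unique story concept with depth and engagement.
        expected_output: Document with narrative arcs, character bios, and settings.
    tools:
    - ''
dependencies: []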
Source code in praisonai/auto.py
def convert_and_save(self, json_data):
    """Converts the provided JSON data into the desired YAML format and saves it to a file.

    Args:
        json_data (dict): The JSON data representing the team structure.
        topic (str, optional): The topic to be inserted into the YAML. Defaults to "Artificial Intelligence".
        agent_file (str, optional): The name of the YAML file to save. Defaults to "test.yaml".
    """

    yaml_data = {
        "framework": self.framework,
        "topic": self.topic,
        "roles": {},
        "dependencies": []
    }

    for role_id, role_details in json_data['roles'].items():
        yaml_data['roles'][role_id] = {
            "backstory": "" + role_details['backstory'],
            "goal": role_details['goal'],
            "role": role_details['role'],
            "tasks": {},
            # "tools": role_details.get('tools', []),
            "tools": ['']
        }

        for task_id, task_details in role_details['tasks'].items():
            yaml_data['roles'][role_id]['tasks'][task_id] = {
                "description": "" + task_details['description'],
                "expected_output": "" + task_details['expected_output']
            }

    # Save to YAML file, maintaining the order
    with open(self.agent_file, 'w') as f:
        yaml.dump(yaml_data, f, allow_unicode=True, sort_keys=False)

generate()

Generates a team structure for the specified topic.

Returns:

    str: The full path of the YAML file containing the generated team structure.

Raises:

    Exception: If the generation process fails.

Usage:

    generator = AutoGenerator(framework="crewai", topic="Create a movie script about Cat in Mars")
    path = generator.generate()
    print(path)
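A hedged end-to-end sketch that chains generate() into the AgentsGenerator class documented below; the import paths follow the source locations shown on this page, and the config_list shape mirrors the one built in __init__.

import os
from praisonai.auto import AutoGenerator
from praisonai.agents_generator import AgentsGenerator

config_list = [
    {
        "model": os.environ.get("OPENAI_MODEL_NAME", "gpt-4o"),
        "base_url": os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
        "api_key": os.environ.get("OPENAI_API_KEY"),
    }
]

# Ask the model for a team structure and write it to agents.yaml.
generator = AutoGenerator(
    topic="Create a movie script about Cat in Mars",
    agent_file="agents.yaml",
    framework="crewai",
    config_list=config_list,
)
agent_file = generator.generate()

# Run the generated team with the same configuration.
agents_generator = AgentsGenerator(agent_file, "crewai", config_list)
result = agents_generator.generate_crew_and_kickoff()
print(result)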

Source code in praisonai/auto.py
def generate(self):
    """
    Generates a team structure for the specified topic.

    Args:
        None

    Returns:
        str: The full path of the YAML file containing the generated team structure.

    Raises:
        Exception: If the generation process fails.

    Usage:
        generator = AutoGenerator(framework="crewai", topic="Create a movie script about Cat in Mars")
        path = generator.generate()
        print(path)
    """
    response = self.client.chat.completions.create(
        model=self.config_list[0]['model'],
        response_model=TeamStructure,
        max_retries=10,
        messages=[
            {"role": "system", "content": "You are a helpful assistant designed to output complex team structures."},
            {"role": "user", "content": self.get_user_content()}
        ]
    )
    json_data = json.loads(response.model_dump_json())
    self.convert_and_save(json_data)
    full_path = os.path.abspath(self.agent_file)
    return full_path

get_user_content()

Generates a prompt for the OpenAI API to generate a team structure.

Returns:

    str: The prompt for the OpenAI API.

Usage:

    generator = AutoGenerator(framework="crewai", topic="Create a movie script about Cat in Mars")
    prompt = generator.get_user_content()
    print(prompt)

Source code in praisonai/auto.py
    def get_user_content(self):
        """
        Generates a prompt for the OpenAI API to generate a team structure.

        Args:
            None

        Returns:
            str: The prompt for the OpenAI API.

        Usage:
            generator = AutoGenerator(framework="crewai", topic="Create a movie script about Cat in Mars")
            prompt = generator.get_user_content()
            print(prompt)
        """
        user_content = """Generate a team structure for  \"""" + self.topic + """\" task. 
No Input data will be provided to the team.
The team will work in sequence. First role will pass the output to the next role, and so on.
The last role will generate the final output.
Think step by step.
With maximum 3 roles, each with 1 task. Include role goals, backstories, task descriptions, and expected outputs.
List of Available Tools: CodeDocsSearchTool, CSVSearchTool, DirectorySearchTool, DOCXSearchTool, DirectoryReadTool, FileReadTool, TXTSearchTool, JSONSearchTool, MDXSearchTool, PDFSearchTool, RagTool, ScrapeElementFromWebsiteTool, ScrapeWebsiteTool, WebsiteSearchTool, XMLSearchTool, YoutubeChannelSearchTool, YoutubeVideoSearchTool.
Only use Available Tools. Do Not use any other tools. 
Example Below: 
Use below example to understand the structure of the output. 
The final role you create should satisfy the provided task: """ + self.topic + """.
{
"roles": {
"narrative_designer": {
"role": "Narrative Designer",
"goal": "Create AI storylines",
"backstory": "Skilled in narrative development for AI, with a focus on story resonance.",
"tools": ["ScrapeWebsiteTool"],
"tasks": {
"story_concept_development": {
"description": "Craft a unique AI story concept with depth and engagement using concept from this page the content https://www.asthebirdfliesblog.com/posts/how-to-write-book-story-development .",
"expected_output": "Document with narrative arcs, character bios, and settings."
}
}
},
"scriptwriter": {
"role": "Scriptwriter",
"goal": "Write scripts from AI concepts",
"backstory": "Expert in dialogue and script structure, translating concepts into scripts.",
"tasks": {
"scriptwriting_task": {
"description": "Turn narrative concepts into scripts, including dialogue and scenes.",
"expected_output": "Production-ready script with dialogue and scene details."
}
}
}
}
}
        """
        return user_content

AgentsGenerator

Source code in praisonai/agents_generator.py
class AgentsGenerator:
    def __init__(self, agent_file, framework, config_list, log_level=None, agent_callback=None, task_callback=None, agent_yaml=None):
        """
        Initialize the AgentsGenerator object.

        Parameters:
            agent_file (str): The path to the agent file.
            framework (str): The framework to be used for the agents.
            config_list (list): A list of configurations for the agents.
            log_level (int, optional): The logging level to use. Defaults to logging.INFO.
            agent_callback (callable, optional): A callback function to be executed after each agent step.
            task_callback (callable, optional): A callback function to be executed after each tool run.
            agent_yaml (str, optional): The content of the YAML file. Defaults to None.

        Attributes:
            agent_file (str): The path to the agent file.
            framework (str): The framework to be used for the agents.
            config_list (list): A list of configurations for the agents.
            log_level (int): The logging level to use.
            agent_callback (callable, optional): A callback function to be executed after each agent step.
            task_callback (callable, optional): A callback function to be executed after each tool run.
        """
        self.agent_file = agent_file
        self.framework = framework
        self.config_list = config_list
        self.log_level = log_level
        self.agent_callback = agent_callback
        self.task_callback = task_callback
        self.agent_yaml = agent_yaml
        self.log_level = log_level or logging.getLogger().getEffectiveLevel()
        if self.log_level == logging.NOTSET:
            self.log_level = os.environ.get('LOGLEVEL', 'INFO').upper()

        logging.basicConfig(level=self.log_level, format='%(asctime)s - %(levelname)s - %(message)s')
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(self.log_level)

    def is_function_or_decorated(self, obj):
        """
        Checks if the given object is a function or has a __call__ method.

        Parameters:
            obj (object): The object to be checked.

        Returns:
            bool: True if the object is a function or has a __call__ method, False otherwise.
        """
        return inspect.isfunction(obj) or hasattr(obj, '__call__')

    def load_tools_from_module(self, module_path):
        """
        Loads tools from a specified module path.

        Parameters:
            module_path (str): The path to the module containing the tools.

        Returns:
            dict: A dictionary containing the names of the tools as keys and the corresponding functions or objects as values.

        Raises:
            FileNotFoundError: If the specified module path does not exist.
        """
        spec = importlib.util.spec_from_file_location("tools_module", module_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return {name: obj for name, obj in inspect.getmembers(module, self.is_function_or_decorated)}

    def load_tools_from_module_class(self, module_path):
        """
        Loads tools from a specified module path containing classes that inherit from BaseTool or are part of langchain_community.tools package.

        Parameters:
            module_path (str): The path to the module containing the tools.

        Returns:
            dict: A dictionary containing the names of the tools as keys and the corresponding initialized instances of the classes as values.

        Raises:
            FileNotFoundError: If the specified module path does not exist.
        """
        spec = importlib.util.spec_from_file_location("tools_module", module_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return {name: obj() for name, obj in inspect.getmembers(module, lambda x: inspect.isclass(x) and (x.__module__.startswith('langchain_community.tools') or issubclass(x, BaseTool)) and x is not BaseTool)}

    def load_tools_from_package(self, package_path):
        """
        Loads tools from a specified package path containing modules with functions or classes.

        Parameters:
            package_path (str): The path to the package containing the tools.

        Returns:
            dict: A dictionary containing the names of the tools as keys and the corresponding initialized instances of the classes as values.

        Raises:
            FileNotFoundError: If the specified package path does not exist.

        This function iterates through all the .py files in the specified package path, excluding those that start with "__". For each file, it imports the corresponding module and checks if it contains any functions or classes that can be loaded as tools. The function then returns a dictionary containing the names of the tools as keys and the corresponding initialized instances of the classes as values.
        """
        tools_dict = {}
        for module_file in os.listdir(package_path):
            if module_file.endswith('.py') and not module_file.startswith('__'):
                module_name = f"{package_path.name}.{module_file[:-3]}"  # Remove .py for import
                module = importlib.import_module(module_name)
                for name, obj in inspect.getmembers(module, self.is_function_or_decorated):
                    tools_dict[name] = obj
        return tools_dict

    def generate_crew_and_kickoff(self):
        """
        Generates a crew of agents and initiates tasks based on the provided configuration.

        Parameters:
            agent_file (str): The path to the agent file.
            framework (str): The framework to be used for the agents.
            config_list (list): A list of configurations for the agents.

        Returns:
            str: The output of the tasks performed by the crew of agents.

        Raises:
            FileNotFoundError: If the specified agent file does not exist.

        This function first loads the agent configuration from the specified file. It then initializes the tools required for the agents based on the specified framework. If the specified framework is "autogen", it loads the LLM configuration dynamically and creates an AssistantAgent for each role in the configuration. It then adds tools to the agents if specified in the configuration. Finally, it prepares tasks for the agents based on the configuration and initiates the tasks using the crew of agents. If the specified framework is not "autogen", it creates a crew of agents and initiates tasks based on the configuration.
        """
        if self.agent_yaml:
            config = yaml.safe_load(self.agent_yaml)
        else:
            if self.agent_file == '/app/api:app' or self.agent_file == 'api:app':
                self.agent_file = 'agents.yaml'
            try:
                with open(self.agent_file, 'r') as f:
                    config = yaml.safe_load(f)
            except FileNotFoundError:
                print(f"File not found: {self.agent_file}")
                return

        topic = config['topic']
        tools_dict = {
            'CodeDocsSearchTool': CodeDocsSearchTool(),
            'CSVSearchTool': CSVSearchTool(),
            'DirectorySearchTool': DirectorySearchTool(),
            'DOCXSearchTool': DOCXSearchTool(),
            'DirectoryReadTool': DirectoryReadTool(),
            'FileReadTool': FileReadTool(),
            # 'GithubSearchTool': GithubSearchTool(),
            # 'SeperDevTool': SeperDevTool(),
            'TXTSearchTool': TXTSearchTool(),
            'JSONSearchTool': JSONSearchTool(),
            'MDXSearchTool': MDXSearchTool(),
            'PDFSearchTool': PDFSearchTool(),
            # 'PGSearchTool': PGSearchTool(),
            'RagTool': RagTool(),
            'ScrapeElementFromWebsiteTool': ScrapeElementFromWebsiteTool(),
            'ScrapeWebsiteTool': ScrapeWebsiteTool(),
            'WebsiteSearchTool': WebsiteSearchTool(),
            'XMLSearchTool': XMLSearchTool(),
            'YoutubeChannelSearchTool': YoutubeChannelSearchTool(),
            'YoutubeVideoSearchTool': YoutubeVideoSearchTool(),
        }
        root_directory = os.getcwd()
        tools_py_path = os.path.join(root_directory, 'tools.py')
        tools_dir_path = Path(root_directory) / 'tools'

        if os.path.isfile(tools_py_path):
            tools_dict.update(self.load_tools_from_module_class(tools_py_path))
            self.logger.debug("tools.py exists in the root directory. Loading tools.py and skipping tools folder.")
        elif tools_dir_path.is_dir():
            tools_dict.update(self.load_tools_from_module_class(tools_dir_path))
            self.logger.debug("tools folder exists in the root directory")

        framework = self.framework or config.get('framework')

        agents = {}
        tasks = []
        if framework == "autogen":
            # Load the LLM configuration dynamically
            # print(self.config_list)
            llm_config = {"config_list": self.config_list}

            if agentops_exists:
                agentops.init(os.environ.get("AGENTOPS_API_KEY"), tags=["autogen"])
            # Assuming the user proxy agent is set up as per your requirements
            user_proxy = autogen.UserProxyAgent(
                name="User",
                human_input_mode="NEVER",
                is_termination_msg=lambda x: (x.get("content") or "").rstrip().rstrip(".").lower().endswith("terminate") or "TERMINATE" in (x.get("content") or ""),
                code_execution_config={
                    "work_dir": "coding",
                    "use_docker": False,
                },
                # additional setup for the user proxy agent
            )

            for role, details in config['roles'].items():
                agent_name = details['role'].format(topic=topic).replace("{topic}", topic)
                agent_goal = details['goal'].format(topic=topic)
                # Creating an AssistantAgent for each role dynamically
                agents[role] = autogen.AssistantAgent(
                    name=agent_name,
                    llm_config=llm_config,
                    system_message=details['backstory'].format(topic=topic)+". Must Reply \"TERMINATE\" in the end when everything is done.",
                )
                for tool in details.get('tools', []):
                    if tool in tools_dict:
                        try:
                            tool_class = globals()[f'autogen_{type(tools_dict[tool]).__name__}']
                            print(f"Found {tool_class.__name__} for {tool}")
                        except KeyError:
                            print(f"Warning: autogen_{type(tools_dict[tool]).__name__} function not found. Skipping this tool.")
                            continue
                        tool_class(agents[role], user_proxy)

                # Preparing tasks for initiate_chats
                for task_name, task_details in details.get('tasks', {}).items():
                    description_filled = task_details['description'].format(topic=topic)
                    expected_output_filled = task_details['expected_output'].format(topic=topic)

                    chat_task = {
                        "recipient": agents[role],
                        "message": description_filled,
                        "summary_method": "last_msg", 
                        # Additional fields like carryover can be added based on dependencies
                    }
                    tasks.append(chat_task)
            response = user_proxy.initiate_chats(tasks)
            result = "### Output ###\n"+response[-1].summary if hasattr(response[-1], 'summary') else ""
            if agentops_exists:
                agentops.end_session("Success")
        else: # framework=crewai
            if agentops_exists:
                agentops.init(os.environ.get("AGENTOPS_API_KEY"), tags=["crewai"])

            tasks_dict = {}

            for role, details in config['roles'].items():
                role_filled = details['role'].format(topic=topic)
                goal_filled = details['goal'].format(topic=topic)
                backstory_filled = details['backstory'].format(topic=topic)

                # Adding tools to the agent if exists
                agent_tools = [tools_dict[tool] for tool in details.get('tools', []) if tool in tools_dict]

                llm_model = details.get('llm')  # Get the llm configuration
                if llm_model:
                    llm = PraisonAIModel(
                        model=llm_model.get("model", os.environ.get("MODEL_NAME", "openai/gpt-4o")),
                    ).get_model()
                else:
                    llm = PraisonAIModel().get_model()

                function_calling_llm_model = details.get('function_calling_llm')
                if function_calling_llm_model:
                    function_calling_llm = PraisonAIModel(
                        model=function_calling_llm_model.get("model", os.environ.get("MODEL_NAME", "openai/gpt-4o")),
                    ).get_model()
                else:
                    function_calling_llm = PraisonAIModel().get_model()

                agent = Agent(
                    role=role_filled, 
                    goal=goal_filled, 
                    backstory=backstory_filled, 
                    tools=agent_tools, 
                    allow_delegation=details.get('allow_delegation', False),
                    llm=llm,
                    function_calling_llm=function_calling_llm,
                    max_iter=details.get('max_iter', 15),
                    max_rpm=details.get('max_rpm'),
                    max_execution_time=details.get('max_execution_time'),
                    verbose=details.get('verbose', True),
                    cache=details.get('cache', True),
                    system_template=details.get('system_template'),
                    prompt_template=details.get('prompt_template'),
                    response_template=details.get('response_template'),
                )

                # Set agent callback if provided
                if self.agent_callback:
                    agent.step_callback = self.agent_callback

                agents[role] = agent

                for task_name, task_details in details.get('tasks', {}).items():
                    description_filled = task_details['description'].format(topic=topic)
                    expected_output_filled = task_details['expected_output'].format(topic=topic)

                    task = Task(
                        description=description_filled,  # Clear, concise statement of what the task entails
                        expected_output=expected_output_filled,  # Detailed description of what task's completion looks like
                        agent=agent,  # The agent responsible for the task
                        tools=task_details.get('tools', []),  # Functions or capabilities the agent can utilize
                        async_execution=task_details.get('async_execution') if task_details.get('async_execution') is not None else False,  # Execute asynchronously if set
                        context=[], ## TODO: 
                        config=task_details.get('config') if task_details.get('config') is not None else {},  # Additional configuration details
                        output_json=task_details.get('output_json') if task_details.get('output_json') is not None else None,  # Outputs a JSON object
                        output_pydantic=task_details.get('output_pydantic') if task_details.get('output_pydantic') is not None else None,  # Outputs a Pydantic model object
                        output_file=task_details.get('output_file') if task_details.get('output_file') is not None else "",  # Saves the task output to a file
                        callback=task_details.get('callback') if task_details.get('callback') is not None else None,  # Python callable executed with the task's output
                        human_input=task_details.get('human_input') if task_details.get('human_input') is not None else False,  # Indicates if the task requires human feedback
                        create_directory=task_details.get('create_directory') if task_details.get('create_directory') is not None else False  # Indicates if a directory needs to be created
                    )

                    # Set tool callback if provided
                    if self.task_callback:
                        task.callback = self.task_callback

                    tasks.append(task)
                    tasks_dict[task_name] = task

            for role, details in config['roles'].items():
                for task_name, task_details in details.get('tasks', {}).items():
                    task = tasks_dict[task_name]
                    context_tasks = [tasks_dict[ctx] for ctx in task_details.get('context', []) if ctx in tasks_dict]
                    task.context = context_tasks

            crew = Crew(
                agents=list(agents.values()),
                tasks=tasks,
                verbose=2
            )

            self.logger.debug("Final Crew Configuration:")
            self.logger.debug(f"Agents: {crew.agents}")
            self.logger.debug(f"Tasks: {crew.tasks}")

            response = crew.kickoff()
            result = f"### Task Output ###\n{response}"
            if agentops_exists:
                agentops.end_session("Success")
        return result

__init__(agent_file, framework, config_list, log_level=None, agent_callback=None, task_callback=None, agent_yaml=None)

Initialize the AgentsGenerator object.

Parameters:

    agent_file (str): The path to the agent file.
    framework (str): The framework to be used for the agents.
    config_list (list): A list of configurations for the agents.
    log_level (int, optional): The logging level to use. Defaults to logging.INFO.
    agent_callback (callable, optional): A callback function to be executed after each agent step. Defaults to None.
    task_callback (callable, optional): A callback function to be executed after each tool run. Defaults to None.
    agent_yaml (str, optional): The content of the YAML file. Defaults to None.

Attributes:

    agent_file (str): The path to the agent file.
    framework (str): The framework to be used for the agents.
    config_list (list): A list of configurations for the agents.
    log_level (int): The logging level to use.
    agent_callback (callable, optional): A callback function to be executed after each agent step.
    task_callback (callable, optional): A callback function to be executed after each tool run.
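A minimal construction sketch. Assumptions: an agents.yaml already exists in the working directory, the class is importable from praisonai.agents_generator (matching the source location shown on this page), and the callback signatures are hypothetical, since only their attachment points appear in the source below.

import os
from praisonai.agents_generator import AgentsGenerator

config_list = [
    {
        "model": os.environ.get("OPENAI_MODEL_NAME", "gpt-4o"),
        "base_url": os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
        "api_key": os.environ.get("OPENAI_API_KEY"),
    }
]

def on_agent_step(step_output):
    # Hypothetical callback: attached as the agent's step_callback.
    print("agent step:", step_output)

def on_task_done(task_output):
    # Hypothetical callback: attached as each task's callback.
    print("task output:", task_output)

agents_generator = AgentsGenerator(
    agent_file="agents.yaml",
    framework="crewai",
    config_list=config_list,
    agent_callback=on_agent_step,
    task_callback=on_task_done,
)
result = agents_generator.generate_crew_and_kickoff()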

Source code in praisonai/agents_generator.py
def __init__(self, agent_file, framework, config_list, log_level=None, agent_callback=None, task_callback=None, agent_yaml=None):
    """
    Initialize the AgentsGenerator object.

    Parameters:
        agent_file (str): The path to the agent file.
        framework (str): The framework to be used for the agents.
        config_list (list): A list of configurations for the agents.
        log_level (int, optional): The logging level to use. Defaults to logging.INFO.
        agent_callback (callable, optional): A callback function to be executed after each agent step.
        task_callback (callable, optional): A callback function to be executed after each tool run.
        agent_yaml (str, optional): The content of the YAML file. Defaults to None.

    Attributes:
        agent_file (str): The path to the agent file.
        framework (str): The framework to be used for the agents.
        config_list (list): A list of configurations for the agents.
        log_level (int): The logging level to use.
        agent_callback (callable, optional): A callback function to be executed after each agent step.
        task_callback (callable, optional): A callback function to be executed after each tool run.
    """
    self.agent_file = agent_file
    self.framework = framework
    self.config_list = config_list
    self.log_level = log_level
    self.agent_callback = agent_callback
    self.task_callback = task_callback
    self.agent_yaml = agent_yaml
    self.log_level = log_level or logging.getLogger().getEffectiveLevel()
    if self.log_level == logging.NOTSET:
        self.log_level = os.environ.get('LOGLEVEL', 'INFO').upper()

    logging.basicConfig(level=self.log_level, format='%(asctime)s - %(levelname)s - %(message)s')
    self.logger = logging.getLogger(__name__)
    self.logger.setLevel(self.log_level)

generate_crew_and_kickoff()

Generates a crew of agents and initiates tasks based on the provided configuration.

Parameters:

    None. The method uses the agent_file, framework, and config_list passed to __init__ (or the inline agent_yaml content, if one was provided).

Returns:

    str: The output of the tasks performed by the crew of agents.

Raises:

    FileNotFoundError: If the specified agent file does not exist.

This function first loads the agent configuration from the specified file. It then initializes the tools required for the agents based on the specified framework. If the specified framework is "autogen", it loads the LLM configuration dynamically and creates an AssistantAgent for each role in the configuration. It then adds tools to the agents if specified in the configuration. Finally, it prepares tasks for the agents based on the configuration and initiates the tasks using the crew of agents. If the specified framework is not "autogen", it creates a crew of agents and initiates tasks based on the configuration.
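A hedged sketch of the configuration this method consumes, supplied inline via agent_yaml so no file lookup is needed. The role and task names are illustrative; {topic} placeholders are filled in by the method, and config_list is only consulted by the autogen branch.

from praisonai.agents_generator import AgentsGenerator

agent_yaml = """
framework: crewai
topic: Artificial Intelligence
roles:
  researcher:
    role: Researcher
    goal: Research {topic}
    backstory: An expert researcher on {topic}.
    tools:
      - WebsiteSearchTool
    tasks:
      research_task:
        description: Collect key facts about {topic}.
        expected_output: A short report on {topic}.
"""

agents_generator = AgentsGenerator(
    agent_file="agents.yaml",  # ignored when agent_yaml is provided
    framework="crewai",
    config_list=[],
    agent_yaml=agent_yaml,
)
result = agents_generator.generate_crew_and_kickoff()
print(result)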

Source code in praisonai/agents_generator.py
def generate_crew_and_kickoff(self):
    """
    Generates a crew of agents and initiates tasks based on the provided configuration.

    Parameters:
        agent_file (str): The path to the agent file.
        framework (str): The framework to be used for the agents.
        config_list (list): A list of configurations for the agents.

    Returns:
        str: The output of the tasks performed by the crew of agents.

    Raises:
        FileNotFoundError: If the specified agent file does not exist.

    This function first loads the agent configuration from the specified file. It then initializes the tools required for the agents based on the specified framework. If the specified framework is "autogen", it loads the LLM configuration dynamically and creates an AssistantAgent for each role in the configuration. It then adds tools to the agents if specified in the configuration. Finally, it prepares tasks for the agents based on the configuration and initiates the tasks using the crew of agents. If the specified framework is not "autogen", it creates a crew of agents and initiates tasks based on the configuration.
    """
    if self.agent_yaml:
        config = yaml.safe_load(self.agent_yaml)
    else:
        if self.agent_file == '/app/api:app' or self.agent_file == 'api:app':
            self.agent_file = 'agents.yaml'
        try:
            with open(self.agent_file, 'r') as f:
                config = yaml.safe_load(f)
        except FileNotFoundError:
            print(f"File not found: {self.agent_file}")
            return

    topic = config['topic']
    tools_dict = {
        'CodeDocsSearchTool': CodeDocsSearchTool(),
        'CSVSearchTool': CSVSearchTool(),
        'DirectorySearchTool': DirectorySearchTool(),
        'DOCXSearchTool': DOCXSearchTool(),
        'DirectoryReadTool': DirectoryReadTool(),
        'FileReadTool': FileReadTool(),
        # 'GithubSearchTool': GithubSearchTool(),
        # 'SeperDevTool': SeperDevTool(),
        'TXTSearchTool': TXTSearchTool(),
        'JSONSearchTool': JSONSearchTool(),
        'MDXSearchTool': MDXSearchTool(),
        'PDFSearchTool': PDFSearchTool(),
        # 'PGSearchTool': PGSearchTool(),
        'RagTool': RagTool(),
        'ScrapeElementFromWebsiteTool': ScrapeElementFromWebsiteTool(),
        'ScrapeWebsiteTool': ScrapeWebsiteTool(),
        'WebsiteSearchTool': WebsiteSearchTool(),
        'XMLSearchTool': XMLSearchTool(),
        'YoutubeChannelSearchTool': YoutubeChannelSearchTool(),
        'YoutubeVideoSearchTool': YoutubeVideoSearchTool(),
    }
    root_directory = os.getcwd()
    tools_py_path = os.path.join(root_directory, 'tools.py')
    tools_dir_path = Path(root_directory) / 'tools'

    if os.path.isfile(tools_py_path):
        tools_dict.update(self.load_tools_from_module_class(tools_py_path))
        self.logger.debug("tools.py exists in the root directory. Loading tools.py and skipping tools folder.")
    elif tools_dir_path.is_dir():
        tools_dict.update(self.load_tools_from_module_class(tools_dir_path))
        self.logger.debug("tools folder exists in the root directory")

    framework = self.framework or config.get('framework')

    agents = {}
    tasks = []
    if framework == "autogen":
        # Load the LLM configuration dynamically
        # print(self.config_list)
        llm_config = {"config_list": self.config_list}

        if agentops_exists:
            agentops.init(os.environ.get("AGENTOPS_API_KEY"), tags=["autogen"])
        # Assuming the user proxy agent is set up as per your requirements
        user_proxy = autogen.UserProxyAgent(
            name="User",
            human_input_mode="NEVER",
            is_termination_msg=lambda x: (x.get("content") or "").rstrip().rstrip(".").lower().endswith("terminate") or "TERMINATE" in (x.get("content") or ""),
            code_execution_config={
                "work_dir": "coding",
                "use_docker": False,
            },
            # additional setup for the user proxy agent
        )

        for role, details in config['roles'].items():
            agent_name = details['role'].format(topic=topic).replace("{topic}", topic)
            agent_goal = details['goal'].format(topic=topic)
            # Creating an AssistantAgent for each role dynamically
            agents[role] = autogen.AssistantAgent(
                name=agent_name,
                llm_config=llm_config,
                system_message=details['backstory'].format(topic=topic)+". Must Reply \"TERMINATE\" in the end when everything is done.",
            )
            for tool in details.get('tools', []):
                if tool in tools_dict:
                    try:
                        tool_class = globals()[f'autogen_{type(tools_dict[tool]).__name__}']
                        print(f"Found {tool_class.__name__} for {tool}")
                    except KeyError:
                        print(f"Warning: autogen_{type(tools_dict[tool]).__name__} function not found. Skipping this tool.")
                        continue
                    tool_class(agents[role], user_proxy)

            # Preparing tasks for initiate_chats
            for task_name, task_details in details.get('tasks', {}).items():
                description_filled = task_details['description'].format(topic=topic)
                expected_output_filled = task_details['expected_output'].format(topic=topic)

                chat_task = {
                    "recipient": agents[role],
                    "message": description_filled,
                    "summary_method": "last_msg", 
                    # Additional fields like carryover can be added based on dependencies
                }
                tasks.append(chat_task)
        response = user_proxy.initiate_chats(tasks)
        result = "### Output ###\n"+response[-1].summary if hasattr(response[-1], 'summary') else ""
        if agentops_exists:
            agentops.end_session("Success")
    else: # framework=crewai
        if agentops_exists:
            agentops.init(os.environ.get("AGENTOPS_API_KEY"), tags=["crewai"])

        tasks_dict = {}

        for role, details in config['roles'].items():
            role_filled = details['role'].format(topic=topic)
            goal_filled = details['goal'].format(topic=topic)
            backstory_filled = details['backstory'].format(topic=topic)

            # Adding tools to the agent if exists
            agent_tools = [tools_dict[tool] for tool in details.get('tools', []) if tool in tools_dict]

            llm_model = details.get('llm')  # Get the llm configuration
            if llm_model:
                llm = PraisonAIModel(
                    model=llm_model.get("model", os.environ.get("MODEL_NAME", "openai/gpt-4o")),
                ).get_model()
            else:
                llm = PraisonAIModel().get_model()

            function_calling_llm_model = details.get('function_calling_llm')
            if function_calling_llm_model:
                function_calling_llm = PraisonAIModel(
                    model=function_calling_llm_model.get("model", os.environ.get("MODEL_NAME", "openai/gpt-4o")),
                ).get_model()
            else:
                function_calling_llm = PraisonAIModel().get_model()

            agent = Agent(
                role=role_filled, 
                goal=goal_filled, 
                backstory=backstory_filled, 
                tools=agent_tools, 
                allow_delegation=details.get('allow_delegation', False),
                llm=llm,
                function_calling_llm=function_calling_llm,
                max_iter=details.get('max_iter', 15),
                max_rpm=details.get('max_rpm'),
                max_execution_time=details.get('max_execution_time'),
                verbose=details.get('verbose', True),
                cache=details.get('cache', True),
                system_template=details.get('system_template'),
                prompt_template=details.get('prompt_template'),
                response_template=details.get('response_template'),
            )

            # Set agent callback if provided
            if self.agent_callback:
                agent.step_callback = self.agent_callback

            agents[role] = agent

            for task_name, task_details in details.get('tasks', {}).items():
                description_filled = task_details['description'].format(topic=topic)
                expected_output_filled = task_details['expected_output'].format(topic=topic)

                task = Task(
                    description=description_filled,  # Clear, concise statement of what the task entails
                    expected_output=expected_output_filled,  # Detailed description of what task's completion looks like
                    agent=agent,  # The agent responsible for the task
                    tools=task_details.get('tools', []),  # Functions or capabilities the agent can utilize
                    async_execution=task_details.get('async_execution') if task_details.get('async_execution') is not None else False,  # Execute asynchronously if set
                    context=[], ## TODO: 
                    config=task_details.get('config') if task_details.get('config') is not None else {},  # Additional configuration details
                    output_json=task_details.get('output_json') if task_details.get('output_json') is not None else None,  # Outputs a JSON object
                    output_pydantic=task_details.get('output_pydantic') if task_details.get('output_pydantic') is not None else None,  # Outputs a Pydantic model object
                    output_file=task_details.get('output_file') if task_details.get('output_file') is not None else "",  # Saves the task output to a file
                    callback=task_details.get('callback') if task_details.get('callback') is not None else None,  # Python callable executed with the task's output
                    human_input=task_details.get('human_input') if task_details.get('human_input') is not None else False,  # Indicates if the task requires human feedback
                    create_directory=task_details.get('create_directory') if task_details.get('create_directory') is not None else False  # Indicates if a directory needs to be created
                )

                # Set tool callback if provided
                if self.task_callback:
                    task.callback = self.task_callback

                tasks.append(task)
                tasks_dict[task_name] = task

        for role, details in config['roles'].items():
            for task_name, task_details in details.get('tasks', {}).items():
                task = tasks_dict[task_name]
                context_tasks = [tasks_dict[ctx] for ctx in task_details.get('context', []) if ctx in tasks_dict]
                task.context = context_tasks

        crew = Crew(
            agents=list(agents.values()),
            tasks=tasks,
            verbose=2
        )

        self.logger.debug("Final Crew Configuration:")
        self.logger.debug(f"Agents: {crew.agents}")
        self.logger.debug(f"Tasks: {crew.tasks}")

        response = crew.kickoff()
        result = f"### Task Output ###\n{response}"
        if agentops_exists:
            agentops.end_session("Success")
    return result

is_function_or_decorated(obj)

Checks if the given object is a function or has a __call__ method.

Parameters:

    obj (object): The object to be checked.

Returns:

    bool: True if the object is a function or has a __call__ method, False otherwise.

Source code in praisonai/agents_generator.py
def is_function_or_decorated(self, obj):
    """
    Checks if the given object is a function or has a __call__ method.

    Parameters:
        obj (object): The object to be checked.

    Returns:
        bool: True if the object is a function or has a __call__ method, False otherwise.
    """
    return inspect.isfunction(obj) or hasattr(obj, '__call__')

load_tools_from_module(module_path)

Loads tools from a specified module path.

Parameters:

    module_path (str): The path to the module containing the tools.

Returns:

    dict: A dictionary containing the names of the tools as keys and the corresponding functions or objects as values.

Raises:

    FileNotFoundError: If the specified module path does not exist.
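A hedged sketch of a tools module this loader would pick up; the file name and function are hypothetical. Every top-level function (or other callable) in the module is returned keyed by its name.

# tools.py (hypothetical)
def word_count(text: str) -> int:
    """Count the words in a piece of text."""
    return len(text.split())

# Given an AgentsGenerator instance:
# tools = agents_generator.load_tools_from_module("tools.py")
# tools -> {"word_count": <function word_count>, ...}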

Source code in praisonai/agents_generator.py
def load_tools_from_module(self, module_path):
    """
    Loads tools from a specified module path.

    Parameters:
        module_path (str): The path to the module containing the tools.

    Returns:
        dict: A dictionary containing the names of the tools as keys and the corresponding functions or objects as values.

    Raises:
        FileNotFoundError: If the specified module path does not exist.
    """
    spec = importlib.util.spec_from_file_location("tools_module", module_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return {name: obj for name, obj in inspect.getmembers(module, self.is_function_or_decorated)}

load_tools_from_module_class(module_path)

Loads tools from a specified module path containing classes that inherit from BaseTool or are part of langchain_community.tools package.

Parameters:

    module_path (str): The path to the module containing the tools.

Returns:

    dict: A dictionary containing the names of the tools as keys and the corresponding initialized instances of the classes as values.

Raises:

    FileNotFoundError: If the specified module path does not exist.
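A hedged sketch of a tools.py this loader would pick up. It assumes the optional langchain_community and duckduckgo-search packages are installed; each matching class is instantiated with no arguments and keyed by its class name.

# tools.py (hypothetical)
# Classes imported from langchain_community.tools, or subclasses of BaseTool,
# are collected and instantiated by load_tools_from_module_class.
from langchain_community.tools import DuckDuckGoSearchRun

# Loaded as {"DuckDuckGoSearchRun": DuckDuckGoSearchRun(), ...}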

Source code in praisonai/agents_generator.py
def load_tools_from_module_class(self, module_path):
    """
    Loads tools from a specified module path containing classes that inherit from BaseTool or are part of langchain_community.tools package.

    Parameters:
        module_path (str): The path to the module containing the tools.

    Returns:
        dict: A dictionary containing the names of the tools as keys and the corresponding initialized instances of the classes as values.

    Raises:
        FileNotFoundError: If the specified module path does not exist.
    """
    spec = importlib.util.spec_from_file_location("tools_module", module_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return {name: obj() for name, obj in inspect.getmembers(module, lambda x: inspect.isclass(x) and (x.__module__.startswith('langchain_community.tools') or issubclass(x, BaseTool)) and x is not BaseTool)}

load_tools_from_package(package_path)

Loads tools from a specified package path containing modules with functions or classes.

Parameters:
    package_path (str): The path to the package containing the tools. Required.

Returns:
    dict: A dictionary containing the names of the tools as keys and the corresponding initialized instances of the classes as values.

Raises:
    FileNotFoundError: If the specified package path does not exist.

This method iterates through all the .py files in the specified package path, excluding those whose names start with "__". For each file, it imports the corresponding module, collects any functions or classes that can be loaded as tools, and returns them in the dictionary described above.

Source code in praisonai/agents_generator.py
def load_tools_from_package(self, package_path):
    """
    Loads tools from a specified package path containing modules with functions or classes.

    Parameters:
        package_path (str): The path to the package containing the tools.

    Returns:
        dict: A dictionary containing the names of the tools as keys and the corresponding initialized instances of the classes as values.

    Raises:
        FileNotFoundError: If the specified package path does not exist.

    This function iterates through all the .py files in the specified package path, excluding those that start with "__". For each file, it imports the corresponding module and checks if it contains any functions or classes that can be loaded as tools. The function then returns a dictionary containing the names of the tools as keys and the corresponding initialized instances of the classes as values.
    """
    tools_dict = {}
    for module_file in os.listdir(package_path):
        if module_file.endswith('.py') and not module_file.startswith('__'):
            module_name = f"{package_path.name}.{module_file[:-3]}"  # Remove .py for import
            module = importlib.import_module(module_name)
            for name, obj in inspect.getmembers(module, self.is_function_or_decorated):
                tools_dict[name] = obj
    return tools_dict

PraisonAI

Source code in praisonai/cli.py
class PraisonAI:
    def __init__(self, agent_file="agents.yaml", framework="", auto=False, init=False, agent_yaml=None):
        """
        Initialize the PraisonAI object with default parameters.

        Parameters:
            agent_file (str): The default agent file to use. Defaults to "agents.yaml".
            framework (str): The default framework to use. Defaults to "crewai".
            auto (bool): A flag indicating whether to enable auto mode. Defaults to False.
            init (bool): A flag indicating whether to enable initialization mode. Defaults to False.

        Attributes:
            config_list (list): A list of configuration dictionaries for the OpenAI API.
            agent_file (str): The agent file to use.
            framework (str): The framework to use.
            auto (bool): A flag indicating whether to enable auto mode.
            init (bool): A flag indicating whether to enable initialization mode.
            agent_yaml (str, optional): The content of the YAML file. Defaults to None.
        """
        self.agent_yaml = agent_yaml
        self.config_list = [
            {
                'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-4o"),
                'base_url': os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
                'api_key': os.environ.get("OPENAI_API_KEY")
            }
        ]
        self.agent_file = agent_file
        self.framework = framework
        self.auto = auto
        self.init = init

    def run(self):
        """
        Run the PraisonAI application.
        """
        self.main()

    def main(self):
        """
        The main function of the PraisonAI object. It parses the command-line arguments,
        initializes the necessary attributes, and then calls the appropriate methods based on the
        provided arguments.

        Args:
            self (PraisonAI): An instance of the PraisonAI class.

        Returns:
            Any: Depending on the arguments provided, the function may return a result from the
            AgentsGenerator, a deployment result from the CloudDeployer, or a message indicating
            the successful creation of a file.
        """
        args = self.parse_args()
        if args is None:
            agents_generator = AgentsGenerator(self.agent_file, self.framework, self.config_list)
            result = agents_generator.generate_crew_and_kickoff()
            return result
        if args.deploy:
            from .deploy import CloudDeployer
            deployer = CloudDeployer()
            deployer.run_commands()
            return

        if getattr(args, 'chat', False):
            self.create_chainlit_chat_interface()
            return

        if getattr(args, 'code', False):
            self.create_code_interface()
            return

        if args.agent_file == 'train':
            package_root = os.path.dirname(os.path.abspath(__file__))
            config_yaml_destination = os.path.join(os.getcwd(), 'config.yaml')

            # Create config.yaml only if it doesn't exist or --model or --dataset is provided
            if not os.path.exists(config_yaml_destination) or args.model or args.dataset:
                config = generate_config(
                    model_name=args.model,
                    hf_model_name=args.hf,
                    ollama_model_name=args.ollama,
                    dataset=[{
                        "name": args.dataset
                    }]
                )
                with open('config.yaml', 'w') as f:
                    yaml.dump(config, f, default_flow_style=False, indent=2) 

            # Overwrite huggingface_save and ollama_save if --hf or --ollama are provided 
            if args.hf:
                config["huggingface_save"] = "true"
            if args.ollama:
                config["ollama_save"] = "true"

            if 'init' in sys.argv:
                from praisonai.setup.setup_conda_env import main as setup_conda_main
                setup_conda_main()
                print("All packages installed")
                return

            try:
                result = subprocess.check_output(['conda', 'env', 'list'])
                if 'praison_env' in result.decode('utf-8'):
                    print("Conda environment 'praison_env' found.")
                else:
                    raise subprocess.CalledProcessError(1, 'grep')
            except subprocess.CalledProcessError:
                print("Conda environment 'praison_env' not found. Setting it up...")
                from praisonai.setup.setup_conda_env import main as setup_conda_main
                setup_conda_main()
                print("All packages installed.")

            train_args = sys.argv[2:]  # Get all arguments after 'train'
            train_script_path = os.path.join(package_root, 'train.py')

            # Set environment variables
            env = os.environ.copy()
            env['PYTHONUNBUFFERED'] = '1'

            stream_subprocess(['conda', 'run', '--no-capture-output', '--name', 'praison_env', 'python', '-u', train_script_path, 'train'], env=env)
            return

        invocation_cmd = "praisonai"
        version_string = f"PraisonAI version {__version__}"

        self.framework = args.framework or self.framework 

        if args.agent_file:
            if args.agent_file.startswith("tests.test"): # Argument used for testing purposes. eg: python -m unittest tests.test 
                print("test")
            else:
                self.agent_file = args.agent_file


        if args.auto or args.init:
            temp_topic = ' '.join(args.auto) if args.auto else ' '.join(args.init)
            self.topic = temp_topic
        elif self.auto or self.init:  # Use the auto attribute if args.auto is not provided
            self.topic = self.auto

        if args.auto or self.auto:
            self.agent_file = "test.yaml"
            generator = AutoGenerator(topic=self.topic , framework=self.framework, agent_file=self.agent_file)
            self.agent_file = generator.generate()
            agents_generator = AgentsGenerator(self.agent_file, self.framework, self.config_list)
            result = agents_generator.generate_crew_and_kickoff()
            return result
        elif args.init or self.init:
            self.agent_file = "agents.yaml"
            generator = AutoGenerator(topic=self.topic , framework=self.framework, agent_file=self.agent_file)
            self.agent_file = generator.generate()
            print("File {} created successfully".format(self.agent_file))
            return "File {} created successfully".format(self.agent_file)

        if args.ui:
            if args.ui == "gradio":
                self.create_gradio_interface()
            elif args.ui == "chainlit":
                self.create_chainlit_interface()
            else:
                # Modify below code to allow default ui
                agents_generator = AgentsGenerator(self.agent_file, self.framework, self.config_list, agent_yaml=self.agent_yaml)
                result = agents_generator.generate_crew_and_kickoff()
                return result
        else:
            agents_generator = AgentsGenerator(self.agent_file, self.framework, self.config_list, agent_yaml=self.agent_yaml)
            result = agents_generator.generate_crew_and_kickoff()
            return result

    def parse_args(self):
        """
        Parse the command-line arguments for the PraisonAI CLI.

        Args:
            self (PraisonAI): An instance of the PraisonAI class.

        Returns:
            argparse.Namespace: An object containing the parsed command-line arguments.

        Raises:
            argparse.ArgumentError: If the arguments provided are invalid.

        Example:
            >>> args = praison_ai.parse_args()
            >>> print(args.agent_file)  # Output: 'agents.yaml'
        """
        parser = argparse.ArgumentParser(prog="praisonai", description="praisonAI command-line interface")
        parser.add_argument("--framework", choices=["crewai", "autogen"], help="Specify the framework")
        parser.add_argument("--ui", choices=["chainlit", "gradio"], help="Specify the UI framework (gradio or chainlit).")
        parser.add_argument("--auto", nargs=argparse.REMAINDER, help="Enable auto mode and pass arguments for it")
        parser.add_argument("--init", nargs=argparse.REMAINDER, help="Enable auto mode and pass arguments for it")
        parser.add_argument("agent_file", nargs="?", help="Specify the agent file")
        parser.add_argument("--deploy", action="store_true", help="Deploy the application") 
        parser.add_argument("--model", type=str, help="Model name")
        parser.add_argument("--hf", type=str, help="Hugging Face model name")
        parser.add_argument("--ollama", type=str, help="Ollama model name")
        parser.add_argument("--dataset", type=str, help="Dataset name for training", default="yahma/alpaca-cleaned")
        args, unknown_args = parser.parse_known_args()

        if unknown_args and unknown_args[0] == '-b' and unknown_args[1] == 'api:app':
            args.agent_file = 'agents.yaml'
        if args.agent_file == 'api:app' or args.agent_file == '/app/api:app':
            args.agent_file = 'agents.yaml'
        if args.agent_file == 'ui':
            args.ui = 'chainlit'
        if args.agent_file == 'chat':
            args.ui = 'chainlit'
            args.chat = True
        if args.agent_file == 'code':
            args.ui = 'chainlit'
            args.code = True

        return args

    def create_chainlit_chat_interface(self):
        """
        Create a Chainlit interface for the chat application.

        This function sets up a Chainlit application that listens for messages.
        When a message is received, it runs PraisonAI with the provided message as the topic.
        The generated agents are then used to perform tasks.

        Returns:
            None: This function does not return any value. It starts the Chainlit application.
        """
        if CHAINLIT_AVAILABLE:
            import praisonai
            os.environ["CHAINLIT_PORT"] = "8084"
            root_path = os.path.join(os.path.expanduser("~"), ".praison")
            os.environ["CHAINLIT_APP_ROOT"] = root_path
            public_folder = os.path.join(os.path.dirname(praisonai.__file__), 'public')
            if not os.path.exists(os.path.join(root_path, "public")):  # Check if the folder exists in the current directory
                if os.path.exists(public_folder):
                    shutil.copytree(public_folder, os.path.join(root_path, "public"), dirs_exist_ok=True)
                    logging.info("Public folder copied successfully!")
                else:
                    logging.info("Public folder not found in the package.")
            else:
                logging.info("Public folder already exists.")
            chat_ui_path = os.path.join(os.path.dirname(praisonai.__file__), 'ui', 'chat.py')
            chainlit_run([chat_ui_path])
        else:
            print("ERROR: Chat UI is not installed. Please install it with 'pip install \"praisonai\[chat]\"' to use the chat UI.")

    def create_code_interface(self):
        """
        Create a Chainlit interface for the code application.

        This function sets up a Chainlit application that listens for messages.
        When a message is received, it runs PraisonAI with the provided message as the topic.
        The generated agents are then used to perform tasks.

        Returns:
            None: This function does not return any value. It starts the Chainlit application.
        """
        if CHAINLIT_AVAILABLE:
            import praisonai
            os.environ["CHAINLIT_PORT"] = "8086"
            root_path = os.path.join(os.path.expanduser("~"), ".praison")
            os.environ["CHAINLIT_APP_ROOT"] = root_path
            public_folder = os.path.join(os.path.dirname(__file__), 'public')
            if not os.path.exists(os.path.join(root_path, "public")):  # Check if the folder exists in the current directory
                if os.path.exists(public_folder):
                    shutil.copytree(public_folder, os.path.join(root_path, "public"), dirs_exist_ok=True)
                    logging.info("Public folder copied successfully!")
                else:
                    logging.info("Public folder not found in the package.")
            else:
                logging.info("Public folder already exists.")
            code_ui_path = os.path.join(os.path.dirname(praisonai.__file__), 'ui', 'code.py')
            chainlit_run([code_ui_path])
        else:
            print("ERROR: Code UI is not installed. Please install it with 'pip install \"praisonai\[code]\"' to use the code UI.")

    def create_gradio_interface(self):
        """
        Create a Gradio interface for generating agents and performing tasks.

        Args:
            self (PraisonAI): An instance of the PraisonAI class.

        Returns:
            None: This method does not return any value. It launches the Gradio interface.

        Raises:
            None: This method does not raise any exceptions.

        Example:
            >>> praison_ai.create_gradio_interface()
        """
        if GRADIO_AVAILABLE:
            def generate_crew_and_kickoff_interface(auto_args, framework):
                """
                Generate a crew and kick off tasks based on the provided auto arguments and framework.

                Args:
                    auto_args (list): Topic.
                    framework (str): The framework to use for generating agents.

                Returns:
                    str: A string representing the result of generating the crew and kicking off tasks.

                Raises:
                    None: This method does not raise any exceptions.

                Example:
                    >>> result = generate_crew_and_kickoff_interface("Create a movie about Cat in Mars", "crewai")
                    >>> print(result)
                """
                self.framework = framework
                self.agent_file = "test.yaml"
                generator = AutoGenerator(topic=auto_args , framework=self.framework)
                self.agent_file = generator.generate()
                agents_generator = AgentsGenerator(self.agent_file, self.framework, self.config_list)
                result = agents_generator.generate_crew_and_kickoff()
                return result

            gr.Interface(
                fn=generate_crew_and_kickoff_interface,
                inputs=[gr.Textbox(lines=2, label="Auto Args"), gr.Dropdown(choices=["crewai", "autogen"], label="Framework")],
                outputs="textbox",
                title="Praison AI Studio",
                description="Create Agents and perform tasks",
                theme="default"
            ).launch()
        else:
            print("ERROR: Gradio is not installed. Please install it with 'pip install gradio' to use this feature.") 

    def create_chainlit_interface(self):
        """
        Create a Chainlit interface for generating agents and performing tasks.

        This function sets up a Chainlit application that listens for messages.
        When a message is received, it runs PraisonAI with the provided message as the topic.
        The generated agents are then used to perform tasks.

        Returns:
            None: This function does not return any value. It starts the Chainlit application.
        """
        if CHAINLIT_AVAILABLE:
            import praisonai
            os.environ["CHAINLIT_PORT"] = "8082"
            # Get the path to the 'public' folder within the package
            public_folder = os.path.join(os.path.dirname(praisonai.__file__), 'public')
            if not os.path.exists("public"):  # Check if the folder exists in the current directory
                if os.path.exists(public_folder):
                    shutil.copytree(public_folder, 'public', dirs_exist_ok=True)
                    logging.info("Public folder copied successfully!")
                else:
                    logging.info("Public folder not found in the package.")
            else:
                logging.info("Public folder already exists.")
            chainlit_ui_path = os.path.join(os.path.dirname(praisonai.__file__), 'chainlit_ui.py')
            chainlit_run([chainlit_ui_path])
        else:
            print("ERROR: Chainlit is not installed. Please install it with 'pip install \"praisonai\[ui]\"' to use the UI.")        

__init__(agent_file='agents.yaml', framework='', auto=False, init=False, agent_yaml=None)

Initialize the PraisonAI object with default parameters.

Parameters:
    agent_file (str): The default agent file to use. Defaults to 'agents.yaml'.
    framework (str): The default framework to use. Defaults to '' (the docstring describes "crewai" as the effective default).
    auto (bool): A flag indicating whether to enable auto mode. Defaults to False.
    init (bool): A flag indicating whether to enable initialization mode. Defaults to False.

Attributes:
    config_list (list): A list of configuration dictionaries for the OpenAI API.
    agent_file (str): The agent file to use.
    framework (str): The framework to use.
    auto (bool): A flag indicating whether to enable auto mode.
    init (bool): A flag indicating whether to enable initialization mode.
    agent_yaml (str): The content of the YAML file. Defaults to None.

Source code in praisonai/cli.py
def __init__(self, agent_file="agents.yaml", framework="", auto=False, init=False, agent_yaml=None):
    """
    Initialize the PraisonAI object with default parameters.

    Parameters:
        agent_file (str): The default agent file to use. Defaults to "agents.yaml".
        framework (str): The default framework to use. Defaults to "crewai".
        auto (bool): A flag indicating whether to enable auto mode. Defaults to False.
        init (bool): A flag indicating whether to enable initialization mode. Defaults to False.

    Attributes:
        config_list (list): A list of configuration dictionaries for the OpenAI API.
        agent_file (str): The agent file to use.
        framework (str): The framework to use.
        auto (bool): A flag indicating whether to enable auto mode.
        init (bool): A flag indicating whether to enable initialization mode.
        agent_yaml (str, optional): The content of the YAML file. Defaults to None.
    """
    self.agent_yaml = agent_yaml
    self.config_list = [
        {
            'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-4o"),
            'base_url': os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
            'api_key': os.environ.get("OPENAI_API_KEY")
        }
    ]
    self.agent_file = agent_file
    self.framework = framework
    self.auto = auto
    self.init = init
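
A minimal programmatic usage sketch (assumes an agents.yaml in the working directory and OPENAI_API_KEY set in the environment):

from praisonai import PraisonAI

praisonai = PraisonAI(agent_file="agents.yaml", framework="crewai")
praisonai.run()   # delegates to main(); with no CLI flags this runs the crew defined in agents.yaml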

create_chainlit_chat_interface()

Create a Chainlit interface for the chat application.

This function sets up a Chainlit application that listens for messages. When a message is received, it runs PraisonAI with the provided message as the topic. The generated agents are then used to perform tasks.

Returns:
    None: This function does not return any value. It starts the Chainlit application.

Source code in praisonai/cli.py
def create_chainlit_chat_interface(self):
    """
    Create a Chainlit interface for the chat application.

    This function sets up a Chainlit application that listens for messages.
    When a message is received, it runs PraisonAI with the provided message as the topic.
    The generated agents are then used to perform tasks.

    Returns:
        None: This function does not return any value. It starts the Chainlit application.
    """
    if CHAINLIT_AVAILABLE:
        import praisonai
        os.environ["CHAINLIT_PORT"] = "8084"
        root_path = os.path.join(os.path.expanduser("~"), ".praison")
        os.environ["CHAINLIT_APP_ROOT"] = root_path
        public_folder = os.path.join(os.path.dirname(praisonai.__file__), 'public')
        if not os.path.exists(os.path.join(root_path, "public")):  # Check if the folder exists in the current directory
            if os.path.exists(public_folder):
                shutil.copytree(public_folder, os.path.join(root_path, "public"), dirs_exist_ok=True)
                logging.info("Public folder copied successfully!")
            else:
                logging.info("Public folder not found in the package.")
        else:
            logging.info("Public folder already exists.")
        chat_ui_path = os.path.join(os.path.dirname(praisonai.__file__), 'ui', 'chat.py')
        chainlit_run([chat_ui_path])
    else:
        print("ERROR: Chat UI is not installed. Please install it with 'pip install \"praisonai\[chat]\"' to use the chat UI.")

create_chainlit_interface()

Create a Chainlit interface for generating agents and performing tasks.

This function sets up a Chainlit application that listens for messages. When a message is received, it runs PraisonAI with the provided message as the topic. The generated agents are then used to perform tasks.

Returns:
    None: This function does not return any value. It starts the Chainlit application.

Source code in praisonai/cli.py
def create_chainlit_interface(self):
    """
    Create a Chainlit interface for generating agents and performing tasks.

    This function sets up a Chainlit application that listens for messages.
    When a message is received, it runs PraisonAI with the provided message as the topic.
    The generated agents are then used to perform tasks.

    Returns:
        None: This function does not return any value. It starts the Chainlit application.
    """
    if CHAINLIT_AVAILABLE:
        import praisonai
        os.environ["CHAINLIT_PORT"] = "8082"
        # Get the path to the 'public' folder within the package
        public_folder = os.path.join(os.path.dirname(praisonai.__file__), 'public')
        if not os.path.exists("public"):  # Check if the folder exists in the current directory
            if os.path.exists(public_folder):
                shutil.copytree(public_folder, 'public', dirs_exist_ok=True)
                logging.info("Public folder copied successfully!")
            else:
                logging.info("Public folder not found in the package.")
        else:
            logging.info("Public folder already exists.")
        chainlit_ui_path = os.path.join(os.path.dirname(praisonai.__file__), 'chainlit_ui.py')
        chainlit_run([chainlit_ui_path])
    else:
        print("ERROR: Chainlit is not installed. Please install it with 'pip install \"praisonai\[ui]\"' to use the UI.")        

create_code_interface()

Create a Chainlit interface for the code application.

This function sets up a Chainlit application that listens for messages. When a message is received, it runs PraisonAI with the provided message as the topic. The generated agents are then used to perform tasks.

Returns:
    None: This function does not return any value. It starts the Chainlit application.

Source code in praisonai/cli.py
def create_code_interface(self):
    """
    Create a Chainlit interface for the code application.

    This function sets up a Chainlit application that listens for messages.
    When a message is received, it runs PraisonAI with the provided message as the topic.
    The generated agents are then used to perform tasks.

    Returns:
        None: This function does not return any value. It starts the Chainlit application.
    """
    if CHAINLIT_AVAILABLE:
        import praisonai
        os.environ["CHAINLIT_PORT"] = "8086"
        root_path = os.path.join(os.path.expanduser("~"), ".praison")
        os.environ["CHAINLIT_APP_ROOT"] = root_path
        public_folder = os.path.join(os.path.dirname(__file__), 'public')
        if not os.path.exists(os.path.join(root_path, "public")):  # Check if the folder exists in the current directory
            if os.path.exists(public_folder):
                shutil.copytree(public_folder, os.path.join(root_path, "public"), dirs_exist_ok=True)
                logging.info("Public folder copied successfully!")
            else:
                logging.info("Public folder not found in the package.")
        else:
            logging.info("Public folder already exists.")
        code_ui_path = os.path.join(os.path.dirname(praisonai.__file__), 'ui', 'code.py')
        chainlit_run([code_ui_path])
    else:
        print("ERROR: Code UI is not installed. Please install it with 'pip install \"praisonai\[code]\"' to use the code UI.")

create_gradio_interface()

Create a Gradio interface for generating agents and performing tasks.

Parameters:
    self (PraisonAI): An instance of the PraisonAI class. Required.

Returns:
    None: This method does not return any value. It launches the Gradio interface.

Raises:
    None: This method does not raise any exceptions.

Example:
    >>> praison_ai.create_gradio_interface()

Source code in praisonai/cli.py
def create_gradio_interface(self):
    """
    Create a Gradio interface for generating agents and performing tasks.

    Args:
        self (PraisonAI): An instance of the PraisonAI class.

    Returns:
        None: This method does not return any value. It launches the Gradio interface.

    Raises:
        None: This method does not raise any exceptions.

    Example:
        >>> praison_ai.create_gradio_interface()
    """
    if GRADIO_AVAILABLE:
        def generate_crew_and_kickoff_interface(auto_args, framework):
            """
            Generate a crew and kick off tasks based on the provided auto arguments and framework.

            Args:
                auto_args (list): Topic.
                framework (str): The framework to use for generating agents.

            Returns:
                str: A string representing the result of generating the crew and kicking off tasks.

            Raises:
                None: This method does not raise any exceptions.

            Example:
                >>> result = generate_crew_and_kickoff_interface("Create a movie about Cat in Mars", "crewai")
                >>> print(result)
            """
            self.framework = framework
            self.agent_file = "test.yaml"
            generator = AutoGenerator(topic=auto_args , framework=self.framework)
            self.agent_file = generator.generate()
            agents_generator = AgentsGenerator(self.agent_file, self.framework, self.config_list)
            result = agents_generator.generate_crew_and_kickoff()
            return result

        gr.Interface(
            fn=generate_crew_and_kickoff_interface,
            inputs=[gr.Textbox(lines=2, label="Auto Args"), gr.Dropdown(choices=["crewai", "autogen"], label="Framework")],
            outputs="textbox",
            title="Praison AI Studio",
            description="Create Agents and perform tasks",
            theme="default"
        ).launch()
    else:
        print("ERROR: Gradio is not installed. Please install it with 'pip install gradio' to use this feature.") 
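
Launch sketch (assumes gradio is installed, so GRADIO_AVAILABLE is True, and OPENAI_API_KEY is set):

from praisonai import PraisonAI

PraisonAI().create_gradio_interface()   # serves the "Praison AI Studio" form via gr.Interface(...).launch()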

main()

The main function of the PraisonAI object. It parses the command-line arguments, initializes the necessary attributes, and then calls the appropriate methods based on the provided arguments.

Parameters:
    self (PraisonAI): An instance of the PraisonAI class. Required.

Returns:
    Any: Depending on the arguments provided, the function may return a result from the AgentsGenerator, a deployment result from the CloudDeployer, or a message indicating the successful creation of a file.

Source code in praisonai/cli.py
def main(self):
    """
    The main function of the PraisonAI object. It parses the command-line arguments,
    initializes the necessary attributes, and then calls the appropriate methods based on the
    provided arguments.

    Args:
        self (PraisonAI): An instance of the PraisonAI class.

    Returns:
        Any: Depending on the arguments provided, the function may return a result from the
        AgentsGenerator, a deployment result from the CloudDeployer, or a message indicating
        the successful creation of a file.
    """
    args = self.parse_args()
    if args is None:
        agents_generator = AgentsGenerator(self.agent_file, self.framework, self.config_list)
        result = agents_generator.generate_crew_and_kickoff()
        return result
    if args.deploy:
        from .deploy import CloudDeployer
        deployer = CloudDeployer()
        deployer.run_commands()
        return

    if getattr(args, 'chat', False):
        self.create_chainlit_chat_interface()
        return

    if getattr(args, 'code', False):
        self.create_code_interface()
        return

    if args.agent_file == 'train':
        package_root = os.path.dirname(os.path.abspath(__file__))
        config_yaml_destination = os.path.join(os.getcwd(), 'config.yaml')

        # Create config.yaml only if it doesn't exist or --model or --dataset is provided
        if not os.path.exists(config_yaml_destination) or args.model or args.dataset:
            config = generate_config(
                model_name=args.model,
                hf_model_name=args.hf,
                ollama_model_name=args.ollama,
                dataset=[{
                    "name": args.dataset
                }]
            )
            with open('config.yaml', 'w') as f:
                yaml.dump(config, f, default_flow_style=False, indent=2) 

        # Overwrite huggingface_save and ollama_save if --hf or --ollama are provided 
        if args.hf:
            config["huggingface_save"] = "true"
        if args.ollama:
            config["ollama_save"] = "true"

        if 'init' in sys.argv:
            from praisonai.setup.setup_conda_env import main as setup_conda_main
            setup_conda_main()
            print("All packages installed")
            return

        try:
            result = subprocess.check_output(['conda', 'env', 'list'])
            if 'praison_env' in result.decode('utf-8'):
                print("Conda environment 'praison_env' found.")
            else:
                raise subprocess.CalledProcessError(1, 'grep')
        except subprocess.CalledProcessError:
            print("Conda environment 'praison_env' not found. Setting it up...")
            from praisonai.setup.setup_conda_env import main as setup_conda_main
            setup_conda_main()
            print("All packages installed.")

        train_args = sys.argv[2:]  # Get all arguments after 'train'
        train_script_path = os.path.join(package_root, 'train.py')

        # Set environment variables
        env = os.environ.copy()
        env['PYTHONUNBUFFERED'] = '1'

        stream_subprocess(['conda', 'run', '--no-capture-output', '--name', 'praison_env', 'python', '-u', train_script_path, 'train'], env=env)
        return

    invocation_cmd = "praisonai"
    version_string = f"PraisonAI version {__version__}"

    self.framework = args.framework or self.framework 

    if args.agent_file:
        if args.agent_file.startswith("tests.test"): # Argument used for testing purposes. eg: python -m unittest tests.test 
            print("test")
        else:
            self.agent_file = args.agent_file


    if args.auto or args.init:
        temp_topic = ' '.join(args.auto) if args.auto else ' '.join(args.init)
        self.topic = temp_topic
    elif self.auto or self.init:  # Use the auto attribute if args.auto is not provided
        self.topic = self.auto

    if args.auto or self.auto:
        self.agent_file = "test.yaml"
        generator = AutoGenerator(topic=self.topic , framework=self.framework, agent_file=self.agent_file)
        self.agent_file = generator.generate()
        agents_generator = AgentsGenerator(self.agent_file, self.framework, self.config_list)
        result = agents_generator.generate_crew_and_kickoff()
        return result
    elif args.init or self.init:
        self.agent_file = "agents.yaml"
        generator = AutoGenerator(topic=self.topic , framework=self.framework, agent_file=self.agent_file)
        self.agent_file = generator.generate()
        print("File {} created successfully".format(self.agent_file))
        return "File {} created successfully".format(self.agent_file)

    if args.ui:
        if args.ui == "gradio":
            self.create_gradio_interface()
        elif args.ui == "chainlit":
            self.create_chainlit_interface()
        else:
            # Modify below code to allow default ui
            agents_generator = AgentsGenerator(self.agent_file, self.framework, self.config_list, agent_yaml=self.agent_yaml)
            result = agents_generator.generate_crew_and_kickoff()
            return result
    else:
        agents_generator = AgentsGenerator(self.agent_file, self.framework, self.config_list, agent_yaml=self.agent_yaml)
        result = agents_generator.generate_crew_and_kickoff()
        return result
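
The --auto branch above can also be reproduced programmatically. A sketch, assuming AutoGenerator and AgentsGenerator are importable from the module paths shown in this reference (the topic text is illustrative, and OPENAI_API_KEY must be set):

import os
from praisonai.auto import AutoGenerator
from praisonai.agents_generator import AgentsGenerator

agent_file = AutoGenerator(
    topic="movie script about a robot chef",
    framework="crewai",
    agent_file="test.yaml",
).generate()                                    # writes test.yaml and returns its path

config_list = [{
    "model": os.environ.get("OPENAI_MODEL_NAME", "gpt-4o"),
    "base_url": os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
    "api_key": os.environ.get("OPENAI_API_KEY"),
}]
result = AgentsGenerator(agent_file, "crewai", config_list).generate_crew_and_kickoff()
print(result)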

parse_args()

Parse the command-line arguments for the PraisonAI CLI.

Parameters:
    self (PraisonAI): An instance of the PraisonAI class. Required.

Returns:
    argparse.Namespace: An object containing the parsed command-line arguments.

Raises:
    argparse.ArgumentError: If the arguments provided are invalid.

Example:
    >>> args = praison_ai.parse_args()
    >>> print(args.agent_file)  # Output: 'agents.yaml'

Source code in praisonai/cli.py
def parse_args(self):
    """
    Parse the command-line arguments for the PraisonAI CLI.

    Args:
        self (PraisonAI): An instance of the PraisonAI class.

    Returns:
        argparse.Namespace: An object containing the parsed command-line arguments.

    Raises:
        argparse.ArgumentError: If the arguments provided are invalid.

    Example:
        >>> args = praison_ai.parse_args()
        >>> print(args.agent_file)  # Output: 'agents.yaml'
    """
    parser = argparse.ArgumentParser(prog="praisonai", description="praisonAI command-line interface")
    parser.add_argument("--framework", choices=["crewai", "autogen"], help="Specify the framework")
    parser.add_argument("--ui", choices=["chainlit", "gradio"], help="Specify the UI framework (gradio or chainlit).")
    parser.add_argument("--auto", nargs=argparse.REMAINDER, help="Enable auto mode and pass arguments for it")
    parser.add_argument("--init", nargs=argparse.REMAINDER, help="Enable auto mode and pass arguments for it")
    parser.add_argument("agent_file", nargs="?", help="Specify the agent file")
    parser.add_argument("--deploy", action="store_true", help="Deploy the application") 
    parser.add_argument("--model", type=str, help="Model name")
    parser.add_argument("--hf", type=str, help="Hugging Face model name")
    parser.add_argument("--ollama", type=str, help="Ollama model name")
    parser.add_argument("--dataset", type=str, help="Dataset name for training", default="yahma/alpaca-cleaned")
    args, unknown_args = parser.parse_known_args()

    if unknown_args and unknown_args[0] == '-b' and unknown_args[1] == 'api:app':
        args.agent_file = 'agents.yaml'
    if args.agent_file == 'api:app' or args.agent_file == '/app/api:app':
        args.agent_file = 'agents.yaml'
    if args.agent_file == 'ui':
        args.ui = 'chainlit'
    if args.agent_file == 'chat':
        args.ui = 'chainlit'
        args.chat = True
    if args.agent_file == 'code':
        args.ui = 'chainlit'
        args.code = True

    return args
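
A sketch of how the agent_file shortcuts are resolved (simulating the argv that the praisonai console script would receive):

import sys
from praisonai import PraisonAI

sys.argv = ["praisonai", "chat"]        # equivalent to running: praisonai chat
args = PraisonAI().parse_args()
print(args.ui, args.chat)               # chainlit True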

run()

Run the PraisonAI application.

Source code in praisonai/cli.py
def run(self):
    """
    Run the PraisonAI application.
    """
    self.main()

stream_subprocess(command, env=None)

Execute a subprocess command and stream the output to the terminal in real-time.

Parameters:
    command (list): A list containing the command and its arguments. Required.
    env (dict, optional): Environment variables for the subprocess. Defaults to None.

Source code in praisonai/cli.py
def stream_subprocess(command, env=None):
    """
    Execute a subprocess command and stream the output to the terminal in real-time.

    Args:
        command (list): A list containing the command and its arguments.
        env (dict, optional): Environment variables for the subprocess.
    """
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
        bufsize=1,
        universal_newlines=True,
        env=env
    )

    for line in iter(process.stdout.readline, ''):
        print(line, end='')
        sys.stdout.flush()  # Ensure output is flushed immediately

    process.stdout.close()
    return_code = process.wait()

    if return_code != 0:
        raise subprocess.CalledProcessError(return_code, command)
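
Usage sketch (assumes stream_subprocess is imported from praisonai.cli, where this source lives, and that a python executable is on PATH):

from praisonai.cli import stream_subprocess

# Stream a short command's output line by line; a non-zero exit raises CalledProcessError.
stream_subprocess(
    ["python", "-u", "-c", "import time\nfor i in range(3):\n    print(i)\n    time.sleep(0.5)"]
)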

CloudDeployer

A class for deploying a cloud-based application.

Methods:
    __init__: Loads environment variables from .env file or system and sets them.

Source code in praisonai/deploy.py
class CloudDeployer:
    """
    A class for deploying a cloud-based application.

    Attributes:
        None

    Methods:
        __init__(self):
            Loads environment variables from .env file or system and sets them.

    """
    def __init__(self):
        """
        Loads environment variables from .env file or system and sets them.

        Parameters:
            self: An instance of the CloudDeployer class.

        Returns:
            None

        Raises:
            None

        """
        # Load environment variables from .env file or system
        load_dotenv()
        self.set_environment_variables()

    def create_dockerfile(self):
        """
        Creates a Dockerfile for the application.

        Parameters:
            self: An instance of the CloudDeployer class.

        Returns:
            None

        Raises:
            None

        This method creates a Dockerfile in the current directory with the specified content.
        The Dockerfile is used to build a Docker image for the application.
        The content of the Dockerfile includes instructions to use the Python 3.11-slim base image,
        set the working directory to /app, copy the current directory contents into the container,
        install the required Python packages (flask, praisonai, gunicorn, and markdown),
        expose port 8080, and run the application using Gunicorn.
        """
        with open("Dockerfile", "w") as file:
            file.write("FROM python:3.11-slim\n")
            file.write("WORKDIR /app\n")
            file.write("COPY . .\n")
            file.write("RUN pip install flask praisonai==0.0.73 gunicorn markdown\n")
            file.write("EXPOSE 8080\n")
            file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')

    def create_api_file(self):
        """
        Creates an API file for the application.

        Parameters:
            self (CloudDeployer): An instance of the CloudDeployer class.

        Returns:
            None

        This method creates an API file named "api.py" in the current directory. The file contains a basic Flask application that uses the PraisonAI library to run a simple agent and returns the output as an HTML page. The application listens on the root path ("/") and uses the Markdown library to format the output.
        """
        with open("api.py", "w") as file:
            file.write("from flask import Flask\n")
            file.write("from praisonai import PraisonAI\n")
            file.write("import markdown\n\n")
            file.write("app = Flask(__name__)\n\n")
            file.write("def basic():\n")
            file.write("    praisonai = PraisonAI(agent_file=\"agents.yaml\")\n")
            file.write("    return praisonai.run()\n\n")
            file.write("@app.route('/')\n")
            file.write("def home():\n")
            file.write("    output = basic()\n")
            file.write("    html_output = markdown.markdown(output)\n")
            file.write("    return f'<html><body>{html_output}</body></html>'\n\n")
            file.write("if __name__ == \"__main__\":\n")
            file.write("    app.run(debug=True)\n")

    def set_environment_variables(self):
        """Sets environment variables with fallback to .env values or defaults."""
        os.environ["OPENAI_MODEL_NAME"] = os.getenv("OPENAI_MODEL_NAME", "gpt-4o")
        os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "Enter your API key")
        os.environ["OPENAI_API_BASE"] = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")

    def run_commands(self):
        """
        Sets environment variables with fallback to .env values or defaults.

        Parameters:
            None

        Returns:
            None

        Raises:
            None

        This method sets environment variables for the application. It uses the `os.environ` dictionary to set the following environment variables:

        - `OPENAI_MODEL_NAME`: The name of the OpenAI model to use. If not specified in the .env file, it defaults to "gpt-4o".
        - `OPENAI_API_KEY`: The API key for accessing the OpenAI API. If not specified in the .env file, it defaults to "Enter your API key".
        - `OPENAI_API_BASE`: The base URL for the OpenAI API. If not specified in the .env file, it defaults to "https://api.openai.com/v1".
        """
        self.create_api_file()
        self.create_dockerfile()
        """Runs a sequence of shell commands for deployment, continues on error."""
        commands = [
            "yes | gcloud auth configure-docker us-central1-docker.pkg.dev",
            "gcloud artifacts repositories create praisonai-repository --repository-format=docker --location=us-central1",
            "docker build --platform linux/amd64 -t gcr.io/$(gcloud config get-value project)/praisonai-app:latest .",
            "docker tag gcr.io/$(gcloud config get-value project)/praisonai-app:latest us-central1-docker.pkg.dev/$(gcloud config get-value project)/praisonai-repository/praisonai-app:latest",
            "docker push us-central1-docker.pkg.dev/$(gcloud config get-value project)/praisonai-repository/praisonai-app:latest",
            "gcloud run deploy praisonai-service --image us-central1-docker.pkg.dev/$(gcloud config get-value project)/praisonai-repository/praisonai-app:latest --platform managed --region us-central1 --allow-unauthenticated --set-env-vars OPENAI_MODEL_NAME=${OPENAI_MODEL_NAME},OPENAI_API_KEY=${OPENAI_API_KEY},OPENAI_API_BASE=${OPENAI_API_BASE}"
        ]

        for cmd in commands:
            try:
                subprocess.run(cmd, shell=True, check=True)
            except subprocess.CalledProcessError as e:
                print(f"ERROR: Command '{e.cmd}' failed with exit status {e.returncode}")
                print(f"Continuing with the next command...")
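
End-to-end deployment sketch (assumes gcloud and docker are installed and authenticated, and that the OPENAI_* variables are available via the environment or a .env file):

from praisonai.deploy import CloudDeployer

deployer = CloudDeployer()   # loads .env and applies the OPENAI_* fallbacks
deployer.run_commands()      # writes api.py and Dockerfile, then builds, pushes, and deploys to Cloud Run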

__init__()

Loads environment variables from .env file or system and sets them.

Parameters:
    self: An instance of the CloudDeployer class. Required.

Returns:
    None

Source code in praisonai/deploy.py
def __init__(self):
    """
    Loads environment variables from .env file or system and sets them.

    Parameters:
        self: An instance of the CloudDeployer class.

    Returns:
        None

    Raises:
        None

    """
    # Load environment variables from .env file or system
    load_dotenv()
    self.set_environment_variables()

create_api_file()

Creates an API file for the application.

Parameters:
    self (CloudDeployer): An instance of the CloudDeployer class. Required.

Returns:
    None

This method creates an API file named "api.py" in the current directory. The file contains a basic Flask application that uses the PraisonAI library to run a simple agent and returns the output as an HTML page. The application listens on the root path ("/") and uses the Markdown library to format the output.

Source code in praisonai/deploy.py
def create_api_file(self):
    """
    Creates an API file for the application.

    Parameters:
        self (CloudDeployer): An instance of the CloudDeployer class.

    Returns:
        None

    This method creates an API file named "api.py" in the current directory. The file contains a basic Flask application that uses the PraisonAI library to run a simple agent and returns the output as an HTML page. The application listens on the root path ("/") and uses the Markdown library to format the output.
    """
    with open("api.py", "w") as file:
        file.write("from flask import Flask\n")
        file.write("from praisonai import PraisonAI\n")
        file.write("import markdown\n\n")
        file.write("app = Flask(__name__)\n\n")
        file.write("def basic():\n")
        file.write("    praisonai = PraisonAI(agent_file=\"agents.yaml\")\n")
        file.write("    return praisonai.run()\n\n")
        file.write("@app.route('/')\n")
        file.write("def home():\n")
        file.write("    output = basic()\n")
        file.write("    html_output = markdown.markdown(output)\n")
        file.write("    return f'<html><body>{html_output}</body></html>'\n\n")
        file.write("if __name__ == \"__main__\":\n")
        file.write("    app.run(debug=True)\n")

create_dockerfile()

Creates a Dockerfile for the application.

Parameters:
    self: An instance of the CloudDeployer class. Required.

Returns:
    None

This method creates a Dockerfile in the current directory. The Dockerfile is used to build a Docker image for the application: it uses the python:3.11-slim base image, sets the working directory to /app, copies the current directory contents into the container, installs the required Python packages (flask, praisonai, gunicorn, and markdown), exposes port 8080, and runs the application using Gunicorn.

Source code in praisonai/deploy.py
def create_dockerfile(self):
    """
    Creates a Dockerfile for the application.

    Parameters:
        self: An instance of the CloudDeployer class.

    Returns:
        None

    Raises:
        None

    This method creates a Dockerfile in the current directory with the specified content.
    The Dockerfile is used to build a Docker image for the application.
    The content of the Dockerfile includes instructions to use the Python 3.11-slim base image,
    set the working directory to /app, copy the current directory contents into the container,
    install the required Python packages (flask, praisonai, gunicorn, and markdown),
    expose port 8080, and run the application using Gunicorn.
    """
    with open("Dockerfile", "w") as file:
        file.write("FROM python:3.11-slim\n")
        file.write("WORKDIR /app\n")
        file.write("COPY . .\n")
        file.write("RUN pip install flask praisonai==0.0.73 gunicorn markdown\n")
        file.write("EXPOSE 8080\n")
        file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')

run_commands()

Writes api.py and a Dockerfile for the application, then runs the gcloud and docker commands needed to build, push, and deploy the container image to Cloud Run, continuing with the next command if one fails.

Returns:
    None

The deployment expects OPENAI_MODEL_NAME, OPENAI_API_KEY, and OPENAI_API_BASE to be set (see set_environment_variables(), which applies .env or default fallbacks); they are passed to the Cloud Run service via --set-env-vars.

Source code in praisonai/deploy.py
def run_commands(self):
    """
    Sets environment variables with fallback to .env values or defaults.

    Parameters:
        None

    Returns:
        None

    Raises:
        None

    This method sets environment variables for the application. It uses the `os.environ` dictionary to set the following environment variables:

    - `OPENAI_MODEL_NAME`: The name of the OpenAI model to use. If not specified in the .env file, it defaults to "gpt-4o".
    - `OPENAI_API_KEY`: The API key for accessing the OpenAI API. If not specified in the .env file, it defaults to "Enter your API key".
    - `OPENAI_API_BASE`: The base URL for the OpenAI API. If not specified in the .env file, it defaults to "https://api.openai.com/v1".
    """
    self.create_api_file()
    self.create_dockerfile()
    """Runs a sequence of shell commands for deployment, continues on error."""
    commands = [
        "yes | gcloud auth configure-docker us-central1-docker.pkg.dev",
        "gcloud artifacts repositories create praisonai-repository --repository-format=docker --location=us-central1",
        "docker build --platform linux/amd64 -t gcr.io/$(gcloud config get-value project)/praisonai-app:latest .",
        "docker tag gcr.io/$(gcloud config get-value project)/praisonai-app:latest us-central1-docker.pkg.dev/$(gcloud config get-value project)/praisonai-repository/praisonai-app:latest",
        "docker push us-central1-docker.pkg.dev/$(gcloud config get-value project)/praisonai-repository/praisonai-app:latest",
        "gcloud run deploy praisonai-service --image us-central1-docker.pkg.dev/$(gcloud config get-value project)/praisonai-repository/praisonai-app:latest --platform managed --region us-central1 --allow-unauthenticated --set-env-vars OPENAI_MODEL_NAME=${OPENAI_MODEL_NAME},OPENAI_API_KEY=${OPENAI_API_KEY},OPENAI_API_BASE=${OPENAI_API_BASE}"
    ]

    for cmd in commands:
        try:
            subprocess.run(cmd, shell=True, check=True)
        except subprocess.CalledProcessError as e:
            print(f"ERROR: Command '{e.cmd}' failed with exit status {e.returncode}")
            print(f"Continuing with the next command...")

set_environment_variables()

Sets environment variables with fallback to .env values or defaults.

Source code in praisonai/deploy.py
def set_environment_variables(self):
    """Sets environment variables with fallback to .env values or defaults."""
    os.environ["OPENAI_MODEL_NAME"] = os.getenv("OPENAI_MODEL_NAME", "gpt-4o")
    os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "Enter your API key")
    os.environ["OPENAI_API_BASE"] = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")
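
A small sketch of the fallback behaviour (assumes no .env file overrides these variables):

import os
from praisonai.deploy import CloudDeployer

os.environ.pop("OPENAI_MODEL_NAME", None)   # simulate an unset variable
CloudDeployer()                             # __init__ calls set_environment_variables()
print(os.environ["OPENAI_MODEL_NAME"])      # gpt-4o (the documented default)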