
Prompt Generation and Optimization with Ollama in Python

2024-12-12 | Anonymous

1. Basic Environment Setup


import requests
import json
from typing import List, Dict, Optional
from dataclasses import dataclass


@dataclass
class PromptContext:
    """Describes the prompt we want the model to produce."""
    task: str
    domain: str
    requirements: List[str]


class OllamaService:
    """Thin wrapper around a locally running Ollama instance."""

    def __init__(self, base_url: str = "http://localhost:11434"):
        self.base_url = base_url
        # Model names are assumed to be pulled locally, e.g. `ollama pull mistral`
        self.models = {
            'mistral': 'mistral',
            'llama2': 'llama2',
            'neural-chat': 'neural-chat'
        }
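Before generating anything, it helps to confirm that the Ollama server is reachable and that the configured models are actually installed. A minimal sketch, assuming Ollama's /api/tags endpoint (which lists locally pulled models); the helper name is illustrative:

def list_local_models(base_url: str = "http://localhost:11434") -> List[str]:
    """Return the names of models installed in the local Ollama instance."""
    response = requests.get(f"{base_url}/api/tags", timeout=5)
    response.raise_for_status()
    return [model["name"] for model in response.json().get("models", [])]

# Fail early if the default model is missing
available = list_local_models()
if not any(name.startswith("mistral") for name in available):
    raise RuntimeError("Model 'mistral' is not installed; run `ollama pull mistral`.")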

2. Core Functionality

2.1 Prompt Generation Service


class PromptGenerationService:
    """Generates and optimizes prompts via Ollama's /api/generate endpoint."""

    def __init__(self, model_name: str = 'mistral'):
        self.model_name = model_name
        self.api_url = "http://localhost:11434/api/generate"

    async def generate_prompt(self, context: PromptContext) -> str:
        prompt = f"""
        Task: Create a detailed prompt for the following context:
        - Task Type: {context.task}
        - Domain: {context.domain}
        - Requirements: {', '.join(context.requirements)}

        Generate a structured prompt that includes:
        1. Context setting
        2. Specific requirements
        3. Output format
        4. Constraints
        5. Examples (if applicable)
        """

        # Note: requests is a blocking client; see the async variant below
        # if the call must not stall the event loop.
        response = requests.post(
            self.api_url,
            json={
                "model": self.model_name,
                "prompt": prompt,
                "stream": False
            }
        )
        response.raise_for_status()
        return response.json()["response"]

    async def optimize_prompt(self, original_prompt: str) -> str:
        prompt = f"""
        Analyze and optimize the following prompt:
        "{original_prompt}"

        Provide:
        1. Improved version
        2. Explanation of changes
        3. Potential variations
        """

        response = requests.post(
            self.api_url,
            json={
                "model": self.model_name,
                "prompt": prompt,
                "stream": False
            }
        )
        response.raise_for_status()
        return response.json()["response"]
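The methods above are declared async but call the blocking requests client, so every request stalls the event loop. A minimal non-blocking sketch using the third-party httpx library (an assumption; any async HTTP client works), with a hypothetical _generate helper that both public methods could delegate to:

import httpx

class AsyncPromptGenerationService(PromptGenerationService):
    """Same behaviour, but the HTTP call no longer blocks the event loop."""

    async def _generate(self, prompt: str) -> str:
        async with httpx.AsyncClient(timeout=120.0) as client:
            response = await client.post(
                self.api_url,
                json={"model": self.model_name, "prompt": prompt, "stream": False}
            )
            response.raise_for_status()
            return response.json()["response"]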

2.2 Prompt Template Management


class PromptTemplates:
    """Reusable prompt templates for common development tasks."""

    @staticmethod
    def get_code_review_template(code: str) -> str:
        return f"""
        Analyze the following code:
        {code}

        Provide:
        1. Code quality assessment
        2. Potential improvements
        3. Security concerns
        4. Performance optimization
        """

    @staticmethod
    def get_documentation_template(component: str) -> str:
        return f"""
        Generate documentation for:
        {component}

        Include:
        1. Overview
        2. API reference
        3. Usage examples
        4. Best practices
        """

    @staticmethod
    def get_refactoring_template(code: str) -> str:
        return f"""
        Suggest refactoring for:
        {code}

        Consider:
        1. Design patterns
        2. Clean code principles
        3. Performance impact
        4. Maintainability
        """

3. Usage Example


async def main():
    # Initialize the service
    prompt_service = PromptGenerationService(model_name='mistral')

    # Example: prompt for code generation
    code_context = PromptContext(
        task='code_generation',
        domain='web_development',
        requirements=[
            'React component',
            'TypeScript',
            'Material UI',
            'Form handling'
        ]
    )

    code_prompt = await prompt_service.generate_prompt(code_context)
    print("Code generation prompt:", code_prompt)

    # Example: prompt for documentation generation
    doc_context = PromptContext(
        task='documentation',
        domain='API_reference',
        requirements=[
            'OpenAPI format',
            'Examples included',
            'Error handling',
            'Authentication details'
        ]
    )

    doc_prompt = await prompt_service.generate_prompt(doc_context)
    print("Documentation prompt:", doc_prompt)

    # Example: optimizing an existing prompt
    original_prompt = "Write a React component"
    optimized_prompt = await prompt_service.optimize_prompt(original_prompt)
    print("Optimized prompt:", optimized_prompt)


if __name__ == "__main__":
    import asyncio
    asyncio.run(main())

4. Utility Class


class PromptUtils:
    @staticmethod
    def format_requirements(requirements: List[str]) -> str:
        """Render a requirements list as bullet points."""
        return "\n".join([f"- {req}" for req in requirements])

    @staticmethod
    def validate_prompt(prompt: str) -> bool:
        # Simple validation: reject empty or whitespace-only prompts
        return len(prompt.strip()) > 0

    @staticmethod
    def enhance_prompt(prompt: str) -> str:
        # Append a generic set of quality requirements to any prompt
        return f"""
        {prompt}

        Additional requirements:
        - Provide clear and detailed explanations
        - Include practical examples
        - Consider edge cases
        - Follow best practices
        """

5. Error Handling


import functools


class PromptGenerationError(Exception):
    """Raised when prompt generation fails."""
    pass


class ModelConnectionError(Exception):
    """Raised when the Ollama service cannot be reached."""
    pass


def handle_api_errors(func):
    """Decorator that maps low-level API failures to the domain exceptions above."""
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        try:
            return await func(*args, **kwargs)
        except requests.exceptions.ConnectionError:
            raise ModelConnectionError("Unable to connect to the Ollama service")
        except Exception as e:
            raise PromptGenerationError(f"Prompt generation error: {e}") from e
    return wrapper
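The decorator is defined but never applied above; one way to wire it up is to decorate the service methods and catch the domain exceptions at the call site. A sketch using only the classes defined in this article:

class SafePromptGenerationService(PromptGenerationService):
    """PromptGenerationService with API errors mapped to domain exceptions."""

    @handle_api_errors
    async def generate_prompt(self, context: PromptContext) -> str:
        return await super().generate_prompt(context)

    @handle_api_errors
    async def optimize_prompt(self, original_prompt: str) -> str:
        return await super().optimize_prompt(original_prompt)


async def safe_main():
    service = SafePromptGenerationService()
    try:
        print(await service.optimize_prompt("Write a React component"))
    except ModelConnectionError:
        print("Ollama is not running; start it with `ollama serve`.")
    except PromptGenerationError as exc:
        print(f"Generation failed: {exc}")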

6. Configuration Management


class Config:
    """Per-model defaults used when calling Ollama."""
    MODELS = {
        'mistral': {
            'name': 'mistral',
            'description': 'Fast, lightweight prompt generation',
            'parameters': {
                'temperature': 0.7,
                'max_tokens': 2000
            }
        },
        'llama2': {
            'name': 'llama2',
            'description': 'Complex, detailed prompt requirements',
            'parameters': {
                'temperature': 0.8,
                'max_tokens': 4000
            }
        },
        'neural-chat': {
            'name': 'neural-chat',
            'description': 'Interactive prompt refinement',
            'parameters': {
                'temperature': 0.9,
                'max_tokens': 3000
            }
        }
    }
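Ollama's generate endpoint accepts an options object for sampling parameters. A sketch of how the Config values could be forwarded, assuming temperature maps directly and max_tokens corresponds to Ollama's num_predict option (verify the exact names against the Ollama API docs):

def build_payload(prompt: str, model_key: str = 'mistral') -> Dict:
    """Translate a Config entry into an /api/generate request payload."""
    model_cfg = Config.MODELS[model_key]
    params = model_cfg['parameters']
    return {
        "model": model_cfg['name'],
        "prompt": prompt,
        "stream": False,
        # Assumed mapping: max_tokens -> num_predict (Ollama's output-token limit)
        "options": {
            "temperature": params['temperature'],
            "num_predict": params['max_tokens']
        }
    }

response = requests.post("http://localhost:11434/api/generate", json=build_payload("Hello"))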

With this Python implementation you can generate structured prompts from a PromptContext, optimize existing prompts, manage reusable templates for code review, documentation, and refactoring, map API failures to domain-specific exceptions, and switch between locally installed Ollama models with per-model parameters.

The code is intended as a basic framework that can be extended and customized to fit specific needs.
