 
 import loguru
 import openai
-import tiktoken
 from langfuse.model import InitialGeneration, Usage
 from openai import OpenAI
-from tenacity import *
 
 from pentestgpt.utils.llm_api import LLMAPI
 
@@ -61,12 +59,12 @@ def __init__(self, config_class, use_langfuse_logging=False):
             from langfuse import Langfuse
 
             self.langfuse = Langfuse()
-
+
         self.model = config_class.model
         self.log_dir = config_class.log_dir
         self.history_length = 5  # maintain 5 messages in the history. (5 chat memory)
         self.conversation_dict: Dict[str, Conversation] = {}
-        self.error_waiting_time = 3  # wait for 3 seconds
+        self.error_wait_time = config_class.error_wait_time
 
         logger.add(sink=os.path.join(self.log_dir, "chatgpt.log"), level="WARNING")
 
@@ -77,7 +75,7 @@ def _chat_completion(
         # use model if provided, otherwise use self.model; if self.model is None, use gpt-4-1106-preview
         if model is None:
             if self.model is None:
-                model = "gpt-4-1106-preview"
+                model = "gpt-4o-2024-05-13"
             else:
                 model = self.model
         try:
@@ -102,7 +100,7 @@ def _chat_completion(
         except openai._exceptions.RateLimitError as e:  # give one more try
             logger.warning("Rate limit reached. Waiting for 5 seconds")
             logger.error("Rate Limit Error: ", e)
-            time.sleep(5)
+            time.sleep(self.error_wait_time)
             response = openai.ChatCompletion.create(
                 model=model,
                 messages=history,
@@ -129,7 +127,7 @@ def _chat_completion(
         if isinstance(response, tuple):
             logger.warning("Response is not valid. Waiting for 5 seconds")
             try:
-                time.sleep(5)
+                time.sleep(self.error_wait_time)
                 response = openai.ChatCompletion.create(
                     model=model,
                     messages=history,
@@ -165,12 +163,19 @@ def _chat_completion(
 
 
 if __name__ == "__main__":
-    from module_import import GPT4ConfigClass
+    from module_import import GPT4O
 
-    config_class = GPT4ConfigClass()
-    config_class.log_dir = "logs"
-    chatgpt = ChatGPTAPI(config_class, use_langfuse_logging=True)
+    local_config_class = GPT4O()
+    local_config_class.log_dir = "logs"
+    chatgpt = ChatGPTAPI(local_config_class, use_langfuse_logging=True)
     # test is below
+    # 0. A single test initialized with an image
+    result, conversation_id = chatgpt.send_new_message(
+        "What's in the image?",
+        image_url="https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
+    )
+    print("Answer 1")
+    print(result)
     # 1. create a new conversation
     result, conversation_id = chatgpt.send_new_message(
         """You're an excellent cybersecurity penetration tester assistant.
@@ -203,3 +208,12 @@ def _chat_completion(
     )
     print("Answer 2")
     print(result)
+
+    # 3. send an image-related conversation
+    result = chatgpt.send_message(
+        "What's in the image?",
+        conversation_id,
+        image_url="https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
+    )
+    print("Answer 3")
+    print(result)
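
Note: with this change, ChatGPTAPI reads its retry delay from config_class.error_wait_time instead of a hard-coded 3 or 5 seconds, so any config object passed to it must now define that attribute alongside model and log_dir. A minimal sketch of such a config class, assuming a simple dataclass shape; the class name GPT4O and the attribute names come from the diff above, but the defaults here are illustrative assumptions, not the project's actual values:

from dataclasses import dataclass

@dataclass
class GPT4O:
    # Illustrative sketch only; the real GPT4O config class lives in pentestgpt's config module.
    model: str = "gpt-4o-2024-05-13"   # default model used when _chat_completion is called without one
    log_dir: str = "logs"              # directory where chatgpt.log is written
    error_wait_time: float = 5.0       # seconds to sleep before retrying after a rate limit or invalid response

With such an object, ChatGPTAPI(GPT4O(), use_langfuse_logging=True) would behave as in the __main__ test above.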