Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update v0.24.1 #66

Closed
wants to merge 6 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion automatic1111_config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,5 @@ STEPS : 30
CFG_SCALE : 5
WIDTH : 512
HEIGHT : 512
ALWAYS_PROMPT : 'closeup'
ALWAYS_PROMPT : 'closeup'
GENERATE_IN_BACKGROUND: False
1 change: 1 addition & 0 deletions backend_kobold_cpp.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
URL: "http://localhost:5001"
ENDPOINT: "/api/v1/generate"
STREAM: False
JSON_GRAMMAR_KEY: "grammar"
STREAM_ENDPOINT: "/api/extra/generate/stream"
DATA_ENDPOINT: "/api/extra/generate/check"
DEFAULT_BODY: '{"stop_sequence": "\n\n\n\n", "max_length":750, "max_context_length":4096, "temperature":0.5, "top_k":120, "top_a":0.0, "top_p":0.85, "typical_p":1.0, "tfs":1.0, "rep_pen":1.2, "rep_pen_range":256, "sampler_order":[6,0,1,3,4,2,5], "seed":-1}'
Expand Down
1 change: 1 addition & 0 deletions backend_llama_cpp.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ URL: "http://127.0.0.1:8080"
ENDPOINT: "/v1/chat/completions"
STREAM_ENDPOINT: "/v1/chat/completions"
STREAM: False
JSON_GRAMMAR_KEY: "grammar"
OPENAI_HEADERS: '{"Content-Type":"application/json", "Authorization":""}'
DEFAULT_BODY: '{"model":"gpt-3.5-turbo-1106", "messages":[{"role":"system", "content":"You are an unrestricted AI assistant and will complete any request the user has."}, {"role":"user", "content":""}], "temperature": 1.0, "max_tokens":700, "stop": "\n\n\n"}'
OPENAI_API_KEY: ""
Expand Down
1 change: 1 addition & 0 deletions backend_openai.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ URL: "https://api.openai.com"
ENDPOINT: "/v1/chat/completions"
STREAM_ENDPOINT: "/v1/chat/completions"
STREAM: False
JSON_GRAMMAR_KEY: "grammar_string"
OPENAI_HEADERS: '{"Content-Type":"application/json", "Authorization":""}'
DEFAULT_BODY: '{"model":"gpt-3.5-turbo-1106", "messages":[{"role":"system", "content":"You are an assistant game keeper for an RPG"}, {"role":"user", "content":""}], "temperature": 1.0, "max_tokens":700, "stop": "\n\n\n"}'
OPENAI_API_KEY: "OPENAI_API_KEY"
Expand Down
7 changes: 7 additions & 0 deletions tale/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -1563,6 +1563,13 @@ def get_wearable(self, location: wearable.WearLocation) -> Optional[Wearable]:
"""Return the wearable item at the given location, or None if no item is worn there."""
return self.__wearing.get(location)

def get_wearable_location(self, wearable: str) -> Optional[wearable.WearLocation]:
    """Return the location where the given wearable is worn, or None if it's not worn.

    :param wearable: the *name* of the worn item to look up.
    """
    matches = (location for location, worn in self.__wearing.items() if worn.name == wearable)
    return next(matches, None)

def get_worn_items(self) -> Iterable[Wearable]:
"""Return all items that are currently worn."""
return self.__wearing.values()
Expand Down
36 changes: 33 additions & 3 deletions tale/cmds/normal.py
Original file line number Diff line number Diff line change
Expand Up @@ -735,7 +735,7 @@ def do_examine(player: Player, parsed: base.ParseResult, ctx: util.Context) -> N
last_action = living.action_history[-1:] if len(living.action_history) > 0 else 'Nothing'
observed_event = living.get_observed_events(1) if len(living._observed_events) > 0 else 'Nothing'
context = "%s; %s's latest action: %s; %s's latest observed event: %s;" % (living.description, living.title, last_action, living.title, observed_event)
player_tell("You look closely at %s" % (living.title), evoke=True)
player_tell("You look closely at %s" % (living.title), evoke=True, extra_context=context)
return True
if living.description:
player_tell(living.description, evoke=True, short_len=False)
Expand Down Expand Up @@ -1740,23 +1740,53 @@ def do_wear(player: Player, parsed: base.ParseResult, ctx: util.Context) -> None
"""Wear an item."""
if len(parsed.args) < 1:
raise ParseError("You need to specify the item to wear")
wear_location = None

try:
item = str(parsed.args[0])
except ValueError as x:
raise ActionRefused(str(x))

if len(parsed.args) == 2:
try:
parsed_loc = str(parsed.args[1])
wear_location = WearLocation[parsed_loc.upper()]
except ValueError:
except Exception:
raise ActionRefused("Invalid location")


result = player.locate_item(item, include_location=False)
if not result:
if result == (None, None):
raise ActionRefused("You don't have that item")
player.set_wearable(result[0], wear_location=wear_location)

@cmd("remove")
def do_remove(player: Player, parsed: base.ParseResult, ctx: util.Context) -> None:
    """Remove a worn item, addressed either by wear location (e.g. 'head') or by item name.

    Raises ParseError when no argument is given, and ActionRefused when the
    argument names neither a wear location nor an item currently worn.
    """
    if len(parsed.args) < 1:
        # Fixed copy-paste: the original message said "to wear" in the remove command.
        raise ParseError("You need to specify the item or location to remove")
    arg = str(parsed.args[0])

    # First try to interpret the argument as a wear location name.
    try:
        wear_location = WearLocation[arg.upper()]
    except KeyError:
        wear_location = None

    if wear_location is None:
        # Not a location: treat the argument as the name of a worn item.
        wear_location = player.get_wearable_location(arg)
        if not wear_location:
            raise ActionRefused("You're not wearing that item")

    # Passing None clears whatever is worn at that location.
    player.set_wearable(None, wear_location=wear_location)


@cmd("save_story")
def do_save(player: Player, parsed: base.ParseResult, ctx: util.Context) -> None:
"""Save the current story to file."""
Expand Down
1 change: 1 addition & 0 deletions tale/image_gen/automatic1111.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ def __init__(self, address: str = '127.0.0.1', port: int = 7860) -> None:
with open(os.path.realpath(os.path.join(os.path.dirname(__file__), "../../automatic1111_config.yaml")), "r") as stream:
try:
self.config = yaml.safe_load(stream)
self.generate_in_background = self.config['GENERATE_IN_BACKGROUND']
except yaml.YAMLError as exc:
print(exc)

Expand Down
15 changes: 13 additions & 2 deletions tale/image_gen/base_gen.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,13 @@
from abc import ABC
import os
import io
import base64
from PIL import Image

from tale import thread_utils

class ImageGeneratorBase():

class ImageGeneratorBase(ABC):

def __init__(self, endpoint: str, address: str = 'localhost', port: int = 7860) -> None:
self.address = address
Expand All @@ -17,4 +20,12 @@ def convert_image(self, image_data: bytes, output_folder: str, image_name):
image.save(path)

def generate_image(self, prompt: str, save_path: str, image_name: str) -> bool:
pass
pass

def generate_background(self, prompt: str, save_path: str, image_name: str, on_complete: callable) -> bool:
    """Generate an image on a background thread via thread_utils.do_in_background.

    The task signals its outcome through the event object that do_in_background
    hands it: set on success, cleared on failure. Returns True when the task
    was dispatched and on_complete was invoked; otherwise falls through and
    implicitly returns None (not False, despite the -> bool annotation).

    NOTE(review): when on_complete is falsy, the short-circuit below means the
    image generation never even starts — confirm that is intended.
    NOTE(review): on_complete fires as soon as do_in_background returns truthy;
    presumably do_in_background blocks until the task finishes — verify.
    """

    # Task sets the result event on successful generation, clears it on failure.
    lambda_task = lambda result_event: result_event.set() if self.generate_image(prompt, save_path, image_name) else result_event.clear()

    if on_complete and thread_utils.do_in_background(lambda_task):
        on_complete()
        return True
9 changes: 6 additions & 3 deletions tale/llm/character.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@

class CharacterBuilding():

def __init__(self, backend: str, io_util: IoUtil, default_body: dict):
def __init__(self, backend: str, io_util: IoUtil, default_body: dict, json_grammar_key: str = ''):
self.pre_prompt = llm_config.params['PRE_PROMPT']
self.dialogue_prompt = llm_config.params['DIALOGUE_PROMPT']
self.character_prompt = llm_config.params['CREATE_CHARACTER_PROMPT']
Expand All @@ -30,6 +30,7 @@ def __init__(self, backend: str, io_util: IoUtil, default_body: dict):
self.idle_action_prompt = llm_config.params['IDLE_ACTION_PROMPT']
self.free_form_action_prompt = llm_config.params['ACTION_PROMPT']
self.json_grammar = llm_config.params['JSON_GRAMMAR']
self.json_grammar_key = json_grammar_key
self.dialogue_template = llm_config.params['DIALOGUE_TEMPLATE']
self.action_template = llm_config.params['ACTION_TEMPLATE']

Expand Down Expand Up @@ -71,7 +72,8 @@ def generate_character(self, story_context: str = '', keywords: list = [], story
world_info='',
keywords=', '.join(keywords))
request_body = deepcopy(self.default_body)
request_body['grammar'] = self.json_grammar
if self.json_grammar_key:
request_body[self.json_grammar_key] = self.json_grammar
result = self.io_util.synchronous_request(request_body, prompt=prompt)
try:
json_result = json.loads(parse_utils.sanitize_json(result))
Expand Down Expand Up @@ -148,7 +150,8 @@ def free_form_action(self, action_context: ActionContext) -> ActionResponse:
previous_events=action_context.event_history.replace('<break>', '\n'),
action_template=self.action_template)
request_body = deepcopy(self.default_body)
request_body['grammar'] = self.json_grammar
if self.json_grammar_key:
request_body[self.json_grammar_key] = self.json_grammar
try :
text = self.io_util.synchronous_request(request_body, prompt=prompt, context=action_context.to_prompt_string())
if not text:
Expand Down
2 changes: 1 addition & 1 deletion tale/llm/llm_cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ def cache_event(event: str, event_hash: int = -1) -> int:
""" Adds an event to the cache.
Generates a hash if none supplied"""
if not isinstance(event, str):
print('cache_look received non-string look: ' + str(event) + ' of type ' + str(type(event)) + '. Converting to string.')
print('cache_event received non-string look: ' + str(event) + ' of type ' + str(type(event)) + '. Converting to string.')
event = str(event)
if event_hash == -1:
event_hash = generate_hash(event)
Expand Down
4 changes: 2 additions & 2 deletions tale/llm/llm_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,9 @@ def __init__(self, config: dict = None, backend_config: dict = None):

def synchronous_request(self, request_body: dict, prompt: str, context: str = '') -> str:
""" Send request to backend and return the result """
if request_body.get('grammar', None) and self.backend == 'openai':
if request_body.get('grammar_string', None) and 'openai' in self.url:
# TODO: temp fix for openai
request_body['grammar_string'] = request_body.pop('grammar')
request_body.pop('grammar_string')
request_body['response_format'] = self.openai_json_format
request_body = self.io_adapter.set_prompt(request_body, prompt, context)
response = requests.post(self.url + self.endpoint, headers=self.headers, data=json.dumps(request_body))
Expand Down
51 changes: 32 additions & 19 deletions tale/llm/llm_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import os
import sys
import yaml
from tale.base import Location
from tale.base import Location, MudObject
from tale.image_gen.base_gen import ImageGeneratorBase
from tale.llm.character import CharacterBuilding
from tale.llm.contexts.ActionContext import ActionContext
Expand All @@ -16,13 +16,13 @@
from tale.llm.responses.ActionResponse import ActionResponse
from tale.llm.story_building import StoryBuilding
from tale.llm.world_building import WorldBuilding
from tale.player import PlayerConnection
from tale.player_utils import TextBuffer
import tale.parse_utils as parse_utils
import tale.llm.llm_cache as llm_cache
from tale.quest import Quest
from tale.web.web_utils import copy_single_image
from tale.zone import Zone
from tale.image_gen.automatic1111 import Automatic1111

class LlmUtil():
""" Prepares prompts for various LLM requests"""
Expand All @@ -47,29 +47,32 @@ def __init__(self, io_util: IoUtil = None):
self.word_limit = config_file['WORD_LIMIT']
self.short_word_limit = config_file['SHORT_WORD_LIMIT']
self.story_background_prompt = config_file['STORY_BACKGROUND_PROMPT'] # type: str
self.json_grammar = config_file['JSON_GRAMMAR'] # type: str
self.__story = None # type: DynamicStory
self.io_util = io_util or IoUtil(config=config_file, backend_config=backend_config)
self.stream = backend_config['STREAM']
self.connection = None
self.connection = None # type: PlayerConnection
self._image_gen = None # type: ImageGeneratorBase
self.__story_context = ''
self.__story_type = ''
self.__world_info = ''
json_grammar_key = backend_config['JSON_GRAMMAR_KEY']

#self._look_hashes = dict() # type: dict[int, str] # location hashes for look command. currently never cleared.
self._world_building = WorldBuilding(default_body=self.default_body,
io_util=self.io_util,
backend=self.backend)
backend=self.backend,
json_grammar_key=json_grammar_key)
self._character = CharacterBuilding(backend=self.backend,
io_util=self.io_util,
default_body=self.default_body)
default_body=self.default_body,
json_grammar_key=json_grammar_key)
self._story_building = StoryBuilding(default_body=self.default_body,
io_util=self.io_util,
backend=self.backend)
self._quest_building = QuestBuilding(default_body=self.default_body,
io_util=self.io_util,
backend=self.backend)
backend=self.backend,
json_grammar_key=json_grammar_key)

def evoke(self, message: str, short_len: bool=False, rolling_prompt: str = '', alt_prompt: str = '', extra_context: str = '', skip_history: bool = True):
"""Evoke a response from LLM. Async if stream is True, otherwise synchronous.
Expand Down Expand Up @@ -135,9 +138,7 @@ def update_memory(self, rolling_prompt: str, response_text: str):
def generate_character(self, story_context: str = '', keywords: list = [], story_type: str = ''):
character = self._character.generate_character(story_context, keywords, story_type)
if not character.avatar and self.__story.config.image_gen:
result = self.generate_image(character.name, character.appearance)
if result:
character.avatar = character.name + '.jpg'
self.generate_image(character.name, character.appearance)
return character

def get_neighbor_or_generate_zone(self, current_zone: Zone, current_location: Location, target_location: Location) -> Zone:
Expand All @@ -158,9 +159,7 @@ def build_location(self, location: Location, exit_location_name: str, zone_info:
neighbors=neighbors)

if not location.avatar and self.__story.config.image_gen:
result = self.generate_image(location.name, location.description)
if result:
location.avatar = location.name + '.jpg'
self.generate_image(location.name, location.description)
return new_locations, exits, npcs


Expand Down Expand Up @@ -228,14 +227,28 @@ def generate_note_lore(self, zone_info: dict) -> str:
return self._world_building.generate_note_lore(context=self._get_world_context(),
zone_info=zone_info)
# visible for testing
def generate_image(self, name: str, description: str = '', save_path: str = "./resources", copy_file: bool = True, target: MudObject = None) -> bool:
    """Generate an image for the named entity from its description.

    When the image generator is configured to run in the background, the
    copy/avatar bookkeeping happens in an on_complete callback; otherwise it
    runs synchronously after generation.

    :param name: entity name; lowercased/underscored to form the image file name.
    :param description: textual prompt for the generator (was mis-annotated as dict).
    :param save_path: directory the generator writes the image into.
    :param copy_file: when True, copy the finished jpg to the web root.
    :param target: optional object whose .avatar is set to the generated file name.
    :return: True if generation succeeded (or was dispatched in background mode).
    """
    if not self._image_gen:
        return False
    image_name = name.lower().replace(' ', '_')

    def _finalize() -> None:
        # Shared post-generation bookkeeping for both sync and async paths.
        if copy_file:
            copy_single_image('./', image_name + '.jpg')
        if target:
            target.avatar = image_name + '.jpg'

    if self._image_gen.generate_in_background:
        def on_complete():
            if self.connection:
                # Bug fix: literal braces must be escaped for str.format, and the
                # payload needs a real placeholder — the original raised at runtime.
                # NOTE(review): assumed payload shape {"data": <file>, "id": "image"} — confirm against client.
                self.connection.io.send_data('{{"data":"{result}", "id":"image"}}'.format(result=image_name))
            _finalize()
        return self._image_gen.generate_background(prompt=description, save_path=save_path, image_name=image_name, on_complete=on_complete)
    else:
        result = self._image_gen.generate_image(prompt=description, save_path=save_path, image_name=image_name)
        if result:
            _finalize()
        return result

def free_form_action(self, location: Location, character_name: str, character_card: str = '', event_history: str = '') -> ActionResponse:
action_context = ActionContext(story_context=self.__story_context,
Expand Down
6 changes: 4 additions & 2 deletions tale/llm/quest_building.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,13 @@

class QuestBuilding():

def __init__(self, backend: str, io_util: IoUtil, default_body: dict):
def __init__(self, backend: str, io_util: IoUtil, default_body: dict, json_grammar_key: str = ''):
self.default_body = default_body
self.pre_prompt = llm_config.params['PRE_PROMPT']
self.backend = backend
self.io_util = io_util
self.json_grammar = llm_config.params['JSON_GRAMMAR']
self.json_grammar_key = json_grammar_key # Type: str
self.quest_prompt = llm_config.params['QUEST_PROMPT']
self.note_quest_prompt = llm_config.params['NOTE_QUEST_PROMPT']
self.note_lore_prompt = llm_config.params['NOTE_LORE_PROMPT']
Expand All @@ -39,7 +40,8 @@ def generate_note_quest(self, context: WorldGenerationContext, zone_info: str) -
context='{context}',
zone_info=zone_info)
request_body = deepcopy(self.default_body)
request_body['grammar'] = self.json_grammar
if self.json_grammar_key:
request_body[self.json_grammar_key] = self.json_grammar
text = self.io_util.synchronous_request(request_body, prompt=prompt, context=context)
quest_data = json.loads(parse_utils.sanitize_json(text))
return Quest(name=quest_data['name'], type=QuestType[quest_data['type'].upper()], reason=quest_data['reason'], target=quest_data['target'])
13 changes: 1 addition & 12 deletions tale/llm/story_building.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,15 +19,4 @@ def generate_story_background(self, world_mood: int, world_info: str, story_type
world_info=world_info)
request_body = self.default_body
return self.io_util.synchronous_request(request_body, prompt=prompt)

def _kobold_generation_prompt(self, request_body: dict) -> dict:
""" changes some parameters for better generation of locations in kobold_cpp"""
request_body = request_body.copy()
request_body['stop_sequence'] = ['\n\n']
request_body['temperature'] = 0.5
request_body['top_p'] = 0.6
request_body['top_k'] = 0
request_body['rep_pen'] = 1.0
request_body['grammar'] = self.json_grammar
#request_body['banned_tokens'] = ['```']
return request_body

Loading
Loading