Skip to content

Commit

Permalink
Initial commit
Browse files Browse the repository at this point in the history
  • Loading branch information
mark9064 committed Jun 19, 2019
0 parents commit 879cc2b
Show file tree
Hide file tree
Showing 7 changed files with 946 additions and 0 deletions.
6 changes: 6 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
temp_local/
__pycache__/
.vscode/
build/
dist/
*.egg-info/
674 changes: 674 additions & 0 deletions LICENSE

Large diffs are not rendered by default.

26 changes: 26 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# sfsutils

A KSP SFS savefile parser

## Install

`pip3 install sfsutils`

## Usage

Example:
```python
import sfsutils
data = sfsutils.parse_savefile("saves/persistent.sfs")
# edit data
sfsutils.writeout_savefile(data, destination_file="saves/edited.sfs")
```
All documentation is in the docstrings of each function/class.

Available functions are (see docstrings for more info):
* parse_savefile - Parses an SFS file from stream or file
* writeout_savefile - Writes out the parsed data back into the SFS format

## License

GPLv3
Empty file added requirements.txt
Empty file.
23 changes: 23 additions & 0 deletions setup.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import setuptools

# Read the long description from the README; an explicit encoding avoids a
# UnicodeDecodeError on platforms whose default locale encoding is not UTF-8.
with open("README.md", "r", encoding="utf-8") as fh:
    LONG_DESCRIPTION = fh.read()

setuptools.setup(
    name="sfsutils",
    version="0.0.1",
    author="mark9064",
    description="A KSP SFS savefile parser",
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    url="https://github.com/mark9064/sfsutils",
    packages=setuptools.find_packages(),
    install_requires=[],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
        "Operating System :: OS Independent",
        "Development Status :: 3 - Alpha",
        "Natural Language :: English"
    ],
)
1 change: 1 addition & 0 deletions sfsutils/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from .sfsutils import parse_savefile, writeout_savefile
216 changes: 216 additions & 0 deletions sfsutils/sfsutils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,216 @@
"""Parser for KSP save file"""
import copy
import collections

#FIXME capitalisation / stops

def parse_savefile(sfs, sfs_is_path=True):
    """Parses an SFS file
    Params:
    sfs: str; the path to the SFS file to read, or a string containing data read from an SFS.
    sfs_is_path (optional, default True): bool; whether the 'sfs' param is a path or raw data.
    Raises:
    No specific exceptions.
    Returns:
    OrderedDict containing the data in the SFS.
    Extra information:
    All values are strings as SFS files do not reveal data to be any type.
    The SFS format is particularly bad and this leads to the returned OrderedDict
    containing data that is unusually structured. If the SFS contains multiple keys of any
    kind with the same name (this can be a 'node' header or values in a node), then the data
    contained within these keys will be formatted as the common name of the keys as a key
    in a dict, and the values as a list. This data will always be in the exact order
    that they were in in the SFS. Example:
    --SFS format--
    NODE
    {
    x = 1
    x = 2
    y = 3
    }
    NODE
    {
    value = 1
    }
    OTHER
    {
    z = 4
    }
    --Python structure--
    {
    "NODE": [
    {"x": ["1","2"], "y": "3"},
    {"value": "1"}
    ],
    "OTHER": {
    "z": "4"
    }
    }
    """
    if sfs_is_path:
        # context manager guarantees the file handle is closed, even on error
        with open(sfs, "r") as file_obj:
            data = file_obj.read()
    else:
        data = sfs
    # removes all tabs (indentation carries no meaning in the SFS format)
    data = data.replace("\t", "")
    reading_val = False
    # in_nodes tracks the location of data being parsed (what nodes the parser is inside)
    in_nodes = []
    out_dict = collections.OrderedDict()
    key_read = ""
    value_read = ""
    for index, char in enumerate(data):
        if char == "\n":
            # if the key is empty, continue
            if not key_read:
                continue
            # if the next char is an open bracket, save it as a new node
            # (the length guard prevents an IndexError on a malformed file
            # that ends with an unterminated key)
            if index + 1 < len(data) and data[index + 1] == "{":
                in_nodes.append(key_read)
                write_list = copy.deepcopy(in_nodes)
                write_list.append(collections.OrderedDict())
            # else it is a value in an existing node
            else:
                # discard trailing space from key and leading space from value
                key_read = key_read[:-1]
                value_read = value_read[1:]
                write_list = copy.deepcopy(in_nodes)
                write_list.append(key_read)
                write_list.append(value_read)
            set_value(out_dict, write_list)
            key_read = ""
            value_read = ""
            reading_val = False
        # pop the end of the 'stack' used to track attribute location
        # when the end of a node is found
        elif char == "}":
            in_nodes.pop()
        # ignore opening brackets as these are handled in the newline section
        elif char == "{":
            pass
        # set the data to go to the value section rather than key
        elif char == "=":
            reading_val = True
        elif reading_val:
            value_read += char
        else:
            key_read += char
    return out_dict

def set_value(dict_nested, address_list):
    """Sets a value in a nested dict.
    address_list is the node path to write to, ending with [key, value]
    (or with a fresh OrderedDict when opening a new node).
    WARNING - modifies the dictionary passed as an arg"""
    # references the main dict
    current = dict_nested
    # locate the desired node to write to through iterating through the keys
    # while selecting the last element of any list found, as the data is in order
    for path_item in address_list[:-2]:
        if isinstance(current, list):
            current = current[-1][path_item]
        else:
            current = current[path_item]
    # if current is a list, then take the last entry as that's what will be modified
    if isinstance(current, list):
        current = current[-1]
    # if the node already exists
    if address_list[-2] in current:
        # if it's a list simply append it to the list
        if isinstance(current[address_list[-2]], list):
            current[address_list[-2]].append(address_list[-1])
        # else convert the existing dict to a list
        else:
            existing_value = current[address_list[-2]]
            current[address_list[-2]] = []
            current[address_list[-2]].append(existing_value)
            current[address_list[-2]].append(address_list[-1])
    # if it doesn't exist
    else:
        # guaranteed to be a dict thanks to earlier list check, so insert the key into the dict
        current[address_list[-2]] = address_list[-1]


def writeout_savefile(parsed_data, destination_file=""):
    """Writes out the parsed data back into the SFS format
    Params:
    parsed_data: OrderedDict; the parsed dictionary generated by parse_savefile.
    destination_file (optional): str; the destination file to write the SFS to.
    Raises:
    No specific exceptions
    Returns:
    str containing the generated SFS if a destination file is not specified
    None if a destination file is specified
    Extra information:
    This function will generate a byte perfect copy of the original SFS parsed assuming
    the data is not modified. All abnormalities of the SFS format are addressed and
    represented correctly.
    """
    # start at -1 so the first recursion level serialises top-level nodes with no indent
    indents = -1
    out_str = serialise_data(parsed_data, indents)
    if not destination_file:
        return out_str
    # context manager guarantees the file is flushed and closed
    with open(destination_file, "w") as file_obj:
        file_obj.write(out_str)
    return None

def serialise_data(obj, indents, outer_key=None):
    """Recursively serialises parsed SFS data into SFS-formatted text."""
    # each recursion level is one node deeper, so bump the indent
    indents += 1
    pieces = []
    # normalise to a list so single dicts and lists of dicts take the same path
    entries = obj if isinstance(obj, list) else [obj]
    for entry in entries:
        # a bare string is one of the SFS repeated same-name values;
        # emit it against the key of the enclosing node (outer_key)
        if isinstance(entry, str):
            pieces.append(write_value_to_node(indents, outer_key, entry))
            continue
        # otherwise it's a dict: walk its key/value pairs in order
        for key, value in entry.items():
            if isinstance(value, str):
                # simple "key = value" pair inside the current node
                pieces.append(write_value_to_node(indents, key, value))
            elif isinstance(value, dict):
                # a nested node: delegate to the node writer
                pieces.append(write_new_node(indents, key, value))
            elif isinstance(value, list):
                # a list is either repeated values or repeated nodes
                if all(isinstance(element, str) for element in value):
                    # repeated values: recurse with the indent compensated,
                    # passing the shared key down (could this be optimised TODO)
                    pieces.append(serialise_data(value, indents - 1, outer_key=key))
                else:
                    # repeated nodes: write each subdict out in sequence
                    for node_dict in value:
                        pieces.append(write_new_node(indents, key, node_dict))
    return "".join(pieces)

def write_new_node(indents, sect_name, value):
    """Serialises one node: its header line, braces, and recursive body."""
    pad = "\t" * indents
    # header: node name on its own line, then the opening brace
    header = "{0}{1}\n{0}{{\n".format(pad, sect_name)
    # body: recurse into the node's contents
    body = serialise_data(value, indents, outer_key=sect_name)
    # footer: closing brace at the same depth as the header
    footer = pad + "}\n"
    return header + body + footer

def write_value_to_node(indents, key, value):
    """Renders a single 'key = value' line at the given indent depth."""
    indent = "\t" * indents
    return indent + "{0} = {1}\n".format(key, value)


if __name__ == "__main__":
    # quick manual round-trip check: parse a save, then write it back out
    print("Parsing...")
    SAVE_DATA = parse_savefile("persistent.sfs")
    print("Parse done, writing data out...")
    writeout_savefile(SAVE_DATA, destination_file="test.sfs")
    print("Done")

0 comments on commit 879cc2b

Please sign in to comment.