diff --git a/src/simsopt/_core/json.py b/src/simsopt/_core/json.py new file mode 100644 index 000000000..612bfb9b3 --- /dev/null +++ b/src/simsopt/_core/json.py @@ -0,0 +1,690 @@ +# coding: utf-8 +# Copyright (c) HiddenSymmetries Development Team. +# Distributed under the terms of the MIT License + +""" +Provides json emitting and parsing functionality that can handle graphs. +A majority of the code is based on monty.json. +Credits: Materials Virtual Job +""" + +import datetime +import json +import os +import pathlib +import types +from collections import defaultdict +from enum import Enum +from importlib import import_module +from inspect import getfullargspec +from uuid import UUID + +import numpy as np + +try: + import jax +except ImportError: + jax = None + +try: + import pandas as pd +except ImportError: + pd = None # type: ignore + +try: + import bson +except ImportError: + bson = None # type: ignore + +try: + from ruamel.yaml import YAML +except ImportError: + YAML = None # type: ignore + +try: + import orjson +except ImportError: + orjson = None # type: ignore + +__version__ = "3.0.0" + + +def _load_redirect(redirect_file): + try: + with open(redirect_file) as f: + yaml = YAML() + d = yaml.load(f) + except OSError: + # If we can't find the file + # Just use an empty redirect dict + return {} + + # Convert the full paths to module/class + redirect_dict = defaultdict(dict) + for old_path, new_path in d.items(): + old_class = old_path.split(".")[-1] + old_module = ".".join(old_path.split(".")[:-1]) + + new_class = new_path.split(".")[-1] + new_module = ".".join(new_path.split(".")[:-1]) + + redirect_dict[old_module][old_class] = { + "@module": new_module, + "@class": new_class, + } + + return dict(redirect_dict) + + +class GSONable: + """ + This is a mix-in base class specifying an API for GSONable objects. GSON + is Graph JSON. 
class GSONable:
    """
    Mix-in base class specifying an API for GSONable ("Graph JSON") objects.

    GSON extends JSON serialization to objects whose compositional pattern
    resembles a directed acyclic graph (DAG).  GSONable objects must
    implement an ``as_dict`` method, which must return a json-serializable
    dict containing the "@module", "@class" and "@name" keys (where "@name"
    is a unique identifier for the instance), plus a ``from_dict``
    classmethod that regenerates the object from that dict, e.g.::

        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        d["@name"] = self.name

    A default implementation is provided which works for classes whose
    ``__init__`` arguments are stored as ``self.argname`` or
    ``self._argname`` attributes (extra keyword arguments under
    ``self.kwargs``)::

        class GSONClass(GSONable):
            def __init__(self, a, b, c, d=1, **kwargs):
                self.a = a
                self.b = b
                self._c = c
                self._d = d
                self.kwargs = kwargs

    GSONable objects need to have a `name` attribute that is unique.
    Classes can be redirected to moved implementations by putting the old
    fully qualified path and new fully qualified path into ``.simsopt.yaml``
    in the home folder::

        old_module.old_class: new_module.new_class
    """

    REDIRECT = _load_redirect(
        os.path.join(os.path.expanduser("~"), ".simsopt.yaml"))

    @staticmethod
    def _recursive_as_dict(obj, serial_objs_dict):
        """
        Serialize *obj* recursively.  GSONable sub-objects are registered in
        *serial_objs_dict* (mutated in place) and replaced by
        ``{"$type": "ref", "value": name}`` references.  This helper was
        duplicated verbatim inside both as_dict and as_dict2 before.
        """
        if isinstance(obj, (list, tuple)):
            return [GSONable._recursive_as_dict(it, serial_objs_dict)
                    for it in obj]
        if isinstance(obj, dict):
            return {kk: GSONable._recursive_as_dict(vv, serial_objs_dict)
                    for kk, vv in obj.items()}
        if callable(obj) and not isinstance(obj, GSONable):
            return _serialize_callable(obj, serial_objs_dict=serial_objs_dict)
        if hasattr(obj, "as_dict"):
            name = getattr(obj, "name", str(id(obj)))
            if name not in serial_objs_dict:
                # obj.as_dict also fills serial_objs_dict in place
                serial_objs_dict[name] = obj.as_dict(serial_objs_dict)
            return {"$type": "ref", "value": name}
        return obj

    def as_dict(self, serial_objs_dict):
        """
        A JSON serializable dict representation of an object.

        Args:
            serial_objs_dict: registry mapping object names to their
                serialized forms; mutated in place while sub-objects are
                serialized.
        """
        name = getattr(self, "name", str(id(self)))
        d = {"@module": self.__class__.__module__,
             "@class": self.__class__.__name__,
             "@name": name}

        try:
            parent_module = \
                self.__class__.__module__.split(".", maxsplit=1)[0]
            module_version = import_module(
                parent_module).__version__  # type: ignore
            d["@version"] = str(module_version)
        except (AttributeError, ImportError):
            d["@version"] = None  # type: ignore

        spec = getfullargspec(self.__class__.__init__)

        for c in spec.args:
            if c == "self":
                continue
            try:
                a = getattr(self, c)
            except AttributeError:
                try:
                    a = getattr(self, "_" + c)
                except AttributeError:
                    print(f"Missing attribute is {c}")
                    raise NotImplementedError(
                        "Unable to automatically determine as_dict "
                        "format from class. GSONAble requires all "
                        "args to be present as either self.argname or "
                        "self._argname, and kwargs to be present under"
                        "a self.kwargs variable to automatically "
                        "determine the dict format. Alternatively, "
                        "you can implement both as_dict and from_dict."
                    )
            d[c] = self._recursive_as_dict(a, serial_objs_dict)
        if hasattr(self, "kwargs"):
            # type: ignore
            d.update(**getattr(self, "kwargs"))  # pylint: disable=E1101
        if spec.varargs is not None and getattr(self, spec.varargs,
                                                None) is not None:
            d.update({spec.varargs: getattr(self, spec.varargs)})
        if hasattr(self, "_kwargs"):
            d.update(**getattr(self, "_kwargs"))  # pylint: disable=E1101
        if isinstance(self, Enum):
            d.update({"value": self.value})  # pylint: disable=E1101
        return d

    def as_dict2(self, serial_objs_dict):
        """
        Slightly modified version of the as_dict protocol for the cases
        where the supplied object itself needs to be added to
        serial_objs_dict (it is serialized and a reference is returned).
        """
        return self._recursive_as_dict(self, serial_objs_dict)

    @classmethod
    def from_dict(cls, d, serial_objs_dict, recon_objs):
        """
        :param d: Dict representation.
        :param serial_objs_dict: mapping of names to serialized objects.
        :param recon_objs: cache of already reconstructed objects.
        :return: GSONable class.
        """
        decoded = {k: GSONDecoder().process_decoded(v, serial_objs_dict,
                                                    recon_objs)
                   for k, v in d.items() if not k.startswith("@")}
        return cls(**decoded)

    def to_json(self) -> str:
        """
        Returns a json string representation of the GSONable object.
        """
        return json.dumps(SIMSON(self), cls=GSONEncoder)

    @classmethod
    def __get_validators__(cls):
        """Return validators for use in pydantic"""
        yield cls.validate_gson

    @classmethod
    def validate_gson(cls, v):
        """
        pydantic Validator for GSONable pattern
        """
        if isinstance(v, cls):
            return v
        if isinstance(v, dict):
            new_obj = GSONDecoder().process_decoded(v)
            if isinstance(new_obj, cls):
                return new_obj

            new_obj = cls(**v)
            return new_obj

        raise ValueError(
            f"Must provide {cls.__name__}, the as_dict form, or the proper")

    @classmethod
    def __modify_schema__(cls, field_schema):
        """JSON schema for GSONable pattern"""
        field_schema.update(
            {
                "type": "object",
                "properties": {
                    "@class": {"enum": [cls.__name__], "type": "string"},
                    "@module": {"enum": [cls.__module__], "type": "string"},
                    "@version": {"type": "string"},
                },
                "required": ["@class", "@module"],
            }
        )
class SIMSON:
    """
    Wrapper providing the scaffolding for serializing the graph framework
    implemented in simsopt.  JSON cannot directly express objects whose
    compositional pattern resembles a directed acyclic graph (DAG), so the
    whole simsopt graph is wrapped in a single SIMSON instance just before
    being passed to the serializing functions.

    ``as_dict`` returns a json-serializable dict with two sub-dicts:
    ``"graph"``, the conventional JSON document in which every simsopt
    object has been replaced by a reference key, and ``"simsopt_objs"``,
    mapping those unique identifiers to the serialized objects themselves.
    The emitted dict also carries the "@module" and "@class" keys that let
    the decoder dynamically deserialize the class, e.g.::

        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__

    ``from_dict`` regenerates the object graph from those dicts.
    """

    def __init__(self, simsopt_objs):
        self.simsopt_objs = simsopt_objs

    def as_dict(self, serial_objs_dict=None):
        """
        A JSON serializable dict representation of the wrapped graph.
        The *serial_objs_dict* argument is ignored; a fresh registry is
        always built.
        """
        out = {"@module": self.__class__.__module__,
               "@class": self.__class__.__name__}

        try:
            root_pkg = self.__class__.__module__.split(".", maxsplit=1)[0]
            out["@version"] = str(
                import_module(root_pkg).__version__)  # type: ignore
        except (AttributeError, ImportError):
            out["@version"] = None  # type: ignore

        registry = {}

        def _serialize(node):
            if isinstance(node, (list, tuple)):
                return [_serialize(item) for item in node]
            if isinstance(node, dict):
                return {key: _serialize(val) for key, val in node.items()}
            if callable(node) and not isinstance(node, GSONable):
                return _serialize_callable(node, serial_objs_dict=registry)
            if hasattr(node, "as_dict"):
                key = getattr(node, "name", str(id(node)))
                if key not in registry:
                    # as_dict also fills `registry` in place
                    registry[key] = node.as_dict(serial_objs_dict=registry)
                return {"$type": "ref", "value": key}
            return node

        out["graph"] = _serialize(self.simsopt_objs)
        out["simsopt_objs"] = registry
        return out

    @classmethod
    def from_dict(cls, d, serial_objs_dict=None, recon_objs=None):
        """
        Rebuild the wrapped graph from a dict produced by ``as_dict``.
        The *serial_objs_dict*/*recon_objs* arguments are ignored; the
        sub-dicts stored in *d* are used instead.
        """
        return GSONDecoder().process_decoded(
            d["graph"], d["simsopt_objs"], {})
class GSONEncoder(json.JSONEncoder):
    """
    A Json Encoder which supports the GSONable API, plus adds support for
    numpy arrays, datetime objects, bson ObjectIds (requires bson).
    Usage::
        # Add it as a *cls* keyword when using json.dump
        json.dumps(object, cls=GSONEncoder)
    """

    def default(self, o):  # pylint: disable=E0202
        """
        Overriding default method for JSON encoding. This method does two
        things: (a) If an object has a to_dict property, return the to_dict
        output. (b) If the @module and @class keys are not in the to_dict,
        add them to the output automatically. If the object has no to_dict
        property, the default Python json encoder default method is called.

        Note: the original annotated the return type as ``dict``, but the
        numpy-scalar branch returns a plain Python scalar, so no return
        annotation is used.

        Args:
            o: Python object.
        Return:
            Python representation (usually a dict) serializable by json.
        """
        if isinstance(o, datetime.datetime):
            return {"@module": "datetime", "@class": "datetime",
                    "string": str(o)}
        if isinstance(o, UUID):
            return {"@module": "uuid", "@class": "UUID", "string": str(o)}

        # jax arrays are serialized through their numpy representation
        if jax is not None and isinstance(o, jax.Array):
            o = np.asarray(o)

        if isinstance(o, np.ndarray):
            if str(o.dtype).startswith("complex"):
                # complex data is stored as separate real/imag lists
                return {
                    "@module": "numpy",
                    "@class": "array",
                    "dtype": str(o.dtype),
                    "data": [o.real.tolist(), o.imag.tolist()],
                }
            return {
                "@module": "numpy",
                "@class": "array",
                "dtype": str(o.dtype),
                "data": o.tolist(),
            }
        if isinstance(o, np.generic):
            return o.item()

        if pd is not None:
            if isinstance(o, pd.DataFrame):
                return {
                    "@module": "pandas",
                    "@class": "DataFrame",
                    "data": o.to_json(
                        default_handler=GSONEncoder().encode),
                }
            if isinstance(o, pd.Series):
                return {
                    "@module": "pandas",
                    "@class": "Series",
                    "data": o.to_json(
                        default_handler=GSONEncoder().encode),
                }

        if bson is not None:
            if isinstance(o, bson.objectid.ObjectId):
                return {"@module": "bson.objectid", "@class": "ObjectId",
                        "oid": str(o)}

        if callable(o) and not isinstance(o, GSONable):
            return _serialize_callable(o)

        try:
            d = o.as_dict()
            if hasattr(o, "name") and "@name" not in d:
                d["@name"] = o.name

            if "@module" not in d:
                d["@module"] = str(o.__class__.__module__)
            if "@class" not in d:
                d["@class"] = str(o.__class__.__name__)
            if "@version" not in d:
                try:
                    parent_module = o.__class__.__module__.split(".")[0]
                    module_version = import_module(
                        parent_module).__version__  # type: ignore
                    d["@version"] = str(module_version)
                except (AttributeError, ImportError):
                    d["@version"] = None
            return d
        except AttributeError:
            return json.JSONEncoder.default(self, o)
class GSONDecoder(json.JSONDecoder):
    """
    A Json Decoder which supports the GSONable API. By default, the
    decoder attempts to find a module and name associated with a dict. If
    found, the decoder will generate a SIMSOPT object as a priority. If that fails,
    the original decoded dictionary from the string is returned. Note that
    nested lists and dicts containing simsopt objects will be decoded correctly
    as well.
    Usage:
        # Add it as a *cls* keyword when using json.load
        json.loads(json_string, cls=GSONDecoder)
    """

    def process_decoded(self, d, serial_objs_dict=None, recon_objs=None):
        """
        Recursive method to support decoding dicts and lists containing
        GSONable objects.

        Args:
            d: decoded document fragment (dict, list or primitive).
            serial_objs_dict: mapping from object name to serialized object,
                used to resolve ``{"$type": "ref"}`` entries.
            recon_objs: cache of already reconstructed objects keyed by
                name, so shared sub-objects are rebuilt only once.
        """
        if isinstance(d, dict):
            if "$type" in d.keys():
                if d["$type"] == "ref":
                    # Robustness fix: decode() invokes this method with
                    # recon_objs=None; the original crashed with a
                    # TypeError on `x in None` when a ref was present.
                    if recon_objs is None:
                        recon_objs = {}
                    if d["value"] not in recon_objs:
                        sub_dict = serial_objs_dict[d["value"]]
                        recon_obj = self.process_decoded(
                            sub_dict, serial_objs_dict, recon_objs)
                        recon_objs[d["value"]] = recon_obj
                    return recon_objs[d["value"]]
            if "@module" in d and "@class" in d:
                modname = d["@module"]
                classname = d["@class"]
                if classname in GSONable.REDIRECT.get(modname, {}):
                    # Bug fix: look the redirect entry up once.  The
                    # original reassigned `modname` first and then indexed
                    # REDIRECT with the *new* module name to fetch
                    # "@class", raising KeyError whenever a class was
                    # redirected to a different module.
                    redirect = GSONable.REDIRECT[modname][classname]
                    modname = redirect["@module"]
                    classname = redirect["@class"]
            elif "@module" in d and "@callable" in d:
                modname = d["@module"]
                objname = d["@callable"]
                classname = None
                if d.get("@bound", None) is not None:
                    # if the function is bound to an instance or class, first
                    # deserialize the bound object and then remove the object
                    # name from the function name.
                    obj = self.process_decoded(
                        d["@bound"], serial_objs_dict=serial_objs_dict,
                        recon_objs=recon_objs)
                    objname = objname.split(".")[1:]
                else:
                    # if the function is not bound to an object, import the
                    # function from the module name
                    obj = __import__(modname, globals(), locals(),
                                     [objname], 0)
                    objname = objname.split(".")
                try:
                    # the function could be nested,
                    # e.g. MyClass.NestedClass.function,
                    # so iteratively access the nesting
                    for attr in objname:
                        obj = getattr(obj, attr)

                    return obj

                except AttributeError:
                    pass
            else:
                modname = None
                classname = None

            if classname:

                if modname and modname not in ["bson.objectid", "numpy",
                                               "pandas"]:
                    if modname == "datetime" and classname == "datetime":
                        try:
                            dt = datetime.datetime.strptime(
                                d["string"], "%Y-%m-%d %H:%M:%S.%f")
                        except ValueError:
                            dt = datetime.datetime.strptime(
                                d["string"], "%Y-%m-%d %H:%M:%S")
                        return dt

                    if modname == "uuid" and classname == "UUID":
                        return UUID(d["string"])

                    mod = __import__(modname, globals(), locals(),
                                     [classname], 0)
                    if hasattr(mod, classname):
                        cls_ = getattr(mod, classname)
                        data = {k: v for k, v in d.items() if
                                not k.startswith("@")}
                        if hasattr(cls_, "from_dict"):
                            obj = cls_.from_dict(data, serial_objs_dict,
                                                 recon_objs)
                            if "@name" in d:
                                recon_objs[d["@name"]] = obj
                            return obj
                elif np is not None and modname == "numpy" and classname == "array":
                    if d["dtype"].startswith("complex"):
                        # complex data was stored as [real_list, imag_list]
                        return np.array(
                            [np.array(r) + np.array(i) * 1j for r, i in
                             zip(*d["data"])],
                            dtype=d["dtype"],
                        )
                    return np.array(d["data"], dtype=d["dtype"])
                elif pd is not None and modname == "pandas":
                    if classname == "DataFrame":
                        decoded_data = GSONDecoder().decode(d["data"])
                        return pd.DataFrame(decoded_data)
                    if classname == "Series":
                        decoded_data = GSONDecoder().decode(d["data"])
                        return pd.Series(decoded_data)
                elif (
                        bson is not None) and modname == "bson.objectid" and classname == "ObjectId":
                    return bson.objectid.ObjectId(d["oid"])

            return {self.process_decoded(k, serial_objs_dict, recon_objs):
                    self.process_decoded(v, serial_objs_dict, recon_objs)
                    for k, v in d.items()}

        if isinstance(d, list):
            return [self.process_decoded(x, serial_objs_dict, recon_objs)
                    for x in d]

        return d

    def decode(self, s):
        """
        Overrides decode from JSONDecoder: parse *s* (with orjson when
        available, for speed) and post-process the result.

        :param s: string
        :return: Object.
        """
        if orjson is not None:
            try:
                d = orjson.loads(s)  # pylint: disable=E1101
            except orjson.JSONDecodeError:  # pylint: disable=E1101
                d = json.loads(s)
        else:
            d = json.loads(s)
        return self.process_decoded(d)
def jsanitize(obj, strict=False, allow_bson=False, enum_values=False,
              recursive_gsonable=False, serial_objs_dict=None):
    """
    This method cleans an input json-like object, either a list or a dict or
    some sequence, nested or otherwise, by converting all non-string
    dictionary keys (such as int and float) to strings, and also recursively
    encodes all objects using GSON's as_dict() protocol.

    Args:
        obj: input json-like object.
        strict (bool): This parameter sets the behavior when jsanitize
            encounters an object it does not understand. If strict is True,
            jsanitize will try to get the as_dict() attribute of the object.
            If no such attribute is found, an attribute error will be thrown.
            If strict is False, jsanitize will simply call str(object) to
            convert the object to a string representation.
        allow_bson (bool): This parameter sets the behavior when jsanitize
            encounters a bson supported type such as objectid and datetime.
            If True, such bson types will be ignored, allowing for proper
            insertion into MongoDB databases.
        enum_values (bool): Convert Enums to their values.
        recursive_gsonable (bool): If True, uses .as_dict() for GSONables
            regardless of the value of strict.
        serial_objs_dict: Dictionary of serialized objects produced during
            jsonification.

    Returns:
        Sanitized dict that can be json serialized.
    """
    if serial_objs_dict is None:
        serial_objs_dict = {}

    def _recurse(x):
        # Bug fix: the original recursive calls for list/tuple/ndarray
        # elements dropped `recursive_gsonable` and `serial_objs_dict`
        # (and the dict branch dropped `serial_objs_dict`), so nested
        # GSONables silently ignored those options.  All options are now
        # propagated uniformly.
        return jsanitize(x, strict=strict, allow_bson=allow_bson,
                         enum_values=enum_values,
                         recursive_gsonable=recursive_gsonable,
                         serial_objs_dict=serial_objs_dict)

    if isinstance(obj, Enum) and enum_values:
        return obj.value

    if allow_bson and (
            isinstance(obj, (datetime.datetime, bytes)) or (
            bson is not None and isinstance(obj, bson.objectid.ObjectId))
    ):
        return obj
    if isinstance(obj, (list, tuple)):
        return [_recurse(i) for i in obj]
    if np is not None and isinstance(obj, np.ndarray):
        return [_recurse(i) for i in obj.tolist()]
    if np is not None and isinstance(obj, np.generic):
        return obj.item()
    if pd is not None and isinstance(obj, (pd.Series, pd.DataFrame)):
        return obj.to_dict()
    if isinstance(obj, dict):
        return {str(k): _recurse(v) for k, v in obj.items()}
    if isinstance(obj, (int, float)):
        return obj
    if obj is None:
        return None
    if isinstance(obj, pathlib.Path):
        return str(obj)

    if callable(obj) and not isinstance(obj, GSONable):
        try:
            return _serialize_callable(obj)
        except TypeError:
            pass

    if recursive_gsonable and isinstance(obj, GSONable):
        return obj.as_dict(serial_objs_dict=serial_objs_dict)

    if not strict:
        return str(obj)

    if isinstance(obj, str):
        return obj

    return jsanitize(
        obj.as_dict(serial_objs_dict=serial_objs_dict),
        strict=strict,
        allow_bson=allow_bson,
        enum_values=enum_values,
        recursive_gsonable=recursive_gsonable,
    )
the object the method is + # bound to is itself serializable + if bound is not None: + if isinstance(bound, GSONable): + bound = bound.as_dict(serial_objs_dict=serial_objs_dict) + else: + try: + bound = GSONEncoder().default(bound) + except TypeError: + raise TypeError( + "Only bound methods of classes or GSONable instances are supported.") + + return { + "@module": o.__module__, + "@callable": getattr(o, "__qualname__", o.__name__), + "@bound": bound, + } diff --git a/src/simsopt/field/__init__.py b/src/simsopt/field/__init__.py index e69de29bb..8e652395e 100644 --- a/src/simsopt/field/__init__.py +++ b/src/simsopt/field/__init__.py @@ -0,0 +1,21 @@ +from .biotsavart import * +from .boozermagneticfield import * +from .coil import * +from .magneticfield import * +from .magneticfieldclasses import * +from .mgrid import * +from .normal_field import * +from .tracing import * +from .magnetic_axis_helpers import * + +__all__ = ( + biotsavart.__all__ + + boozermagneticfield.__all__ + + coil.__all__ + + magneticfield.__all__ + + magneticfieldclasses.__all__ + + mgrid.__all__ + + normal_field.__all__ + + tracing.__all__ + + magnetic_axis_helpers.__all__ +) diff --git a/src/simsopt/field/biotsavart.py b/src/simsopt/field/biotsavart.py index 12234603f..931d3eb54 100644 --- a/src/simsopt/field/biotsavart.py +++ b/src/simsopt/field/biotsavart.py @@ -1,9 +1,8 @@ -import json import numpy as np -from monty.json import MSONable, MontyDecoder, MontyEncoder import simsoptpp as sopp from .magneticfield import MagneticField +from .._core.json import GSONDecoder __all__ = ['BiotSavart'] @@ -210,16 +209,20 @@ def A_vjp(self, v): res_current = [np.sum(v * dA_by_dcoilcurrents[i]) for i in range(len(dA_by_dcoilcurrents))] return sum([coils[i].vjp(res_gamma[i], res_gammadash[i], np.asarray([res_current[i]])) for i in range(len(coils))]) - def as_dict(self) -> dict: - d = MSONable.as_dict(self) + def as_dict(self, serial_objs_dict) -> dict: + d = 
super().as_dict(serial_objs_dict=serial_objs_dict) d["points"] = self.get_points_cart() return d @classmethod - def from_dict(cls, d): - decoder = MontyDecoder() - coils = decoder.process_decoded(d["coils"]) + def from_dict(cls, d, serial_objs_dict, recon_objs): + decoder = GSONDecoder() + xyz = decoder.process_decoded(d["points"], + serial_objs_dict=serial_objs_dict, + recon_objs=recon_objs) + coils = decoder.process_decoded(d["coils"], + serial_objs_dict=serial_objs_dict, + recon_objs=recon_objs) bs = cls(coils) - xyz = decoder.process_decoded(d["points"]) bs.set_points_cart(xyz) return bs diff --git a/src/simsopt/field/boozermagneticfield.py b/src/simsopt/field/boozermagneticfield.py index e9ec41b45..d14e5d2fb 100644 --- a/src/simsopt/field/boozermagneticfield.py +++ b/src/simsopt/field/boozermagneticfield.py @@ -7,6 +7,8 @@ logger = logging.getLogger(__name__) +__all__ = ["BoozerMagneticField","BoozerAnalytic","BoozerRadialInterpolant","InterpolatedBoozerField"] + try: from mpi4py import MPI except ImportError as e: diff --git a/src/simsopt/field/coil.py b/src/simsopt/field/coil.py index 80bfe9355..e2ab7dd0b 100644 --- a/src/simsopt/field/coil.py +++ b/src/simsopt/field/coil.py @@ -1,14 +1,16 @@ +from math import pi +import numpy as np + from simsopt._core.optimizable import Optimizable from simsopt._core.derivative import Derivative from simsopt.geo.curvexyzfourier import CurveXYZFourier -from simsopt.geo.curve import RotatedCurve, Curve +from simsopt.geo.curve import RotatedCurve import simsoptpp as sopp -from math import pi -import numpy as np -from monty.json import MontyDecoder, MSONable -__all__ = ['Coil', 'Current', 'coils_via_symmetries', - 'apply_symmetries_to_currents', 'apply_symmetries_to_curves'] + +__all__ = ['Coil', 'Current', 'coils_via_symmetries', 'load_coils_from_makegrid_file', + 'apply_symmetries_to_currents', 'apply_symmetries_to_curves', + 'coils_to_makegrid', 'coils_to_focus'] class Coil(sopp.Coil, Optimizable): @@ -22,7 +24,7 @@ def 
__init__(self, curve, current): self._curve = curve self._current = current sopp.Coil.__init__(self, curve, current) - Optimizable.__init__(self, x0=np.asarray([]), depends_on=[curve, current]) + Optimizable.__init__(self, depends_on=[curve, current]) def vjp(self, v_gamma, v_gammadash, v_current): return self.curve.dgamma_by_dcoeff_vjp(v_gamma) \ @@ -38,16 +40,6 @@ def plot(self, **kwargs): """ return self.curve.plot(**kwargs) - def as_dict(self) -> dict: - return MSONable.as_dict(self) - - @classmethod - def from_dict(cls, d): - decoder = MontyDecoder() - current = decoder.process_decoded(d["current"]) - curve = decoder.process_decoded(d["curve"]) - return cls(curve, current) - class CurrentBase(Optimizable): @@ -90,24 +82,21 @@ class Current(sopp.Current, CurrentBase): of coils that are constrained to use the same current. """ - def __init__(self, current, **kwargs): + def __init__(self, current, dofs=None, **kwargs): sopp.Current.__init__(self, current) - CurrentBase.__init__(self, external_dof_setter=sopp.Current.set_dofs, - x0=self.get_dofs(), **kwargs) + if dofs is None: + CurrentBase.__init__(self, external_dof_setter=sopp.Current.set_dofs, + x0=self.get_dofs(), **kwargs) + else: + CurrentBase.__init__(self, external_dof_setter=sopp.Current.set_dofs, + dofs=dofs, **kwargs) def vjp(self, v_current): return Derivative({self: v_current}) - def as_dict(self) -> dict: - d = {} - d["@module"] = self.__class__.__module__ - d["@class"] = self.__class__.__name__ - d["current"] = self.get_value() - return d - - @classmethod - def from_dict(cls, d): - return cls(d["current"]) + @property + def current(self): + return self.get_value() class ScaledCurrent(sopp.CurrentBase, CurrentBase): @@ -120,8 +109,7 @@ def __init__(self, current_to_scale, scale, **kwargs): self.current_to_scale = current_to_scale self.scale = scale sopp.CurrentBase.__init__(self) - CurrentBase.__init__(self, x0=np.asarray([]), - depends_on=[current_to_scale], **kwargs) + CurrentBase.__init__(self, 
depends_on=[current_to_scale], **kwargs) def vjp(self, v_current): return self.scale * self.current_to_scale.vjp(v_current) @@ -129,15 +117,6 @@ def vjp(self, v_current): def get_value(self): return self.scale * self.current_to_scale.get_value() - def as_dict(self) -> dict: - return MSONable.as_dict(self) - - @classmethod - def from_dict(cls, d): - decoder = MontyDecoder() - current = decoder.process_decoded(d["current_to_scale"]) - return cls(current, d["scale"]) - class CurrentSum(sopp.CurrentBase, CurrentBase): """ @@ -148,7 +127,7 @@ def __init__(self, current_a, current_b): self.current_a = current_a self.current_b = current_b sopp.CurrentBase.__init__(self) - CurrentBase.__init__(self, x0=np.asarray([]), depends_on=[current_a, current_b]) + CurrentBase.__init__(self, depends_on=[current_a, current_b]) def vjp(self, v_current): return self.current_a.vjp(v_current) + self.current_b.vjp(v_current) @@ -156,16 +135,6 @@ def vjp(self, v_current): def get_value(self): return self.current_a.get_value() + self.current_b.get_value() - def as_dict(self) -> dict: - return MSONable.as_dict(self) - - @classmethod - def from_dict(cls, d): - decoder = MontyDecoder() - current_a = decoder.process_decoded(d["current_a"]) - current_b = decoder.process_decoded(d["current_b"]) - return cls(current_a, current_b) - def apply_symmetries_to_curves(base_curves, nfp, stellsym): """ @@ -215,3 +184,144 @@ def coils_via_symmetries(curves, currents, nfp, stellsym): currents = apply_symmetries_to_currents(currents, nfp, stellsym) coils = [Coil(curv, curr) for (curv, curr) in zip(curves, currents)] return coils + + +def load_coils_from_makegrid_file(filename, order, ppp=20): + """ + This function loads a file in MAKEGRID input format containing the Cartesian coordinates + and the currents for several coils and returns an array with the corresponding coils. + The format is described at + https://princetonuniversity.github.io/STELLOPT/MAKEGRID + + Args: + filename: file to load. 
+ order: maximum mode number in the Fourier expansion. + ppp: points-per-period: number of quadrature points per period. + + Returns: + A list of ``Coil`` objects with the Fourier coefficients and currents given by the file. + """ + with open(filename, 'r') as f: + all_coils_values = f.read().splitlines()[3:] + + currents = [] + flag = True + for j in range(len(all_coils_values)-1): + vals = all_coils_values[j].split() + if flag: + currents.append(float(vals[3])) + flag = False + if len(vals) > 4: + flag = True + + curves = CurveXYZFourier.load_curves_from_makegrid_file(filename, order=order, ppp=ppp) + coils = [Coil(curves[i], Current(currents[i])) for i in range(len(curves))] + + return coils + + +def coils_to_makegrid(filename, curves, currents, groups=None, nfp=1, stellsym=False): + """ + Export a list of Curve objects together with currents in MAKEGRID input format, so they can + be used by MAKEGRID and FOCUS. The format is introduced at + https://princetonuniversity.github.io/STELLOPT/MAKEGRID + Note that this function does not generate files with MAKEGRID's *output* format. + + Args: + filename: Name of the file to write. + curves: A python list of Curve objects. + currents: Coil current of each curve. + groups: Coil current group. Coils in the same group will be assembled together. Defaults to None. + nfp: The number of field periodicity. Defaults to 1. + stellsym: Whether or not following stellarator symmetry. Defaults to False. + """ + + assert len(curves) == len(currents) + coils = coils_via_symmetries(curves, currents, nfp, stellsym) + ncoils = len(coils) + if groups is None: + groups = np.arange(ncoils) + 1 + else: + assert len(groups) == ncoils + # should be careful. 
def coils_to_makegrid(filename, curves, currents, groups=None, nfp=1, stellsym=False):
    """
    Export a list of Curve objects together with currents in MAKEGRID input
    format, so they can be used by MAKEGRID and FOCUS. The format is
    introduced at https://princetonuniversity.github.io/STELLOPT/MAKEGRID
    Note that this function does not generate files with MAKEGRID's
    *output* format.

    Args:
        filename: Name of the file to write.
        curves: A python list of Curve objects.
        currents: Coil current of each curve.
        groups: Coil current group. Coils in the same group will be assembled together. Defaults to None.
        nfp: The number of field periodicity. Defaults to 1.
        stellsym: Whether or not following stellarator symmetry. Defaults to False.
    """

    assert len(curves) == len(currents)
    coils = coils_via_symmetries(curves, currents, nfp, stellsym)
    ncoils = len(coils)
    if groups is None:
        groups = 1 + np.arange(ncoils)
    else:
        assert len(groups) == ncoils
    # should be careful. SIMSOPT flips the current, but actually should change coil order
    with open(filename, "w") as out:
        out.write("periods {:3d} \n".format(nfp))
        out.write("begin filament \n")
        out.write("mirror NIL \n")
        for idx, coil in enumerate(coils):
            pts = coil.curve.gamma()
            amps = coil.current.get_value()
            # the last point matches the first one;
            for px, py, pz in pts:
                out.write(
                    "{:23.15E} {:23.15E} {:23.15E} {:23.15E}\n".format(
                        px, py, pz, amps
                    )
                )
            out.write(
                "{:23.15E} {:23.15E} {:23.15E} {:23.15E} {:} {:10} \n".format(
                    pts[0, 0], pts[0, 1], pts[0, 2], 0.0, groups[idx],
                    coil.curve.name
                )
            )
        out.write("end \n")
    return
+ """ + from simsopt.geo import CurveLength + + assert len(curves) == len(currents) + ncoils = len(curves) + if stellsym: + symm = 2 # both periodic and symmetric + elif nfp > 1 and not stellsym: + symm = 1 # only periodicity + else: + symm = 0 # no periodicity or symmetry + if nfp > 1: + print('Please note: FOCUS sets Nfp in the plasma file.') + with open(filename, 'w') as f: + f.write('# Total number of coils \n') + f.write(' {:d} \n'.format(ncoils)) + for i in range(ncoils): + assert isinstance(curves[i], CurveXYZFourier) + nf = curves[i].order + xyz = curves[i].full_x.reshape((3, -1)) + xc = xyz[0, ::2] + xs = np.concatenate(([0.], xyz[0, 1::2])) + yc = xyz[1, ::2] + ys = np.concatenate(([0.], xyz[1, 1::2])) + zc = xyz[2, ::2] + zs = np.concatenate(([0.], xyz[2, 1::2])) + length = CurveLength(curves[i]).J() + nseg = len(curves[i].quadpoints) + f.write('#------------{:d}----------- \n'.format(i+1)) + f.write('# coil_type symm coil_name \n') + f.write(' {:d} {:d} {:} \n'.format(1, symm, curves[i].name)) + f.write('# Nseg current Ifree Length Lfree target_length \n') + f.write(' {:d} {:23.15E} {:d} {:23.15E} {:d} {:23.15E} \n'.format(nseg, currents[i].get_value(), Ifree, length, Lfree, length)) + f.write('# NFcoil \n') + f.write(' {:d} \n'.format(nf)) + f.write('# Fourier harmonics for coils ( xc; xs; yc; ys; zc; zs) \n') + for r in [xc, xs, yc, ys, zc, zs]: # 6 lines + for k in range(nf+1): + f.write('{:23.15E} '.format(r[k])) + f.write('\n') + f.write('\n') + return diff --git a/src/simsopt/field/magnetic_axis_helpers.py b/src/simsopt/field/magnetic_axis_helpers.py new file mode 100644 index 000000000..67ac71129 --- /dev/null +++ b/src/simsopt/field/magnetic_axis_helpers.py @@ -0,0 +1,90 @@ +import numpy as np +from simsopt.geo import CurveRZFourier +from scipy.integrate import solve_ivp + +__all__ = ['compute_on_axis_iota'] + +def compute_on_axis_iota(axis, magnetic_field): + """ + Computes the rotational transform on the magnetic axis of a device using a 
method based on + equation (13) of Greene, Journal of Mathematical Physics 20, 1183 (1979); doi: 10.1063/1.524170. + This method was shared by Stuart Hudson and Matt Landreman. NOTE: this function does not check that the provided + axis is actually a magnetic axis of the input magnetic_field. + + Args: + axis: a CurveRZFourier corresponding to the magnetic axis of the magnetic field. + magnetic_field: the magnetic field in which the axis lies. + + Returns: + iota: the rotational transform on the given axis. + """ + assert type(axis) is CurveRZFourier + + def tangent_map(phi, x): + ind = np.array(phi) + out = np.zeros((1, 3)) + axis.gamma_impl(out, ind) + magnetic_field.set_points(out) + + out = out.flatten() + B = magnetic_field.B().flatten() + dB_by_dX = magnetic_field.dB_by_dX().reshape((3, 3)) + B1 = B[0] + B2 = B[1] + B3 = B[2] + + dB1_dx = dB_by_dX[0, 0] + dB1_dy = dB_by_dX[0, 1] + dB1_dz = dB_by_dX[0, 2] + + dB2_dx = dB_by_dX[1, 0] + dB2_dy = dB_by_dX[1, 1] + dB2_dz = dB_by_dX[1, 2] + + dB3_dx = dB_by_dX[2, 0] + dB3_dy = dB_by_dX[2, 1] + dB3_dz = dB_by_dX[2, 2] + + c = np.cos(2*np.pi*phi) + s = np.sin(2*np.pi*phi) + + R = np.sqrt(out[0]**2 + out[1]**2) + dx_dR = c + dy_dR = s + + BR = c*B1 + s*B2 + Bphi =-s*B1 + c*B2 + BZ = B3 + + dB1_dR = dB1_dx * dx_dR + dB1_dy * dy_dR + dB2_dR = dB2_dx * dx_dR + dB2_dy * dy_dR + dB3_dR = dB3_dx * dx_dR + dB3_dy * dy_dR + + dBR_dR = c*dB1_dR + s*dB2_dR + dBphi_dR = -s*dB1_dR + c*dB2_dR + dBZ_dR = dB3_dR + + dBR_dZ = c*dB1_dz + s*dB2_dz + dBphi_dZ = -s*dB1_dz + c*dB2_dz + dBZ_dZ = dB3_dz + + Bphi_R = Bphi/R + d_Bphi_R_dR = (dBphi_dR * R - Bphi)/R**2 + d_Bphi_R_dZ = dBphi_dZ/R + + A11 = (dBR_dR - (BR / Bphi_R) * d_Bphi_R_dR) / Bphi_R + A21 = (dBZ_dR - (BZ / Bphi_R) * d_Bphi_R_dR) / Bphi_R + A12 = (dBR_dZ - (BR / Bphi_R) * d_Bphi_R_dZ) / Bphi_R + A22 = (dBZ_dZ - (BZ / Bphi_R) * d_Bphi_R_dZ) / Bphi_R + A = np.array([[A11,A12],[A21,A22]]) + return 2*np.pi*np.array([A@x[:2], A@x[2:]]).flatten() + + t_span = [0, 1/axis.nfp] + 
t_eval = t_span + + y0 = np.array([1, 0, 0, 1]) + results = solve_ivp(tangent_map, t_span, y0, t_eval=t_eval, rtol=1e-12, atol=1e-12, method='RK45') + M = results.y[:, -1].reshape((2,2)) + evals, evecs = np.linalg.eig(M) + iota = np.arctan2(np.imag(evals[0]), np.real(evals[0])) * axis.nfp/(2*np.pi) + return iota diff --git a/src/simsopt/field/magneticfield.py b/src/simsopt/field/magneticfield.py index 062967377..8a647633c 100644 --- a/src/simsopt/field/magneticfield.py +++ b/src/simsopt/field/magneticfield.py @@ -1,9 +1,9 @@ import numpy as np -from monty.json import MontyDecoder, MSONable import simsoptpp as sopp from .._core.optimizable import Optimizable -from .._core.derivative import Derivative +from .._core.json import GSONDecoder +from .mgrid import MGrid __all__ = ['MagneticField', 'MagneticFieldSum', 'MagneticFieldMultiply'] @@ -92,6 +92,69 @@ def to_vtk(self, filename, nr=10, nphi=10, nz=10, rmin=1.0, rmax=2.0, zmin=-0.5, contig = np.ascontiguousarray gridToVTK(filename, X, Y, Z, pointData={"B": (contig(vals[..., 0]), contig(vals[..., 1]), contig(vals[..., 2]))}) + def to_mgrid(self, filename, nr=10, nphi=4, nz=12, rmin=1.0, rmax=2.0, zmin=-0.5, zmax=0.5, nfp=1, + include_potential=False): + """Export the field to the mgrid format for free boundary calculations. + + The field data is represented as a single "current group". For + free-boundary vmec, the "extcur" array should have a single nonzero + element, set to 1.0. + + In the future, we may want to implement multiple current groups. + + Args: + filename: Name of the NetCDF file to save. + nr: Number of grid points in the major radius dimension. + nphi: Number of planes in the toroidal angle. + nz: Number of grid points in the z coordinate. + rmin: Minimum value of major radius for the grid. + rmax: Maximum value of major radius for the grid. + zmin: Minimum value of z for the grid. + zmax: Maximum value of z for the grid. + nfp: Number of field periods. 
+ include_potential: Boolean to include the vector potential A. Defaults to false. + """ + + rs = np.linspace(rmin, rmax, nr, endpoint=True) + phis = np.linspace(0, 2 * np.pi / nfp, nphi, endpoint=False) + zs = np.linspace(zmin, zmax, nz, endpoint=True) + + Phi, Z, R = np.meshgrid(phis, zs, rs, indexing='ij') + + RPhiZ = np.zeros((R.size, 3)) + RPhiZ[:, 0] = R.flatten() + RPhiZ[:, 1] = Phi.flatten() + RPhiZ[:, 2] = Z.flatten() + + # get field on the grid + self.set_points_cyl(RPhiZ) + B = self.B_cyl() + + # shape the components + br, bp, bz = B.T + br_3 = br.reshape((nphi, nz, nr)) + bp_3 = bp.reshape((nphi, nz, nr)) + bz_3 = bz.reshape((nphi, nz, nr)) + + if include_potential: + A = self.A_cyl() + # shape the potential components + ar, ap, az = A.T + ar_3 = ar.reshape((nphi, nz, nr)) + ap_3 = ap.reshape((nphi, nz, nr)) + az_3 = az.reshape((nphi, nz, nr)) + + mgrid = MGrid(nfp=nfp, + nr=nr, nz=nz, nphi=nphi, + rmin=rmin, rmax=rmax, zmin=zmin, zmax=zmax) + if include_potential: + mgrid.add_field_cylindrical(br_3, bp_3, bz_3, ar=ar_3, ap=ap_3, az=az_3, name='simsopt_coils') + else: + mgrid.add_field_cylindrical(br_3, bp_3, bz_3, name='simsopt_coils') + + + mgrid.write(filename) + class MagneticFieldMultiply(MagneticField): """ @@ -126,17 +189,17 @@ def _dA_by_dX_impl(self, dA): def _d2A_by_dXdX_impl(self, ddA): ddA[:] = self.scalar*self.Bfield.d2A_by_dXdX() - def as_dict(self) -> dict: - d = MSONable.as_dict(self) + def as_dict(self, serial_objs_dict) -> dict: + d = super().as_dict(serial_objs_dict=serial_objs_dict) d["points"] = self.get_points_cart() return d @classmethod - def from_dict(cls, d): - decoder = MontyDecoder() - Bfield = decoder.process_decoded(d["Bfield"]) + def from_dict(cls, d, serial_objs_dict, recon_objs): + decoder = GSONDecoder() + Bfield = decoder.process_decoded(d["Bfield"], serial_objs_dict, recon_objs) field = cls(d["scalar"], Bfield) - xyz = decoder.process_decoded(d["points"]) + xyz = decoder.process_decoded(d["points"], serial_objs_dict, 
recon_objs) field.set_points_cart(xyz) return field @@ -178,19 +241,17 @@ def _d2A_by_dXdX_impl(self, ddA): def B_vjp(self, v): return sum([bf.B_vjp(v) for bf in self.Bfields if np.any(bf.dofs_free_status)]) - def as_dict(self) -> dict: - d = MSONable.as_dict(self) + def as_dict(self, serial_objs_dict) -> dict: + d = super().as_dict(serial_objs_dict=serial_objs_dict) d["points"] = self.get_points_cart() return d @classmethod - def from_dict(cls, d): - decoder = MontyDecoder() - Bfields = [] - for field in d["Bfields"]: - Bfields.append(decoder.process_decoded(field)) + def from_dict(cls, d, serial_objs_dict, recon_objs): + decoder = GSONDecoder() + Bfields = decoder.process_decoded(d["Bfields"], serial_objs_dict, recon_objs) field_sum = cls(Bfields) - xyz = decoder.process_decoded(d["points"]) + xyz = decoder.process_decoded(d["points"], serial_objs_dict, recon_objs) field_sum.set_points_cart(xyz) return field_sum diff --git a/src/simsopt/field/magneticfieldclasses.py b/src/simsopt/field/magneticfieldclasses.py index e40bbea5d..101fe80e7 100644 --- a/src/simsopt/field/magneticfieldclasses.py +++ b/src/simsopt/field/magneticfieldclasses.py @@ -1,7 +1,7 @@ import logging import numpy as np -from monty.json import MSONable, MontyDecoder +import warnings from scipy.special import ellipk, ellipe try: from sympy.parsing.sympy_parser import parse_expr @@ -10,13 +10,15 @@ except ImportError: sympy_found = False -from simsopt.field.magneticfield import MagneticField import simsoptpp as sopp +from .magneticfield import MagneticField +from .._core.json import GSONDecoder logger = logging.getLogger(__name__) __all__ = ['ToroidalField', 'PoloidalField', 'ScalarPotentialRZMagneticField', - 'CircularCoil', 'Dommaschk', 'Reiman', 'InterpolatedField'] + 'CircularCoil', 'Dommaschk', 'Reiman', 'InterpolatedField', 'DipoleField', + 'MirrorModel'] class ToroidalField(MagneticField): @@ -44,7 +46,6 @@ def _B_impl(self, B): def _dB_by_dX_impl(self, dB): points = 
self.get_points_cart_ref() - phi = np.arctan2(points[:, 1], points[:, 0]) R = np.sqrt(np.square(points[:, 0]) + np.square(points[:, 1])) x = points[:, 0] @@ -64,8 +65,6 @@ def _dB_by_dX_impl(self, dB): def _d2B_by_dXdX_impl(self, ddB): points = self.get_points_cart_ref() - x = points[:, 0] - y = points[:, 1] ddB[:] = 2*self.B0*self.R0*np.multiply( 1/(points[:, 0]**2+points[:, 1]**2)**3, np.array([ [[3*points[:, 0]**2+points[:, 1]**3, points[:, 0]**3-3*points[:, 0]*points[:, 1]**2, np.zeros((len(points)))], [ @@ -112,16 +111,16 @@ def _d2A_by_dXdX_impl(self, ddA): (points[:, 0]**4-points[:, 1]**4)/(2*points[:, 2]), np.zeros((len(points)))]], np.zeros((3, 3, len(points)))])).transpose((3, 0, 1, 2)) - def as_dict(self) -> dict: - d = MSONable.as_dict(self) + def as_dict(self, serial_objs_dict) -> dict: + d = super().as_dict(serial_objs_dict=serial_objs_dict) d["points"] = self.get_points_cart() return d @classmethod - def from_dict(cls, d): + def from_dict(cls, d, serial_objs_dict, recon_objs): field = cls(d["R0"], d["B0"]) - decoder = MontyDecoder() - xyz = decoder.process_decoded(d["points"]) + decoder = GSONDecoder() + xyz = decoder.process_decoded(d["points"], serial_objs_dict, recon_objs) field.set_points_cart(xyz) return field @@ -220,16 +219,16 @@ def _dB_by_dX_impl(self, dB): dB[:] = self.B0/self.R0/self.q*np.array([dB_by_dX1_term1+dB_by_dX1_term2, dB_by_dX2_term1+dB_by_dX2_term2, dB_by_dX3_term1+dB_by_dX3_term2]).T - def as_dict(self) -> dict: - d = MSONable.as_dict(self) + def as_dict(self, serial_objs_dict) -> dict: + d = super().as_dict(serial_objs_dict=serial_objs_dict) d["points"] = self.get_points_cart() return d @classmethod - def from_dict(cls, d): + def from_dict(cls, d, serial_objs_dict, recon_objs): field = cls(d["R0"], d["B0"], d["q"]) - decoder = MontyDecoder() - xyz = decoder.process_decoded(d["points"]) + decoder = GSONDecoder() + xyz = decoder.process_decoded(d["points"], serial_objs_dict, recon_objs) field.set_points_cart(xyz) return field @@ 
-257,8 +256,8 @@ def __init__(self, phi_str): self.phi_str = phi_str self.phi_parsed = parse_expr(phi_str) R, Z, Phi = sp.symbols('R Z phi') - self.Blambdify = sp.lambdify((R, Z, Phi), [self.phi_parsed.diff(R)+1e-30*Phi*R*Z,\ - self.phi_parsed.diff(Phi)/R+1e-30*Phi*R*Z,\ + self.Blambdify = sp.lambdify((R, Z, Phi), [self.phi_parsed.diff(R)+1e-30*Phi*R*Z, \ + self.phi_parsed.diff(Phi)/R+1e-30*Phi*R*Z, \ self.phi_parsed.diff(Z)+1e-30*Phi*R*Z]) self.dBlambdify_by_dX = sp.lambdify( (R, Z, Phi), @@ -319,16 +318,16 @@ def _dB_by_dX_impl(self, dB): + Bphi * dcosphidy dB[:, 2, 1] = dBrdz * np.sin(phi) + dBphidz * np.cos(phi) - def as_dict(self) -> dict: - d = MSONable.as_dict(self) + def as_dict(self, serial_objs_dict) -> dict: + d = super().as_dict(serial_objs_dict=serial_objs_dict) d["points"] = self.get_points_cart() return d @classmethod - def from_dict(cls, d): + def from_dict(cls, d, serial_objs_dict, recon_objs): field = cls(d["phi_str"]) - decoder = MontyDecoder() - xyz = decoder.process_decoded(d["points"]) + decoder = GSONDecoder() + xyz = decoder.process_decoded(d["points"], serial_objs_dict, recon_objs) field.set_points_cart(xyz) return field @@ -381,6 +380,10 @@ def __init__(self, r0=0.1, center=[0, 0, 0], I=5e5/np.pi, normal=[0, 0]): self.rotMatrixInv = np.array(self.rotMatrix.T) + @property + def I(self): + return self.Inorm * 25e5 + def _B_impl(self, B): points = self.get_points_cart_ref() points = np.array(np.dot(self.rotMatrixInv, np.array(np.subtract(points, self.center)).T).T) @@ -391,7 +394,6 @@ def _B_impl(self, B): k = np.sqrt(1-np.divide(np.square(alpha), np.square(beta))) ellipek2 = ellipe(k**2) ellipkk2 = ellipk(k**2) - gamma = np.square(points[:, 0]) - np.square(points[:, 1]) B[:] = np.dot(self.rotMatrix, np.array( [self.Inorm*points[:, 0]*points[:, 2]/(2*alpha**2*beta*rho**2+1e-31)*((self.r0**2+r**2)*ellipek2-alpha**2*ellipkk2), self.Inorm*points[:, 1]*points[:, 2]/(2*alpha**2*beta*rho**2+1e-31)*((self.r0**2+r**2)*ellipek2-alpha**2*ellipkk2), @@ 
-478,29 +480,209 @@ def _A_impl(self, A): ellipek2 = ellipe(k**2) ellipkk2 = ellipk(k**2) - A[:] = -self.Inorm/2*np.dot(self.rotMatrix, np.array( - (2*self.r0+np.sqrt(points[:, 0]**2+points[:, 1]**2)*ellipek2+(self.r0**2+points[:, 0]**2+points[:, 1]**2+points[:, 2]**2)*(ellipe(k**2)-ellipkk2)) / - ((points[:, 0]**2+points[:, 1]**2+1e-31)*np.sqrt(self.r0**2+points[:, 0]**2+points[:, 1]**2+2*self.r0*np.sqrt(points[:, 0]**2+points[:, 1]**2)+points[:, 2]**2+1e-31)) * - np.array([-points[:, 1], points[:, 0], 0])).T) - - def as_dict(self): - d = {} - d["r0"] = self.r0 - d["center"] = self.center - d["I"] = self.Inorm * 25e5 - d["normal"] = self.normal + num = (2*self.r0+np.sqrt(points[:, 0]**2+points[:, 1]**2)*ellipek2+(self.r0**2+points[:, 0]**2+points[:, 1]**2+points[:, 2]**2)*(ellipe(k**2)-ellipkk2)) + denom = ((points[:, 0]**2+points[:, 1]**2+1e-31)*np.sqrt(self.r0**2+points[:, 0]**2+points[:, 1]**2+2*self.r0*np.sqrt(points[:, 0]**2+points[:, 1]**2)+points[:, 2]**2+1e-31)) + fak = num/denom + pts = fak[:, None]*np.concatenate((-points[:, 1][:, None], points[:, 0][:, None], np.zeros((points.shape[0], 1))), axis=-1) + A[:] = -self.Inorm/2*np.dot(self.rotMatrix, pts.T).T + + def as_dict(self, serial_objs_dict): + d = super().as_dict(serial_objs_dict=serial_objs_dict) d["points"] = self.get_points_cart() return d @classmethod - def from_dict(cls, d): + def from_dict(cls, d, serial_objs_dict, recon_objs): field = cls(d["r0"], d["center"], d["I"], d["normal"]) - decoder = MontyDecoder() - xyz = decoder.process_decoded(d["points"]) + decoder = GSONDecoder() + xyz = decoder.process_decoded(d["points"], serial_objs_dict, recon_objs) field.set_points_cart(xyz) return field +class DipoleField(MagneticField): + r""" + Computes the MagneticField induced by N dipoles. The field is given by + + .. 
math:: + + B(\mathbf{x}) = \frac{\mu_0}{4\pi} \sum_{i=1}^{N} \left(\frac{3\mathbf{r}_i\cdot \mathbf{m}_i}{|\mathbf{r}_i|^5}\mathbf{r}_i - \frac{\mathbf{m}_i}{|\mathbf{r}_i|^3}\right) + + where :math:`\mu_0=4\pi\times 10^{-7}\;N/A^2` is the permeability of free space + and :math:`\mathbf{r_i} = \mathbf{x} - \mathbf{x}^{dipole}_i` is the + vector between the field evaluation point and the dipole :math:`i` + position. + + Args: + dipole_grid: 2D numpy array, shape (ndipoles, 3). + A set of points corresponding to the locations of magnetic dipoles. + dipole_vectors: 2D numpy array, shape (ndipoles, 3). + The dipole vectors of each of the dipoles in the grid. + stellsym: bool (default True). + Whether or not the dipole grid is stellarator symmetric. + nfp: int (default 1). + The field-period symmetry of the dipole-grid. + coordinate_flag: string (default "cartesian"). + The global coordinate system that should be considered grid-aligned in the calculation. + The options are "cartesian" (rectangular bricks), "cylindrical" (cylindrical bricks), + and "toroidal" (uniform grid in simple toroidal coordinates). Note that this ASSUMES + that the global coordinate system for the dipole locations is one of these three + choices, so be careful if your permanent magnets are shaped/arranged differently! + m_maxima: 1D numpy array, shape (ndipoles,). + The maximum dipole strengths of each magnet in the grid. If not specified, defaults + to using the largest dipole strength of the magnets in dipole_grid, and using this + value for all the dipoles. Needed for plotting normalized dipole magnitudes in the + vtk functionality. + R0: double. + The value of the major radius of the stellarator needed only for simple toroidal + coordinates. 
+ """ + + def __init__(self, dipole_grid, dipole_vectors, stellsym=True, nfp=1, coordinate_flag='cartesian', m_maxima=None, R0=1): + super().__init__() + if coordinate_flag == 'toroidal': + warnings.warn('Note that if using simple toroidal coordinates, ' + 'the major radius must be specified through R0 argument.') + self.R0 = R0 + self._dipole_fields_from_symmetries(dipole_grid, dipole_vectors, stellsym, nfp, coordinate_flag, m_maxima, R0) + + def _B_impl(self, B): + points = self.get_points_cart_ref() + B[:] = sopp.dipole_field_B(points, self.dipole_grid, self.m_vec) + + def _dB_by_dX_impl(self, dB): + points = self.get_points_cart_ref() + dB[:] = sopp.dipole_field_dB(points, self.dipole_grid, self.m_vec) + + def _A_impl(self, A): + points = self.get_points_cart_ref() + A[:] = sopp.dipole_field_A(points, self.dipole_grid, self.m_vec) + + def _dA_by_dX_impl(self, dA): + points = self.get_points_cart_ref() + dA[:] = sopp.dipole_field_dA(points, self.dipole_grid, self.m_vec) + + def _dipole_fields_from_symmetries(self, dipole_grid, dipole_vectors, stellsym=True, nfp=1, coordinate_flag='cartesian', m_maxima=None, R0=1): + """ + Takes the dipoles and grid initialized in a PermanentMagnetOptimizer (for a half-period surface) + and generates the full dipole manifold so that the call to B() (the magnetic field from + the dipoles) correctly returns contributions from all the dipoles from symmetries. + """ + self.dipole_grid = dipole_grid + + # Read in the required fields from pm_opt object + ndipoles = dipole_grid.shape[0] + if m_maxima is None: + m_maxima = np.max(np.linalg.norm(dipole_vectors, axis=-1)) * np.ones(ndipoles) + if stellsym: + stell_list = [1, -1] + nsym = nfp * 2 + else: + stell_list = [1] + nsym = nfp + m = dipole_vectors.reshape(ndipoles, 3) + + # Initialize new grid and dipole vectors for all the dipoles + # after we account for the symmetries below. 
+ dipole_grid_x = np.zeros(ndipoles * nsym) + dipole_grid_y = np.zeros(ndipoles * nsym) + dipole_grid_z = np.zeros(ndipoles * nsym) + m_vec = np.zeros((ndipoles * nsym, 3)) + m_max = np.zeros(ndipoles * nsym) + + # Load in the dipole locations for a half-period surface + ox = dipole_grid[:, 0] + oy = dipole_grid[:, 1] + oz = dipole_grid[:, 2] + + # loop through the dipoles and repeat for fp and stellarator symmetries + index = 0 + n = ndipoles + + # get the components in Cartesian, converting if needed + mmx = m[:, 0] + mmy = m[:, 1] + mmz = m[:, 2] + if coordinate_flag == 'cylindrical': + phi_dipole = np.arctan2(oy, ox) + mmx_temp = mmx * np.cos(phi_dipole) - mmy * np.sin(phi_dipole) + mmy_temp = mmx * np.sin(phi_dipole) + mmy * np.cos(phi_dipole) + mmx = mmx_temp + mmy = mmy_temp + if coordinate_flag == 'toroidal': + phi_dipole = np.arctan2(oy, ox) + theta_dipole = np.arctan2(oz, np.sqrt(ox ** 2 + oy ** 2) - R0) + mmx_temp = mmx * np.cos(phi_dipole) * np.cos(theta_dipole) - mmy * np.sin(phi_dipole) - mmz * np.cos(phi_dipole) * np.sin(theta_dipole) + mmy_temp = mmx * np.sin(phi_dipole) * np.cos(theta_dipole) + mmy * np.cos(phi_dipole) - mmz * np.sin(phi_dipole) * np.sin(theta_dipole) + mmz_temp = mmx * np.sin(theta_dipole) + mmz * np.cos(theta_dipole) + mmx = mmx_temp + mmy = mmy_temp + mmz = mmz_temp + + # Loop over stellarator and field-period symmetry contributions + for stell in stell_list: + for fp in range(nfp): + phi0 = (2 * np.pi / nfp) * fp + + # get new dipoles locations by flipping the y and z components, then rotating by phi0 + dipole_grid_x[index:index + n] = ox * np.cos(phi0) - oy * np.sin(phi0) * stell + dipole_grid_y[index:index + n] = ox * np.sin(phi0) + oy * np.cos(phi0) * stell + dipole_grid_z[index:index + n] = oz * stell + + # get new dipole vectors by flipping the x component, then rotating by phi0 + m_vec[index:index + n, 0] = mmx * np.cos(phi0) * stell - mmy * np.sin(phi0) + m_vec[index:index + n, 1] = mmx * np.sin(phi0) * stell + mmy * 
np.cos(phi0) + m_vec[index:index + n, 2] = mmz + + m_max[index:index + n] = m_maxima + index += n + + contig = np.ascontiguousarray + self.dipole_grid = contig(np.array([dipole_grid_x, dipole_grid_y, dipole_grid_z]).T) + self.m_vec = contig(m_vec) + self.m_maxima = contig(m_max) + + def _toVTK(self, vtkname): + """ + Write dipole data into a VTK file (acknowledgements to Caoxiang's CoilPy code). + + Args: + vtkname (str): VTK filename, will be appended with .vts or .vtu. + """ + + # get the coordinates + ox = np.ascontiguousarray(self.dipole_grid[:, 0]) + oy = np.ascontiguousarray(self.dipole_grid[:, 1]) + oz = np.ascontiguousarray(self.dipole_grid[:, 2]) + ophi = np.arctan2(oy, ox) + otheta = np.arctan2(oz, np.sqrt(ox ** 2 + oy ** 2) - self.R0) + + # define the m vectors and the normalized m vectors + # in Cartesian, cylindrical, and simple toroidal coordinates. + mx = np.ascontiguousarray(self.m_vec[:, 0]) + my = np.ascontiguousarray(self.m_vec[:, 1]) + mz = np.ascontiguousarray(self.m_vec[:, 2]) + mx_normalized = np.ascontiguousarray(mx / self.m_maxima) + my_normalized = np.ascontiguousarray(my / self.m_maxima) + mz_normalized = np.ascontiguousarray(mz / self.m_maxima) + mr = np.ascontiguousarray(mx * np.cos(ophi) + my * np.sin(ophi)) + mrminor = np.ascontiguousarray(mx * np.cos(ophi) * np.cos(otheta) + my * np.sin(ophi) * np.cos(otheta) + np.sin(otheta) * mz) + mphi = np.ascontiguousarray(-mx * np.sin(ophi) + my * np.cos(ophi)) + mtheta = np.ascontiguousarray(-mx * np.cos(ophi) * np.sin(otheta) - my * np.sin(ophi) * np.sin(otheta) + np.cos(otheta) * mz) + mr_normalized = np.ascontiguousarray(mr / self.m_maxima) + mrminor_normalized = np.ascontiguousarray(mrminor / self.m_maxima) + mphi_normalized = np.ascontiguousarray(mphi / self.m_maxima) + mtheta_normalized = np.ascontiguousarray(mtheta / self.m_maxima) + + # Save all the data to a vtk file which can be visualized nicely with ParaView + data = {"m": (mx, my, mz), "m_normalized": (mx_normalized, 
my_normalized, mz_normalized), + "m_rphiz": (mr, mphi, mz), "m_rphiz_normalized": (mr_normalized, mphi_normalized, mz_normalized), + "m_rphitheta": (mrminor, mphi, mtheta), + "m_rphitheta_normalized": (mrminor_normalized, mphi_normalized, mtheta_normalized)} + from pyevtk.hl import pointsToVTK + pointsToVTK(str(vtkname), ox, oy, oz, data=data) + + class Dommaschk(MagneticField): """ Vacuum magnetic field created by an explicit representation of the magnetic @@ -532,19 +714,21 @@ def _dB_by_dX_impl(self, dB): points = self.get_points_cart_ref() dB[:] = np.add.reduce(sopp.DommaschkdB(self.m, self.n, self.coeffs, points))+self.Btor.dB_by_dX() - def as_dict(self) -> dict: - d = {} - d["mn"] = np.column_stack((self.m, self.n)) - d["coeffs"] = self.coeffs + @property + def mn(self): + return np.column_stack((self.m, self.n)) + + def as_dict(self, serial_objs_dict) -> dict: + d = super().as_dict(serial_objs_dict=serial_objs_dict) d["points"] = self.get_points_cart() return d @classmethod - def from_dict(cls, d): - decoder = MontyDecoder() - mn = decoder .process_decoded(d["mn"]) + def from_dict(cls, d, serial_objs_dict, recon_objs): + decoder = GSONDecoder() + mn = decoder.process_decoded(d["mn"], serial_objs_dict, recon_objs) field = cls(mn, d["coeffs"]) - xyz = decoder.process_decoded(d["points"]) + xyz = decoder.process_decoded(d["points"], serial_objs_dict, recon_objs) field.set_points_cart(xyz) return field @@ -580,16 +764,16 @@ def _dB_by_dX_impl(self, dB): points = self.get_points_cart_ref() dB[:] = sopp.ReimandB(self.iota0, self.iota1, self.k, self.epsilonk, self.m0, points) - def as_dict(self): - d = MSONable.as_dict(self) + def as_dict(self, serial_objs_dict): + d = super().as_dict(serial_objs_dict=serial_objs_dict) d["points"] = self.get_points_cart() return d @classmethod - def from_dict(cls, d): + def from_dict(cls, d, serial_objs_dict, recon_objs): field = cls(d["iota0"], d["iota1"], d["k"], d["epsilonk"], d["m0"]) - decoder = MontyDecoder() - xyz = 
class MirrorModel(MagneticField):
    r"""
    Model magnetic field employed in https://arxiv.org/abs/2305.06372 to study
    the magnetic mirror experiment WHAM. The
    magnetic field is given by :math:`\vec{B}=B_R \vec{e}_R + B_Z \vec{e}_Z`, where
    :math:`\vec{e}_R` and :math:`\vec{e}_Z` are
    the cylindrical radial and axial unit vectors, respectively, and
    :math:`B_R` and :math:`B_Z` are given by

    .. math::

        B_R = -\frac{1}{R} \frac{\partial\psi}{\partial Z}, \;
        B_Z = \frac{1}{R} \frac{\partial\psi}{\partial R}.

    In this model, the magnetic flux function :math:`\psi` is written as a double
    Lorentzian function

    .. math::

        \psi = \frac{R^2 \mathcal{B}}{2 \pi \gamma}\left(\left[1+\left(\frac{Z-Z_m}{\gamma}\right)^2\right]^{-1}+\left[1+\left(\frac{Z+Z_m}{\gamma}\right)^2\right]^{-1}\right).

    Note that this field is neither a vacuum field nor a solution of MHD force balance.
    The input parameters are ``B0``, ``gamma`` and ``Z_m`` with the standard values the
    ones used in https://arxiv.org/abs/2305.06372, that is, ``B0 = 6.51292``,
    ``gamma = 0.124904``, and ``Z_m = 0.98``.

    Args:
        B0: parameter :math:`\mathcal{B}` of the flux surface function
        gamma: parameter :math:`\gamma` of the flux surface function
        Z_m: parameter :math:`Z_m` of the flux surface function
    """

    def __init__(self, B0=6.51292, gamma=0.124904, Z_m=0.98):
        MagneticField.__init__(self)
        self.B0 = B0
        self.gamma = gamma
        self.Z_m = Z_m

    def _psi(self, R, Z):
        # Double-Lorentzian flux function psi(R, Z); see the class docstring.
        factor1 = 1+((Z-self.Z_m)/(self.gamma))**2
        factor2 = 1+((Z+self.Z_m)/(self.gamma))**2
        psi = (R*R*self.B0/(2*np.pi*self.gamma))*(1/factor1+1/factor2)
        return psi

    def _B_impl(self, B):
        # Evaluate B in cylindrical components and rotate into Cartesian.
        points = self.get_points_cart_ref()
        r = np.sqrt(np.square(points[:, 0]) + np.square(points[:, 1]))
        z = points[:, 2]
        phi = np.arctan2(points[:, 1], points[:, 0])
        # BR = -(1/R)dpsi/dZ, BZ=(1/R)dpsi/dR
        factor1 = (1+((z-self.Z_m)/(self.gamma))**2)**2
        factor2 = (1+((z+self.Z_m)/(self.gamma))**2)**2
        Br = (r*self.B0/(np.pi*self.gamma**3))*((z-self.Z_m)/factor1+(z+self.Z_m)/factor2)
        # BZ = (1/R)dpsi/dR simplifies to 2*psi/R^2 since psi ~ R^2.
        Bz = self._psi(r, z)*2/r/r
        B[:, 0] = Br * np.cos(phi)
        B[:, 1] = Br * np.sin(phi)
        B[:, 2] = Bz

    def _dB_by_dX_impl(self, dB):
        # Analytic gradient of B, assembled in cylindrical coordinates and
        # rotated into Cartesian via the chain rule.
        points = self.get_points_cart_ref()
        r = np.sqrt(np.square(points[:, 0]) + np.square(points[:, 1]))
        z = points[:, 2]
        phi = np.arctan2(points[:, 1], points[:, 0])

        factor1 = (1+((z-self.Z_m)/(self.gamma))**2)**2
        factor2 = (1+((z+self.Z_m)/(self.gamma))**2)**2
        Br = (r*self.B0/(np.pi*self.gamma**3))*((z-self.Z_m)/factor1+(z+self.Z_m)/factor2)
        # Bz = self._psi(r,z)*2/r/r
        # Br is linear in r, so dBr/dr = Br/r; dBz/dz = -2 dBr/dr follows
        # from div(B) = 0 in cylindrical coordinates.
        dBrdr = (self.B0/(np.pi*self.gamma**3))*((z-self.Z_m)/factor1+(z+self.Z_m)/factor2)
        dBzdz = -2*dBrdr
        dBrdz = (self.B0*r/(np.pi*self.gamma**3))*(1/factor1+1/factor2
                                                   - 4*self.gamma**4*((z-self.Z_m)**2/((z-self.Z_m)**2+self.gamma**2)**3+(z+self.Z_m)**2/((z+self.Z_m)**2+self.gamma**2)**3))
        cosphi = np.cos(phi)
        sinphi = np.sin(phi)
        # Derivatives of cos(phi), sin(phi) and r with respect to x and y.
        dcosphidx = -points[:, 0]**2/r**3 + 1/r
        dsinphidx = -points[:, 0]*points[:, 1]/r**3
        dcosphidy = -points[:, 0]*points[:, 1]/r**3
        dsinphidy = -points[:, 1]**2/r**3 + 1/r
        drdx = points[:, 0]/r
        drdy = points[:, 1]/r
        dBxdx = dBrdr*drdx*cosphi + Br*dcosphidx
        dBxdy = dBrdr*drdy*cosphi + Br*dcosphidy
        dBxdz = dBrdz*cosphi
        dBydx = dBrdr*drdx*sinphi + Br*dsinphidx
        dBydy = dBrdr*drdy*sinphi + Br*dsinphidy
        dBydz = dBrdz*sinphi

        # NOTE(review): index convention here appears to be
        # dB[:, j, i] = dB_i/dx_j — confirm against MagneticField.dB_by_dX.
        dB[:, 0, 0] = dBxdx
        dB[:, 1, 0] = dBxdy
        dB[:, 2, 0] = dBxdz
        dB[:, 0, 1] = dBydx
        dB[:, 1, 1] = dBydy
        dB[:, 2, 1] = dBydz
        # Bz depends only on z, so its x and y derivatives vanish.
        dB[:, 0, 2] = 0
        dB[:, 1, 2] = 0
        dB[:, 2, 2] = dBzdz

    def as_dict(self, serial_objs_dict) -> dict:
        # Serialize via the GSONable machinery, additionally storing the
        # currently set evaluation points.
        d = super().as_dict(serial_objs_dict=serial_objs_dict)
        d["points"] = self.get_points_cart()
        return d

    @classmethod
    def from_dict(cls, d, serial_objs_dict, recon_objs):
        # Rebuild the field from its three parameters and restore the
        # evaluation points saved by as_dict.
        field = cls(d["B0"], d["gamma"], d["Z_m"])
        decoder = GSONDecoder()
        xyz = decoder.process_decoded(d["points"], serial_objs_dict, recon_objs)
        field.set_points_cart(xyz)
        return field
+ + Args: + nr: number of radial points + nz: number of axial points + nphi: number of azimuthal points, in one field period + nfp: number of field periods + rmin: minimum r grid point + rmax: maximum r grid point + zmin: minimum z grid point + zmax: maximum z grid point + + Note the (r,z) dimensions include both end points. The (phi) dimension includes the start point and excludes the end point. + ''' + + def __init__(self, # fname='temp', #binary=False, + nr: int = 51, + nz: int = 51, + nphi: int = 24, + nfp: int = 2, + rmin: float = 0.20, + rmax: float = 0.40, + zmin: float = -0.10, + zmax: float = 0.10, + ): + + self.nr = nr + self.nz = nz + self.nphi = nphi + self.nfp = nfp + + self.rmin = rmin + self.rmax = rmax + self.zmin = zmin + self.zmax = zmax + + self.n_ext_cur = 0 + self.coil_names = [] + + self.br_arr = [] + self.bz_arr = [] + self.bp_arr = [] + + self.ar_arr = [] + self.az_arr = [] + self.ap_arr = [] + + def add_field_cylindrical(self, br, bp, bz, ar=None, ap=None, az=None, name=None): + ''' + This function saves the vector field B, and (optionally) the vector potential A, to the Mgrid object. + B and A are provided on a tensor product grid in cylindrical components. + + The Mgrid array assumes B is sampled linearly first in r, then z, and last phi. + Python arrays use the opposite convention such that B[0] gives a (r,z) square at const phi + and B[0,0] gives a radial line and const phi and z. + + It is assumed that each of the inputs ``br``, ``bp``, and ``bz`` for this function are already + arrays of shape ``(nphi, nz, nr)``. The same is true for ``ar``, ``ap``, and ``az`` if they are provided. + + This function may be called once for each coil group, + to save sets of fields that can be scaled using EXTCUR in VMEC. 
+
+        Args:
+            br: the radial component of B field
+            bp: the azimuthal component of B field
+            bz: the axial component of B field
+            ar: the radial component of the vector potential A
+            ap: the azimuthal component of the vector potential A
+            az: the axial component of the vector potential A
+            name: Name of the coil group
+        '''
+
+        # appending B field to an array for all coil groups.
+        self.br_arr.append(br)
+        self.bz_arr.append(bz)
+        self.bp_arr.append(bp)
+
+        # add potential: each component goes into its own list (guard and
+        # append must refer to the same component)
+        if ar is not None:
+            self.ar_arr.append(ar)
+        if ap is not None:
+            self.ap_arr.append(ap)
+        if az is not None:
+            self.az_arr.append(az)
+
+        # add coil label
+        if (name is None):
+            label = _pad_string('magnet_%i' % self.n_ext_cur)
+        else:
+            label = _pad_string(name)
+        self.coil_names.append(label)
+        self.n_ext_cur = self.n_ext_cur + 1
+
+
+    # TO-DO: this function could check for size consistency, between different fields, and for the (nr,nphi,nz) settings of a given instance
+
+    def write(self, filename):
+        '''
+        Export class data as a netCDF binary.
+
+        The field data is represented as a single "current group". For
+        free-boundary vmec, the "extcur" array should have a single nonzero
+        element, set to 1.0. 
+ + Args: + filename: output file name + ''' + + with netcdf_file(filename, 'w', mmap=False) as ds: + + # set netcdf dimensions + ds.createDimension('stringsize', 30) + ds.createDimension('dim_00001', 1) + ds.createDimension('external_coil_groups', self.n_ext_cur) + ds.createDimension('external_coils', self.n_ext_cur) + ds.createDimension('rad', self.nr) + ds.createDimension('zee', self.nz) + ds.createDimension('phi', self.nphi) + + # declare netcdf variables + var_ir = ds.createVariable('ir', 'i4', tuple()) + var_jz = ds.createVariable('jz', 'i4', tuple()) + var_kp = ds.createVariable('kp', 'i4', tuple()) + var_nfp = ds.createVariable('nfp', 'i4', tuple()) + var_nextcur = ds.createVariable('nextcur', 'i4', tuple()) + + var_rmin = ds.createVariable('rmin', 'f8', tuple()) + var_zmin = ds.createVariable('zmin', 'f8', tuple()) + var_rmax = ds.createVariable('rmax', 'f8', tuple()) + var_zmax = ds.createVariable('zmax', 'f8', tuple()) + + if self.n_ext_cur == 1: + var_coil_group = ds.createVariable('coil_group', 'c', ('stringsize',)) + var_coil_group[:] = self.coil_names[0] + else: + var_coil_group = ds.createVariable('coil_group', 'c', ('external_coil_groups', 'stringsize',)) + var_coil_group[:] = self.coil_names + var_mgrid_mode = ds.createVariable('mgrid_mode', 'c', ('dim_00001',)) + var_raw_coil_cur = ds.createVariable('raw_coil_cur', 'f8', ('external_coils',)) + + # assign values + var_ir.data[()] = self.nr + var_jz.data[()] = self.nz + var_kp.data[()] = self.nphi + var_nfp.data[()] = self.nfp + var_nextcur.data[()] = self.n_ext_cur + + var_rmin.data[()] = self.rmin + var_zmin.data[()] = self.zmin + var_rmax.data[()] = self.rmax + var_zmax.data[()] = self.zmax + + var_mgrid_mode[:] = 'N' # R - Raw, S - scaled, N - none (old version) + var_raw_coil_cur[:] = np.ones(self.n_ext_cur) + + # add fields + for j in np.arange(self.n_ext_cur): + + tag = '_%.3i' % (j+1) + var_br_001 = ds.createVariable('br'+tag, 'f8', ('phi', 'zee', 'rad')) + var_bp_001 = 
ds.createVariable('bp'+tag, 'f8', ('phi', 'zee', 'rad')) + var_bz_001 = ds.createVariable('bz'+tag, 'f8', ('phi', 'zee', 'rad')) + + var_br_001[:, :, :] = self.br_arr[j] + var_bz_001[:, :, :] = self.bz_arr[j] + var_bp_001[:, :, :] = self.bp_arr[j] + + #If the potential value is not an empty cell, then include it + if len(self.ar_arr) > 0 : + var_ar_001 = ds.createVariable('ar'+tag, 'f8', ('phi', 'zee', 'rad')) + var_ar_001[:, :, :] = self.ar_arr[j] + if len(self.ap_arr) > 0 : + var_ap_001 = ds.createVariable('ap'+tag, 'f8', ('phi', 'zee', 'rad')) + var_ap_001[:, :, :] = self.ap_arr[j] + if len(self.az_arr) > 0 : + var_az_001 = ds.createVariable('az'+tag, 'f8', ('phi', 'zee', 'rad')) + var_az_001[:, :, :] = self.az_arr[j] + + @classmethod + def from_file(cls, filename): + ''' + This method reads MGrid data from file. + + Args: + filename: mgrid netCDF input file name + ''' + + with netcdf_file(filename, 'r', mmap=False) as f: + + # load grid + nr = f.variables['ir'].getValue() + nphi = f.variables['kp'].getValue() + nz = f.variables['jz'].getValue() + rmin = f.variables['rmin'].getValue() + rmax = f.variables['rmax'].getValue() + zmin = f.variables['zmin'].getValue() + zmax = f.variables['zmax'].getValue() + kwargs = {"nr": nr, "nphi": nphi, "nz": nz, + "rmin": rmin, "rmax": rmax, "zmin": zmin, "zmax": zmax} + + mgrid = cls(**kwargs) + mgrid.filename = filename + mgrid.n_ext_cur = int(f.variables['nextcur'].getValue()) + coil_data = f.variables['coil_group'][:] + if len(f.variables['coil_group'].dimensions) == 2: + mgrid.coil_names = [_unpack(coil_data[j]) for j in range(mgrid.n_ext_cur)] + else: + mgrid.coil_names = [_unpack(coil_data)] + + mgrid.mode = f.variables['mgrid_mode'][:][0].decode() + mgrid.raw_coil_current = np.array(f.variables['raw_coil_cur'][:]) + + br_arr = [] + bp_arr = [] + bz_arr = [] + + ar_arr = [] + ap_arr = [] + az_arr = [] + + ar = [] + ap = [] + az = [] + + nextcur = mgrid.n_ext_cur + for j in range(nextcur): + idx = '{:03d}'.format(j+1) + 
br = f.variables['br_'+idx][:] # phi z r + bp = f.variables['bp_'+idx][:] + bz = f.variables['bz_'+idx][:] + + if (mgrid.mode == 'S'): + br_arr.append(br * mgrid.raw_coil_current[j]) + bp_arr.append(bp * mgrid.raw_coil_current[j]) + bz_arr.append(bz * mgrid.raw_coil_current[j]) + else: + br_arr.append(br) + bp_arr.append(bp) + bz_arr.append(bz) + + #check for potential in mgrid file + if 'ar_'+idx in f.variables: + ar = f.variables['ar_'+idx][:] + if mgrid.mode == 'S': + ar_arr.append(ar * mgrid.raw_coil_current[j]) + else: + ar_arr.append(ar) + + if 'ap_'+idx in f.variables: + ap = f.variables['ap_'+idx][:] + if mgrid.mode == 'S': + ap_arr.append(ap * mgrid.raw_coil_current[j]) + else: + ap_arr.append(ap) + + if 'az_'+idx in f.variables: + az = f.variables['az_'+idx][:] + if mgrid.mode == 'S': + az_arr.append(az * mgrid.raw_coil_current[j]) + else: + az_arr.append(az) + + mgrid.br_arr = np.array(br_arr) + mgrid.bp_arr = np.array(bp_arr) + mgrid.bz_arr = np.array(bz_arr) + + mgrid.ar_arr = np.array(ar_arr) + mgrid.ap_arr = np.array(ap_arr) + mgrid.az_arr = np.array(az_arr) + + # sum over coil groups + if nextcur > 1: + br = np.sum(br_arr, axis=0) + bp = np.sum(bp_arr, axis=0) + bz = np.sum(bz_arr, axis=0) + + if len(mgrid.ar_arr) > 0: + ar = np.sum(ar_arr, axis=0) + if len(mgrid.ap_arr) > 0: + ap = np.sum(ap_arr, axis=0) + if len(mgrid.az_arr) > 0: + az = np.sum(az_arr, axis=0) + else: + br = br_arr[0] + bp = bp_arr[0] + bz = bz_arr[0] + if len(mgrid.ar_arr) > 0: + ar = ar_arr[0] + if len(mgrid.ap_arr) > 0: + ap = ap_arr[0] + if len(mgrid.az_arr) > 0: + az = az_arr[0] + + mgrid.br = br + mgrid.bp = bp + mgrid.bz = bz + + mgrid.ar = ar + mgrid.ap = ap + mgrid.az = az + + mgrid.bvec = np.transpose([br, bp, bz]) + + return mgrid + + def plot(self, jphi=0, bscale=0, show=True): + ''' + Creates a plot of the mgrid data. + Shows the three components (br,bphi,bz) in a 2D plot for a fixed toroidal plane. + + Args: + jphi: integer index for a toroidal slice. 
+ bscale: sets saturation scale for colorbar. (This is useful, because + the mgrid domain often includes coil currents, and arbitrarily + close to the current source the Bfield values become singular.) + show: Whether to call matplotlib's ``show()`` function. + + Returns: + 2-element tuple ``(fig, axs)`` with the Matplotlib figure and axes handles. + ''' + + import matplotlib.pyplot as plt + fig, axs = plt.subplots(3, 1, figsize=(5, 10)) + + rax = np.linspace(self.rmin, self.rmax, self.nr) + zax = np.linspace(self.zmin, self.zmax, self.nz) + + def subplot_slice(s, data, rax, zax, bscale=0.3, tag=''): + + if bscale > 0: + cf = axs[s].contourf(rax, zax, data, np.linspace(-bscale, bscale, 11), extend='both') + else: + cf = axs[s].contourf(rax, zax, data) + axs[s].plot([], [], '.', label=tag) + axs[s].legend() + fig.colorbar(cf, ax=axs[s]) + + subplot_slice(np.s_[0], self.br[jphi], rax, zax, tag='br', bscale=bscale) + subplot_slice(np.s_[1], self.bp[jphi], rax, zax, tag='bp', bscale=bscale) + subplot_slice(np.s_[2], self.bz[jphi], rax, zax, tag='bz', bscale=bscale) + + axs[1].set_title(f"nextcur = {self.n_ext_cur}, mode {self.mode}", fontsize=10) + axs[2].set_title(f"nr,np,nz = ({self.nr},{self.nphi},{self.nz})", fontsize=10) + + if (show): plt.show() + + return fig, axs diff --git a/src/simsopt/field/normal_field.py b/src/simsopt/field/normal_field.py new file mode 100644 index 000000000..8c14d3398 --- /dev/null +++ b/src/simsopt/field/normal_field.py @@ -0,0 +1,289 @@ +import logging + +import numpy as np + +from .._core.optimizable import DOFs, Optimizable + +logger = logging.getLogger(__name__) + +try: + import py_spec +except ImportError as e: + py_spec = None + logger.debug(str(e)) + +__all__ = ['NormalField'] + + +class NormalField(Optimizable): + r""" + ``NormalField`` represents the magnetic field normal to a toroidal surface, for example the + computational boundary of SPEC free-boundary. 
+ + Args: + nfp: The number of field period + stellsym: Whether (=True) or not (=False) stellarator symmetry is enforced. + mpol: Poloidal Fourier resolution + ntor: Toroidal Fourier resolution + vns: Odd fourier modes of :math:`\mathbf{B}\cdot\mathbf{\hat{n}}`. 2D array of size + (mpol+1)x(2ntor+1). Set to None to fill with zeros + + vns( mm, self.ntor+nn ) is the mode (mm,nn) + + vnc: Even fourier modes of :math:`\mathbf{B}\cdot\mathbf{\hat{n}}`. 2D array of size + (mpol+1)x(2ntor+1). Ignored if stellsym if True. Set to None to fill with zeros + + vnc( mm, self.ntor+nn ) is the mode (mm,nn) + """ + + def __init__(self, nfp=1, stellsym=True, mpol=1, ntor=0, + vns=None, vnc=None): + + self.nfp = nfp + self.stellsym = stellsym + self.mpol = mpol + self.ntor = ntor + + if vns is None: + vns = np.zeros((self.mpol + 1, 2 * self.ntor + 1)) + + if vnc is None and not stellsym: + vnc = np.zeros((self.mpol + 1, 2 * self.ntor + 1)) + + if self.stellsym: + self.ndof = self.ntor + self.mpol * (2 * self.ntor + 1) + else: + self.ndof = 2 * (self.ntor + self.mpol * (2 * self.ntor + 1)) + 1 + + # Pack in a single array + dofs = np.zeros((self.ndof,)) + + # Populate dofs array + vns_shape = vns.shape + input_mpol = int(vns_shape[0]-1) + input_ntor = int((vns_shape[1]-1)/2) + for mm in range(0, self.mpol+1): + for nn in range(-self.ntor, self.ntor+1): + if mm == 0 and nn < 0: continue + if mm > input_mpol: continue + if nn > input_ntor: continue + + if not (mm == 0 and nn == 0): + ii = self.get_index_in_dofs(mm, nn, even=False) + dofs[ii] = vns[mm, input_ntor+nn] + + if not self.stellsym: + ii = self.get_index_in_dofs(mm, nn, even=True) + dofs[ii] = vnc[mm, input_ntor+nn] + + Optimizable.__init__( + self, + x0=dofs, + names=self._make_names()) + + @classmethod + def from_spec(cls, filename): + """ + Initialize using the harmonics in SPEC input file + """ + + # Test if py_spec is available + if py_spec is None: + raise RuntimeError( + "Initialization from Spec requires py_spec to be 
installed.") + + # Read Namelist + nm = py_spec.SPECNamelist(filename) + ph = nm['physicslist'] + + # Read modes from SPEC input file + vns = np.asarray(ph['vns']) + if ph['istellsym']: + vnc = None + else: + vnc = np.asarray(ph['vnc']) + + normal_field = cls( + nfp=ph['nfp'], + stellsym=bool(ph['istellsym']), + mpol=ph['Mpol'], + ntor=ph['Ntor'], + vns=vns, + vnc=vnc + ) + + return normal_field + + def get_index_in_dofs(self, m, n, mpol=None, ntor=None, even=False): + """ + Returns position of mode (m,n) in dofs array + + Args: + - m: poloidal mode number + - n: toroidal mode number (normalized by Nfp) + - mpol: resolution of dofs array. If None (by default), use self.mpol + - ntor: resolution of dofs array. If None (by default), use self.ntor + - even: set to True to get vnc. Default is False + """ + + if mpol is None: + mpol = self.mpol + if ntor is None: + ntor = self.ntor + + if m < 0 or m > mpol: + raise ValueError('m out of bound') + if abs(n) > ntor: + raise ValueError('n out of bound') + if m == 0 and n < 0: + raise ValueError('n has to be positive if m==0') + if not even and m == 0 and n == 0: + raise ValueError('m=n=0 not supported for odd series') + + ii = -1 + if m == 0: + ii = n + else: + ii = m * (2*ntor+1) + n + + nvns = ntor + mpol * (ntor * 2 + 1) + if not even: # Vns + ii = ii - 1 # remove (0,0) element + else: # Vnc + ii = ii + nvns + + return ii + + def get_vns(self, m, n): + self.check_mn(m, n) + ii = self.get_index_in_dofs(m, n) + return self.local_full_x[ii] + + def set_vns(self, m, n, value): + self.check_mn(m, n) + ii = self.get_index_in_dofs(m, n) + self.local_full_x[ii] = value + + def get_vnc(self, m, n): + self.check_mn(m, n) + ii = self.get_index_in_dofs(m, n, even=True) + if self.stellsym: + return 0.0 + else: + return self.local_full_x[ii] + + def set_vnc(self, m, n, value): + self.check_mn(m, n) + ii = self.get_index_in_dofs(m, n, even=True) + if self.stellsym: + raise ValueError('Stellarator symmetric has no vnc') + else: + 
self.local_full_x[ii] = value + + def check_mn(self, m, n): + if m < 0 or m > self.mpol: + raise ValueError('m out of bound') + if n < -self.ntor or n > self.ntor: + raise ValueError('n out of bound') + if m == 0 and n < 0: + raise ValueError('n has to be positive if m==0') + + def _make_names(self): + """ + Form a list of names of the ``vns``, ``vnc`` + """ + if self.stellsym: + names = self._make_names_helper(False) + else: + names = np.append(self._make_names_helper(False), + self._make_names_helper(True)) + + return names + + def _make_names_helper(self, even=False): + names = [] + indices = [] + + if even: + prefix = 'vnc' + else: + prefix = 'vns' + + for mm in range(0, self.mpol+1): + for nn in range(-self.ntor, self.ntor+1): + if mm == 0 and nn < 0: + continue + if not even and mm == 0 and nn == 0: + continue + + ind = self.get_index_in_dofs(mm, nn, even=even) + names.append(prefix + '({m},{n})'.format(m=mm, n=nn)) + indices.append(ind) + + # Sort names + ind = np.argsort(np.asarray(indices)) + sorted_names = [names[ii] for ii in ind] + + return sorted_names + + def change_resolution(self, mpol, ntor): + """ + Change the values of `mpol` and `ntor`. Any new Fourier amplitudes + will have a magnitude of zero. Any previous nonzero Fourier + amplitudes that are not within the new range will be + discarded. 
+ """ + + # Set new number of dofs + if self.stellsym: + ndof = ntor + mpol * (2 * ntor + 1) # Only Vns - odd series + else: + ndof = 2 * (ntor + mpol * (2 * ntor + 1)) + 1 # Vns and Vns + + # Fill relevant modes + min_mpol = np.min((mpol, self.mpol)) + min_ntor = np.min((ntor, self.ntor)) + + dofs = np.zeros((ndof,)) + for m in range(min_mpol + 1): + for n in range(-min_ntor, min_ntor + 1): + if m == 0 and n < 0: continue + + if m > 0 or n > 0: + ind = self.get_index_in_dofs(m, n, mpol=mpol, ntor=ntor, even=False) + dofs[ind] = self.get_vns(m, n) + + if not self.stellsym: + ind = self.get_index_in_dofs(m, n, mpol=mpol, ntor=ntor, even=True) + dofs[ind] = self.get_vnc(m, n) + + # Update attributes + self.mpol = mpol + self.ntor = ntor + self.ndof = ndof + self._dofs = DOFs(dofs, self._make_names()) + + def fixed_range(self, mmin, mmax, nmin, nmax, fixed=True): + """ + Set the 'fixed' property for a range of `m` and `n` values. + + All modes with `m` in the interval [`mmin`, `mmax`] and `n` in the + interval [`nmin`, `nmax`] will have their fixed property set to + the value of the `fixed` parameter. Note that `mmax` and `nmax` + are included (unlike the upper bound in python's range(min, + max).) + + In case of non stellarator symmetric field, both vns and vnc will be + fixed (or unfixed) + """ + + fn = self.fix if fixed else self.unfix + for m in range(mmin, mmax + 1): + this_nmin = nmin + if m == 0 and nmin < 0: + this_nmin = 0 + for n in range(this_nmin, nmax + 1): + if m > 0 or n != 0: + fn(f'vns({m},{n})') + if not self.stellsym: + fn(f'vnc({m},{n})') diff --git a/src/simsopt/util/logger.py b/src/simsopt/util/logger.py index ab47e538c..d5934de1f 100644 --- a/src/simsopt/util/logger.py +++ b/src/simsopt/util/logger.py @@ -1,6 +1,6 @@ # coding: utf-8 # Copyright (c) HiddenSymmetries Development Team. 
-# Distributed under the terms of the LGPL License +# Distributed under the terms of the MIT License __all__ = ['initialize_logging'] @@ -11,7 +11,6 @@ from ruamel.yaml import YAML try: from mpi4py import MPI - # from .mpi_logger import MPILogHandler except: MPI = None diff --git a/src/simsopt/util/mpi.py b/src/simsopt/util/mpi.py index a893d9ee9..dbcc5df9c 100644 --- a/src/simsopt/util/mpi.py +++ b/src/simsopt/util/mpi.py @@ -1,6 +1,6 @@ # coding: utf-8 # Copyright (c) HiddenSymmetries Development Team. -# Distributed under the terms of the LGPL License +# Distributed under the terms of the MIT License """ This module contains the :class:`~simsopt.util.mpi.MpiPartition` class. @@ -11,18 +11,19 @@ __all__ = ['log', 'MpiPartition'] import logging -from typing import Union import numpy as np from .._core.dev import SimsoptRequires try: from mpi4py import MPI + comm_world = MPI.COMM_WORLD except ImportError: MPI = None + comm_world = None STOP = 0 -__all__ = ['log', 'MpiPartition'] +__all__ = ['log', 'MpiPartition', 'proc0_print', 'comm_world'] def log(level: int = logging.INFO): @@ -43,6 +44,18 @@ def log(level: int = logging.INFO): logger = logging.getLogger(__name__) +""" +Print only on MPI process 0. This function works also if MPI is not found. +""" + + +def proc0_print(*args, **kwargs): + if MPI is None: + print(*args, **kwargs) + else: + if MPI.COMM_WORLD.rank == 0: + print(*args, **kwargs) + @SimsoptRequires(MPI is not None, "mpi4py is not installed") class MpiPartition: @@ -77,12 +90,8 @@ class MpiPartition: """ def __init__(self, - # ngroups: Union[None, int] = None, - # comm_world: Union[MPI.Intracomm, None] = MPI.COMM_WORLD): ngroups=None, comm_world=None): - # if MPI is None: - # raise RuntimeError("MpiPartition class requires the mpi4py package.") self.is_apart = False self.comm_world = comm_world if comm_world is not None else MPI.COMM_WORLD