Merged
27 changes: 26 additions & 1 deletion .vscode/launch.json
@@ -56,6 +56,31 @@
"console": "integratedTerminal",
"justMyCode": true
},
{
"name": "Apply attribution to projects in bulk",
"type": "debugpy",
"request": "launch",
"program": "${workspaceFolder}/scripts/utility/project_attribution/apply_attribution.py",
"cwd": "${workspaceFolder}",
"console": "integratedTerminal",
"envFile": "${workspaceFolder}/.env",
"env": {
"PYTHONPATH": "${workspaceFolder}"
},
"args": [
"--stage",
"production", // staging or production
"--mode",
"ADD", // ADD, REPLACE or REMOVE
"--csv-folder",
"{env:CSV_FOLDER}",
"--organization",
"f1b8e6ae-d103-4ffb-b402-a9ee0eaf7607", //NAR on production
"--roles", // OWNER DESIGNER CO_FUNDER etc.
"CO_FUNDER",
"--verbose"
]
},
{
"name": "📦 Merge Projects Tool",
"type": "debugpy",
@@ -179,7 +204,7 @@
"PYTHONPATH": "${workspaceFolder}"
},
"args": [
"production",
"staging",
"{env:CSV_FOLDER}",
]
},
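For orientation, a minimal argparse sketch of the flags the "Apply attribution to projects in bulk" configuration above passes to apply_attribution.py. The script itself is not part of this diff, so the parser below is an assumption reconstructed from the launch arguments, not its actual interface:

import argparse

# Hypothetical parser mirroring the launch.json args above; the real script may differ.
parser = argparse.ArgumentParser(description='Apply attribution to projects in bulk')
parser.add_argument('--stage', choices=['staging', 'production'])
parser.add_argument('--mode', choices=['ADD', 'REPLACE', 'REMOVE'])
parser.add_argument('--csv-folder', help='Folder of CSVs, e.g. supplied via the CSV_FOLDER env var')
parser.add_argument('--organization', help='Organization GUID to attribute')
parser.add_argument('--roles', nargs='+', help='e.g. OWNER, DESIGNER, CO_FUNDER')
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()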
4 changes: 4 additions & 0 deletions README.md
@@ -6,6 +6,10 @@

This project is designed to simplify interaction with the Riverscapes GraphQL API. It uses modern Python packaging standards, including a `pyproject.toml` file for configuration and dependency management.

### Data Exchange API (GraphQL definitions)

This project includes a static local copy of the Riverscapes Data Exchange API GraphQL definitions. We do this because we've had trouble getting linter/VSCode introspection to work with the online version, but it means this code needs to be updated if the API changes.
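As an aside (not part of the README change itself), the local schema copy can be loaded and parsed with graphql-core; the path below matches the default used by the generator script added in this PR:

from pathlib import Path
from graphql import build_schema  # graphql-core

# Build a GraphQLSchema object from the static local copy of the schema
schema_text = Path('pydex/graphql/riverscapes.schema.graphql').read_text(encoding='utf-8')
schema = build_schema(schema_text)
print(f'{len(schema.type_map)} types in the local schema copy')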

## Using UV for Environment Management

This project uses [uv](https://github.com/astral-sh/uv) to manage Python virtual environments and dependencies. `uv` is an alternative to tools like `pipenv` and `poetry`.
29 changes: 17 additions & 12 deletions pydex/classes/RiverscapesAPI.py
@@ -1,4 +1,5 @@
import os
from pathlib import Path
from typing import Dict, List, Generator, Tuple
import webbrowser
import re
@@ -325,17 +326,21 @@ def load_query(self, query_name: str) -> str:
with open(os.path.join(os.path.dirname(__file__), '..', 'graphql', 'queries', f'{query_name}.graphql'), 'r', encoding='utf-8') as queryFile:
return queryFile.read()

def load_mutation(self, mutation_name: str) -> str:
""" Load a mutation file from the file system.
def load_mutation(self, mutation_name: str | Path) -> str:
""" Load a mutation file from the file system graphql/mutations folder or from a specific path.

Args:
mutationName (str): _description_
mutation_name (str | Path): name of a mutation in the library, or a Path to a .graphql file

Returns:
str: _description_
str: the contents of the file
"""
with open(os.path.join(os.path.dirname(__file__), '..', 'graphql', 'mutations', f'{mutation_name}.graphql'), 'r', encoding='utf-8') as queryFile:
return queryFile.read()
if Path(mutation_name).exists():
mutation_file_path = Path(mutation_name)
else:
mutation_file_path = Path(__file__).parent.parent / 'graphql' / 'mutations' / f'{mutation_name}.graphql'

return mutation_file_path.read_text(encoding='utf-8')

def search(self, search_params: RiverscapesSearchParams, progress_bar: bool = False, page_size: int = 500, sort: List[str] = None, max_results: int = None, search_query_name: str = None) -> Generator[Tuple[RiverscapesProject, Dict, int], None, None]:
""" A simple function to make a yielded search on the riverscapes API
@@ -542,18 +547,18 @@ def search_count(self, search_params: RiverscapesSearchParams):
stats = results['data']['searchProjects']['stats']
return (total, stats)

def run_query(self, query, variables):
""" A simple function to use requests.post to make the API call. Note the json= section.
def run_query(self, query: str, variables: dict) -> dict:
"""A simple function to use requests.post to make the API call. Note the json= section.

Args:
query (_type_): _description_
variables (_type_): _description_
query (str): GraphQL query string
variables (dict): mapping variable names to values

Raises:
Exception: _description_
Exception: RiverscapesAPIException

Returns:
_type_: _description_
dict: parsed JSON response from the API
"""
headers = {"authorization": "Bearer " + self.access_token} if self.access_token else {}
request = requests.post(self.uri, json={
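For context, a minimal usage sketch of the two methods changed above (load_mutation and run_query). The constructor arguments, mutation name, and variables below are placeholders for illustration, not values confirmed by this diff:

from pathlib import Path
from pydex.classes.RiverscapesAPI import RiverscapesAPI

api = RiverscapesAPI(stage='staging')  # constructor signature assumed for illustration

# Load a mutation by library name, or point at any .graphql file on disk
mutation = api.load_mutation('updateProject')                # hypothetical library mutation
# mutation = api.load_mutation(Path('/tmp/custom.graphql'))  # an explicit path also works now

# run_query returns the parsed JSON response as a dict
result = api.run_query(mutation, {'projectId': 'some-guid'})  # variables are placeholders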
143 changes: 143 additions & 0 deletions pydex/generate_python_classes_from_graphql_api.py
@@ -0,0 +1,143 @@
"""
Generate Python TypedDict definitions from a GraphQL schema.

This script parses the project's GraphQL schema file (default: 'pydex/graphql/riverscapes.schema.graphql',
overridable with --schema) and generates Python `TypedDict` classes for all input objects and enums.
This allows for type-safe construction of GraphQL mutation payloads.

Quickly built with copilot/gemini 3 pro (preview) 2026-01-27 by Lorin
NOTE: If we want to go deeper, there are established libraries for this:
* ariadne https://github.com/mirumee/ariadne-codegen/
* https://github.com/sauldom102/gql_schema_codegen
e.g. could add types, could set total=True if all fields are required
"""

import argparse
from pathlib import Path

from graphql import (
EnumTypeDefinitionNode,
InputObjectTypeDefinitionNode,
ListTypeNode,
NamedTypeNode,
NonNullTypeNode,
TypeNode,
parse,
)


def get_python_type(type_node: TypeNode) -> str:
"""
Recursively resolve GraphQL types to modern Python type strings.

Args:
type_node: The GraphQL AST node representing the type.

Returns:
A string representing the Python type (e.g., 'list[str]', 'int').
"""
if isinstance(type_node, NonNullTypeNode):
return get_python_type(type_node.type)

if isinstance(type_node, ListTypeNode):
inner_type = get_python_type(type_node.type)
return f"list[{inner_type}]"

if isinstance(type_node, NamedTypeNode):
name = type_node.name.value
mapping = {
'String': 'str',
'ID': 'str',
'Boolean': 'bool',
'Int': 'int',
'Float': 'float'
}
# Use quotes for forward references to other classes
return mapping.get(name, f"'{name}'")

return "Any"


def generate_types(schema_path: Path, output_path: Path) -> None:
"""
Parse the schema and write Python TypedDict definitions to a file.

Args:
schema_path: Path to the .graphql schema file.
output_path: Path to the output .py file.
"""
if not schema_path.exists():
print(f"Error: Schema file not found at {schema_path}")
return

print(f"Reading schema from: {schema_path}")
print(f"Writing types to: {output_path}")

with open(schema_path, 'r', encoding='utf-8') as f:
schema_content = f.read()

doc = parse(schema_content)

# Ensure output directory exists
output_path.parent.mkdir(parents=True, exist_ok=True)

with open(output_path, 'w', encoding='utf-8') as f:
f.write(f'"""\nGenerated from {schema_path.name} using {Path(__file__).name}\n"""\n')
f.write("from typing import TypedDict\n")
f.write("from enum import Enum\n\n\n")

enum_count = 0
input_count = 0

# Pass 1: generate Enums
for definition in doc.definitions:
if isinstance(definition, EnumTypeDefinitionNode):
enum_count += 1
name = definition.name.value
f.write(f"class {name}(str, Enum):\n")
if not definition.values:
f.write(" pass\n\n")
continue

for value_def in definition.values:
val = value_def.name.value
# Handle Python reserved keywords or invalid identifiers if necessary
# For now assume schema values are safe or valid python identifiers
f.write(f" {val} = '{val}'\n")
f.write("\n")

# Pass 2: generate Input Objects
for definition in doc.definitions:
# We focus on Input types as they are critical for constructing mutation payloads
if isinstance(definition, InputObjectTypeDefinitionNode):
input_count += 1
name = definition.name.value
f.write(f"class {name}(TypedDict, total=False):\n")

if not definition.fields:
f.write(" pass\n\n")
continue

for field in definition.fields:
field_name = field.name.value
python_type = get_python_type(field.type)
f.write(f" {field_name}: {python_type}\n")
f.write("\n")

print(f"Successfully generated {enum_count} Enums and {input_count} Input Types.")


if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate Python TypedDicts from GraphQL Schema")

default_schema = Path("pydex/graphql/riverscapes.schema.graphql")
default_output = Path("pydex/generated_types.py")

parser.add_argument('--schema', type=Path, default=default_schema,
help='Path to riverscapes.schema.graphql')
parser.add_argument('--output', type=Path, default=default_output,
help='Path to output .py file')

args = parser.parse_args()
generate_types(args.schema, args.output)
print('DONE.')
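To make the generator's output concrete, this is roughly what it would write for a hypothetical schema containing an enum ProjectVisibility with values PUBLIC and SECRET, and an input object ProjectInput with fields name: String!, visibility: ProjectVisibility, and tags: [String!] (type names invented for illustration):

from typing import TypedDict
from enum import Enum


class ProjectVisibility(str, Enum):
    PUBLIC = 'PUBLIC'
    SECRET = 'SECRET'

class ProjectInput(TypedDict, total=False):
    name: str
    visibility: 'ProjectVisibility'
    tags: list[str]

Because the TypedDicts are generated with total=False, every field is optional to the type checker, which suits partially populated mutation payloads.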