
Runner API

Overview

The JSL Runner API provides the core execution engine for JSL programs, handling evaluation, environment management, and host interaction coordination.

Core Classes

JSL Runner - High-level execution interface

This module provides the JSLRunner class and related utilities for executing JSL programs with advanced features like environment management, host interaction, and performance monitoring.

JSLRunner(config=None, security=None, resource_limits=None, host_gas_policy=None, use_recursive_evaluator=False)

High-level JSL execution engine with advanced features.

Initialize JSL runner.

Parameters:

  config (Optional[Dict[str, Any]], default None)
      Configuration options (recursion depth, debugging, etc.)

  security (Optional[Dict[str, Any]], default None)
      Security settings (allowed commands, sandbox mode, etc.)

  resource_limits (Optional[ResourceLimits], default None)
      Resource limits for execution

  host_gas_policy (Optional[HostGasPolicy], default None)
      Gas cost policy for host operations

  use_recursive_evaluator (bool, default False)
      If True, use the recursive evaluator instead of the stack evaluator
Source code in jsl/runner.py
def __init__(self, config: Optional[Dict[str, Any]] = None, 
             security: Optional[Dict[str, Any]] = None,
             resource_limits: Optional[ResourceLimits] = None,
             host_gas_policy: Optional[HostGasPolicy] = None,
             use_recursive_evaluator: bool = False):
    """
    Initialize JSL runner.

    Args:
        config: Configuration options (recursion depth, debugging, etc.)
        security: Security settings (allowed commands, sandbox mode, etc.)
        resource_limits: Resource limits for execution
        host_gas_policy: Gas cost policy for host operations
        use_recursive_evaluator: If True, use recursive evaluator instead of stack (default: False)
    """
    self.config = config or {}
    self.security = security or {}
    self.use_recursive_evaluator = use_recursive_evaluator

    # Set up host dispatcher
    self.host_dispatcher = HostDispatcher()

    # Set up base environment - keep prelude separate
    self.prelude = make_prelude()
    # Working environment extends the prelude (can be modified)
    self.base_environment = self.prelude.extend({})

    # Set up resource limits
    if resource_limits is None and self.config:
        # Build from config
        resource_limits = ResourceLimits(
            max_gas=self.config.get('max_gas'),
            max_memory=self.config.get('max_memory'),
            max_time_ms=self.config.get('max_time_ms'),
            max_stack_depth=self.config.get('max_stack_depth'),
            max_collection_size=self.config.get('max_collection_size'),
            max_string_length=self.config.get('max_string_length')
        )

    # Set up evaluators
    if use_recursive_evaluator:
        # Recursive evaluator as reference implementation
        self.recursive_evaluator = Evaluator(
            self.host_dispatcher, 
            resource_limits=resource_limits,
            host_gas_policy=host_gas_policy
        )
        self.stack_evaluator = None
    else:
        # Stack evaluator is the default for production use
        # Create resource budget if we have limits
        budget = None
        if resource_limits:
            from .resources import ResourceBudget
            budget = ResourceBudget(resource_limits, host_gas_policy)
        self.stack_evaluator = StackEvaluator(
            env=self.base_environment,
            resource_budget=budget,
            host_dispatcher=self.host_dispatcher
        )
        self.recursive_evaluator = None

    # Keep backward compatibility - evaluator points to the active one
    self.evaluator = self.recursive_evaluator if use_recursive_evaluator else self.stack_evaluator

    # Store for reference
    self.resource_limits = resource_limits
    self.host_gas_policy = host_gas_policy

    # Performance tracking
    self._profiling_enabled = False
    self._performance_stats = {}

    # Apply configuration
    self._apply_config()
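
For reference, a minimal construction sketch with explicit resource limits. The import path for ResourceLimits is an assumption here (the source above imports ResourceBudget from jsl.resources); the constructor keywords match the source above.

from jsl.runner import JSLRunner
from jsl.resources import ResourceLimits  # assumed location

limits = ResourceLimits(max_gas=100_000, max_memory=1_000_000, max_stack_depth=256)
runner = JSLRunner(
    security={"allowed_host_commands": ["time"]},
    resource_limits=limits,
    use_recursive_evaluator=False,  # stack evaluator is the production default
)
print(runner.execute(["+", 1, 2]))  # 3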

add_host_handler(command, handler)

Add a host command handler.

Parameters:

  command (str, required)
      Command name (e.g., "file", "time")

  handler (Any, required)
      Handler object or function
Source code in jsl/runner.py
def add_host_handler(self, command: str, handler: Any) -> None:
    """
    Add a host command handler.

    Args:
        command: Command name (e.g., "file", "time")
        handler: Handler object or function
    """
    # Check security restrictions
    allowed_commands = self.security.get('allowed_host_commands')
    if allowed_commands and command not in allowed_commands:
        raise JSLRuntimeError(f"Host command '{command}' not allowed by security policy")

    self.host_dispatcher.register(command, handler)
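
A small sketch of registering a plain function as a handler and pairing it with the security allow-list. How the dispatcher ultimately invokes the handler (arguments and return value) is an assumption here.

import time

def time_handler(*args):
    # Hypothetical handler: returns the current UNIX timestamp
    return time.time()

runner = JSLRunner(security={"allowed_host_commands": ["time"]})
runner.add_host_handler("time", time_handler)

# Registering a command outside the allow-list raises JSLRuntimeError:
# runner.add_host_handler("file", some_file_handler)  # not allowed by policy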

disable_profiling()

Disable performance profiling.

Source code in jsl/runner.py
def disable_profiling(self) -> None:
    """Disable performance profiling."""
    self._profiling_enabled = False

enable_profiling()

Enable performance profiling.

Source code in jsl/runner.py
def enable_profiling(self) -> None:
    """Enable performance profiling."""
    self._profiling_enabled = True
    self._performance_stats = {}

execute(expression)

Execute a JSL expression.

Supports multiple input formats:

  • S-expression, Lisp style: "(+ 1 2 3)"
  • S-expression, JSON style: '["+", 1, 2, 3]'
  • JPN postfix (compiled): '[1, 2, 3, 3, "+"]'

Parameters:

  expression (Union[str, JSLExpression], required)
      JSL expression as string or parsed structure

Returns:

  JSLValue
      The result of evaluating the expression

Raises:

  JSLSyntaxError
      If the expression is malformed

  JSLRuntimeError
      If execution fails
Source code in jsl/runner.py
def execute(self, expression: Union[str, JSLExpression]) -> JSLValue:
    """
    Execute a JSL expression.

    Supports multiple input formats:
    - S-expression Lisp style: "(+ 1 2 3)"
    - S-expression JSON style: "[\"+\", 1, 2, 3]"
    - JPN postfix compiled: "[1, 2, 3, 3, \"+\"]"

    Args:
        expression: JSL expression as string or parsed structure

    Returns:
        The result of evaluating the expression

    Raises:
        JSLSyntaxError: If the expression is malformed
        JSLRuntimeError: If execution fails
    """
    start_time = time.time() if self._profiling_enabled else None

    try:
        # Detect format and parse accordingly
        format_type = self._detect_format(expression)
        parse_start = time.time() if self._profiling_enabled else None

        if format_type == 'lisp':
            # Parse Lisp-style S-expressions
            if isinstance(expression, str):
                expression = from_canonical_sexp(expression)
            else:
                raise JSLSyntaxError("Lisp format detected but expression is not a string")
        elif isinstance(expression, str):
            # Try to parse as JSON
            try:
                expression = json.loads(expression)
            except json.JSONDecodeError:
                # If it's a simple identifier (variable name), keep it as-is
                # This allows execute("x") to work for variable lookup
                if expression.isidentifier() or expression.startswith('@'):
                    expression = expression
                else:
                    # Invalid JSON that's not a simple identifier
                    raise JSLSyntaxError(f"Invalid expression: {expression}")

        # Re-detect format after parsing
        if isinstance(expression, list):
            format_type = self._detect_parsed_format(expression)

        if self._profiling_enabled and parse_start:
            self._performance_stats['parse_time_ms'] = (time.time() - parse_start) * 1000
            self._performance_stats['input_format'] = format_type

        # Execute the expression
        eval_start = time.time() if self._profiling_enabled else None

        # Don't reset resources - they persist across executions
        # If users want fresh resources, they should create a new Runner

        try:
            if format_type == 'jpn':
                # Already in JPN format, use stack evaluator directly
                if self.use_recursive_evaluator:
                    # Need to decompile JPN back to S-expression for recursive evaluator
                    expression = decompile_from_postfix(expression)
                    result = self.recursive_evaluator.eval(expression, self.base_environment)
                else:
                    result = self.stack_evaluator.eval(expression, env=self.base_environment)
            else:
                # S-expression format (json or lisp parsed to json)
                if self.use_recursive_evaluator:
                    # Use recursive evaluator directly
                    result = self.recursive_evaluator.eval(expression, self.base_environment)
                else:
                    # Compile to JPN and use stack evaluator
                    jpn = compile_to_postfix(expression)
                    result = self.stack_evaluator.eval(jpn, env=self.base_environment)

            # Record performance stats
            if self._profiling_enabled:
                if eval_start:
                    self._performance_stats['eval_time_ms'] = (time.time() - eval_start) * 1000
                if start_time:
                    self._performance_stats['total_time_ms'] = (time.time() - start_time) * 1000

                # Resource usage stats (only for recursive evaluator currently)
                if self.use_recursive_evaluator and self.recursive_evaluator.resources:
                    checkpoint = self.recursive_evaluator.resources.checkpoint()
                    self._performance_stats['gas_used'] = checkpoint.get('gas_used', 0)
                    self._performance_stats['memory_used'] = checkpoint.get('memory_used', 0)
                    self._performance_stats['stack_depth_max'] = checkpoint.get('stack_depth', 0)

                # Track call count
                self._performance_stats['call_count'] = self._performance_stats.get('call_count', 0) + 1

            return result

        except ResourceExhausted as e:
            # Record resource exhaustion in stats
            if self._profiling_enabled:
                if self.use_recursive_evaluator and self.recursive_evaluator.resources:
                    checkpoint = self.recursive_evaluator.resources.checkpoint()
                    self._performance_stats['resources_exhausted'] = True
                    self._performance_stats['gas_used'] = checkpoint.get('gas_used', 0)
                    self._performance_stats['memory_used'] = checkpoint.get('memory_used', 0)

            # Re-raise directly - ResourceExhausted is already informative
            # The evaluator will have already set remaining_expr and env if needed
            raise

    except Exception as e:
        if self._profiling_enabled and start_time:
            self._performance_stats['error_time_ms'] = (time.time() - start_time) * 1000
            self._performance_stats['error_count'] = self._performance_stats.get('error_count', 0) + 1

        if isinstance(e, (JSLSyntaxError, JSLRuntimeError, ResourceExhausted)):
            raise
        else:
            raise JSLRuntimeError(f"Execution failed: {e}")

get_performance_stats()

Get performance statistics.

Returns:

  Dict[str, Any]
      Dictionary with performance metrics including:
      • total_time_ms: Total execution time
      • parse_time_ms: Time spent parsing JSON
      • eval_time_ms: Time spent evaluating
      • call_count: Number of execute() calls
      • error_count: Number of errors encountered
      • gas_used: Amount of gas consumed (if resource limits are set)
      • resources_exhausted: True if resource limits were hit
Source code in jsl/runner.py
def get_performance_stats(self) -> Dict[str, Any]:
    """
    Get performance statistics.

    Returns:
        Dictionary with performance metrics including:
        - total_time_ms: Total execution time
        - parse_time_ms: Time spent parsing JSON
        - eval_time_ms: Time spent evaluating
        - call_count: Number of execute() calls
        - error_count: Number of errors encountered
        - gas_used: Amount of gas consumed (if resource limits are set)
        - resources_exhausted: True if resource limits were hit
    """
    return self._performance_stats.copy()

new_environment()

Create a new isolated environment context.

Yields:

  ExecutionContext
      New execution context

Source code in jsl/runner.py
@contextmanager
def new_environment(self):
    """
    Create a new isolated environment context.

    Yields:
        ExecutionContext: New execution context
    """
    # Create new environment extending the current base environment
    # This allows access to variables defined in the parent context
    new_env = self.base_environment.extend({})        
    context = ExecutionContext(new_env)

    # Create temporary runner for this context with same configuration
    temp_runner = JSLRunner(
        self.config, 
        self.security,
        use_recursive_evaluator=self.use_recursive_evaluator
    )
    temp_runner.base_environment = new_env

    # Update the stack evaluator's default env (if it exists)
    # Both evaluators receive env as parameter, but stack evaluator also
    # needs its internal env updated for variable lookups during evaluation
    if temp_runner.stack_evaluator:
        temp_runner.stack_evaluator.env = new_env

    try:
        yield temp_runner
    finally:
        # Cleanup happens automatically when context exits
        pass
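
A sketch of the scoping behaviour described in the comments above: definitions from the parent runner are visible inside the child context, while definitions made in the child do not leak back out. This is inferred from the environment-extension logic in the source, not a documented guarantee.

runner = JSLRunner()
runner.execute(["def", "base", 10])

with runner.new_environment() as child:
    # Parent definitions are visible in the child context
    print(child.execute(["+", "base", 5]))  # 15
    child.execute(["def", "local", 1])

# 'local' was defined only in the child environment, so looking it up
# on the parent runner is expected to fail with an undefined-variable error.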

reset_performance_stats()

Reset performance statistics.

Source code in jsl/runner.py
def reset_performance_stats(self) -> None:
    """Reset performance statistics."""
    self._performance_stats = {}

ExecutionContext(environment, parent=None)

Context for a single execution session.

Source code in jsl/runner.py
def __init__(self, environment: Env, parent: Optional['ExecutionContext'] = None):
    self.environment = environment
    self.parent = parent
    self.start_time = time.time()
    self.memory_used = 0

define(name, value)

Define a variable in this context.

Source code in jsl/runner.py
def define(self, name: str, value: Any) -> None:
    """Define a variable in this context."""
    self.environment.define(name, value)

get_variable(name)

Get a variable from this context.

Source code in jsl/runner.py
def get_variable(self, name: str) -> Any:
    """Get a variable from this context."""
    try:
        return self.environment.get(name)
    except KeyError:
        raise JSLRuntimeError(f"Undefined variable: {name}")

JSLRuntimeError(message, remaining_expr=None, env=None)

Bases: Exception

Runtime error during JSL execution.

Source code in jsl/runner.py
def __init__(self, message: str, remaining_expr=None, env=None):
    super().__init__(message)
    self.remaining_expr = remaining_expr
    self.env = env

JSLSyntaxError

Bases: Exception

Syntax error in JSL code.

Usage Examples

Basic Program Execution

from jsl.runner import JSLRunner

# Create runner instance
runner = JSLRunner()

# Execute simple expression
result = runner.execute(["+", 1, 2])
print(result)  # Output: 3

# Execute with variables
runner.define("x", 10)
result = runner.execute(["*", "x", 2])
print(result)  # Output: 20

Environment Management

# Create isolated environment
with runner.new_environment() as env:
    env.define("temp_var", 42)
    result = env.execute(["*", "temp_var", 2])
    print(result)  # Output: 84
# temp_var is no longer accessible

Closure Execution

# Define function
runner.execute(["def", "square", ["lambda", ["x"], ["*", "x", "x"]]])

# Call function
result = runner.execute(["square", 5])
print(result)  # Output: 25

# Access function object
square_fn = runner.get_variable("square")
print(square_fn.params)  # Output: ["x"]
print(square_fn.body)    # Output: ["*", "x", "x"]

Host Interaction

from jsl.runner import JSLRunner
from jsl.jhip import FileHandler

# Configure with host handlers
runner = JSLRunner()
runner.add_host_handler("file", FileHandler())

# Execute host interaction
result = runner.execute(["host", "file/read", "/tmp/data.txt"])

Error Handling

from jsl.runner import JSLRuntimeError, JSLSyntaxError

try:
    result = runner.execute(["undefined_function", 1, 2])
except JSLRuntimeError as e:
    print(f"Runtime error: {e}")
except JSLSyntaxError as e:
    print(f"Syntax error: {e}")

Configuration Options

Runner Configuration

config = {
    "max_recursion_depth": 1000,
    "max_steps": 10000,  # Limit evaluation steps (None for unlimited)
    "enable_debugging": True,
    "timeout_seconds": 30,
    "memory_limit_mb": 512
}

runner = JSLRunner(config=config)

Security Settings

# Restrict to specific host commands
security_config = {
    "allowed_host_commands": ["file/read", "time/now"]
}
runner = JSLRunner(security=security_config)

# Sandbox mode - blocks all host commands unless explicitly allowed
sandbox_config = {
    "sandbox_mode": True,
    "allowed_host_commands": ["safe_operation"]  # Only this is allowed
}
sandbox_runner = JSLRunner(security=sandbox_config)

# Complete sandbox - no host operations
strict_sandbox = JSLRunner(security={"sandbox_mode": True})

Step Limiting and Resumption

JSL supports limiting the number of evaluation steps to prevent denial-of-service (DoS) attacks and to enable fair resource allocation in distributed environments:

# Create runner with step limit
runner = JSLRunner(config={"max_steps": 1000})

try:
    result = runner.execute(complex_expression)
except JSLRuntimeError as e:
    if "Step limit exceeded" in str(e):
        # Can resume with additional steps
        if e.remaining_expr is not None:
            result = runner.resume(
                e.remaining_expr, 
                e.env, 
                additional_steps=500
            )

This enables:

  • DoS Prevention: Bounds computation so runaway or infinite loops cannot run forever
  • Fair Resource Allocation: In multi-tenant environments
  • Pauseable Computation: Serialize and resume long-running tasks
  • Step Accounting: Track resource usage per user/request

Performance Monitoring

# Enable performance tracking
runner.enable_profiling()

# Execute expressions
runner.execute('["*", 10, 20]')  # Parse from JSON
runner.execute(["+", 1, 2, 3])   # Direct expression

# Get performance metrics
stats = runner.get_performance_stats()
print(f"Total time: {stats['total_time_ms']}ms")
print(f"Parse time: {stats.get('parse_time_ms', 0)}ms")
print(f"Eval time: {stats['eval_time_ms']}ms")
print(f"Call count: {stats['call_count']}")
print(f"Errors: {stats.get('error_count', 0)}")

# Reset stats
runner.reset_performance_stats()

# Disable profiling
runner.disable_profiling()