Browse Source

First work on the analysis!

Nicholas Schense 2 days ago
parent
commit
8544f31c07

+ 247 - 0
.venv/bin/Activate.ps1

@@ -0,0 +1,247 @@
+<#
+.Synopsis
+Activate a Python virtual environment for the current PowerShell session.
+
+.Description
+Pushes the python executable for a virtual environment to the front of the
+$Env:PATH environment variable and sets the prompt to signify that you are
+in a Python virtual environment. Makes use of the command line switches as
+well as the `pyvenv.cfg` file values present in the virtual environment.
+
+.Parameter VenvDir
+Path to the directory that contains the virtual environment to activate. The
+default value for this is the parent of the directory that the Activate.ps1
+script is located within.
+
+.Parameter Prompt
+The prompt prefix to display when this virtual environment is activated. By
+default, this prompt is the name of the virtual environment folder (VenvDir)
+surrounded by parentheses and followed by a single space (i.e. '(.venv) ').
+
+.Example
+Activate.ps1
+Activates the Python virtual environment that contains the Activate.ps1 script.
+
+.Example
+Activate.ps1 -Verbose
+Activates the Python virtual environment that contains the Activate.ps1 script,
+and shows extra information about the activation as it executes.
+
+.Example
+Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
+Activates the Python virtual environment located in the specified location.
+
+.Example
+Activate.ps1 -Prompt "MyPython"
+Activates the Python virtual environment that contains the Activate.ps1 script,
+and prefixes the current prompt with the specified string (surrounded in
+parentheses) while the virtual environment is active.
+
+.Notes
+On Windows, it may be required to enable this Activate.ps1 script by setting the
+execution policy for the user. You can do this by issuing the following PowerShell
+command:
+
+PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
+
+For more information on Execution Policies: 
+https://go.microsoft.com/fwlink/?LinkID=135170
+
+#>
+Param(
+    [Parameter(Mandatory = $false)]
+    [String]
+    $VenvDir,
+    [Parameter(Mandatory = $false)]
+    [String]
+    $Prompt
+)
+
+<# Function declarations --------------------------------------------------- #>
+
+<#
+.Synopsis
+Remove all shell session elements added by the Activate script, including the
+addition of the virtual environment's Python executable from the beginning of
+the PATH variable.
+
+.Parameter NonDestructive
+If present, do not remove this function from the global namespace for the
+session.
+
+#>
+function global:deactivate ([switch]$NonDestructive) {
+    # Revert to original values
+
+    # The prior prompt:
+    if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
+        Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
+        Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
+    }
+
+    # The prior PYTHONHOME:
+    if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
+        Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
+        Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
+    }
+
+    # The prior PATH:
+    if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
+        Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
+        Remove-Item -Path Env:_OLD_VIRTUAL_PATH
+    }
+
+    # Just remove the VIRTUAL_ENV altogether:
+    if (Test-Path -Path Env:VIRTUAL_ENV) {
+        Remove-Item -Path env:VIRTUAL_ENV
+    }
+
+    # Just remove VIRTUAL_ENV_PROMPT altogether.
+    if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
+        Remove-Item -Path env:VIRTUAL_ENV_PROMPT
+    }
+
+    # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
+    if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
+        Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
+    }
+
+    # Leave deactivate function in the global namespace if requested:
+    if (-not $NonDestructive) {
+        Remove-Item -Path function:deactivate
+    }
+}
+
+<#
+.Description
+Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
+given folder, and returns them in a map.
+
+For each line in the pyvenv.cfg file, if that line can be parsed into exactly
+two strings separated by `=` (with any amount of whitespace surrounding the =)
+then it is considered a `key = value` line. The left hand string is the key,
+the right hand is the value.
+
+If the value starts with a `'` or a `"` then the first and last characters are
+stripped from the value before being captured.
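+
+For example, a line `prompt = 'myenv'` yields the key `prompt` with the value
+`myenv` (surrounding quotes stripped).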
+
+.Parameter ConfigDir
+Path to the directory that contains the `pyvenv.cfg` file.
+#>
+function Get-PyVenvConfig(
+    [String]
+    $ConfigDir
+) {
+    Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
+
+    # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
+    $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
+
+    # An empty map will be returned if no config file is found.
+    $pyvenvConfig = @{ }
+
+    if ($pyvenvConfigPath) {
+
+        Write-Verbose "File exists, parse `key = value` lines"
+        $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
+
+        $pyvenvConfigContent | ForEach-Object {
+            $keyval = $PSItem -split "\s*=\s*", 2
+            if ($keyval[0] -and $keyval[1]) {
+                $val = $keyval[1]
+
+                # Remove extraneous quotations around a string value.
+                if ("'""".Contains($val.Substring(0, 1))) {
+                    $val = $val.Substring(1, $val.Length - 2)
+                }
+
+                $pyvenvConfig[$keyval[0]] = $val
+                Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
+            }
+        }
+    }
+    return $pyvenvConfig
+}
+
+
+<# Begin Activate script --------------------------------------------------- #>
+
+# Determine the containing directory of this script
+$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
+$VenvExecDir = Get-Item -Path $VenvExecPath
+
+Write-Verbose "Activation script is located in path: '$VenvExecPath'"
+Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
+Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
+
+# Set values required in priority: CmdLine, ConfigFile, Default
+# First, get the location of the virtual environment, it might not be
+# VenvExecDir if specified on the command line.
+if ($VenvDir) {
+    Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
+}
+else {
+    Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
+    $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
+    Write-Verbose "VenvDir=$VenvDir"
+}
+
+# Next, read the `pyvenv.cfg` file to determine any required value such
+# as `prompt`.
+$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
+
+# Next, set the prompt from the command line, or the config file, or
+# just use the name of the virtual environment folder.
+if ($Prompt) {
+    Write-Verbose "Prompt specified as argument, using '$Prompt'"
+}
+else {
+    Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
+    if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
+        Write-Verbose "  Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
+        $Prompt = $pyvenvCfg['prompt'];
+    }
+    else {
+        Write-Verbose "  Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
+        Write-Verbose "  Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
+        $Prompt = Split-Path -Path $venvDir -Leaf
+    }
+}
+
+Write-Verbose "Prompt = '$Prompt'"
+Write-Verbose "VenvDir='$VenvDir'"
+
+# Deactivate any currently active virtual environment, but leave the
+# deactivate function in place.
+deactivate -nondestructive
+
+# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
+# that there is an activated venv.
+$env:VIRTUAL_ENV = $VenvDir
+
+if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
+
+    Write-Verbose "Setting prompt to '$Prompt'"
+
+    # Set the prompt to include the env name
+    # Make sure _OLD_VIRTUAL_PROMPT is global
+    function global:_OLD_VIRTUAL_PROMPT { "" }
+    Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
+    New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
+
+    function global:prompt {
+        Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
+        _OLD_VIRTUAL_PROMPT
+    }
+    $env:VIRTUAL_ENV_PROMPT = $Prompt
+}
+
+# Clear PYTHONHOME
+if (Test-Path -Path Env:PYTHONHOME) {
+    Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
+    Remove-Item -Path Env:PYTHONHOME
+}
+
+# Add the venv to the PATH
+Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
+$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"

+ 70 - 0
.venv/bin/activate

@@ -0,0 +1,70 @@
+# This file must be used with "source bin/activate" *from bash*
+# You cannot run it directly
+
+deactivate () {
+    # reset old environment variables
+    if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
+        PATH="${_OLD_VIRTUAL_PATH:-}"
+        export PATH
+        unset _OLD_VIRTUAL_PATH
+    fi
+    if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
+        PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
+        export PYTHONHOME
+        unset _OLD_VIRTUAL_PYTHONHOME
+    fi
+
+    # Call hash to forget past commands. Without forgetting
+    # past commands the $PATH changes we made may not be respected
+    hash -r 2> /dev/null
+
+    if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
+        PS1="${_OLD_VIRTUAL_PS1:-}"
+        export PS1
+        unset _OLD_VIRTUAL_PS1
+    fi
+
+    unset VIRTUAL_ENV
+    unset VIRTUAL_ENV_PROMPT
+    if [ ! "${1:-}" = "nondestructive" ] ; then
+    # Self destruct!
+        unset -f deactivate
+    fi
+}
+
+# unset irrelevant variables
+deactivate nondestructive
+
+# on Windows, a path can contain colons and backslashes and has to be converted:
+if [ "${OSTYPE:-}" = "cygwin" ] || [ "${OSTYPE:-}" = "msys" ] ; then
+    # transform D:\path\to\venv to /d/path/to/venv on MSYS
+    # and to /cygdrive/d/path/to/venv on Cygwin
+    export VIRTUAL_ENV=$(cygpath /home/nschense/Medphys_Research/senior_research_thesis/.venv)
+else
+    # use the path as-is
+    export VIRTUAL_ENV=/home/nschense/Medphys_Research/senior_research_thesis/.venv
+fi
+
+_OLD_VIRTUAL_PATH="$PATH"
+PATH="$VIRTUAL_ENV/"bin":$PATH"
+export PATH
+
+# unset PYTHONHOME if set
+# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
+# could use `if (set -u; : $PYTHONHOME) ;` in bash
+if [ -n "${PYTHONHOME:-}" ] ; then
+    _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
+    unset PYTHONHOME
+fi
+
+if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
+    _OLD_VIRTUAL_PS1="${PS1:-}"
+    PS1='(.venv) '"${PS1:-}"
+    export PS1
+    VIRTUAL_ENV_PROMPT='(.venv) '
+    export VIRTUAL_ENV_PROMPT
+fi
+
+# Call hash to forget past commands. Without forgetting
+# past commands the $PATH changes we made may not be respected
+hash -r 2> /dev/null

+ 27 - 0
.venv/bin/activate.csh

@@ -0,0 +1,27 @@
+# This file must be used with "source bin/activate.csh" *from csh*.
+# You cannot run it directly.
+
+# Created by Davide Di Blasi <davidedb@gmail.com>.
+# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
+
+alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'
+
+# Unset irrelevant variables.
+deactivate nondestructive
+
+setenv VIRTUAL_ENV /home/nschense/Medphys_Research/senior_research_thesis/.venv
+
+set _OLD_VIRTUAL_PATH="$PATH"
+setenv PATH "$VIRTUAL_ENV/"bin":$PATH"
+
+
+set _OLD_VIRTUAL_PROMPT="$prompt"
+
+if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
+    set prompt = '(.venv) '"$prompt"
+    setenv VIRTUAL_ENV_PROMPT '(.venv) '
+endif
+
+alias pydoc python -m pydoc
+
+rehash

+ 69 - 0
.venv/bin/activate.fish

@@ -0,0 +1,69 @@
+# This file must be used with "source <venv>/bin/activate.fish" *from fish*
+# (https://fishshell.com/). You cannot run it directly.
+
+function deactivate  -d "Exit virtual environment and return to normal shell environment"
+    # reset old environment variables
+    if test -n "$_OLD_VIRTUAL_PATH"
+        set -gx PATH $_OLD_VIRTUAL_PATH
+        set -e _OLD_VIRTUAL_PATH
+    end
+    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
+        set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
+        set -e _OLD_VIRTUAL_PYTHONHOME
+    end
+
+    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
+        set -e _OLD_FISH_PROMPT_OVERRIDE
+        # prevents error when using nested fish instances (Issue #93858)
+        if functions -q _old_fish_prompt
+            functions -e fish_prompt
+            functions -c _old_fish_prompt fish_prompt
+            functions -e _old_fish_prompt
+        end
+    end
+
+    set -e VIRTUAL_ENV
+    set -e VIRTUAL_ENV_PROMPT
+    if test "$argv[1]" != "nondestructive"
+        # Self-destruct!
+        functions -e deactivate
+    end
+end
+
+# Unset irrelevant variables.
+deactivate nondestructive
+
+set -gx VIRTUAL_ENV /home/nschense/Medphys_Research/senior_research_thesis/.venv
+
+set -gx _OLD_VIRTUAL_PATH $PATH
+set -gx PATH "$VIRTUAL_ENV/"bin $PATH
+
+# Unset PYTHONHOME if set.
+if set -q PYTHONHOME
+    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
+    set -e PYTHONHOME
+end
+
+if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
+    # fish uses a function instead of an env var to generate the prompt.
+
+    # Save the current fish_prompt function as the function _old_fish_prompt.
+    functions -c fish_prompt _old_fish_prompt
+
+    # With the original prompt function renamed, we can override with our own.
+    function fish_prompt
+        # Save the return status of the last command.
+        set -l old_status $status
+
+        # Output the venv prompt; color taken from the blue of the Python logo.
+        printf "%s%s%s" (set_color 4B8BBE) '(.venv) ' (set_color normal)
+
+        # Restore the return status of the previous command.
+        echo "exit $old_status" | .
+        # Output the original/"old" prompt.
+        _old_fish_prompt
+    end
+
+    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
+    set -gx VIRTUAL_ENV_PROMPT '(.venv) '
+end

+ 8 - 0
.venv/bin/f2py

@@ -0,0 +1,8 @@
+#!/home/nschense/Medphys_Research/senior_research_thesis/.venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+from numpy.f2py.f2py2e import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())

+ 8 - 0
.venv/bin/fonttools

@@ -0,0 +1,8 @@
+#!/home/nschense/Medphys_Research/senior_research_thesis/.venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+from fontTools.__main__ import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())

+ 8 - 0
.venv/bin/nc3tonc4

@@ -0,0 +1,8 @@
+#!/home/nschense/Medphys_Research/senior_research_thesis/.venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+from netCDF4.utils import nc3tonc4
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(nc3tonc4())

+ 8 - 0
.venv/bin/nc4tonc3

@@ -0,0 +1,8 @@
+#!/home/nschense/Medphys_Research/senior_research_thesis/.venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+from netCDF4.utils import nc4tonc3
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(nc4tonc3())

+ 8 - 0
.venv/bin/ncinfo

@@ -0,0 +1,8 @@
+#!/home/nschense/Medphys_Research/senior_research_thesis/.venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+from netCDF4.utils import ncinfo
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(ncinfo())

+ 8 - 0
.venv/bin/numpy-config

@@ -0,0 +1,8 @@
+#!/home/nschense/Medphys_Research/senior_research_thesis/.venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+from numpy._configtool import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())

+ 8 - 0
.venv/bin/pip

@@ -0,0 +1,8 @@
+#!/home/nschense/Medphys_Research/senior_research_thesis/.venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+from pip._internal.cli.main import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())

+ 8 - 0
.venv/bin/pip3

@@ -0,0 +1,8 @@
+#!/home/nschense/Medphys_Research/senior_research_thesis/.venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+from pip._internal.cli.main import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())

+ 8 - 0
.venv/bin/pip3.12

@@ -0,0 +1,8 @@
+#!/home/nschense/Medphys_Research/senior_research_thesis/.venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+from pip._internal.cli.main import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())

+ 8 - 0
.venv/bin/pyftmerge

@@ -0,0 +1,8 @@
+#!/home/nschense/Medphys_Research/senior_research_thesis/.venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+from fontTools.merge import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())

+ 8 - 0
.venv/bin/pyftsubset

@@ -0,0 +1,8 @@
+#!/home/nschense/Medphys_Research/senior_research_thesis/.venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+from fontTools.subset import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())

+ 1 - 0
.venv/bin/python

@@ -0,0 +1 @@
+/usr/bin/python

+ 1 - 0
.venv/bin/python3

@@ -0,0 +1 @@
+python

+ 1 - 0
.venv/bin/python3.12

@@ -0,0 +1 @@
+python

+ 8 - 0
.venv/bin/ttx

@@ -0,0 +1,8 @@
+#!/home/nschense/Medphys_Research/senior_research_thesis/.venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+from fontTools.ttx import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())

+ 1 - 0
.venv/lib64

@@ -0,0 +1 @@
+lib

+ 5 - 0
.venv/pyvenv.cfg

@@ -0,0 +1,5 @@
+home = /usr/bin
+include-system-site-packages = false
+version = 3.12.3
+executable = /usr/bin/python3.12
+command = /usr/bin/python -m venv /home/nschense/Medphys_Research/senior_research_thesis/.venv

+ 225 - 0
.venv/share/man/man1/ttx.1

@@ -0,0 +1,225 @@
+.Dd May 18, 2004
+.\" ttx is not specific to any OS, but contrary to what groff_mdoc(7)
+.\" seems to imply, entirely omitting the .Os macro causes 'BSD' to
+.\" be used, so I give a zero-width space as its argument.
+.Os \&
+.\" The "FontTools Manual" argument apparently has no effect in
+.\" groff 1.18.1. I think it is a bug in the -mdoc groff package.
+.Dt TTX 1 "FontTools Manual"
+.Sh NAME
+.Nm ttx
+.Nd tool for manipulating TrueType and OpenType fonts
+.Sh SYNOPSIS
+.Nm
+.Bk
+.Op Ar option ...
+.Ek
+.Bk
+.Ar file ...
+.Ek
+.Sh DESCRIPTION
+.Nm
+is a tool for manipulating TrueType and OpenType fonts.  It can convert
+TrueType and OpenType fonts to and from an
+.Tn XML Ns -based format called
+.Tn TTX .
+.Tn TTX
+files have a
+.Ql .ttx
+extension.
+.Pp
+For each
+.Ar file
+argument it is given,
+.Nm
+detects whether it is a
+.Ql .ttf ,
+.Ql .otf
+or
+.Ql .ttx
+file and acts accordingly: if it is a
+.Ql .ttf
+or
+.Ql .otf
+file, it generates a
+.Ql .ttx
+file; if it is a
+.Ql .ttx
+file, it generates a
+.Ql .ttf
+or
+.Ql .otf
+file.
+.Pp
+By default, every output file is created in the same directory as the
+corresponding input file and with the same name except for the
+extension, which is substituted appropriately.
+.Nm
+never overwrites existing files; if necessary, it appends a suffix to
+the output file name before the extension, as in
+.Pa Arial#1.ttf .
+.Ss "General options"
+.Bl -tag -width ".Fl t Ar table"
+.It Fl h
+Display usage information.
+.It Fl d Ar dir
+Write the output files to directory
+.Ar dir
+instead of writing every output file to the same directory as the
+corresponding input file.
+.It Fl o Ar file
+Write the output to
+.Ar file
+instead of writing it to the same directory as the
+corresponding input file.
+.It Fl v
+Be verbose.  Write more messages to the standard output describing what
+is being done.
+.It Fl a
+Allow virtual glyphs ID's on compile or decompile.
+.El
+.Ss "Dump options"
+The following options control the process of dumping font files
+(TrueType or OpenType) to
+.Tn TTX
+files.
+.Bl -tag -width ".Fl t Ar table"
+.It Fl l
+List table information.  Instead of dumping the font to a
+.Tn TTX
+file, display minimal information about each table.
+.It Fl t Ar table
+Dump table
+.Ar table .
+This option may be given multiple times to dump several tables at
+once.  When not specified, all tables are dumped.
+.It Fl x Ar table
+Exclude table
+.Ar table
+from the list of tables to dump.  This option may be given multiple
+times to exclude several tables from the dump.  The
+.Fl t
+and
+.Fl x
+options are mutually exclusive.
+.It Fl s
+Split tables.  Dump each table to a separate
+.Tn TTX
+file and write (under the name that would have been used for the output
+file if the
+.Fl s
+option had not been given) one small
+.Tn TTX
+file containing references to the individual table dump files.  This
+file can be used as input to
+.Nm
+as long as the referenced files can be found in the same directory.
+.It Fl i
+.\" XXX: I suppose OpenType programs (exist and) are also affected.
+Don't disassemble TrueType instructions.  When this option is specified,
+all TrueType programs (glyph programs, the font program and the
+pre-program) are written to the
+.Tn TTX
+file as hexadecimal data instead of
+assembly.  This saves some time and results in smaller
+.Tn TTX
+files.
+.It Fl y Ar n
+When decompiling a TrueType Collection (TTC) file,
+decompile font number
+.Ar n ,
+starting from 0.
+.El
+.Ss "Compilation options"
+The following options control the process of compiling
+.Tn TTX
+files into font files (TrueType or OpenType):
+.Bl -tag -width ".Fl t Ar table"
+.It Fl m Ar fontfile
+Merge the input
+.Tn TTX
+file
+.Ar file
+with
+.Ar fontfile .
+No more than one
+.Ar file
+argument can be specified when this option is used.
+.It Fl b
+Don't recalculate glyph bounding boxes.  Use the values in the
+.Tn TTX
+file as is.
+.El
+.Sh "THE TTX FILE FORMAT"
+You can find some information about the
+.Tn TTX
+file format in
+.Pa documentation.html .
+In particular, you will find in that file the list of tables understood by
+.Nm
+and the relations between TrueType GlyphIDs and the glyph names used in
+.Tn TTX
+files.
+.Sh EXAMPLES
+In the following examples, all files are read from and written to the
+current directory.  Additionally, the name given for the output file
+assumes in every case that it did not exist before
+.Nm
+was invoked.
+.Pp
+Dump the TrueType font contained in
+.Pa FreeSans.ttf
+to
+.Pa FreeSans.ttx :
+.Pp
+.Dl ttx FreeSans.ttf
+.Pp
+Compile
+.Pa MyFont.ttx
+into a TrueType or OpenType font file:
+.Pp
+.Dl ttx MyFont.ttx
+.Pp
+List the tables in
+.Pa FreeSans.ttf
+along with some information:
+.Pp
+.Dl ttx -l FreeSans.ttf
+.Pp
+Dump the
+.Sq cmap
+table from
+.Pa FreeSans.ttf
+to
+.Pa FreeSans.ttx :
+.Pp
+.Dl ttx -t cmap FreeSans.ttf
+.Sh NOTES
+On MS\-Windows and MacOS,
+.Nm
+is available as a graphical application to which files can be dropped.
+.Sh SEE ALSO
+.Pa documentation.html
+.Pp
+.Xr fontforge 1 ,
+.Xr ftinfo 1 ,
+.Xr gfontview 1 ,
+.Xr xmbdfed 1 ,
+.Xr Font::TTF 3pm
+.Sh AUTHORS
+.Nm
+was written by
+.An -nosplit
+.An "Just van Rossum" Aq just@letterror.com .
+.Pp
+This manual page was written by
+.An "Florent Rougon" Aq f.rougon@free.fr
+for the Debian GNU/Linux system based on the existing FontTools
+documentation.  It may be freely used, modified and distributed without
+restrictions.
+.\" For Emacs:
+.\" Local Variables:
+.\" fill-column: 72
+.\" sentence-end: "[.?!][]\"')}]*\\($\\| $\\|   \\|  \\)[   \n]*"
+.\" sentence-end-double-space: t
+.\" End:

+ 70 - 0
analysis/confidence_percentile.py

@@ -0,0 +1,70 @@
+import xarray as xr
+import numpy as np
+import matplotlib.pyplot as plt
+from utils.config import config
+import pathlib as pl
+import os
+
+
+# Load the evaluation results
+os.chdir(pl.Path(__file__).parent)
+model_dataset_path = pl.Path("../model_evaluations") / pl.Path(
+    config["analysis"]["evaluation_name"].strip()
+).with_suffix(".nc")
+
+print(f"Loading evaluation results from {model_dataset_path}")
+
+array = xr.open_dataset(model_dataset_path)  # type: ignore
+
+
+predictions: xr.DataArray = array["predictions"]
+labels: xr.DataArray = array["labels"]
+
+# Make plots directory if it doesn't exist
+plots_dir = (
+    pl.Path("../output") / pl.Path(config["analysis"]["evaluation_name"]) / "plots"
+)
+plots_dir.mkdir(parents=True, exist_ok=True)
+
+# This script calculates and plots accuracy vs minimum confidence percentile threshold
+
+# Average predictions across models
+avg_predictions = predictions.mean(dim="model")
+# Get confidence scores for the positive class
+confidence_scores = avg_predictions.sel(img_class=1).values
+true_labels = labels.sel(label=1).values
+
+
+# Calculate accuracy at different confidence percentiles
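+# 21 evenly spaced percentile cutoffs: 0, 5, 10, ..., 100.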
+percentiles = np.linspace(0, 100, num=21)
+accuracies: list[float] = []
+for p in percentiles:
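+    # Map confidence in [0, 1] to absolute confidence: 0.5 (maximally
+    # uncertain) -> 0.0, while 0.0 or 1.0 (maximally certain) -> 1.0.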
+    absolute_confidences = 2 * np.abs(confidence_scores - 0.5)
+    threshold = np.percentile(absolute_confidences, p)
+
+    # Filter the predictions such that only those with absolute confidence above the threshold are considered
+    selected_indices = np.where(absolute_confidences >= threshold)[0]
+    if len(selected_indices) == 0:
+        accuracies.append(0.0)
+        continue
+    selected_confidences = confidence_scores[selected_indices]
+    selected_true_labels = true_labels[selected_indices]
+
+    predicted_positive = selected_confidences >= 0.5
+    true_positive = selected_true_labels == 1
+
+    correct_predictions = (predicted_positive == true_positive).sum().item()
+    total_predictions = len(selected_confidences)
+    accuracy = correct_predictions / total_predictions if total_predictions > 0 else 0.0
+    accuracies.append(accuracy)
+
+# Plot accuracy vs confidence percentile threshold
+plt.figure(figsize=(10, 6))
+plt.plot(percentiles, accuracies, marker="o")
+plt.title("Accuracy vs Confidence Percentile Threshold")
+plt.xlabel("Confidence Percentile Threshold")
+plt.ylabel("Accuracy")
+plt.grid()
+plt.xticks(percentiles)
+
+plt.savefig(plots_dir / "accuracy_vs_confidence_percentile_threshold.png")

+ 48 - 0
analysis/generate_statistics.py

@@ -0,0 +1,48 @@
+import xarray as xr
+from utils.config import config
+import pathlib as pl
+import numpy as np
+import os
+
+# Load the evaluation results
+os.chdir(pl.Path(__file__).parent)
+model_dataset_path = pl.Path("../model_evaluations") / pl.Path(
+    config["analysis"]["evaluation_name"].strip()
+).with_suffix(".nc")
+
+print(f"Loading evaluation results from {model_dataset_path}")
+
+array = xr.open_dataset(model_dataset_path)  # type: ignore
+
+
+# This dataset includes two dataarrays: 'predictions' and 'labels'
+
+# For the first analysis, the goal is to average the predictions across all models for each image, then determine the accuracy of these averaged predictions against the true labels, printing accuracy at each confidence threshold.
+
+predictions: xr.DataArray = array["predictions"]
+labels: xr.DataArray = array["labels"]
+
+# Average predictions across models
+avg_predictions = predictions.mean(dim="model")
+
+# Loop through different confidence thresholds and calculate accuracy
+thresholds = np.linspace(0.5, 1.0, num=10)  # From 0.5 to 1.0
+accuracies: list[float] = []
+
+for i, threshold in enumerate(thresholds):
+    # Pick the positive class for the labels and predictions.
+    predicted_positive = avg_predictions.sel(img_class=1) >= threshold
+    true_positive = labels.sel(label=1) == 1
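+    # Note: an averaged confidence below the threshold counts as a negative
+    # prediction, so it is scored incorrect whenever the true label is positive.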
+
+    # Calculate accuracy
+    correct_predictions = (predicted_positive == true_positive).sum().item()
+
+    # For debugging, print list of predictions, labels and correctness
+
+    total_predictions = len(avg_predictions.img_id)
+    accuracy = correct_predictions / total_predictions if total_predictions > 0 else 0.0
+    accuracies.append(accuracy)
+
+# Print the accuracies for each threshold
+for threshold, accuracy in zip(thresholds, accuracies):
+    print(f"Threshold: {threshold:.2f}, Accuracy: {accuracy:.4f}")

+ 129 - 0
analysis/sanity_check.py

@@ -0,0 +1,129 @@
+import xarray as xr
+import numpy as np
+from utils.config import config
+import pathlib as pl
+import colorama as clr
+import os
+
+
+os.chdir(pl.Path(__file__).parent)
+model_dataset_path = pl.Path("../model_evaluations") / pl.Path(
+    config["analysis"]["evaluation_name"].strip()
+).with_suffix(".nc")
+array = xr.open_dataset(model_dataset_path)  # type: ignore
+
+predictions: xr.DataArray = array["predictions"]
+labels: xr.DataArray = array["labels"]
+
+# Average predictions across models
+avg_predictions = predictions.mean(dim="model")
+
+# Sort from highest to lowest confidence for the positive class (img_class=1)
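+# (np.argsort is ascending, so negate the values to get a descending order.)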
+sorted_indices = np.argsort(-avg_predictions.sel(img_class=1).values)
+sorted_avg_predictions = avg_predictions.isel(img_id=sorted_indices)
+sorted_labels = labels.isel(img_id=sorted_indices)
+
+# Print out all predictions with their labels
+top_n = sorted_avg_predictions.sizes[
+    "img_id"
+]  # Change this value to print more or fewer
+print(
+    clr.Fore.CYAN
+    + f"Top {top_n} Predictions (Confidence for Positive Class):"
+    + clr.Style.RESET_ALL
+)
+for i in range(top_n):
+    confidence = sorted_avg_predictions.sel(img_class=1).isel(img_id=i).item()
+    label = sorted_labels.isel(img_id=i, label=1).values
+
+    correctness = (
+        "CORRECT"
+        if (confidence >= 0.5 and label == 1) or (confidence < 0.5 and label == 0)
+        else "INCORRECT"
+    )
+    color = clr.Fore.GREEN if correctness == "CORRECT" else clr.Fore.RED
+    print(
+        f"Image ID: {sorted_avg_predictions.img_id.isel(img_id=i).item():<8}, "
+        f"Confidence: {confidence:.4f}, "
+        f"Label: {label:<3}, " + color + f"{correctness:<9}" + clr.Style.RESET_ALL
+    )
+
+
+# Calculate overall accuracy
+predicted_positive = avg_predictions.sel(img_class=1) >= 0.5
+true_positive = labels.sel(label=1) == 1
+correct_predictions = (predicted_positive == true_positive).sum().item()
+total_predictions = len(avg_predictions.img_id)
+overall_accuracy = (
+    correct_predictions / total_predictions if total_predictions > 0 else 0.0
+)
+print(
+    clr.Fore.MAGENTA
+    + f"\nOverall Accuracy (Threshold 0.5): {overall_accuracy:.4f}"
+    + clr.Style.RESET_ALL
+)
+
+
+# Then go through all individual models and print out their accuracies for comparison, sorted from highest to lowest
+model_accuracies = []
+for model_idx in predictions.coords["model"].values:
+    model_preds = predictions.sel(model=model_idx)
+    predicted_positive = model_preds.sel(img_class=1) >= 0.5
+    correct_predictions = (predicted_positive == true_positive).sum().item()
+    accuracy = correct_predictions / total_predictions if total_predictions > 0 else 0.0
+    model_accuracies.append((model_idx, accuracy))
+
+# Sort by accuracy
+model_accuracies.sort(key=lambda x: x[1], reverse=True)
+print(
+    clr.Fore.CYAN
+    + f"\nIndividual Model Accuracies (Threshold 0.5):"
+    + clr.Style.RESET_ALL
+)
+for model_idx, accuracy in model_accuracies:
+    print(f"Model {int(model_idx):<3}: Accuracy: {accuracy:.4f}")
+
+
+# Then calculate the average accuracy if we were to ensemble the top K models, for K=1 to total number of models
+total_models = len(predictions.coords["model"].values)
+ensemble_accuracies = []
+for k in range(1, total_models + 1):
+    top_k_models = [ma[0] for ma in model_accuracies[:k]]
+    ensemble_preds = predictions.sel(model=top_k_models).mean(dim="model")
+    predicted_positive = ensemble_preds.sel(img_class=1) >= 0.5
+    correct_predictions = (predicted_positive == true_positive).sum().item()
+    accuracy = correct_predictions / total_predictions if total_predictions > 0 else 0.0
+    ensemble_accuracies.append((k, accuracy))
+print(
+    clr.Fore.CYAN
+    + f"\nEnsemble Accuracies for Top K Models (Threshold 0.5):"
+    + clr.Style.RESET_ALL
+)
+for k, accuracy in ensemble_accuracies:
+    print(f"Top {k:<3} Models: Ensemble Accuracy: {accuracy:.4f}")
+
+
+# Finally, identify the top 5 most confidently incorrect predictions
+incorrect_predictions = []
+for i in range(len(avg_predictions.img_id)):
+    confidence = avg_predictions.sel(img_class=1).isel(img_id=i).item()
+    label = labels.isel(img_id=i, label=1).values
+    predicted_label = 1 if confidence >= 0.5 else 0
+    if predicted_label != label:
+        incorrect_predictions.append((i, confidence, label))
+# Sort by confidence, i.e., distance of the averaged prediction from 0.5 (most confident first).
+incorrect_predictions.sort(key=lambda x: -abs(x[1] - 0.5))
+top_incorrect = incorrect_predictions[:5]
+print(
+    clr.Fore.YELLOW
+    + f"\nTop 5 Most Confident Incorrect Predictions:"
+    + clr.Style.RESET_ALL
+)
+for i, confidence, label in top_incorrect:
+    predicted_label = 1 if confidence >= 0.5 else 0
+    print(
+        f"Image ID: {avg_predictions.img_id.isel(img_id=i).item():<8}, "
+        f"Confidence: {confidence:.4f}, "
+        f"Predicted Label: {predicted_label:<3}, "
+        f"True Label: {label:<3}"
+    )

+ 94 - 0
analysis/sensitivity_analysis.py

@@ -0,0 +1,94 @@
+# The purpose of this file is to perform a sensitivity analysis on the model evaluation results and graph the findings.
+# The sensitivity analysis is done by varying the number of models used in the ensemble and observing the effect on overall accuracy.
+# We take 50 different random arrangements of models for each ensemble size (other than 50, which is the full set) to get a distribution of accuracies for each ensemble size.
+# Each ensemble size then gets error bars based on the standard deviation of its accuracies.
+import xarray as xr
+from utils.config import config
+import pathlib as pl
+import numpy as np
+import matplotlib.pyplot as plt
+import os
+
+# Load the evaluation results
+os.chdir(pl.Path(__file__).parent)
+model_dataset_path = pl.Path("../model_evaluations") / pl.Path(
+    config["analysis"]["evaluation_name"].strip()
+).with_suffix(".nc")
+
+print(f"Loading evaluation results from {model_dataset_path}")
+array = xr.open_dataset(model_dataset_path)  # type: ignore
+
+
+# This section was generated by GitHub Copilot - 2025-11-04
+# Perform sensitivity analysis by varying ensemble size and sampling subsets of models.
+
+predictions: xr.DataArray = array["predictions"]
+labels: xr.DataArray = array["labels"]
+
+# Make plots directory if it doesn't exist (matching other scripts)
+plots_dir = (
+    pl.Path("../output") / pl.Path(config["analysis"]["evaluation_name"]) / "plots"
+)
+plots_dir.mkdir(parents=True, exist_ok=True)
+
+# Configuration for the sensitivity analysis
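+# A fixed seed keeps the sampled model subsets (and the jitter in the plot) reproducible.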
+rng = np.random.default_rng(42)
+num_models = int(predictions.sizes["model"])
+ensemble_sizes = list(range(1, num_models + 1))
+samples_per_size = 50
+
+# Extract true labels for the positive class (assumes same structure as other script)
+true_labels = labels.sel(label=1).values  # shape: (n_samples,)
+
+# Container for results
+mean_accuracies: list[float] = []
+std_accuracies: list[float] = []
+all_accuracies: dict[int, list[float]] = {k: [] for k in ensemble_sizes}
+
+for k in ensemble_sizes:
+    accuracies_k = []
+    # If using the full set, evaluate once deterministically
+    if k == num_models:
+        selected_idx = np.arange(num_models)
+        preds_selected = predictions.isel(model=selected_idx).mean(dim="model")
+        confs = preds_selected.sel(img_class=1).values
+        predicted_positive = confs >= 0.5
+        true_positive = true_labels == 1
+        acc = (predicted_positive == true_positive).sum().item() / len(confs)
+        accuracies_k.append(acc)
+    else:
+        for _ in range(samples_per_size):
+            selected_idx = rng.choice(num_models, size=k, replace=False)
+            preds_selected = predictions.isel(model=selected_idx).mean(dim="model")
+            confs = preds_selected.sel(img_class=1).values
+            predicted_positive = confs >= 0.5
+            true_positive = true_labels == 1
+            acc = (predicted_positive == true_positive).sum().item() / len(confs)
+            accuracies_k.append(acc)
+    all_accuracies[k] = accuracies_k
+    mean_accuracies.append(float(np.mean(accuracies_k)))
+    std_accuracies.append(float(np.std(accuracies_k, ddof=0)))
+
+# Plot mean accuracy vs ensemble size with error bars (std)
+plt.figure(figsize=(10, 6))
+plt.errorbar(ensemble_sizes, mean_accuracies, yerr=std_accuracies, fmt="-o", capsize=3)
+plt.title("Sensitivity Analysis: Accuracy vs Ensemble Size")
+plt.xlabel("Number of Models in Ensemble")
+plt.ylabel("Accuracy")
+plt.grid(True)
+# Set x-ticks every 5 models (and always include the final model count)
+ticks = list(range(1, num_models + 1, 5))
+if len(ticks) == 0 or ticks[-1] != num_models:
+    ticks.append(num_models)
+plt.xticks(ticks)
+
+# Optionally overlay raw sample distributions as jittered points
+for i, k in enumerate(ensemble_sizes):
+    y = all_accuracies[k]
+    x = np.full(len(y), k) + (rng.random(len(y)) - 0.5) * 0.2  # small jitter
+    plt.scatter(x, y, alpha=0.3, s=8, color="gray")
+
+plt.tight_layout()
+
+plt.savefig(plots_dir / "sensitivity_accuracy_vs_ensemble_size.png")
+# End of Copilot section

+ 27 - 0
analysis/utils/config.py

@@ -0,0 +1,27 @@
+# This file serves as a singleton for the configuration settings of the project
+
+import tomllib
+import os
+import pathlib as pl
+from typing import Any
+
+
+def get_config() -> dict[str, Any]:
+    """
+    Load the configuration file and return the settings as a dictionary.
+    """
+    match os.getenv("ANN_CONFIG_PATH"):
+        case None:
+            config_path = pl.Path(__file__).parent.parent.parent / "config.toml"
+        case str(path):
+            config_path = pl.Path(path)
+
+    if not config_path.exists():
+        config_path = pl.Path(__file__).parent.parent.parent / "config.toml"
+    with open(config_path, "rb") as f:
+        config = tomllib.load(f)
+
+    return config
+
+
+config = get_config()
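
The module loads the configuration once at import time; scripts then import the resulting `config` dictionary directly. A minimal usage sketch, mirroring what the analysis scripts above do (the variable name is illustrative):

    from utils.config import config

    # Tables in config.toml become nested dictionaries; strip() guards
    # against stray whitespace in the configured evaluation name.
    evaluation_name = config["analysis"]["evaluation_name"].strip()
    print(f"Analyzing evaluation: {evaluation_name}")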

+ 2 - 0
config.toml

@@ -0,0 +1,2 @@
+[analysis]
+evaluation_name = "Full_Ensemble(50x30) "

Binary
model_evaluations/Full_Ensemble(50x30).nc


Binary
output/Full_Ensemble(50x30) /plots/accuracy_vs_confidence_percentile_threshold.png


Binary
output/Full_Ensemble(50x30) /plots/sensitivity_accuracy_vs_ensemble_size.png


+ 20 - 0
requirements.txt

@@ -0,0 +1,20 @@
+certifi==2025.10.5
+cftime==1.6.5
+colorama==0.4.6
+contourpy==1.3.3
+cycler==0.12.1
+fonttools==4.60.1
+kiwisolver==1.4.9
+matplotlib==3.10.7
+netCDF4==1.7.3
+numpy==2.3.4
+packaging==25.0
+pandas==2.3.3
+pillow==12.0.0
+pyparsing==3.2.5
+python-dateutil==2.9.0.post0
+pytz==2025.2
+scipy==1.16.3
+six==1.17.0
+tzdata==2025.2
+xarray==2025.10.1