diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml
deleted file mode 100644
index bd74dc04..00000000
--- a/.github/workflows/pages.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: Deploy Website
-on:
- push:
- branches: # triggers the workflow on push events to the main branch
- - main
- workflow_dispatch: # allows you to run the workflow manually
-permissions:
- contents: read
- pages: write
- id-token: write
-concurrency:
- group: "pages"
- cancel-in-progress: true
-
-jobs:
- build:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout code
- uses: actions/checkout@v2
- with:
- fetch-depth: 0 # Fetch all history for all branches and tags. This is required for the git revision date plugin and git authors plugin.
- - name: Set up Python
- uses: actions/setup-python@v2
- with:
- python-version: '3.x'
- - name: Install MkDocs and plugins
- run: pip install mkdocs mkdocs-mermaid2-plugin mkdocs-git-revision-date-localized-plugin mkdocs-git-authors-plugin
- - name: Build with MkDocs
- run: mkdocs build
- # - name: Check for hard links and symlinks
- # run: |
- # find site -type l -exec echo "Symlink found: {}" \; -o -type f -links +1 -exec echo "Hard link found: {}" \;
- - name: Upload build artifacts
- uses: actions/upload-pages-artifact@v3
- with:
- name: github-pages
- path: site
-
- deploy:
- environment:
- name: github-pages
- url: ${{ steps.deployment.outputs.page_url }}
- runs-on: ubuntu-latest
- needs: build
- steps:
- - name: Deploy to GitHub Pages
- id: deployment
- uses: actions/deploy-pages@v4
-
diff --git a/.gitignore b/.gitignore
index b9cfee33..992a162d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,224 @@
-site
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[codz]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py.cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+# Pipfile.lock
+
+# UV
+# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# uv.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+# poetry.lock
+# poetry.toml
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
+# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
+# pdm.lock
+# pdm.toml
+.pdm-python
+.pdm-build/
+
+# pixi
+# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
+# pixi.lock
+# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
+# in the .venv directory. It is recommended not to include this directory in version control.
+.pixi
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# Redis
+*.rdb
+*.aof
+*.pid
+
+# RabbitMQ
+mnesia/
+rabbitmq/
+rabbitmq-data/
+
+# ActiveMQ
+activemq-data/
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.envrc
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+# .idea/
+
+# Abstra
+# Abstra is an AI-powered process automation framework.
+# Ignore directories containing user credentials, local state, and settings.
+# Learn more at https://abstra.io/docs
+.abstra/
+
+# Visual Studio Code
+# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+# and can be added to the global gitignore or merged into this file. However, if you prefer,
+# you could uncomment the following to ignore the entire vscode folder
+# .vscode/
+
+# Ruff stuff:
+.ruff_cache/
+
+# PyPI configuration file
+.pypirc
+
+# Marimo
+marimo/_static/
+marimo/_lsp/
+__marimo__/
+
+# Streamlit
+.streamlit/secrets.toml
+
+
+
+# Custom rules
*ignored*
-*__pycache__*
-.vscode/
*.zip
Captured_Frames
Debug
diff --git a/docs/Asservissement.md b/docs/Asservissement.md
deleted file mode 100644
index f9061f2d..00000000
--- a/docs/Asservissement.md
+++ /dev/null
@@ -1 +0,0 @@
-Closed-loop control (asservissement) page
\ No newline at end of file
diff --git a/docs/Camera.md b/docs/Camera.md
deleted file mode 100644
index 03a1518e..00000000
--- a/docs/Camera.md
+++ /dev/null
@@ -1,4 +0,0 @@
-The available camera modes can be obtained with:
-
-```python
-from picamera2 import Picamera2
-
-picam2 = Picamera2()
-picam2.sensor_modes
-```
\ No newline at end of file
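-Once a mode is chosen, the sensor can be asked to use it explicitly. A minimal sketch, assuming a standard Picamera2 install (the mode index and the use of a preview configuration are assumptions):
-
-```python
-from picamera2 import Picamera2
-
-picam2 = Picamera2()
-mode = picam2.sensor_modes[0]  # pick one of the modes reported above
-
-# request that sensor mode when building the configuration
-config = picam2.create_preview_configuration(
-    sensor={"output_size": mode["size"], "bit_depth": mode["bit_depth"]}
-)
-picam2.configure(config)
-picam2.start()
-```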
diff --git a/docs/Deroulement_course.md b/docs/Deroulement_course.md
deleted file mode 100644
index d13a490c..00000000
--- a/docs/Deroulement_course.md
+++ /dev/null
@@ -1,98 +0,0 @@
-# How the race unfolds
-
-This page details the various events you can (or must) take part in over the year.
-
-## Track test sessions
-
-One and two months before the race, as well as on the two days right before it, the ENS opens its doors for track test sessions. They will have set up a track using the same components as on race day, on which you can try running your car.
-
-!!! warning
-    These sessions are very important! Do everything you can to attend! Your car may run fine on the simulator, or on a homemade track if you have one, yet behave very differently under actual race conditions. It is an opportunity to stop working blind and to concretely validate the progress of your project.
-
-### Getting there
-
-These sessions take place at the same venue as the race itself. Head to the ENS (4 Avenue des Sciences, 91190 Gif-sur-Yvette).
-
-- Go through the 2 revolving doors; you arrive in the inner courtyard.
-- Follow the walkway to the end.
-- Go through the door and take the staircase on the left.
-- Go up 2 floors.
-- In the main body of the building (a long hall with several levels of platforms), go all the way to the back on the right.
-
-### What not to forget
-
-Remember to bring all your equipment: car, batteries, battery tester/charger, spare cables, the mini-router broadcasting Intech_Second, your laptops, etc. It is a bit far for a round trip if you forget something.
-
-## Homologation
-
-This is the first part of the race. You must be able to show that your car meets the criteria defined by the rules.
-
-!!! Note
-    You can do your homologation towards the end of the day before the race if you are still there for your track tests (which is strongly recommended!!). If you can, do it; it will be a huge time saver (and stress reducer) on race day.
-
-Homologation proceeds as follows:
-
--
-
-## The time trial
-
-Here you are on race day, homologated and ready to fight. Congratulations on clearing this first step!
-You now move on to the first qualifying phase: the time trial!
-
-### An aside: for the comms team
-
-Before we talk about the races, a quick tip for communications: this is the time to shoot raw footage (remember, you have to produce a clip at the end of the GATE). If you want a good view of the races, set up on the elevated platforms to film (one floor up is enough). And do not hesitate to film other moments of the day; you never know what you will need for the clip (and it makes for nice moments together 😉).
-
-### Back to business
-
-The principle is simple: you must complete 2 laps of the track, alone but with obstacles, as fast as possible. You get 2 attempts (separated by some time to let you fix things if the first one goes badly). Only the better of the 2 times is kept, of course. If you fail to complete the 2 laps, your time is set to the maximum (normally defined slightly above the time of the slowest car that did finish the 2 laps).
-
-A ranking is then drawn up, which determines the pools for the second qualifying phase.
-
-Now it is time for the lunch break, bon appétit!
-
-## Group races
-
-On to the second qualifying phase. This time you face other cars on the track (4 to 8 cars, depending on how many cars attend the race), over 3 laps.
-
-In each race, a car accumulates points for the overall qualifying ranking: 10 points for 1st, 6 points for 2nd, 4 for 3rd, and 2 points for the other cars that finish the race. A car that does not complete the 3 laps gets no points.
-
-!!! note
-
-    For the record, in our year, not finishing the 3 laps meant 0 points, even if you had almost done them (for example, crashing at the last corner; we speak from experience 😭). M. Taillandier-Loize said he would ask for this rule to be reconsidered so that points are instead awarded per lap. We hope it gets accepted.
-
-!!! warning
-
-    Here are some important rules:
-
-    - Teams have 3 min to install their vehicle on the track.
-    - All the vehicles of the pool are positioned on the starting grid according to the qualifying results.
-    - Once all teams have announced they are ready, touching the vehicles is forbidden. The start signal is given orally by the referee.
-    - The finishing order is recorded after a number of laps defined in advance (3 by default for the qualifying phase and 5 for the final phases).
-    - A car that does not complete the defined number of laps is not ranked.
-    - A vehicle behaving in a notoriously aggressive way towards opposing vehicles is disqualified and removed from the track, as is a car deliberately preventing another from overtaking it.
-    - A vehicle immobilized on the track for more than 10 seconds, when no other car is blocking it, is removed from the track.
-    - A vehicle that has driven more than 2 m in the wrong direction is removed from the track.
-
-## Finals
-
-We hope everything has gone well for you so far! This is it, the home stretch, time to shine!
-
-For this part I will simply follow the official race documentation:
-
-At the end of qualifying, each car gets 2 scores:
-
-- the coursesQualif total (the sum of the points earned in the 2 qualifying races)
-- the CLM+courseQualif total (the previous score plus the points earned in the time-trial qualification).
-
-There are 2 rounds, each made up of 3 races.
-
-The top 2 of each pool (by coursesQualif score, with the CLM+courseQualif score breaking ties) run race 1 of each round, the 3rd and 4th run race 2 of each round, and the others run race 3 of each round. The qualifying ranking (same score and tie-breaker) determines the starting grid, with the car holding the most points starting at the front.
-
-The referees record the finishing order after a number of laps defined in advance (5 by default) and award points as follows: 25 pts for 1st, 18 pts for 2nd, 15 pts for 3rd, 12 pts for 4th, 10 pts for 5th, 8 pts for 6th, 6 pts for 7th, 4 pts for 8th, 2 pts for 9th, and 1 pt for 10th. A car that does not complete the defined number of laps gets 0 points. Once the cars of the first race are ranked, point attribution continues with the cars of the 2nd race.
-
-The final ranking uses the points of the 2 final races only. In case of a tie, the CLM+courseQualif score is taken into account for the final ranking. If there is still a tie, the coursesQualif score decides between the teams.
-
-The prizes reward the 1st, 2nd, and 3rd of the overall ranking, plus an innovation prize and a prize for the best-placed undergraduate (licence) teams.
-
-And there you go, CoVAPSy is overrr... We hope you smashed it (except the car, please)! The technical part of the GATE is now essentially done; all that remains are the promotion activities. Good luck 😉
diff --git a/docs/elec/Hat/Hat.kicad_pcb b/docs/Hardware/Hat/Hat.kicad_pcb
similarity index 100%
rename from docs/elec/Hat/Hat.kicad_pcb
rename to docs/Hardware/Hat/Hat.kicad_pcb
diff --git a/docs/elec/Hat/Hat.kicad_prl b/docs/Hardware/Hat/Hat.kicad_prl
similarity index 100%
rename from docs/elec/Hat/Hat.kicad_prl
rename to docs/Hardware/Hat/Hat.kicad_prl
diff --git a/docs/elec/Hat/Hat.kicad_pro b/docs/Hardware/Hat/Hat.kicad_pro
similarity index 100%
rename from docs/elec/Hat/Hat.kicad_pro
rename to docs/Hardware/Hat/Hat.kicad_pro
diff --git a/docs/elec/Hat/Hat.kicad_sch b/docs/Hardware/Hat/Hat.kicad_sch
similarity index 100%
rename from docs/elec/Hat/Hat.kicad_sch
rename to docs/Hardware/Hat/Hat.kicad_sch
diff --git a/docs/elec/Hat/Hat.kicad_sch-bak b/docs/Hardware/Hat/Hat.kicad_sch-bak
similarity index 100%
rename from docs/elec/Hat/Hat.kicad_sch-bak
rename to docs/Hardware/Hat/Hat.kicad_sch-bak
diff --git a/docs/elec/Hat/PCA9517D_112/PCA9517D_112.kicad_sym b/docs/Hardware/Hat/PCA9517D_112/PCA9517D_112.kicad_sym
similarity index 100%
rename from docs/elec/Hat/PCA9517D_112/PCA9517D_112.kicad_sym
rename to docs/Hardware/Hat/PCA9517D_112/PCA9517D_112.kicad_sym
diff --git a/docs/elec/Hat/PCA9517D_112/PCA9517D_112.step b/docs/Hardware/Hat/PCA9517D_112/PCA9517D_112.step
similarity index 100%
rename from docs/elec/Hat/PCA9517D_112/PCA9517D_112.step
rename to docs/Hardware/Hat/PCA9517D_112/PCA9517D_112.step
diff --git a/docs/elec/Hat/PCA9517D_112/SOIC127P600X175-8N.kicad_mod b/docs/Hardware/Hat/PCA9517D_112/SOIC127P600X175-8N.kicad_mod
similarity index 100%
rename from docs/elec/Hat/PCA9517D_112/SOIC127P600X175-8N.kicad_mod
rename to docs/Hardware/Hat/PCA9517D_112/SOIC127P600X175-8N.kicad_mod
diff --git a/docs/elec/Hat/PCA9517D_112/how-to-import.htm b/docs/Hardware/Hat/PCA9517D_112/how-to-import.htm
similarity index 100%
rename from docs/elec/Hat/PCA9517D_112/how-to-import.htm
rename to docs/Hardware/Hat/PCA9517D_112/how-to-import.htm
diff --git a/docs/elec/Hat/fabrication-toolkit-options.json b/docs/Hardware/Hat/fabrication-toolkit-options.json
similarity index 100%
rename from docs/elec/Hat/fabrication-toolkit-options.json
rename to docs/Hardware/Hat/fabrication-toolkit-options.json
diff --git a/docs/elec/Hat/fp-info-cache b/docs/Hardware/Hat/fp-info-cache
similarity index 100%
rename from docs/elec/Hat/fp-info-cache
rename to docs/Hardware/Hat/fp-info-cache
diff --git a/docs/elec/Hat/sym-lib-table b/docs/Hardware/Hat/sym-lib-table
similarity index 100%
rename from docs/elec/Hat/sym-lib-table
rename to docs/Hardware/Hat/sym-lib-table
diff --git a/docs/elec/Hat_CoVASPSy_v1re2_Schema-1.pdf b/docs/Hardware/Hat_CoVASPSy_v1re2_Schema-1.pdf
similarity index 100%
rename from docs/elec/Hat_CoVASPSy_v1re2_Schema-1.pdf
rename to docs/Hardware/Hat_CoVASPSy_v1re2_Schema-1.pdf
diff --git a/docs/elec/Interface/Interface.kicad_pcb b/docs/Hardware/Interface/Interface.kicad_pcb
similarity index 100%
rename from docs/elec/Interface/Interface.kicad_pcb
rename to docs/Hardware/Interface/Interface.kicad_pcb
diff --git a/docs/elec/Interface/Interface.kicad_prl b/docs/Hardware/Interface/Interface.kicad_prl
similarity index 100%
rename from docs/elec/Interface/Interface.kicad_prl
rename to docs/Hardware/Interface/Interface.kicad_prl
diff --git a/docs/elec/Interface/Interface.kicad_pro b/docs/Hardware/Interface/Interface.kicad_pro
similarity index 100%
rename from docs/elec/Interface/Interface.kicad_pro
rename to docs/Hardware/Interface/Interface.kicad_pro
diff --git a/docs/elec/Interface/Interface.kicad_sch b/docs/Hardware/Interface/Interface.kicad_sch
similarity index 100%
rename from docs/elec/Interface/Interface.kicad_sch
rename to docs/Hardware/Interface/Interface.kicad_sch
diff --git a/docs/elec/Interface/fp-info-cache b/docs/Hardware/Interface/fp-info-cache
similarity index 100%
rename from docs/elec/Interface/fp-info-cache
rename to docs/Hardware/Interface/fp-info-cache
diff --git a/docs/elec/Interface_CoVASPSy_v1re2_Schema.pdf b/docs/Hardware/Interface_CoVASPSy_v1re2_Schema.pdf
similarity index 100%
rename from docs/elec/Interface_CoVASPSy_v1re2_Schema.pdf
rename to docs/Hardware/Interface_CoVASPSy_v1re2_Schema.pdf
diff --git a/docs/elec/Mezzanine_CoVASPSy_v1re2_Schema.pdf b/docs/Hardware/Mezzanine_CoVASPSy_v1re2_Schema.pdf
similarity index 100%
rename from docs/elec/Mezzanine_CoVASPSy_v1re2_Schema.pdf
rename to docs/Hardware/Mezzanine_CoVASPSy_v1re2_Schema.pdf
diff --git a/docs/elec/mezzanine/mezzanine.kicad_pcb b/docs/Hardware/mezzanine/mezzanine.kicad_pcb
similarity index 100%
rename from docs/elec/mezzanine/mezzanine.kicad_pcb
rename to docs/Hardware/mezzanine/mezzanine.kicad_pcb
diff --git a/docs/elec/mezzanine/mezzanine.kicad_prl b/docs/Hardware/mezzanine/mezzanine.kicad_prl
similarity index 100%
rename from docs/elec/mezzanine/mezzanine.kicad_prl
rename to docs/Hardware/mezzanine/mezzanine.kicad_prl
diff --git a/docs/elec/mezzanine/mezzanine.kicad_pro b/docs/Hardware/mezzanine/mezzanine.kicad_pro
similarity index 100%
rename from docs/elec/mezzanine/mezzanine.kicad_pro
rename to docs/Hardware/mezzanine/mezzanine.kicad_pro
diff --git a/docs/elec/mezzanine/mezzanine.kicad_sch b/docs/Hardware/mezzanine/mezzanine.kicad_sch
similarity index 100%
rename from docs/elec/mezzanine/mezzanine.kicad_sch
rename to docs/Hardware/mezzanine/mezzanine.kicad_sch
diff --git a/docs/IA.md b/docs/IA.md
deleted file mode 100644
index dc07f627..00000000
--- a/docs/IA.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-Raceline is a Trackmania AI; its journey is described here: [https://www.youtube.com/watch?v=cUojVsCJ51I](https://www.youtube.com/watch?v=cUojVsCJ51I)
-
-Further reading:
-
-- [https://linesight-rl.github.io/linesight/build/html/index.html](https://linesight-rl.github.io/linesight/build/html/index.html)
-- [https://arxiv.org/abs/1806.06923](https://arxiv.org/abs/1806.06923)
-
diff --git a/docs/Instruction_manual_UST-10LX_MRS0020D_en_1513910662-1.pdf b/docs/Instruction_manual_UST-10LX_MRS0020D_en_1513910662-1.pdf
deleted file mode 100644
index 511f6217..00000000
Binary files a/docs/Instruction_manual_UST-10LX_MRS0020D_en_1513910662-1.pdf and /dev/null differ
diff --git a/docs/Lidar.md b/docs/Lidar.md
deleted file mode 100644
index 2ee9fbc9..00000000
--- a/docs/Lidar.md
+++ /dev/null
@@ -1,106 +0,0 @@
-# Lidar
-
-## UST-10lx basics
-
-
-
-
-The lidars used by INTech are [UST-10LX](https://www.hokuyo-aut.jp/search/single.php?serial=167) units designed and sold by Hokuyo. An unofficial (but fairly complete) documentation [exists here](https://sourceforge.net/p/urgnetwork/wiki/Home/). The UST-10LX manual
-can be [downloaded here](./Instruction_manual_UST-10LX_MRS0020D_en_1513910662-1.pdf).
-
-## UST-10lx connectivity
-
-It communicates over Ethernet using Hokuyo's SCIP protocol. SCIP can look scary at first glance, but we only use the commands described [here](https://sourceforge.net/p/urgnetwork/wiki/scip_en/). To communicate over Ethernet, the lidar ships with a fixed IP address on a /24 subnet, listening on TCP port 10940:
-
-```
-192.168.0.10:10940 (/24)
-```
-
-You therefore have to pick an address for the Pi in the same subnet. We arbitrarily chose:
-
-```
-192.168.0.20
-```
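-
-A minimal sketch for assigning this address on the Pi, assuming the lidar is plugged into `eth0` (this is a one-shot assignment; a persistent profile in your network manager is the cleaner long-term option):
-
-```bash
-# temporarily give eth0 a static address in the lidar's subnet
-sudo ip addr add 192.168.0.20/24 dev eth0
-
-# sanity check: the lidar should answer
-ping -c 1 192.168.0.10
-```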
-
-## UST-10lx specs
-
-
-
-
-| Spec                  | Value                | Unit   |
-|-----------------------|----------------------|--------|
-| Min distance          | 20                   | mm     |
-| Max distance          | 30000                | mm     |
-| Accuracy              | ±40                  | mm     |
-| Scan angle            | 270                  | °      |
-| Angular resolution    | 0.25                 | °/pt   |
-| Points per revolution | 1440                 | pt/rev |
-| Scan rate             | 40                   | Hz     |
-| Weight                | 130                  | g      |
-| Voltage               | 10-30                | VDC    |
-| Current @24VDC        | 150 (450 at startup) | mA     |
-
-
-## Using HokuyoReader class
-
-This class is from [cassc/tcp_hokuyo.py](https://gist.github.com/cassc/26ac479624cb028b2567491a68c34fb8)
-This class is simpler than [hokuyolx](#using-hokuyolx-class)
-
-Create the class instance with
-``` python
-sensor = HokuyoReader(IP, PORT)
-```
-
-Use `HokuyoReader.stop()` to get rid of any leftover data or problems from an improper shutdown, then start measuring with `HokuyoReader.startContinuous(0, 1080)`, where 0 and 1080 are the first and last steps measured:
-
-```python
-sensor.stop()
-sensor.startContinuous(0, 1080)
-```
-
-Distances can be retrieved as a numpy array with `HokuyoReader.rDistance()` (the r stands for radial):
-
-```python
-distance_array=sensor.rDistance()
-```
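-
-If Cartesian points are needed (for plotting, for example), the radial distances can be converted with a bit of numpy. A minimal sketch, assuming the returned steps span the 270° field of view symmetrically around the forward axis:
-
-```python
-import numpy as np
-
-r = sensor.rDistance()  # radial distances in mm, one per step
-theta = np.deg2rad(np.linspace(-135, 135, len(r)))  # 270° field of view
-x, y = r * np.cos(theta), r * np.sin(theta)
-```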
-
-Use `HokuyoReader.stop()` to gracefully shut down the lidar:
-
-```python
-sensor.stop()
-
-```
-
-!!! note
- This class has been renamed to Lidar to be more descriptive in code.
-
-!!! note
-    After contacting the author, he agreed to license his script under the MIT license (it previously had none), allowing us to do the same on this repository.
-
-## Using Hokuyolx class
-
-This class comes from [SkoltechRobotics/hokuyolx](https://github.com/SkoltechRobotics/hokuyolx).
-This class has considerably more options than [HokuyoReader](#using-hokuyoreader-class) but is more complicated to understand. It is documented at [http://hokuyolx.rtfd.org/](http://hokuyolx.rtfd.org/). As of 12/12/24 it does not work out of the box, so we use [HokuyoReader](#using-hokuyoreader-class).
-
-## The headaches
-
-We had quite a few headaches with the lidar disconnecting at random. Normally, either the lidar is unplugged, the eth0 interface is DOWN and has no IP, or the lidar is plugged in and the interface is UP with a static IP (192.168.0.20). Unfortunately, sometimes, for somewhat mysterious reasons, the interface stays UP but loses its static IP.
-
-Unplugging and replugging the lidar has worked every single time, but it does not address the root cause. 2 theories:
-- The cable is damaged and makes intermittent contact
-- The Raspberry Pi's network manager is misbehaving
-
-Since the retention tab of the Ethernet connector was half dead, we crimped on a new connector (thanks MiNET ❤️). This solved most of the problem, and we have only observed two recurrences, one right after the other, on 03/04/25 during track tests.
-
-### Useful commands
-
-```bash
-ip a
-```
-Shows the network interfaces, their status, and the IP addresses assigned to them.
-
-```bash
-watch -n 0.1 "ip a"
-```
-Runs a command every 0.1 s (the fastest allowed) and displays its output, here `ip a`.
-
diff --git a/docs/img/Logo/Autotech_original.jpeg b/docs/Logo/Autotech_original.jpeg
similarity index 100%
rename from docs/img/Logo/Autotech_original.jpeg
rename to docs/Logo/Autotech_original.jpeg
diff --git "a/docs/img/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (carr\303\251)_promo Ecole.svg" "b/docs/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (carr\303\251)_promo Ecole.svg"
similarity index 100%
rename from "docs/img/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (carr\303\251)_promo Ecole.svg"
rename to "docs/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (carr\303\251)_promo Ecole.svg"
diff --git "a/docs/img/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (long)_promo Ecole.svg" "b/docs/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (long)_promo Ecole.svg"
similarity index 100%
rename from "docs/img/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (long)_promo Ecole.svg"
rename to "docs/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (long)_promo Ecole.svg"
diff --git a/docs/Simulateur.md b/docs/Simulateur.md
deleted file mode 100644
index e568c8d5..00000000
--- a/docs/Simulateur.md
+++ /dev/null
@@ -1,132 +0,0 @@
-# Simulator
-
-This documentation page details how the scripts built around [Webots](https://cyberbotics.com) work; we use them to train an AI on a simulated track.
-
----
-## Objectives
-
-We want to simulate a car driving in an environment similar to the race track. At each simulation step, the data from the various sensors is fed to a model, which returns a steering setpoint to apply to the vehicle at the next simulation step.
-
-
----
-## Basic single-car organization
-
-A first approach could be to organize our world with a [Track](#track) and a [Car](#car). In this approach, all the logic would be handled at the level of the [Driver](https://cyberbotics.com/doc/automobile/driver-library) of the [Car](#car):
-- get the sensor data
-- run the model to obtain a setpoint
-- update the car's state according to that setpoint
-- and finally compute the rewards handed out at the end of each episode to train the [AI](./IA.md)
-
-!!! note
-    The problem with this architecture is that it does not scale. It forces us to use a single car at a time: with more cars, each one would manage its own local copy of the model, so we would train _n_ models instead of 1, which is pointless.
-
-
-
----
-## Complex organization with _n_ cars
-
-To overcome these limits, we propose moving the model execution into a controller separate from the _n_ cars. We therefore create a [Robot](https://cyberbotics.com/doc/reference/robot) of type [Supervisor](https://cyberbotics.com/doc/reference/supervisor), named `WorldSupervisor`, by adding the field `supervisor TRUE` to the definition string of the node corresponding to our supervisor. Our goal is to create _n_ cars at world initialization and to make the cars and the WorldSupervisor communicate via [Emitters](https://www.cyberbotics.com/doc/reference/emitter)/[Receivers](https://www.cyberbotics.com/doc/reference/receiver). The WorldSupervisor therefore needs _n_ [Emitter](https://www.cyberbotics.com/doc/reference/emitter)/[Receiver](https://www.cyberbotics.com/doc/reference/receiver) pairs to talk to each car on a different channel.
-
-!!! warning
-    _n_ is not known before the WorldSupervisor's controller runs. However, the state of a [Robot](https://cyberbotics.com/doc/reference/robot) as seen by the controller functions in Webots does not change dynamically, i.e. if the WorldSupervisor adds devices to itself through its controller at runtime, they will not be accessible via the `getDevice(name)` method.
-
-This is why, in addition to a WorldSupervisor handling the [AI](./IA.md) logic, we need another [Supervisor](https://cyberbotics.com/doc/reference/supervisor) whose role is to create, at the very start of world initialization, the WorldSupervisor with its _n_ [Emitter](https://www.cyberbotics.com/doc/reference/emitter)/[Receiver](https://www.cyberbotics.com/doc/reference/receiver) pairs as well as the _n_ cars. We will call this new [Supervisor](https://cyberbotics.com/doc/reference/supervisor) `WorldInit`.
-
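-To make that creation step concrete, here is a minimal sketch of what `WorldInit`'s controller could look like (the vehicle count, the node string, and the car placement are assumptions; the [Supervisor](https://cyberbotics.com/doc/reference/supervisor) API calls themselves are standard Webots):
-
-```python
-from controller import Supervisor
-
-n_vehicles = 4  # assumption: the real value comes from the project configuration
-
-supervisor = Supervisor()
-children = supervisor.getRoot().getField("children")
-
-# spawn the n cars; the proto matches Simulateur/protos/TT02_2023b.proto
-for rank in range(n_vehicles):
-    children.importMFNodeFromString(
-        -1,
-        f'DEF TT02_{rank} TT02_2023b {{ translation 0 {rank} 0.05 }}'
-    )
-```
-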
-!!! warning
-    The [Emitter](https://www.cyberbotics.com/doc/reference/emitter)/[Receiver](https://www.cyberbotics.com/doc/reference/receiver) API does not guarantee any specific transmission schedule. Sometimes several packets are grouped and received together, and sometimes no packet is received at all. These cases must be handled, and we lose the determinism of the training.
-
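-In practice this means the receiving side has to drain its queue defensively at every step. A sketch of the pattern (the `receiver` device and the `handle_packet` function are assumptions; the queue calls are the standard Webots Receiver API):
-
-```python
-# zero, one, or several packets may be waiting at any given step
-latest = None
-while receiver.getQueueLength() > 0:
-    latest = receiver.getData()  # keep only the most recent payload
-    receiver.nextPacket()
-if latest is not None:
-    handle_packet(latest)  # hypothetical handler for the newest state
-```
-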
----
-### During world initialization
-
-
-
----
-## Webots environment used
-
-### Track
-
-The world used is `Simulateur/worlds/piste2.wbt`. This world comes from the 2023 version of the [official CoVAPsy GitHub](https://github.com/ajuton-ens/CourseVoituresAutonomesSaclay).
-
-### Car
-
-The car used is the TT02 (`Simulateur/protos/TT02_2023b.proto`). This proto comes from the 2023 version of the [official CoVAPsy GitHub](https://github.com/ajuton-ens/CourseVoituresAutonomesSaclay).
-
-### Sensors
-
-The sensors used are:
-
-- A [Lidar](./Lidar.md) matching the specifications of our real lidar.
-
-- A [TouchSensor](https://cyberbotics.com/doc/reference/touchsensor), present only in the simulator, to detect collisions.
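-
-A sketch of how a car's controller could read that TouchSensor (the device name is an assumption; the [Driver](https://cyberbotics.com/doc/automobile/driver-library) and TouchSensor calls are standard Webots):
-
-```python
-from vehicle import Driver
-
-driver = Driver()
-timestep = int(driver.getBasicTimeStep())
-
-touch = driver.getDevice("touch_sensor")  # assumption: name declared in the proto
-touch.enable(timestep)
-
-while driver.step() != -1:
-    if touch.getValue() > 0:  # any non-zero value means a collision this step
-        print("collision detected")
-```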
diff --git a/docs/UV_USAGE.md b/docs/UV_USAGE.md
deleted file mode 100644
index b7cc2bdb..00000000
--- a/docs/UV_USAGE.md
+++ /dev/null
@@ -1,127 +0,0 @@
-# UV Dependency Management Guide
-
-This project now uses [UV](https://github.com/astral-sh/uv) for fast, reliable Python package management.
-
-## Quick Start
-
-### Install UV
-```bash
-curl -LsSf https://astral.sh/uv/install.sh | sh
-source $HOME/.cargo/env
-```
-
-### Install Project Dependencies
-```bash
-# Install all runtime dependencies
-uv sync
-
-# Install with documentation dependencies
-uv sync --extra docs
-
-# Install with development dependencies
-uv sync --extra dev
-
-# Install all optional dependencies
-uv sync --all-extras
-```
-
-### Running Python Scripts and Commands
-```bash
-# Run a script using the project's virtual environment
-uv run python src/HL/main.py
-
-# Run a script directly (if defined in pyproject.toml)
-uv run covapsy-main
-
-# Run any command with the project's dependencies
-uv run mkdocs serve
-
-# Note: Always use 'uv run' prefix to ensure you're using the UV-managed environment
-```
-
-### Managing Dependencies
-
-#### Add a new dependency
-```bash
-# Add runtime dependency
-uv add numpy
-
-# Add development dependency
-uv add --dev pytest
-
-# Add documentation dependency
-uv add --optional docs mkdocs
-```
-
-#### Remove a dependency
-```bash
-uv remove package-name
-```
-
-#### Update dependencies
-```bash
-# Update all packages
-uv lock --upgrade
-
-# Update specific package
-uv add package-name@latest
-```
-
-### Virtual Environment Management
-
-UV automatically manages virtual environments. You can also:
-
-```bash
-# Activate the virtual environment manually
-source .venv/bin/activate
-
-# Or run commands directly
-uv run python your_script.py
-```
-
-### Lock File
-
-The `uv.lock` file pins exact versions for reproducible builds. Commit this file to version control.
-
-
-## Migrating from pip
-
-### Old way (pip):
-```bash
-pip install -r requirements.txt
-```
-
-### New way (UV):
-```bash
-uv sync
-```
-
-## Benefits of UV
-
-- **Fast**: 10-100x faster than pip
-- **Reliable**: Deterministic resolution with lock files
-- **Cross-platform**: Works on Linux, macOS, Windows
-- **Drop-in replacement**: Compatible with existing Python tooling
-- **Modern**: Built in Rust with modern dependency resolution
-
-## Troubleshooting
-
-### Common Issues
-
-#### "Plugin not installed" or "Module not found" errors
-**Problem**: Running commands directly (e.g., `mkdocs serve`) instead of through UV.
-**Solution**: Always prefix commands with `uv run`:
-```bash
-# ❌ Wrong
-mkdocs serve
-
-# ✅ Correct
-uv run mkdocs serve
-```
-
-#### UV installs unwanted dependencies
-**Problem**: UV may incorrectly resolve optional dependencies due to dependency resolution bugs.
-**Solution**: Remove problematic lock files and clear cache:
-```bash
-rm uv.lock
-uv cache clean
-uv sync --extra docs
-```
diff --git a/docs/building_docs.md b/docs/building_docs.md
deleted file mode 100644
index 51352797..00000000
--- a/docs/building_docs.md
+++ /dev/null
@@ -1,175 +0,0 @@
-# How to use Markdown and MkDocs
-
-This documentation is built using Markdown and MkDocs. This page is a guide to get you started contributing.
-
-## What is Markdown
-
-> Markdown is a lightweight markup language used to format text in a simple and readable way. It uses plain-text syntax to define elements like headings, lists, links, and emphasis (e.g., **bold**, *italic*). Markdown is commonly used in documentation, blogging platforms, and version control systems like GitHub because it is easy to write and converts seamlessly into HTML for web display. -ChatGPT
-
-Markdown files are often recognizable by their `.md` and rarely `.markdown` file extensions.
-
-## Basic Syntax
-
-### Bold and Italics
-
-To make bold text, surround your text with double asterisks:
-
-\*\*This text will be bolded\*\*
-
-**This text will be bolded**
-
-For italics, use a single asterisk:
-
-\*This text will be italicized\*
-
-*This text will be italicized*
-
-These can be combined:
-
-\*\*\*This text will be bold and italic\*\*\*
-
-***This text will be bold and italic***
-
-### Headings
-
-Headings are denoted using `#` at the beginning of a line. The number of `#` determines the depth of the header:
-
-| Input             | Render                |
-|-------------------|-----------------------|
-| # Heading 1       | <h1>Heading 1</h1>    |
-| ## Heading 2      | <h2>Heading 2</h2>    |
-| ### Heading 3     | <h3>Heading 3</h3>    |
-| #### Heading 4    | <h4>Heading 4</h4>    |
-| ##### Heading 5   | <h5>Heading 5</h5>    |
-!!! warning
- A space is required for a heading to work.
-
- - \#Heading 1 ❌
-    - \# Heading 1 ✔️
-
-### Links
-
-To add links, use the following syntax:
-
-\[The text that will be displayed](The link to follow)
-
-\[A link to example.org](https://example.org/)
-
-[A link to example.org](https://example.org/)
-
-!!! Note
-    These can be used for internal links as well. For example, we can link this section with \[Links](#links) or \[Links](/building_docs/#links): [Links](#links).
-
- The second method can be generalized to reach any page or header on the website.
-
-### Images
-
-The syntax for adding images is almost the same as for links. The alt text is what screen readers announce; it should therefore be descriptive rather than a title.
-
-\!\[Alt text](path/to/image.png)
-
-\!\[A square version of the autotech logo. It is used as a favicon for the site](img/favicon.png)
-
-
-
-!!! Note
- See [Images in HTML](#images-in-html) for more fine-grain control of images.
-
-### Code Blocks
-
-Code blocks in Markdown can be created using triple backticks (\`\`\`). You can specify the language for syntax highlighting by adding the language name after the opening backticks. Here is an example of a code block in Python:
-
-\`\`\`python
-
-def hello_world():
-
- print("Hello, world!")
-
-\`\`\`
-
-```python
-def hello_world():
- print("Hello, world!")
-```
-
-You can also create inline code by wrapping text in single backticks (\`). For example, \`print("Hello, world!")\` renders as `print("Hello, world!")`.
-
-## Advanced Syntax
-
-Markdown's strength is in the simplicity of writing, but that sometimes comes back to bite us when trying to do something a little more complicated. Luckily, most Markdown renderers accept some HTML in Markdown files. This is the case with MkDocs and GitHub. **HTML should only be used as necessary.**
-
-### Comments
-
-In Markdown, there is no built-in syntax for comments. However, you can use HTML comments to add notes or comments that will not be rendered in the final output. Here is an example:
-
-```
-<!-- This comment will not be rendered in the final output -->
-```
-
-### Images in HTML
-
-In plain Markdown, images cannot be scaled or rendered next to text. Using the HTML `<img>` tag, it is possible to customize your image's behavior. Here is an example of an image that is scaled and displayed to the right. Keep in mind that images **can't** be rendered above the position of the `<img>` tag:
-
-```
-<img src="img/favicon.png" alt="A square version of the autotech logo" width="100" align="right">
-```
-
-
-
-!!! warning
-    Because HTML and Markdown are compiled differently, image paths must be written differently in HTML than in Markdown.
-
-
-
-### Admonitions
-
-Admonitions are a way to highlight important information in your documentation. MkDocs supports several types of admonitions, such as notes, warnings/caution, danger, and tips/hint. Here is an example of how to use them:
-
-```
-!!! note
- This is a note admonition.
-```
-!!! note
- This is a note admonition.
-
-```
-!!! warning
- This is a warning admonition.
-```
-!!! warning
- This is a warning admonition.
-
-## Using MkDocs
-
-For full documentation, visit [mkdocs.org](https://www.mkdocs.org).
-
-### Commands
-
-* `mkdocs serve` - Start the live-reloading docs server. It should now be accessible at [127.0.0.1:8000](http://127.0.0.1:8000/) on **your machine**.
-* `mkdocs gh-deploy` - Build the documentation site on the gh-pages branch of your repository.
-
-!!! Note
- On the [Club-INTech/CoVAPSy](https://github.com/Club-INTech/CoVAPSy) repository, `mkdocs gh-deploy` is automatically called upon push to the main branch. It can still be manually called when on another branch, but [gh-pages](https://github.com/Club-INTech/CoVAPSy/tree/gh-pages) will be overwritten by any push on main.
-
-### Project Layout
-
- mkdocs.yml ## The configuration file.
- docs/
- index.md ## The documentation homepage.
- ... ## Other markdown pages, images, and other files.
- img/
- favicon.png ## The icon that appears on the browser tab.
- ... other images
-
-### Using GitHub Pages
-
-Navigate to the repository, then to settings/pages. Using the drop-down menus, select "Deploy from branch" as the source, then "gh-pages" and "/ (root)" as the branch.
-
-!!! Note
- You may have to run `mkdocs gh-deploy` for the gh-pages branch to show. See [Commands](#commands).
diff --git "a/docs/img/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (carr\303\251)_promo Ecole_transparent.png" "b/docs/img/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (carr\303\251)_promo Ecole_transparent.png"
deleted file mode 100755
index b3c13413..00000000
Binary files "a/docs/img/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (carr\303\251)_promo Ecole_transparent.png" and /dev/null differ
diff --git "a/docs/img/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (long)_promo Ecole_blanc.png" "b/docs/img/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (long)_promo Ecole_blanc.png"
deleted file mode 100644
index b7c48769..00000000
Binary files "a/docs/img/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (long)_promo Ecole_blanc.png" and /dev/null differ
diff --git "a/docs/img/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (long)_promo Ecole_blanc_120px.png" "b/docs/img/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (long)_promo Ecole_blanc_120px.png"
deleted file mode 100644
index 55f269f9..00000000
Binary files "a/docs/img/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (long)_promo Ecole_blanc_120px.png" and /dev/null differ
diff --git "a/docs/img/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (long)_promo Ecole_noir.png" "b/docs/img/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (long)_promo Ecole_noir.png"
deleted file mode 100644
index a67ea072..00000000
Binary files "a/docs/img/Logo/Logo_\303\251quipe N\302\26020_AutoTech 2025 (long)_promo Ecole_noir.png" and /dev/null differ
diff --git a/docs/img/UST-10LX_Scan_pattern.png b/docs/img/UST-10LX_Scan_pattern.png
deleted file mode 100644
index da6f082a..00000000
Binary files a/docs/img/UST-10LX_Scan_pattern.png and /dev/null differ
diff --git a/docs/img/UST-10lx.jpg b/docs/img/UST-10lx.jpg
deleted file mode 100644
index 7de34604..00000000
Binary files a/docs/img/UST-10lx.jpg and /dev/null differ
diff --git a/docs/img/favicon.ico b/docs/img/favicon.ico
deleted file mode 100644
index 48ff8b1f..00000000
Binary files a/docs/img/favicon.ico and /dev/null differ
diff --git a/docs/img/favicon.png b/docs/img/favicon.png
deleted file mode 100755
index b3c13413..00000000
Binary files a/docs/img/favicon.png and /dev/null differ
diff --git a/docs/index.md b/docs/index.md
deleted file mode 100755
index 134fbf30..00000000
--- a/docs/index.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Home
-
-Hello to you, dear INTech member interested in AutoTech. These pages will be your bible as you start contributing to AutoTech.
-
-This documentation aims to be self-sufficient, but since no documentation is perfect, do not hesitate to reach out to your 2A (second-year) mentors.
-
-The most precious advice I can give you is to document as you go, and to use Git exclusively.
-
-Use Git, because you will be making changes here and there, and especially during the race rush you may be tempted to scp (ssh copy) files around. Resist. Doing so is a problem because you risk losing versions of the code that worked well. In the same vein, use and abuse GitHub's Release feature. It will be very useful when you are in the rush and nothing works anymore.
-
-Finally, document as you go. You may be tempted to say: "It's the rush, I'll document later." Resist that too. First, you may understand some things better by documenting them properly. Second, you risk forgetting things. And third, you could find yourself writing the documentation on September 17th, when your first meeting with the 1A (first-years) is on September 18th and you have not touched the project since mid-April.
\ No newline at end of file
diff --git a/docs/requirments.txt b/docs/requirments.txt
deleted file mode 100644
index 99f97a46..00000000
--- a/docs/requirments.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-mkdocs
-mkdocs-mermaid2-plugin
-mkdocs-git-revision-date-localized-plugin
-mkdocs-git-authors-plugin
\ No newline at end of file
diff --git a/mkdocs.yml b/mkdocs.yml
deleted file mode 100755
index e298a5c4..00000000
--- a/mkdocs.yml
+++ /dev/null
@@ -1,67 +0,0 @@
-site_name: AutoTech's Docs
-
-nav:
- - Home: index.md
- - Lidar.md
- - Simulateur.md
- - IA.md
- - Asservissement.md
- - How to create documentation: building_docs.md
-  - How the race unfolds: Deroulement_course.md
-  - Race rules: https://ajuton-ens.github.io/CourseVoituresAutonomesSaclay/
-
-theme:
- title: AutoTech's Docs
- name: readthedocs
- logo: img/Logo/Logo_équipe N°20_AutoTech 2025 (long)_promo Ecole_transparent.png
- favicon: img/favicon.ico
- # icon: does not work idk why -Matthias
- # admonition:
- # note: octicons/tag-16
- # abstract: octicons/checklist-16
- # info: octicons/info-16
- # tip: octicons/squirrel-16
- # success: octicons/check-16
- # question: octicons/question-16
- # warning: octicons/alert-16
- # failure: octicons/x-circle-16
- # danger: octicons/zap-16
- # bug: octicons/bug-16
- # example: octicons/beaker-16
- # quote: octicons/quote-16
-
- # locale: fr # does not work idk why -Arnaud autotech 2025
-
-repo_url: https://github.com/Association-INTech/CoVAPSy
-
-site_description: "AutoTech est un projet de l'association de robotique, INTech, de Telecom SudParis et IMT Business School. Il a pour but de concevoir un véhicule autonome pour la compétition CoVAPSy."
-
-# site_author: "AutoTech"
-
-# exclude_docs: |
-# api-config.json # A file with this name anywhere.
-# /requirements.txt # Top-level "docs/requirements.txt".
-# *.py # Any file with this extension anywhere.
-# !/foo/example.py # But keep this particular file.
-
-markdown_extensions:
- - admonition # This is a plugin for adding colored boxes for notes, warnings, etc.
-
-plugins:
- - search
- - mermaid2
-  # TODO: Override the theme to add authors and dates
- # - git-authors #https://github.com/timvink/mkdocs-git-authors-plugin
- # - git-revision-date-localized: #https://timvink.github.io/mkdocs-git-revision-date-localized-plugin/options/
- # type: timeago
- # custom_format: "%d. %B %Y"
- # timezone: Europe/Amsterdam
- # locale: en
- # fallback_to_build_date: false
- # enable_creation_date: true
- # exclude:
- # - index.md
- # enable_git_follow: true
- # enabled: true
- # strict: true
diff --git a/pyproject.toml b/pyproject.toml
index 848ed581..e0027d5b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,73 +1,25 @@
[project]
-name = "Autotech"
+name = "autotech"
version = "0.1.0"
-description = "Participation de INTech à CoVAPSy"
+description = "INTech participation to CoVAPSy"
readme = "README.md"
requires-python = ">=3.12,<3.14"
authors = [
+ # 2025 team
{ name = "Arnaud Costermans", email = "arnaud.costermans@intech-robotics.fr" },
{ name = "Matthias Bienvenu", email = "matthias.bienvenu@intech-robotics.fr" },
{ name = "Alexandre Naizondard", email = "alexandre.naizondard@telecom-sudparis.eu" },
{ name = "Antonin Fruchet", email = "antonin.fruchet@telecom-sudparis.eu" },
+ { name = "Mark Kirov", email = "mark.kirov@telecom-sudparis.eu" },
+ # 2026 team
{ name = "Cyril Bampeta", email = "cyril.bampeta@intech-robotics.fr" },
+ { name = "Robinson Cerceau", email = "robinson.cerceau@intech-robotics.fr" },
+ { name = "Nikopol Markgraf", email = "nikopol.markgra@intech-robotics.fr" },
+ { name = "Nachid Raslane", email = "nachid.raslane@intech-robotics.fr" },
+ { name = "Arthur Ripoll", email = "arthur.ripoll@intech-robotics.fr" },
+ { name = "Mohamed Benabadji", email = "mohamed.benabadji@intech-robotics.fr" },
]
-dependencies = [
-
-]
-
-[project.optional-dependencies]
-docs = [
- "mkdocs>=1.4.0",
- "mkdocs-mermaid2-plugin>=0.6.0",
- "mkdocs-git-revision-date-localized-plugin>=1.1.0",
- "mkdocs-git-authors-plugin>=0.6.0",
-]
-simu = [
- # Core scientific computing and data processing
- "numpy>=1.21.0",
- "matplotlib>=3.4.0",
-
- # Machine learning and AI
- "onnx>=1.8.0",
- "onnxruntime>=1.8.0",
- "torch>=1.9.0",
- "stable-baselines3>=1.6.0",
-
- # Used in the controller
- "psutil[simu]>=7.1.3",
-]
-rpi = [
- # Note: picamera2 causes UV to auto-include rpi extras even when not requested
- # This is likely a UV bug. Add back when needed:
- # "picamera2>=0.3.0",
- # AI Inference
- "onnxruntime>=1.8.0",
- # Raspberry Pi specific hardware control
- "rpi-hardware-pwm>=0.1.0",
- "RPi.GPIO>=0.7.1",
- "gpiozero>=1.6.0",
- # I2C and SPI communication (Linux specific)
- "smbus>=1.1.post2",
- "spidev>=3.5",
- # OLED display support
- "luma.oled>=3.8.0",
- "luma.core>=2.3.0",
- "Adafruit-SSD1306>=1.6.0",
- # Time-of-Flight sensor support
- "adafruit-circuitpython-vl53l0x>=3.6.0",
- "adafruit-blinka>=8.0.0",
- # PS4 controller support
- "pyPS4Controller>=1.2.0",
- "zmq>=0.0.0",
- "netifaces>=0.11.0",
- "matplotlib>=3.10.5",
- "opencv-python>=4.12.0.88",
- "scipy>=1.15.3",
- "websockets>=16.0",
- "fastapi>=0.128.0",
- "uvicorn>=0.40.0",
- "websocket>=0.2.1",
- "websockets>=16.0",
-]
+[tool.uv.workspace]
+members = ["src/high_level", "src/simulation"]
diff --git a/scripts/Simple.py b/scripts/Simple.py
deleted file mode 100644
index b7e26308..00000000
--- a/scripts/Simple.py
+++ /dev/null
@@ -1,104 +0,0 @@
-from HL.Lidar import Lidar
-
-import time
-from rpi_hardware_pwm import HardwarePWM
-
-IP = '192.168.0.10'
-PORT = 10940
-
-# parameters of the set_vitesse_m_s function
-direction_prop = 1 # -1 for reversed ESCs, i.e. when a low duty cycle corresponds to forward motion
-pwm_stop_prop = 7.53
-point_mort_prop = 0.5
-delta_pwm_max_prop = 1.1 # duty-cycle offset at which maximum speed is reached
-
-vitesse_max_m_s_hard = 8 # top speed the car can physically reach
-vitesse_max_m_s_soft = 2 # maximum speed we allow ourselves to reach
-
-
-# parameters of the set_direction_degre function
-direction = 1 # 1 if angle_pwm_min is full left, -1 if angle_pwm_min is full right
-angle_pwm_min = 6.91 # min
-angle_pwm_max = 10.7 # max
-angle_pwm_centre= 8.805
-
-angle_degre_max = +18 # to the left
-angle_degre=0
-
-pwm_prop = HardwarePWM(pwm_channel=0, hz=50, chip=2) #use chip 2 on pi 5 in accordance with the documentation
-pwm_prop.start(pwm_stop_prop)
-
-def set_vitesse_m_s(vitesse_m_s):
- if vitesse_m_s > vitesse_max_m_s_soft :
- vitesse_m_s = vitesse_max_m_s_soft
- elif vitesse_m_s < -vitesse_max_m_s_hard :
- vitesse_m_s = -vitesse_max_m_s_hard
- if vitesse_m_s == 0 :
- pwm_prop.change_duty_cycle(pwm_stop_prop)
- elif vitesse_m_s > 0 :
- vitesse = vitesse_m_s * (delta_pwm_max_prop)/vitesse_max_m_s_hard
- pwm_prop.change_duty_cycle(pwm_stop_prop + direction_prop*(point_mort_prop + vitesse ))
- elif vitesse_m_s < 0 :
- vitesse = vitesse_m_s * (delta_pwm_max_prop)/vitesse_max_m_s_hard
- pwm_prop.change_duty_cycle(pwm_stop_prop - direction_prop*(point_mort_prop - vitesse ))
-
-def recule():
- set_vitesse_m_s(-vitesse_max_m_s_hard)
- time.sleep(0.2)
- set_vitesse_m_s(0)
- time.sleep(0.2)
- set_vitesse_m_s(-1)
-
-pwm_dir = HardwarePWM(pwm_channel=1,hz=50,chip=2) #use chip 2 on pi 5 in accordance with the documentation
-pwm_dir.start(angle_pwm_centre)
-
-def set_direction_degre(angle_degre) :
- global angle_pwm_min
- global angle_pwm_max
- global angle_pwm_centre
- angle_pwm = angle_pwm_centre + direction * (angle_pwm_max - angle_pwm_min) * angle_degre /(2 * angle_degre_max )
- if angle_pwm > angle_pwm_max :
- angle_pwm = angle_pwm_max
- if angle_pwm < angle_pwm_min :
- angle_pwm = angle_pwm_min
- pwm_dir.change_duty_cycle(angle_pwm)
-
-# connect to and start the lidar
-lidar = Lidar(IP, PORT)
-lidar.stop()
-lidar.startContinuous(0, 1080)
-
-
-tableau_lidar_mm = [0]*360 # array of 360 zeros, one cell per degree
-
-time.sleep(1) # give the lidar time to spin up
-
-try :
- while True :
- for angle in range(len(tableau_lidar_mm)) :
-            # translate the lidar's step index into an index of the array
- if angle > 135 and angle < 225:
- tableau_lidar_mm[angle] = float('nan')
- else:
-                tableau_lidar_mm[angle] = lidar.rDistance()[540 + (-angle * 4)]
-        # the steering angle is proportional to the difference between
-        # the lidar measurements at -60° and +60°
-
-        angle_degre = 0.02*(tableau_lidar_mm[60]-tableau_lidar_mm[-60])
- print(tableau_lidar_mm[60], tableau_lidar_mm[-60], angle_degre)
- set_direction_degre(angle_degre)
- vitesse_m_s = 0.05
- set_vitesse_m_s(vitesse_m_s)
- time.sleep(0.1)
- ##############################################
-except KeyboardInterrupt: # catch the CTRL+C
-    vitesse_m_s = 0
-    set_vitesse_m_s(vitesse_m_s)
-    print("end of acquisition")
-
-# stop and disconnect the lidar and the motors
-lidar.stop()
-pwm_dir.stop()
-pwm_prop.start(pwm_stop_prop)
-
-
diff --git a/scripts/displayvoltage.py b/scripts/displayvoltage.py
deleted file mode 100644
index 1040c650..00000000
--- a/scripts/displayvoltage.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import time
-from luma.core.interface.serial import i2c
-from luma.core.render import canvas
-from luma.oled.device import ssd1306
-from PIL import Image, ImageDraw, ImageFont
-import struct
-import smbus #type: ignore #ignore the module could not be resolved error because it is a linux only module
-
-bus = smbus.SMBus(1) # 1 indicates /dev/i2c-1
-
-# I2C address of the slave
-SLAVE_ADDRESS = 0x08
-# I2C configuration
-serial = i2c(port=1, address=0x3C)
-oled_display = ssd1306(serial)
-font = ImageFont.load_default()
-
-def write_data(data):
- # Convert string to list of ASCII values
- data_list = [ord(char) for char in data]
- bus.write_i2c_block_data(SLAVE_ADDRESS, 0, data_list)
-
-def read_data(num_floats=3):
-
- # Each float is 4 bytes
- length = num_floats * 4
- # Read a block of data from the slave
- data = bus.read_i2c_block_data(SLAVE_ADDRESS, 0, length)
- # Convert the byte data to floats
- if len(data) >= length:
- float_values = struct.unpack('f' * num_floats, bytes(data[:length]))
- return list(float_values)
- else:
- raise ValueError("Not enough data received from I2C bus")
-
-def displayvoltage():
- received = read_data(2) # Adjust length as needed
- received= [ round(elem, 2) for elem in received ]
- for i in range(len(received)):
- if received[i] < 6:
- received[i] = 0.0
- # print(f"Received from slave: {received}")
- with canvas(oled_display) as draw:
- display_height = oled_display.height
- text = f"LiP:{received[0]:.2f}V|NiH:{received[1]:.2f}V"
- # _, text_height = draw.textsize(text, font=font) # Get the width and height of the text
- # text_height = draw.textlength(text, font=font, direction="ttb")
- text_height = 11
- print(f"Text height: {text_height}")
- text_y_position = display_height - text_height # Position text at the bottom
- draw.text((0, text_y_position), text, fill="white", font=font)
-
-
-if __name__ == "__main__":
- while True:
- displayvoltage()
-
\ No newline at end of file
diff --git a/scripts/launch_train_multiprocessing.py b/scripts/launch_train_multiprocessing.py
deleted file mode 100644
index 6ec5ba8a..00000000
--- a/scripts/launch_train_multiprocessing.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import logging
-import os
-import sys
-
-from typing import *
-
-import torch.nn as nn
-
-from stable_baselines3 import PPO
-from stable_baselines3.common.vec_env import SubprocVecEnv
-
-simu_path = __file__.rsplit('/', 2)[0] + '/src/Simulateur'
-if simu_path not in sys.path:
- sys.path.insert(0, simu_path)
-
-from config import *
-from TemporalResNetExtractor import TemporalResNetExtractor
-from CNN1DResNetExtractor import CNN1DResNetExtractor
-from onnx_utils import *
-
-from WebotsSimulationGymEnvironment import WebotsSimulationGymEnvironment
-if LOG_LEVEL <= logging.DEBUG: from DynamicActionPlotCallback import DynamicActionPlotDistributionCallback
-
-
-if __name__ == "__main__":
-
- if not os.path.exists("/tmp/autotech/"):
- os.mkdir("/tmp/autotech/")
-
- os.system('if [ -n "$(ls /tmp/autotech)" ]; then rm /tmp/autotech/*; fi')
-
-
- def make_env(simulation_rank: int, vehicle_rank: int):
- if LOG_LEVEL == logging.DEBUG:
-            print(f"CAREFUL !!! created a SERVER env with {simulation_rank}_{vehicle_rank}")
- return WebotsSimulationGymEnvironment(simulation_rank, vehicle_rank)
-
- envs = SubprocVecEnv([lambda simulation_rank=simulation_rank, vehicle_rank=vehicle_rank : make_env(simulation_rank, vehicle_rank) for vehicle_rank in range(n_vehicles) for simulation_rank in range(n_simulations)])
-
- ExtractorClass = CNN1DResNetExtractor
-
- policy_kwargs = dict(
- features_extractor_class=ExtractorClass,
- features_extractor_kwargs=dict(
- context_size=context_size,
- lidar_horizontal_resolution=lidar_horizontal_resolution,
- camera_horizontal_resolution=camera_horizontal_resolution,
- device=device
- ),
- activation_fn=nn.ReLU,
- net_arch=[512, 512, 512],
- )
-
-
- ppo_args = dict(
- n_steps=4096,
- n_epochs=10,
- batch_size=256,
- learning_rate=3e-4,
- gamma=0.99,
- verbose=1,
- normalize_advantage=True,
- device=device
- )
-
-
- save_path = __file__.rsplit("/", 1)[0] + "/checkpoints/" + ExtractorClass.__name__ + "/"
- os.makedirs(save_path, exist_ok=True)
-
-
- print(save_path)
- print(os.listdir(save_path))
-
- valid_files = [x for x in os.listdir(save_path) if x.rstrip(".zip").isnumeric()]
-
- if valid_files:
- model_name = max(
- valid_files,
- key=lambda x : int(x.rstrip(".zip"))
- )
- print(f"Loading model {save_path + model_name}")
- model = PPO.load(
- save_path + model_name,
- envs,
- **ppo_args,
- policy_kwargs=policy_kwargs
- )
- i = int(model_name.rstrip(".zip")) + 1
- print(f"----- Model found, loading {model_name} -----")
-
- else:
- model = PPO(
- "MlpPolicy",
- envs,
- **ppo_args,
- policy_kwargs=policy_kwargs
- )
-
- i = 0
- print("----- Model not found, creating a new one -----")
-
- print("MODEL HAS HYPER PARAMETERS:")
- print(f"{model.learning_rate=}")
- print(f"{model.gamma=}")
- print(f"{model.verbose=}")
- print(f"{model.n_steps=}")
- print(f"{model.n_epochs=}")
- print(f"{model.batch_size=}")
- print(f"{model.device=}")
-
- print("SERVER : finished executing")
-
- # obs = envs.reset()
- # while True:
- # action, _states = model.predict(obs, deterministic=True) # Use deterministic=True for evaluation
- # obs, reward, done, info = envs.step(action)
- # envs.render() # Optional: visualize the environment
-
-
- while True:
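- # re-export the current policy to ONNX and sanity-check it before each training round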
- export_onnx(model)
- test_onnx(model)
-
- if LOG_LEVEL == logging.DEBUG:
- model.learn(total_timesteps=500_000, callback=DynamicActionPlotDistributionCallback())
- else:
- model.learn(total_timesteps=500_000)
-
- print("iteration over")
-
- model.save(save_path + str(i))
-
- i += 1
diff --git a/scripts/tune_pwm_direction.py b/scripts/tune_pwm_direction.py
deleted file mode 100644
index db7b9f71..00000000
--- a/scripts/tune_pwm_direction.py
+++ /dev/null
@@ -1,85 +0,0 @@
-from rpi_hardware_pwm import HardwarePWM
-import time
-
-# starting parameters, with end stops very close to the center
-direction = -1 # 1 for angle_pwm_min on the left, -1 for angle_pwm_min on the right
-angle_pwm_min = 6.91 # min
-angle_pwm_max = 10.7 # max
-angle_pwm_centre = 8.805
-
-angle_degre_max = +18 # to the left
-angle_degre = 0
-
-pwm_dir = HardwarePWM(pwm_channel=1,hz=50,chip=2) #use chip 2 on pi 5 in accordance with the documentation
-pwm_dir.start(angle_pwm_centre)
-
-def set_direction_degre(angle_degre) :
- global angle_pwm_min
- global angle_pwm_max
- global angle_pwm_centre
- angle_pwm = angle_pwm_centre + direction * (angle_pwm_max - angle_pwm_min) * angle_degre /(2 * angle_degre_max )
- if angle_pwm > angle_pwm_max :
- angle_pwm = angle_pwm_max
- if angle_pwm < angle_pwm_min :
- angle_pwm = angle_pwm_min
- pwm_dir.change_duty_cycle(angle_pwm)
-
-print("réglage des butées, Q pour quitter")
-print("valeur numérique pour tester un angle de direction")
-print("I pour inverser droite et gauche")
-print("g pour diminuer la butée gauche et G pour l'augmenter")
-print("d pour diminuer la butée droite et D pour l'augmenter")
-
-while True :
- a = input("angle, I, g, G, d, D ?")
- try :
- angle_degre = int(a)
- set_direction_degre(angle_degre)
- except ValueError :
- if a == "I" :
- direction = -direction
- print("new direction : " + str(direction))
- elif a == "g" :
- if direction == 1 :
- angle_pwm_max -=0.1
- print("nouvelle butée gauche : " + str(angle_pwm_max))
- else :
- angle_pwm_min +=0.1
- print("nouvelle butée gauche : " + str(angle_pwm_min))
- angle_pwm_centre = (angle_pwm_max+angle_pwm_min)/2
- set_direction_degre(18)
- elif a == "G" :
- if direction == 1 :
- angle_pwm_max +=0.1
- print("nouvelle butée gauche : " + str(angle_pwm_max))
- else :
- angle_pwm_min -=0.1
- print("nouvelle butée gauche : " + str(angle_pwm_min))
- angle_pwm_centre = (angle_pwm_max+angle_pwm_min)/2
- set_direction_degre(18)
- elif a == "d" :
- if direction == -1 :
- angle_pwm_max -=0.1
- print("nouvelle butée droite : " + str(angle_pwm_max))
- else :
- angle_pwm_min +=0.1
- print("nouvelle butée droite : " + str(angle_pwm_min))
- angle_pwm_centre = (angle_pwm_max+angle_pwm_min)/2
- set_direction_degre(-18)
- elif a == "D" :
- if direction == -1 :
- angle_pwm_max +=0.1
- print("nouvelle butée droite : " + str(angle_pwm_max))
- else :
- angle_pwm_min -=0.1
- print("nouvelle butée droite : " + str(angle_pwm_min))
- angle_pwm_centre = (angle_pwm_max+angle_pwm_min)/2
- set_direction_degre(-18)
- else :
- break
-
-print("nouvelles valeurs")
-print("direction : " + str(direction))
-print("angle_pwm_min : " + str(angle_pwm_min))
-print("angle_pwm_max : " + str(angle_pwm_max))
-print("angle_pwm_centre : " + str(angle_pwm_centre))
\ No newline at end of file
diff --git a/scripts/tune_pwm_propulsion.py b/scripts/tune_pwm_propulsion.py
deleted file mode 100644
index 5bb65a4c..00000000
--- a/scripts/tune_pwm_propulsion.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from rpi_hardware_pwm import HardwarePWM
-import time
-import sys
-import os
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-
-from src.HL.Autotech_constant import PWM_PROP
-
-# parameters of the vitesse_m_s function, to be calibrated
-direction_prop = PWM_PROP["direction_prop"] # 1 for ESCs where a small duty ratio corresponds to forward motion
-pwm_stop_prop = PWM_PROP["pwm_stop_prop"]
-point_mort_prop = PWM_PROP["point_mort_prop"]
-delta_pwm_max_prop = PWM_PROP["delta_pwm_max_prop"] # PWM at which maximum speed is reached
-
-vitesse_max_m_s_hard = 8 # top speed the car can physically reach
-vitesse_max_m_s_soft = 2 # maximum speed we want to allow
-
-pwm_prop = HardwarePWM(pwm_channel=0, hz=50, chip=2) #use chip 2 on pi 5 in accordance with the documentation
-pwm_prop.start(pwm_stop_prop)
-
-def set_vitesse_m_s(vitesse_m_s):
- if vitesse_m_s > vitesse_max_m_s_soft :
- vitesse_m_s = vitesse_max_m_s_soft
- elif vitesse_m_s < -vitesse_max_m_s_hard :
- vitesse_m_s = -vitesse_max_m_s_hard
- if vitesse_m_s == 0 :
- pwm_prop.change_duty_cycle(pwm_stop_prop)
- elif vitesse_m_s > 0 :
- vitesse = vitesse_m_s * (delta_pwm_max_prop)/vitesse_max_m_s_hard
- pwm_prop.change_duty_cycle(pwm_stop_prop + direction_prop*(point_mort_prop + vitesse ))
- elif vitesse_m_s < 0 :
- vitesse = vitesse_m_s * (delta_pwm_max_prop)/vitesse_max_m_s_hard
- pwm_prop.change_duty_cycle(pwm_stop_prop - direction_prop*(point_mort_prop - vitesse ))
-
-def recule():
- set_vitesse_m_s(-vitesse_max_m_s_hard)
- time.sleep(0.2)
- set_vitesse_m_s(0)
- time.sleep(0.2)
- set_vitesse_m_s(-1)
-
-print("réglage des butées, Q pour quitter")
-print("valeur numérique pour tester une vitesse en mm/s")
-print("R pour reculer")
-print("I pour inverser droite et gauche")
-print("p pour diminuer delta_pwm_max_prop et P pour l'augmenter")
-print("z pour diminuer le point zéro 1,5 ms et Z pour l'augmenter")
-print("m pour diminuer le point mort et M pour l'augmenter")
-
-
-while True :
- a = input("speed in mm/s, R, I, p, P, z, Z, m, M ?")
- try :
- vitesse_mm_s = int(a)
- set_vitesse_m_s(vitesse_mm_s / 1000.0)
- except ValueError :
- if a == "I" or a == "i" :
- direction_prop = -direction_prop
- print("new direction : " + str(direction_prop))
- elif a == "R" :
- recule()
- print("reversing")
- elif a == "p" :
- delta_pwm_max_prop -=0.1
- print("nouveau delta_pwm_max_prop : " + str(delta_pwm_max_prop))
- pwm_prop.change_duty_cycle(pwm_stop_prop + direction_prop*(point_mort_prop+delta_pwm_max_prop))
- elif a == "P" :
- delta_pwm_max_prop +=0.1
- print("nouveau delta_pwm_max_prop : " + str(delta_pwm_max_prop))
- pwm_prop.change_duty_cycle(pwm_stop_prop + direction_prop*(point_mort_prop+delta_pwm_max_prop))
- elif a == "z" :
- pwm_stop_prop -=0.01
- print("nouveau pwm_stop_prop : " + str(pwm_stop_prop))
- pwm_prop.change_duty_cycle(pwm_stop_prop)
- elif a == "Z" :
- pwm_stop_prop +=0.01
- print("nouveau pwm_stop_prop : " + str(pwm_stop_prop))
- pwm_prop.change_duty_cycle(pwm_stop_prop)
- elif a == "m" :
- point_mort_prop -=0.01
- print("nouveau point_mort_prop : " + str(point_mort_prop))
- pwm_prop.change_duty_cycle(pwm_stop_prop + direction_prop*(point_mort_prop))
- elif a == "M" :
- point_mort_prop +=0.01
- print("nouveau point_mort_prop : " + str(point_mort_prop))
- pwm_prop.change_duty_cycle(pwm_stop_prop + direction_prop*(point_mort_prop))
- else :
- break
-
-pwm_prop.change_duty_cycle(pwm_stop_prop)
-print("nouvelles valeurs")
-print("direction : " + str(direction_prop))
-print("delta_pwm_max_prop : " + str(delta_pwm_max_prop))
-print("point zero 1,5 ms : "+ str(pwm_stop_prop))
-print("point mort : " + str(point_mort_prop))
\ No newline at end of file
diff --git a/src/HL/README.md b/src/HL/README.md
deleted file mode 100644
index 42c2e728..00000000
--- a/src/HL/README.md
+++ /dev/null
@@ -1 +0,0 @@
-[HokuyoReader.py](HokuyoReader.py), now Lidar.py, is included with permission from its author, [cassc](https://gist.github.com/cassc/26ac479624cb028b2567491a68c34fb8), who graciously released the code under the MIT license.
\ No newline at end of file
diff --git a/src/HL/Serveur_mq.py b/src/HL/Serveur_mq.py
deleted file mode 100644
index 94496612..00000000
--- a/src/HL/Serveur_mq.py
+++ /dev/null
@@ -1,282 +0,0 @@
-import zmq
-context = zmq.Context()
-import cv2
-import time
-import threading
-import smbus
-import logging
-import struct
-import os, signal
-
-
-from luma.core.interface.serial import i2c
-from luma.core.render import canvas
-from luma.oled.device import ssd1306
-from PIL import Image, ImageDraw, ImageFont
-from gpiozero import LED, Button, Buzzer
-import textwrap
-import socket
-
-from src.HL.programme.scripts.get_ip import check_ssh_connections
-import subprocess
-from src.HL.actionneur_capteur.Lidar import Lidar
-from src.HL.actionneur_capteur.Camera import Camera
-from src.HL.actionneur_capteur.ToF import ToF
-from src.HL.actionneur_capteur.masterI2C import I2c_arduino
-from Autotech_constant import SOCKET_ADRESS, SLAVE_ADDRESS
-
-# the different programs
-from scripts.commande_PS4 import PS4ControllerProgram
-from src.HL.programme.SshProgramme import SshProgramme
-from src.HL.programme.RemoteControl import RemoteControl
-from src.HL.programme.Poweroff import Poweroff
-from src.HL.actionneur_capteur.Camera import ProgramStreamCamera
-from src.HL.programme.module_initialisation import Initialisation
-from src.HL.programme.Car import Ai_Programme
-from backend import BackendAPI
-
-from Autotech_constant import I2C_NUMBER_DATA_RECEIVED, I2C_SLEEP_RECEIVED, I2C_SLEEP_ERROR_LOOP, TEXT_HEIGHT, TEXT_LEFT_OFFSET
-
-
-# TCP is used for the telemetry data
-telemetry = context.socket(zmq.REP)
-telemetry.bind("tcp://0.0.0.0:5557")
-
-
-class Serveur():
-
- def __init__(self):
- self.log = logging.getLogger(__name__)
- # initialize the modules that run all the time
- self.log.info("Server initialization")
-
- # initialize the buttons and the I2C bus
- self.bp_next = Button("GPIO5", bounce_time=0.1)
- self.bp_entre = Button("GPIO6", bounce_time=0.1)
-
- self.led1 = LED("GPIO17")
- self.led2 = LED("GPIO27")
- self.buzzer = Buzzer("GPIO26")
- self.log.info("GPIO: buttons, LEDs, buzzer initialized")
-
- self.serial = i2c(port=1, address=0x3C)
- self.device = ssd1306(self.serial)
- self.bus = smbus.SMBus(1) # 1 indicates /dev/i2c-1
- self.log.info("I2C: bus ouvert sur /dev/i2c-1")
-
- self.length_i2c_received = I2C_NUMBER_DATA_RECEIVED #nombre de donnée à récupéré de l'arduino (voltage lipo, voltage nimh)
-
- # time bookkeeping
- self.initial_time = time.time()
- self.last_cmd_time = time.time()
-
- # process data
- self.process_output = ""
- self.last_programme_control = 0
- self.process = None
- self.temp = None
-
- self.initialisation_module = Initialisation(self,Camera,Lidar,ToF, I2c_arduino)
-
- self.programme = [SshProgramme(),
- self.initialisation_module,
- Ai_Programme(self),
- PS4ControllerProgram(),
- RemoteControl(),
- ProgramStreamCamera(self),
- BackendAPI(self, host="0.0.0.0", port=8001, site_dir="/home/intech/CoVAPSy/src/HL/site_controle"),
- Poweroff()]
- self.log.debug("Programmes chargés: %s", [type(p).__name__ for p in self.programme])
-
- # screen state
- self.Screen = 0
- self.State = 0
- self.scroll_offset = 3
-
-
- @property
- def camera(self):
- return self.initialisation_module.camera
-
- @property
- def lidar(self):
- return self.initialisation_module.lidar
-
- @property
- def tof(self):
- return self.initialisation_module.tof
-
- @property
- def arduino_I2C(self):
- return self.initialisation_module.arduino_I2C
-
- @property
- def target_speed(self):
- return self.programme[self.last_programme_control].target_speed
-
- @property
- def direction(self):
- return self.programme[self.last_programme_control].direction
-
- #-----------------------------------------------------------------------------------------------------
- # screen display
- #-----------------------------------------------------------------------------------------------------
- def affichage_oled(self, selected): # test, unused
- im = Image.new("1", (128, 64), "black")
- draw = ImageDraw.Draw(im)
- font = ImageFont.load_default()
-
- for num, i in enumerate(range(max(selected - self.scroll_offset, 0), min(len(self.programme), selected + self.scroll_offset))):
- y = num * TEXT_HEIGHT
-
- if i == selected:
- draw.rectangle((0, y, 127, y + TEXT_HEIGHT), fill="white")
- draw.text((3, y), self.programme[i]["name"], fill="black", font=font)
- else:
- draw.text((3, y), self.programme[i]["name"], fill="white", font=font)
-
- with canvas(self.device) as display:
- display.bitmap((0, 0), im, fill="white")
-
- def make_voltage_im(self):
- """crée l'image de la derniére ligne qui affiche le voltage des deux batterie de la pi en temps réel"""
- if self.arduino_I2C is not None:
- received = [self.arduino_I2C.voltage_lipo , self.arduino_I2C.voltage_nimh]
- else:
- received = [0.0, 0.0]
-
- # filter out values below 6V and round to 2 decimal places
- received = [round(elem, 2) if elem > 6 else 0.0 for elem in received]
- text = f"LiP:{received[0]:.2f}V|NiH:{received[1]:.2f}V"
- im = Image.new("1", (128, TEXT_HEIGHT), "black")
- draw = ImageDraw.Draw(im)
- font = ImageFont.load_default()
- draw.text((3, 0), text, fill="white", font=font)
- return im
-
- def display_combined_im(self,text):
- """ fonction qui écris sur l'écran le texte qu'on lui fourni (et remet par dessus toujours le voltage des batteries)"""
- im = Image.new("1", (128, 64), "black")
- draw = ImageDraw.Draw(im)
- font = ImageFont.load_default()
-
- # Wrap the text to fit within the width of the display
- wrapped_text = textwrap.fill(text, width=20) # Adjust width as needed
- draw.text((3, 0), wrapped_text, fill="white", font=font)
-
- voltage_im = self.make_voltage_im()
- im.paste(voltage_im, (0, 64 - TEXT_HEIGHT))
-
- with canvas(self.device) as draw:
- draw.bitmap((0, 0), im, fill="white")
-
-
- def Idle(self):
- """
- Manage the screen display according to the running or selected programs.
- Screen changes are handled by the button callbacks just below.
- """
- if check_ssh_connections():
- self.led1.on()
- else:
- self.led1.off()
-
- if (self.Screen < len(self.programme)):
- text = self.programme[self.Screen].display()
- self.display_combined_im(text)
-
- def bouton_next(self):
- """Go to the next screen (visual only)."""
- self.Screen += 1
- if self.Screen >= len(self.programme):
- self.Screen = 0
-
- def bouton_entre(self, num=None):
- """Select the program shown on the screen and launch it."""
- if num is not None:
- self.Screen = num
- self.State = self.Screen
- self.start_process(self.Screen)
-
-
- #---------------------------------------------------------------------------------------------------
- # Processus
- #---------------------------------------------------------------------------------------------------
-
-
- def start_process(self, num_programme):
- """Launch the program referenced by its number:
- if it is a program that controls the car, the previous controlling program is killed;
- otherwise the program is started or killed depending on whether it was already running."""
- self.log.info("Action utilisateur: programme %d (%s)",
- num_programme,
- type(self.programme[num_programme]).__name__)
- if self.programme[num_programme].running:
- self.programme[num_programme].kill()
- if self.programme[num_programme].controls_car:
- self.last_programme_control = 0
- self.log.warning(
- "Car control handover: %s -> %s",
- type(self.programme[num_programme]).__name__,
- type(self.programme[self.last_programme_control]).__name__
- )
-
-
- self.log.info("Arrêt du programme %s",
- type(self.programme[num_programme]).__name__)
-
- elif self.programme[num_programme].controls_car:
- self.programme[self.last_programme_control].kill()
- self.programme[num_programme].start()
- self.log.warning(
- "Car control handover: %s -> %s",
- type(self.programme[self.last_programme_control]).__name__,
- type(self.programme[num_programme]).__name__
- )
- self.last_programme_control = num_programme
-
-
- else:
- self.programme[num_programme].start()
-
-
- #---------------------------------------------------------------------------------------------------
- # car function
- #---------------------------------------------------------------------------------------------------
-
- def main(self):
- self.bp_next.when_pressed = self.bouton_next
- self.bp_entre.when_pressed = self.bouton_entre
-
- self.log.info("Serveur démarré, entrée dans la boucle principale")
-
- while True:
- self.Idle()
-
-#---------------------------------------------------------------------------------------------------
-# main
-#---------------------------------------------------------------------------------------------------
-
-if __name__ == "__main__":
-
- logging.basicConfig(
- level=logging.INFO,
- format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
- handlers=[
- logging.FileHandler("/home/intech/CoVAPSy/covapsy.log"),
- logging.StreamHandler()
- ]
-
- )
- log_serveur = logging.getLogger("__main__")
- log_serveur.setLevel(level=logging.DEBUG)
-
- log_serveur = logging.getLogger("src.HL")
- log_serveur.setLevel(level=logging.DEBUG)
-
- log_lidar = logging.getLogger("src.HL.actionneur_capteur.Lidar")
- log_lidar.setLevel(level=logging.INFO)
-
- boot = Serveur()
- boot.main()
diff --git a/src/HL/__init__.py b/src/HL/__init__.py
deleted file mode 100644
index 93d8406c..00000000
--- a/src/HL/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# Fuck Python's module management
-# On second thought, not sure __init__.py is necessary
\ No newline at end of file
diff --git a/src/HL/programme/programme.py b/src/HL/programme/programme.py
deleted file mode 100644
index 9530372f..00000000
--- a/src/HL/programme/programme.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from ..Autotech_constant import LOGGING_LEVEL
-import logging
-from typing import Optional
-
-""" classe type pour tout les programme """
-class Program:
- controls_car:bool # correspond si le programme cotronle la voiture ou non pour savoir si il faut arreter l'ancien programme qui controle la voiture
- running:bool # état de base du programme (au lancement de la voiture)
- target_speed:Optional[float] # target speed
- direction:Optional[float] # target direction
-
- def __init__(self):
- self.logger = logging.getLogger(__name__)
- self.logger.setLevel(LOGGING_LEVEL)
-
- def kill(self):
- pass
-
- def start(self):
- pass
-
- def display(self):
- name = self.__class__.__name__
- if (self.running):
- return f"{name} \n (running)"
- else:
- return name
-
\ No newline at end of file
diff --git a/src/HL/programme/scripts/get_ip.py b/src/HL/programme/scripts/get_ip.py
deleted file mode 100644
index 1ca3662c..00000000
--- a/src/HL/programme/scripts/get_ip.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import netifaces as ni # netifaces is abandoned, so netifaces2 is used instead
-import subprocess
-
-def get_ip(device='wlan0'):
- ip = ni.ifaddresses(device)[ni.AF_INET][0]['addr'] # note: this only gets the first IP address of the interface; an interface can have several IP addresses
- return ip
-
-
-def check_ssh_connections():
- result = subprocess.run(['who'], stdout=subprocess.PIPE)
- output = result.stdout.decode('utf-8')
- ssh_connections = [line for line in output.split('\n') if 'pts/' in line]
- return len(ssh_connections) > 0 # bool
-
-
-if __name__ == "__main__":
- if check_ssh_connections():
- print("There are active SSH connections.")
- else:
- print("No active SSH connections.")
\ No newline at end of file
diff --git "a/src/HL/site_controle/ressources/Logo_\303\251quipe N\302\26020_AutoTech 2025 (long)_promo Ecole_transparent.png" "b/src/HL/site_controle/ressources/Logo_\303\251quipe N\302\26020_AutoTech 2025 (long)_promo Ecole_transparent.png"
deleted file mode 100644
index 3a478e11..00000000
Binary files "a/src/HL/site_controle/ressources/Logo_\303\251quipe N\302\26020_AutoTech 2025 (long)_promo Ecole_transparent.png" and /dev/null differ
diff --git a/src/Simulateur/.idea/.gitignore b/src/Simulateur/.idea/.gitignore
deleted file mode 100644
index 13566b81..00000000
--- a/src/Simulateur/.idea/.gitignore
+++ /dev/null
@@ -1,8 +0,0 @@
-# Default ignored files
-/shelf/
-/workspace.xml
-# Editor-based HTTP Client requests
-/httpRequests/
-# Datasource local storage ignored files
-/dataSources/
-/dataSources.local.xml
diff --git a/src/Simulateur/.idea/Simulateur.iml b/src/Simulateur/.idea/Simulateur.iml
deleted file mode 100644
index 88391539..00000000
--- a/src/Simulateur/.idea/Simulateur.iml
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/src/Simulateur/.idea/inspectionProfiles/profiles_settings.xml b/src/Simulateur/.idea/inspectionProfiles/profiles_settings.xml
deleted file mode 100644
index 105ce2da..00000000
--- a/src/Simulateur/.idea/inspectionProfiles/profiles_settings.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
-
-
-
\ No newline at end of file
diff --git a/src/Simulateur/.idea/misc.xml b/src/Simulateur/.idea/misc.xml
deleted file mode 100644
index d9c6cb89..00000000
--- a/src/Simulateur/.idea/misc.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/src/Simulateur/.idea/modules.xml b/src/Simulateur/.idea/modules.xml
deleted file mode 100644
index 276197d7..00000000
--- a/src/Simulateur/.idea/modules.xml
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/src/Simulateur/.idea/vcs.xml b/src/Simulateur/.idea/vcs.xml
deleted file mode 100644
index 6c0b8635..00000000
--- a/src/Simulateur/.idea/vcs.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
-
-
-
\ No newline at end of file
diff --git a/src/Simulateur/Apprentissage par renforcement Webots Gym StableBaselines 2022/controllers/controller_Gym/controller_Gym.py b/src/Simulateur/Apprentissage par renforcement Webots Gym StableBaselines 2022/controllers/controller_Gym/controller_Gym.py
deleted file mode 100644
index 4ed5f7fa..00000000
--- a/src/Simulateur/Apprentissage par renforcement Webots Gym StableBaselines 2022/controllers/controller_Gym/controller_Gym.py
+++ /dev/null
@@ -1,188 +0,0 @@
-"""Voiture autonome avec utilisation d'un
-LIDAR sur WEBOTS
-Auteur: Chrysanthe et Jessica
-"""
-
-import numpy as np
-import random
-import gymnasium as gym
-import time
-
-from stable_baselines3 import PPO
-from stable_baselines3.common.env_checker import check_env
-from stable_baselines3.common.vec_env import SubprocVecEnv
-
-from vehicle import Driver
-from controller import Lidar
-from controller import Field
-from controller import Supervisor
-
-
-#--------------GYM----------------------------
-
-# Creation of the Gym environment
-class WebotsGymEnvironment(gym.Env):
- def __init__(self):
- # driver initialization
- self.driver = Driver()
-
- basicTimeStep = int(self.driver.getBasicTimeStep())
- self.sensorTime = basicTimeStep//4
-
- # car parameters (position, etc.)
- self.voiture_robot = self.driver.getFromDef("vehicle")
-
- #Lidar
- self.lidar = self.driver.getDevice('lidar')
- self.lidar.enable(self.sensorTime)
- self.lidar.enablePointCloud()
-
- # distance sensors
- self.capteur_avant = self.driver.getDevice('front_center_sensor')
- self.capteur_gauche = self.driver.getDevice('side_left_sensor')
- self.capteur_droite = self.driver.getDevice('side_right_sensor')
- self.capteur_avant.enable(self.sensorTime)
- self.capteur_gauche.enable(self.sensorTime)
- self.capteur_droite.enable(self.sensorTime)
-
- # beacon sensor
- self.capteur_balise = self.driver.getDevice('capteur_balise')
- self.capteur_balise.enable(self.sensorTime)
-
- self.action_space = gym.spaces.Discrete(5) # available actions
- obs_min = np.zeros(self.lidar.getNumberOfPoints())
- obs_max = np.ones(self.lidar.getNumberOfPoints())
- self.observation_space = gym.spaces.Box(obs_min, obs_max, dtype=np.float32) # state coming from the LIDAR
-
- self.trans_champs = self.voiture_robot.getField("translation")
- self.rot_champs = self.voiture_robot.getField("rotation") # idk why but if this goes before the Lidar it will not work
-
-
- # check the car's state
- def observe(self):
- try:
- tableau = self.lidar.getRangeImage()
- # divide by 10 so the values lie between 0 and 1
- etat = np.array(tableau, dtype=np.float32)/10
- except Exception: # in case the lidar returns nothing
- print("No lidar data")
- etat = np.zeros(self.lidar.getNumberOfPoints(), dtype=np.float32)
-
- return etat
-
- # reset for the Gym environment
- def reset(self, seed=0):
- #self.capteur_avant.disable()
- #self.capteur_gauche.disable()
- #self.capteur_droite.disable()
-
- # random start position
- x = random.uniform(1.5, 1.65)
- y = random.uniform(3.66, 3.8)
-
- # initial pose
- INITIAL_trans = [x, y, 0.0195182]
- INITIAL_rot=[-0.000257, 0.000618, 1 , -0.784]
- self.trans_champs.setSFVec3f(INITIAL_trans)
- self.rot_champs.setSFRotation(INITIAL_rot)
-
- time.sleep(0.3) # pause after the reset
-
- #self.capteur_avant.enable(self.sensorTime)
- #self.capteur_gauche.enable(self.sensorTime)
- #self.capteur_droite.enable(self.sensorTime)
- # return the state
- obs = self.observe()
- #super().step()
- info = {}
- return obs, info
-
- # Gym environment step function
- def step(self, action):
- self.driver.setSteeringAngle([-.4, -.1, 0, .1, .4][action])
- self.driver.setCruisingSpeed(1.0)
-
- obs = self.observe()
-
- reward = 0
- done = False
- truncated = False
-
- avant = self.capteur_avant.getValue()
- gauche = self.capteur_gauche.getValue()
- droite = self.capteur_droite.getValue()
- balise = self.capteur_balise.getValue()
-
- if avant >= 900 and not done:
- print("Front collision")
- reward = -100
- done = True
- elif ((avant >= 854 and gauche >= 896) or (avant >= 696 and gauche >= 910) or gauche >= 937) and not done:
- print("Left collision")
- reward = -100
- done = True
- elif ((avant >= 850 and droite >= 893) or (avant >= 584 and droite >= 910) or droite >= 961) and not done:
- print("Right collision")
- reward = -100
- done = True
- elif balise > 700:
- done = False
- print("Beacon passed")
- reward = 20
- else:
- done = False
- reward = 0
-
- self.driver.step()
-
- return obs, reward, done, truncated, {}
-
- # Gym environment render function
- def render(self, mode="human", close=False):
- pass
-
-
-#---------------- Main program --------------------
-def main():
- n_vehicles = 4
- # env = SubprocVecEnv([lambda: WebotsGymEnvironment() for i in range(n_vehicles)])
- env = WebotsGymEnvironment()
- check_env(env)
-
- logdir = "./Webots_tb/"
- #-- , tensorboard_log = logdir -- , tb_log_name = "PPO_voiture_webots"
-
- # model definition with mostly default parameters
- model = PPO('MlpPolicy', env,
- n_steps=2048,
- n_epochs=10,
- batch_size=32,
- learning_rate=3e-3,
- verbose=1,
- device="auto" # let stable-baselines3 pick the device
- )
-
- # training
- model.learn(total_timesteps=1_000_000)
-
- # save
- model.save("Voiture_autonome_Webots_PPO")
-
- #del model
-
- # load the trained model
- #model = PPO.load("Voiture_autonome_Webots_PPO")
-
- obs, info = env.reset()
-
- for _ in range(1000000):
- # predict an action from the observation
- action, _states = model.predict(obs, deterministic=True)
- obs, reward, done, truncated, info = env.step(action)
- if done:
- obs, info = env.reset()
-
-
-if __name__ == '__main__':
- main()
-
diff --git a/src/Simulateur/Apprentissage par renforcement Webots Gym StableBaselines 2022/controllers/controller_Gym/controller_Gym_v2.py b/src/Simulateur/Apprentissage par renforcement Webots Gym StableBaselines 2022/controllers/controller_Gym/controller_Gym_v2.py
deleted file mode 100644
index c05ecff6..00000000
--- a/src/Simulateur/Apprentissage par renforcement Webots Gym StableBaselines 2022/controllers/controller_Gym/controller_Gym_v2.py
+++ /dev/null
@@ -1,213 +0,0 @@
-"""Voiture autonome avec utilisation d'un
-LIDAR sur WEBOTS
-Auteur : Chrysanthe et Jessica
-"""
-
-import numpy as np
-import random
-import gymnasium as gym
-import time
-
-from stable_baselines3 import PPO
-from stable_baselines3.common.env_checker import check_env
-
-from vehicle import Driver
-from controller import Lidar
-from controller import Field
-from controller import Supervisor
-
-
-#--------------GYM----------------------------
-
-# Creation of the Gym environment
-class WebotsGymEnvironment(Driver, gym.Env) :
- def __init__(self):
- super().__init__() # object inheriting from the Driver class
-
- basicTimeStep = int(super().getBasicTimeStep())
- self.sensorTime = basicTimeStep//4
-
- #Lidar
- self.lidar = Lidar("lidar")
-
- # LIDAR sensor
- self.lidar.enable(self.sensorTime)
- self.lidar.enablePointCloud()
-
- self.action_space = gym.spaces.Discrete(5) # available actions
-
- obs_min = np.zeros(self.lidar.getNumberOfPoints())
- obs_max = np.ones(self.lidar.getNumberOfPoints())
- self.observation_space = gym.spaces.Box(obs_min, obs_max, dtype=np.float32) # state coming from the LIDAR
-
-
- # car parameters (position, etc.)
- self.voiture_robot = super().getFromDef("vehicle")
- self.trans_champs = self.voiture_robot.getField("translation")
- self.rot_champs = self.voiture_robot.getField("rotation")
-
-
- # distance sensors
- self.capteur_avant = super().getDevice('front_center_sensor')
- self.capteur_gauche = super().getDevice('side_left_sensor')
- self.capteur_droite = super().getDevice('side_right_sensor')
- self.capteur_avant.enable(self.sensorTime)
- self.capteur_gauche.enable(self.sensorTime)
- self.capteur_droite.enable(self.sensorTime)
-
- # beacon sensor
- self.capteur_balise = super().getDevice('capteur_balise')
- self.capteur_balise.enable(self.sensorTime)
-
- # check the car's state
- def observe(self) :
- try :
- tableau = self.lidar.getRangeImage()
- # divide by 10 so the values lie between 0 and 1
- etat = np.divide(np.array(tableau), 10)
- print("etat : " + str(etat))
- except Exception : # in case the lidar returns nothing
- print("No lidar data")
- etat = np.zeros(self.lidar.getNumberOfPoints())
-
- return np.array(etat).astype('float32')
-
- # reset helper
- def initialisation(self) :
- #self.capteur_avant.disable()
- #self.capteur_gauche.disable()
- #self.capteur_droite.disable()
-
- # random start position
- x = random.uniform(1.5, 1.65)
- y = random.uniform(3.66, 3.8)
-
- # initial pose
- INITIAL_trans = [x, y, 0.0195182]
- INITIAL_rot=[-0.000257, 0.000618, 1 , -0.784]
- self.trans_champs.setSFVec3f(INITIAL_trans)
- self.rot_champs.setSFRotation(INITIAL_rot)
-
- time.sleep(0.3) # pause after the reset
-
- #self.capteur_avant.enable(self.sensorTime)
- #self.capteur_gauche.enable(self.sensorTime)
- #self.capteur_droite.enable(self.sensorTime)
-
- # reset for the Gym environment
- def reset(self):
- self.initialisation()
- # return the state
- obs = self.observe()
- #super().step()
- return obs
-
- # collision detection and reward assignment
- def evaluer(self):
- recompense = 0
- done = False
-
- avant = self.capteur_avant.getValue()
- gauche = self.capteur_gauche.getValue()
- droite = self.capteur_droite.getValue()
- balise = self.capteur_balise.getValue()
-
- if avant >= 900 and not done :
- print("Front collision")
- recompense = -100
- done = True
- elif ((avant >= 854 and gauche >= 896) or (avant >= 696 and gauche >= 910) or gauche >= 937) and not done :
- print("Left collision")
- recompense = -100
- done = True
- elif ((avant >= 850 and droite >= 893) or (avant >= 584 and droite >= 910) or droite >= 961) and not done :
- print("Right collision")
- recompense = -100
- done = True
- elif balise > 700 :
- done = False
- print("Beacon passed")
- recompense = 200
- else :
- done = False
- recompense = 0
-
- return recompense, done
-
- # function to drive the car
- def rouler(self, action) :
- super().setCruisingSpeed(1.0)
-
- if action == 0 :
- super().setSteeringAngle(-0.4)
-
- elif action == 1 :
- super().setSteeringAngle(-0.1)
-
- elif action == 2 :
- super().setSteeringAngle(0)
-
- elif action == 3 :
- super().setSteeringAngle(0.1)
-
- elif action == 4 :
- super().setSteeringAngle(0.4)
-
- # Gym environment step function
- def step(self, action):
- self.rouler(action)
-
- obs = self.observe()
-
- reward, done = self.evaluer()
-
- super().step()
-
- return obs, reward, done, {}
-
- # Gym environment render function
- def render(self, mode="human", close=False):
- pass
-
-
-#---------------- Main program --------------------
-def main() :
- env = WebotsGymEnvironment()
- check_env(env)
-
- logdir = "./Webots_tb/"
- #-- , tensorboard_log = logdir -- , tb_log_name = "PPO_voiture_webots"
-
- # model definition with mostly default parameters
- model = PPO('MlpPolicy', env,
- n_steps = 256,
- ent_coef = 0.01,
- n_epochs = 5,
- batch_size= 32,
- learning_rate= 3e-3,
- verbose=1)
-
- # training
- model.learn(total_timesteps=1_000_000)
-
- # save
- model.save("Voiture_autonome_Webots_PPO")
-
- #del model
-
- # load the trained model
- #model = PPO.load("Voiture_autonome_Webots_PPO")
-
- obs = env.reset()
-
- for _ in range(1000000):
- # predict an action from the observation
- action, _states = model.predict(obs, deterministic=True)
- obs, reward, done, info = env.step(action)
- if done:
- obs = env.reset()
-
-
-if __name__ == '__main__' :
- main()
-
diff --git a/src/Simulateur/DynamicActionPlotCallback.py b/src/Simulateur/DynamicActionPlotCallback.py
deleted file mode 100644
index 359b32b6..00000000
--- a/src/Simulateur/DynamicActionPlotCallback.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import matplotlib.pyplot as plt
-import numpy as np
-from stable_baselines3.common.callbacks import BaseCallback
-import torch
-import torch.nn.functional as F
-from config import *
-
-
-steering_true = (n_actions_steering + 1) // 2
-
-class DynamicActionPlotDistributionCallback(BaseCallback):
- def __init__(self, verbose=0):
- super().__init__(verbose)
- self.fig, self.ax = plt.subplots(4, 1, figsize=(10, 8))
-
- # Steering bars
- self.steering_bars = self.ax[0].bar(range(n_actions_steering), np.zeros(n_actions_steering), color='blue')
- self.steering_avg = [
- self.ax[0].plot([0, 0], [0, 1], color=(i/3, 1 - i/3, 0), label='Average')[0]
- for i in range(4)
- ]
- self.ax[0].set_ylim(0, 1) # Probabilities range from 0 to 1
- self.ax[0].set_title('Steering Action Probabilities')
-
- # Speed bars
- self.speed_bars = self.ax[1].bar(range(n_actions_speed), np.zeros(n_actions_speed), color='blue')
- self.speed_avg = self.ax[1].plot([0, 0], [0, 1], color='red', label='Average')[0]
- self.ax[1].set_ylim(0, 1) # Probabilities range from 0 to 1
- self.ax[1].set_title('Speed Action Probabilities')
-
- # LiDAR img
- self.lidar_img = self.ax[2].imshow(
- np.zeros((lidar_horizontal_resolution, lidar_horizontal_resolution)),
- cmap='gray', vmin=0, vmax=np.log(31)
- )
- self.ax[2].set_title('LiDAR Image')
-
- # Camera img
- self.camera_img = self.ax[3].imshow(
- np.zeros((camera_horizontal_resolution, camera_horizontal_resolution, 3)),
- cmap='RdYlGn', vmin=-1, vmax=1
- )
- self.ax[3].set_title('Camera Image')
-
- def _on_step(self) -> bool:
- # get the action probabilities
-
- obs = self.locals["obs_tensor"].clone().detach()
-
- self.lidar_img.set_array(np.log(1 + obs[0, 0, :, :].cpu().numpy()))
- self.camera_img.set_array(obs[0, 1, :, :].cpu().numpy())
- with torch.no_grad():
- latent = self.model.policy.features_extractor(obs)
- extracted = self.model.policy.mlp_extractor.policy_net(latent)
- output = self.model.policy.action_net(extracted)[0]
-
- T = [0.4, 0.6, 0.8, 1.0]
- steer_probs = torch.softmax(output[:n_actions_steering], dim=-1).to("cpu")
- speed_probs = torch.softmax(output[n_actions_steering:], dim=-1).to("cpu")
-
-
-
- # Update the probabilities
- for i, bar in enumerate(self.steering_bars):
- bar.set_height(steer_probs[i].item())
-
- for i in range(4):
- steer_probs_T = torch.softmax(output[:n_actions_steering] / T[i], dim=-1).to("cpu")
- steering_avg = (steer_probs_T / steer_probs_T.sum() * torch.arange(n_actions_steering)).sum().item()
- self.steering_avg[i].set_xdata([steering_avg, steering_avg])
-
-
-
- for i, bar in enumerate(self.speed_bars):
- bar.set_height(speed_probs[i].item())
-
- speed_avg = (speed_probs * torch.arange(n_actions_speed)).sum().item()
- self.speed_avg.set_xdata([speed_avg, speed_avg])
-
- plt.draw()
- plt.pause(1e-8)
-
- return True # Continue training
diff --git a/src/Simulateur/WebotsSimulationGymEnvironment.py b/src/Simulateur/WebotsSimulationGymEnvironment.py
deleted file mode 100644
index 213b0688..00000000
--- a/src/Simulateur/WebotsSimulationGymEnvironment.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import logging
-import os
-from typing import *
-
-import numpy as np
-import gymnasium as gym
-
-from config import *
-
-
-class WebotsSimulationGymEnvironment(gym.Env):
- """
- One environment for each vehicle
-
- n: index of the vehicle
- supervisor: the supervisor of the simulation
- """
-
- def __init__(self, simulation_rank: int, vehicle_rank: int):
-
-
- super().__init__()
- self.simulation_rank = simulation_rank
- self.vehicle_rank = vehicle_rank
-
- self.handler = logging.FileHandler(f"/tmp/autotech/Voiture_{self.simulation_rank}_{self.vehicle_rank}.log")
- self.handler.setFormatter(FORMATTER)
- self.log = logging.getLogger("SERVER")
- self.log.setLevel(level=LOG_LEVEL)
- self.log.addHandler(self.handler)
-
-
- self.log.info("Initialisation started")
-
- # this is only true if lidar_horizontal_resolution = camera_horizontal_resolution
- box_min = np.zeros([2, context_size, lidar_horizontal_resolution], dtype=np.float32)
- box_max = np.ones([2, context_size, lidar_horizontal_resolution], dtype=np.float32) * 30
-
- self.observation_space = gym.spaces.Box(box_min, box_max, dtype=np.float32)
- self.action_space = gym.spaces.MultiDiscrete([n_actions_steering, n_actions_speed])
-
- if not os.path.exists("/tmp/autotech"):
- os.mkdir("/tmp/autotech")
-
- self.log.debug(f"Creation of the pipes")
-
- os.mkfifo(f"/tmp/autotech/{simulation_rank}_{vehicle_rank}toserver.pipe")
- os.mkfifo(f"/tmp/autotech/serverto{simulation_rank}_{vehicle_rank}.pipe")
- os.mkfifo(f"/tmp/autotech/{simulation_rank}_{vehicle_rank}tosupervisor.pipe")
-
- # --mode=fast --minimize --no-rendering --batch --stdout
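- # launch one Webots instance per simulation and record its PID in /tmp/autotech/simulationranks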
- if vehicle_rank == 0 :
- os.system(f"""
- webots {__file__.rsplit('/', 1)[0]}/worlds/piste{simulation_rank % n_map}.wbt --mode=fast --minimize --batch --stdout &
- echo $! {simulation_rank}_{vehicle_rank} >>/tmp/autotech/simulationranks
- """)
-
- self.log.debug("Connection to the vehicle")
- self.fifo_w = open(f"/tmp/autotech/serverto{simulation_rank}_{vehicle_rank}.pipe", "wb")
- self.log.debug("Connection to the supervisor")
- self.fifo_r = open(f"/tmp/autotech/{simulation_rank}_{vehicle_rank}toserver.pipe", "rb")
-
- self.log.info("Initialisation finished\n")
-
- def reset(self, seed=0):
- # the simulator handles the actual reset; this just clears the local context
-
- # lidar data
- # this is true for lidar_horizontal_resolution = camera_horizontal_resolution
- self.context = obs = np.zeros([2, context_size, lidar_horizontal_resolution], dtype=np.float32)
- info = {}
- self.log.info(f"reset finished\n")
- return obs, info
-
- def step(self, action):
-
- self.log.info("Starting step")
- self.log.info(f"sending {action=}")
- self.fifo_w.write(action.tobytes())
- self.fifo_w.flush()
-
- # communication with the supervisor
- self.log.debug("trying to get info from supervisor")
- cur_state = np.frombuffer(self.fifo_r.read(np.dtype(np.float32).itemsize * (n_sensors + lidar_horizontal_resolution + camera_horizontal_resolution)), dtype=np.float32)
- self.log.info(f"received {cur_state=}")
- reward = np.frombuffer(self.fifo_r.read(np.dtype(np.float32).itemsize), dtype=np.float32)[0] # scalar
- self.log.info(f"received {reward=}")
- done = np.frombuffer(self.fifo_r.read(np.dtype(np.bool_).itemsize), dtype=np.bool_)[0] # scalar (np.bool was removed in NumPy 1.24)
- self.log.info(f"received {done=}")
- truncated = np.frombuffer(self.fifo_r.read(np.dtype(np.bool_).itemsize), dtype=np.bool_)[0] # scalar
- self.log.info(f"received {truncated=}")
- info = {}
-
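- # drop the sensor scalars and replace NaN/inf in the lidar/camera data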
- cur_state = np.nan_to_num(cur_state[n_sensors:], nan=0., posinf=30.)
-
- lidar_obs = cur_state[:lidar_horizontal_resolution]
- camera_obs = cur_state[lidar_horizontal_resolution:]
-
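- # slide the temporal context window: drop the oldest frame, append the newest lidar/camera frames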
- self.context = obs = np.concatenate([
- self.context[:, 1:],
- [lidar_obs[None], camera_obs[None]]
- ], axis=1)
-
- self.log.info("step over")
-
- return obs, reward, done, truncated, info
-
-
diff --git a/src/Simulateur/config.py b/src/Simulateur/config.py
deleted file mode 100644
index 6db6d990..00000000
--- a/src/Simulateur/config.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# just a file that defines some constants used in several files of the simulation
-from torch.cuda import is_available
-import logging
-
-
-n_map = 2
-n_simulations = 2
-n_vehicles = 2
-n_stupid_vehicles = 0
-n_actions_steering = 16
-n_actions_speed = 16
-n_sensors = 1
-lidar_max_range = 12.0
-device = "cuda" if is_available() else "cpu"
-
-context_size = 1
-lidar_horizontal_resolution = 1024 # DON'T CHANGE THIS VALUE PLS
-camera_horizontal_resolution = 1024 # DON'T CHANGE THIS VALUE PLS
-
-LOG_LEVEL = logging.DEBUG
-FORMATTER = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
diff --git a/src/Simulateur/controllers/controllerVehicleDriver/controllerVehicleDriver.py b/src/Simulateur/controllers/controllerVehicleDriver/controllerVehicleDriver.py
deleted file mode 100644
index 2caea5d7..00000000
--- a/src/Simulateur/controllers/controllerVehicleDriver/controllerVehicleDriver.py
+++ /dev/null
@@ -1,210 +0,0 @@
-import numpy as np
-import psutil
-import time
-import os
-import re
-import sys
-import logging
-
-# add src/Simulateur to sys.path
-path = __file__.rsplit('/', 3)[0]
-sys.path.insert(0, path)
-
-from config import *
-from vehicle import Driver
-from enum import Enum,auto
-
-def too_close(lidar,dir):
- # if dir==True: we're near the right wall
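- # heuristic: compare the nearest side distance with a threshold L derived
- # from the turning radius R and the angle between the forward ray and the nearest ray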
- R=0.83
- l=len(lidar)
- straight=lidar[l//2]
- if dir:
- nearest=min(lidar[l//2:])
- else:
- nearest=min(lidar[:l//2])
- theta=np.arccos(nearest/straight)
- L=R*(1-np.sin(theta))
- return nearest < L
-
-class State(Enum):
- AI=auto()
- BACK=auto()
-
-class VehicleDriver(Driver):
- """
- This class is a subclass of the Driver class and is used to control the vehicle.
- It basically receives instructions from the controllerWorldSupervisor and follows them.
- """
-
- def __init__(self):
- super().__init__()
- self.state=State.AI
-
-
- basicTimeStep = int(self.getBasicTimeStep())
- self.sensorTime = basicTimeStep // 4
-
- self.v_min = 1
- self.v_max = 9
-
- self.i = int(self.getName().split("_")[-1])
- # Lidar
- self.lidar = self.getDevice("Hokuyo")
- self.lidar.enable(self.sensorTime)
- self.lidar.enablePointCloud()
-
- # Camera
- self.camera = self.getDevice("RASPI_Camera_V2")
- self.camera.enable(self.sensorTime)
-
- # Checkpoint sensor
- self.touch_sensor = self.getDevice("touch_sensor")
- self.touch_sensor.enable(self.sensorTime)
-
- # Communication
-
- proc = psutil.Process(os.getpid()) #current
- parent = proc.parent() #parent
- grandparent = parent.parent() if parent else None #grandparent
- pppid = str(grandparent.pid)
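- # Webots is the grandparent process; its PID was written to /tmp/autotech/simulationranks at launch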
-
-
- self.simulation_rank = int(
- re.search(
- pppid + r" (\d+)",
- open("/tmp/autotech/simulationranks", "r").read(),
- re.MULTILINE
- ).group(1)
- )
-
- self.handler = logging.FileHandler(f"/tmp/autotech/Voiture_{self.simulation_rank}_{self.i}.log")
- self.handler.setFormatter(FORMATTER)
- self.log = logging.getLogger("CLIENT")
- self.log.setLevel(level=LOG_LEVEL)
- self.log.addHandler(self.handler)
-
- self.log.debug("Connection to the server")
- self.fifo_r = open(f"/tmp/autotech/serverto{self.simulation_rank}_{self.i}.pipe", "rb")
- self.log.debug("Connection with the supervisor")
- self.fifo_w = open(f"/tmp/autotech/{self.simulation_rank}_{self.i}tosupervisor.pipe", "wb")
-
-
-
-
- #Vérification de l"état de la voiture
- def observe(self):
- try:
- sensor_data = [np.array(self.touch_sensor.getValue(), dtype=np.float32)]
-
- lidar_data = np.array(self.lidar.getRangeImage(), dtype=np.float32)
-
- camera_data = np.array(self.camera.getImageArray(), dtype=np.float32)
- # shape = (1080, 1, 3)
- camera_data = camera_data.transpose(1, 2, 0)[0]
- # shape = (3, 1080)
- color = np.argmax(camera_data, axis=0)
- camera_data = (
- (color == 0).astype(np.float32)*-1 +
- (color == 1).astype(np.float32)*1 +
- (color == 2).astype(np.float32)*0
- )
- # red -> -1
- # green -> 1
- # blue -> 0
-
- return (
- sensor_data,
- lidar_data,
- camera_data
- )
- except Exception:
- # in case the lidar or camera returns nothing
- return (
- [np.array(self.touch_sensor.getValue(), dtype=np.float32)],
- np.zeros(self.lidar.getNumberOfPoints(), dtype=np.float32),
- np.zeros(self.camera.getWidth(), dtype=np.float32)
- )
-
- # step function; dispatches on the current state
- def step(self):
- match self.state:
- case State.AI:
- self.ai()
- case State.BACK:
- self.back()
-
- return super().step()
-
-
-
- def ai(self):
- # first to be executed: send the observation to the supervisor
- self.log.info("Starting step")
- obs = np.concatenate(self.observe())
- self.log.info(f"Observe {obs=}")
- self.fifo_w.write(obs.tobytes())
- self.fifo_w.flush()
-
- self.log.debug("Trying to read action from the server")
- action = np.frombuffer(self.fifo_r.read(np.dtype(np.int64).itemsize * 2), dtype=np.int64)
- self.log.info(f"received {action=}")
-
- # Simulation step
-
- action_steering = np.linspace(-.4, .4, n_actions_steering, dtype=np.float32)[action[0], None]
- action_speed = np.linspace(self.v_min, self.v_max, n_actions_speed, dtype=np.float32)[action[1], None]
-
- cur_angle = self.getSteeringAngle()
- dt = self.getBasicTimeStep() / 1000.0 # getBasicTimeStep() returns milliseconds
- omega = 20 # rad/s (max angular speed of the steering servo)
-
- action_steering = cur_angle + np.clip(action_steering - cur_angle, -omega * dt, omega * dt)
-
- self.setSteeringAngle(action_steering)
- self.setCruisingSpeed(action_speed)
-
- if self.touch_sensor.getValue():
- self.state=State.BACK
-
-
- def back(self):
- #si mur de "dir": braquer à "dir"" et reculer jusqu'à pouvoir réavancer (distance au mur à vérif)
- lidar,cam=self.observe()[1:]
- S=sum(cam)
- dir = S>0
- if dir:
- self.setSteeringAngle(0.33)
- if too_close(lidar,dir):
- self.setCruisingSpeed(-2)
- else: self.state=State.AI
- else:
- self.setSteeringAngle(-0.33)
- if too_close(lidar,dir):
- self.setCruisingSpeed(-2)
- else: self.state=State.AI
-
- def run(self):
- # this call is just there to make sure at least one step
- # is done in the entire simulation before we call lidar.getRangeImage()
- # otherwise it will crash the controller with the message:
- # WARNING: 'controllerVehicleDriver' controller crashed.
- # WARNING: controllerVehicleDriver: The process crashed some time after starting successfully.
- super().step()
- while self.step() != -1:
- pass
-
-
-#----------------Programme principal--------------------
-def main():
- driver = VehicleDriver()
- driver.log.info("Starting the vehicle driver\n")
- driver.run()
-
-
-if __name__ == "__main__":
- print("Starting the vehicle driver")
- main()
diff --git a/src/Simulateur/onnx_utils.py b/src/Simulateur/onnx_utils.py
deleted file mode 100644
index 7dd7dd5b..00000000
--- a/src/Simulateur/onnx_utils.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import os
-from stable_baselines3 import PPO
-import torch.nn as nn
-import torch
-from config import *
-import numpy as np
-from CNN1DExtractor import CNN1DExtractor
-from TemporalResNetExtractor import TemporalResNetExtractor
-
-import onnxruntime as ort
-
-
-def get_true_model(model):
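- # strip the SB3 wrappers down to the deterministic actor: features extractor, policy MLP, action head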
- return nn.Sequential(
- model.policy.features_extractor.net,
- model.policy.mlp_extractor.policy_net,
- model.policy.action_net
- ).to("cpu")
-
-
-def export_onnx(model):
- model.policy.eval()
- device = model.policy.device
- true_model = get_true_model(model)
- x = torch.randn(1, 2, context_size, lidar_horizontal_resolution)
-
- with torch.no_grad():
- torch.onnx.export(
- true_model,
- x,
- "model.onnx",
- input_names=["input"],
- output_names=["output"],
- dynamic_axes={"input": {0: "batch_size"}, "output": {0: "batch_size"}}
- )
-
- true_model.to(device)
- model.policy.to(device)
- model.policy.train()
-
-def run_onnx_model(session: ort.InferenceSession, x: np.ndarray):
- return session.run(None, {"input": x})[0]
-
-def test_onnx(model):
- device = model.policy.device
- model.policy.eval()
- true_model = get_true_model(model)
-
- loss_fn = nn.MSELoss()
- x = torch.randn(1000, 2, context_size, lidar_horizontal_resolution)
-
- try:
- session = ort.InferenceSession("model.onnx")
- except Exception as e:
- print(f"Error loading ONNX model: {e}")
- return
-
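- # compare the ONNX output with the PyTorch model in both eval and train mode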
- with torch.no_grad():
- y_true_test = true_model(x)
-
- true_model.train()
- y_true_train = true_model(x)
- true_model.eval()
-
- y_onnx = run_onnx_model(session,x.cpu().numpy())
-
- loss_test = loss_fn(y_true_test, torch.tensor(y_onnx))
- loss_train = loss_fn(y_true_train, torch.tensor(y_onnx))
- print(f"onnx_test={loss_test}")
- print(f"onnx_train={loss_train}")
-
- true_model.to(device)
- model.policy.to(device)
- model.policy.train()
-
diff --git a/src/VisionCamera/ColorDetection.py b/src/VisionCamera/ColorDetection.py
deleted file mode 100644
index f4eeb780..00000000
--- a/src/VisionCamera/ColorDetection.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import cv2 as cv
-import numpy as np
-
-
-URL = "udp://<192.168.1.10>:8554"
-
-cap = cv.VideoCapture(URL)
-while True:
- # Take each frame
- _, frame = cap.read()
- # Convert BGR to HSV
- hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
-
- # define the color ranges in HSV
-
- # upper and lower bounds for red color
- lower_red1 = np.array([0, 120, 70])
- upper_red1 = np.array([10, 255, 255])
-
- lower_red2 = np.array([170, 120, 70])
- upper_red2 = np.array([180, 255, 255])
-
- # upper and lower bounds for green color
- lower_green = np.array([40, 40, 40])
- upper_green = np.array([90, 255, 255])
-
- # Threshold the HSV image to keep only the red and green colors
-
- # Create a red mask
- mask1 = cv.inRange(hsv, lower_red1, upper_red1)
- mask2 = cv.inRange(hsv, lower_red2, upper_red2)
-
- # Bitwise-OR the masks
- maskRed = mask1 | mask2
-
- # Create a green mask
- maskGreen = cv.inRange(hsv, lower_green, upper_green)
- #Bitwise-OR the masks
- mask = maskRed | maskGreen
-
- # Bitwise-AND mask and original image
- res = cv.bitwise_and(frame,frame, mask= mask)
-
- # Find contours
- contours_green, _ = cv.findContours(maskGreen, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
- contours_red, _ = cv.findContours(maskRed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
-
- if contours_green and contours_red:
- # Assume the largest contour for each color is the wall
- green_contour = max(contours_green, key=cv.contourArea)
- red_contour = max(contours_red, key=cv.contourArea)
-
- # Compute the centroids using image moments
- M_green = cv.moments(green_contour)
- M_red = cv.moments(red_contour)
-
- if M_green["m00"] > 0 and M_red["m00"] > 0:
- cx_green = int(M_green["m10"] / M_green["m00"])
- cx_red = int(M_red["m10"] / M_red["m00"])
-
- # Check if the green (left wall) is to the left of the red (right wall)
- if cx_green < cx_red:
- print("right direction")
- else:
- print("wrong direction")
-
-
- # Show the images
- cv.imshow('frame',frame)
- cv.imshow('mask',mask)
- cv.imshow('res',res)
-
- # press q to close the window
- if cv.waitKey(5) & 0xFF == ord('q'):
- break
-
-cap.release()
-cv.destroyAllWindows()
\ No newline at end of file
diff --git a/src/VisionCamera/ReadMe.md b/src/VisionCamera/ReadMe.md
deleted file mode 100755
index 5737f2a9..00000000
--- a/src/VisionCamera/ReadMe.md
+++ /dev/null
@@ -1 +0,0 @@
-An unfinished approach to vision by [Markousi](https://github.com/Markousi). Refer instead to [Camera.py](../HL/Camera.py).
\ No newline at end of file
diff --git a/src/Simulateur/__init__.py b/src/high_level/README.md
similarity index 100%
rename from src/Simulateur/__init__.py
rename to src/high_level/README.md
diff --git a/src/HL/model_CNN1D.onnx b/src/high_level/models/model_CNN1DExtractor.onnx
similarity index 100%
rename from src/HL/model_CNN1D.onnx
rename to src/high_level/models/model_CNN1DExtractor.onnx
diff --git a/src/HL/model_RSNET2D.onnx b/src/high_level/models/model_CNN1DResNetExtractor.onnx
similarity index 100%
rename from src/HL/model_RSNET2D.onnx
rename to src/high_level/models/model_CNN1DResNetExtractor.onnx
diff --git a/src/HL/modelTemporal1.onnx b/src/high_level/models/model_TemporalResNetExtractor.onnx
similarity index 100%
rename from src/HL/modelTemporal1.onnx
rename to src/high_level/models/model_TemporalResNetExtractor.onnx
diff --git a/src/high_level/pyproject.toml b/src/high_level/pyproject.toml
new file mode 100644
index 00000000..10d9a6a0
--- /dev/null
+++ b/src/high_level/pyproject.toml
@@ -0,0 +1,45 @@
+[project]
+name = "high-level"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+requires-python = ">=3.12, <3.14"
+
+
+dependencies = [
+    # Note: picamera2 causes UV to auto-include rpi extras even when not requested.
+    # This is likely a UV bug.
+    "picamera2>=0.3.0",
+ # AI Inference
+ "onnxruntime>=1.8.0",
+ # Raspberry Pi specific hardware control
+ "rpi-hardware-pwm>=0.1.0",
+ "RPi.GPIO>=0.7.1",
+ "gpiozero>=1.6.0",
+ # I2C and SPI communication (Linux specific)
+ "smbus>=1.1.post2",
+ "spidev>=3.5",
+ # OLED display support
+ "luma.oled>=3.8.0",
+ "luma.core>=2.3.0",
+ "Adafruit-SSD1306>=1.6.0",
+ # Time-of-Flight sensor support
+ "adafruit-circuitpython-vl53l0x>=3.6.0",
+ "adafruit-blinka>=8.0.0",
+ # PS4 controller support
+ "pyPS4Controller>=1.2.0",
+ "zmq>=0.0.0",
+ "netifaces>=0.11.0",
+ "matplotlib>=3.10.5",
+ "opencv-python>=4.12.0.88",
+ "scipy>=1.15.3",
+ "websockets>=16.0",
+ "fastapi>=0.128.0",
+ "uvicorn>=0.40.0",
+]
+
+
+
+[build-system]
+requires = ["uv_build>=0.9.24,<0.10.0"]
+build-backend = "uv_build"
diff --git a/scripts/Live_display_lidar.py b/src/high_level/scripts/Live_display_lidar.py
similarity index 100%
rename from scripts/Live_display_lidar.py
rename to src/high_level/scripts/Live_display_lidar.py
diff --git a/scripts/Onetime_lidar.py b/src/high_level/scripts/Onetime_lidar.py
similarity index 100%
rename from scripts/Onetime_lidar.py
rename to src/high_level/scripts/Onetime_lidar.py
diff --git a/src/high_level/scripts/Simple.py b/src/high_level/scripts/Simple.py
new file mode 100644
index 00000000..c4d0ef2b
--- /dev/null
+++ b/src/high_level/scripts/Simple.py
@@ -0,0 +1,117 @@
+from high_level import Lidar
+
+import time
+from rpi_hardware_pwm import HardwarePWM
+
+IP = '192.168.0.10'
+PORT = 10940
+
+# parameters for speed_m_s function
+drive_direction = 1 # -1 for inverted ESCs or if a small ratio corresponds to forward motion
+pwm_stop_drive = 7.53
+dead_zone_drive = 0.5
+delta_pwm_max_drive = 1.1 # PWM value at which maximum speed is reached
+
+max_speed_m_s_hard = 8 # physical maximum speed of the car
+max_speed_m_s_soft = 2 # desired maximum speed
+
+
+# parameters for set_steering_degree function
+steering_direction = 1 # 1 for min PWM angle to the left, -1 for min PWM angle to the right
+steering_pwm_min = 6.91
+steering_pwm_max = 10.7
+steering_pwm_center = 8.805
+
+max_steering_angle_deg = +18 # to the left
+steering_angle_deg = 0
+
+pwm_drive = HardwarePWM(pwm_channel=0, hz=50, chip=2) # chip 2 on Pi 5 per documentation
+pwm_drive.start(pwm_stop_drive)
+
+def set_speed_m_s(speed_m_s):
+ if speed_m_s > max_speed_m_s_soft:
+ speed_m_s = max_speed_m_s_soft
+ elif speed_m_s < -max_speed_m_s_hard:
+ speed_m_s = -max_speed_m_s_hard
+
+ if speed_m_s == 0:
+ pwm_drive.change_duty_cycle(pwm_stop_drive)
+ elif speed_m_s > 0:
+ speed = speed_m_s * delta_pwm_max_drive / max_speed_m_s_hard
+ pwm_drive.change_duty_cycle(
+ pwm_stop_drive + drive_direction * (dead_zone_drive + speed)
+ )
+ elif speed_m_s < 0:
+ speed = speed_m_s * delta_pwm_max_drive / max_speed_m_s_hard
+ pwm_drive.change_duty_cycle(
+ pwm_stop_drive - drive_direction * (dead_zone_drive - speed)
+ )
+
+def reverse():
+ set_speed_m_s(-max_speed_m_s_hard)
+ time.sleep(0.2)
+ set_speed_m_s(0)
+ time.sleep(0.2)
+ set_speed_m_s(-1)
+
+pwm_steering = HardwarePWM(pwm_channel=1, hz=50, chip=2) # chip 2 on Pi 5 per documentation
+pwm_steering.start(steering_pwm_center)
+
+def set_steering_degree(angle_deg):
+ global steering_pwm_min
+ global steering_pwm_max
+ global steering_pwm_center
+
+ steering_pwm = (
+ steering_pwm_center
+ + steering_direction
+ * (steering_pwm_max - steering_pwm_min)
+ * angle_deg
+ / (2 * max_steering_angle_deg)
+ )
+
+ if steering_pwm > steering_pwm_max:
+ steering_pwm = steering_pwm_max
+ if steering_pwm < steering_pwm_min:
+ steering_pwm = steering_pwm_min
+
+ pwm_steering.change_duty_cycle(steering_pwm)
+
+# lidar connection and startup
+lidar = Lidar(IP, PORT)
+lidar.stop()
+lidar.startContinuous(0, 1080)
+
+lidar_table_mm = [0] * 360 # create a table of 360 zeros
+
+time.sleep(1) # lidar startup time
+
+try:
+ while True:
+ for angle in range(len(lidar_table_mm)):
+ # translation of lidar angle to table angle
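+            # (assumes the Hokuyo's 1081-step, 270° scan: ~4 samples per degree,
+            #  with index 540 pointing straight ahead)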
+ if angle > 135 and angle < 225:
+ lidar_table_mm[angle] = float('nan')
+ else:
+ lidar_table_mm[angle] = lidar.rDistance[540 + (-angle * 4)]
+
+ # steering angle is the difference between lidar rays
+ # measured at -60 and +60 degrees
+ steering_angle_deg = 0.02 * (lidar_table_mm[60] - lidar_table_mm[-60])
+ print(lidar_table_mm[60], lidar_table_mm[-60], steering_angle_deg)
+
+ set_steering_degree(steering_angle_deg)
+ speed_m_s = 0.05
+ set_speed_m_s(speed_m_s)
+
+ time.sleep(0.1)
+ ##############################################
+except KeyboardInterrupt: # catch CTRL+C
+ speed_m_s = 0
+ set_speed_m_s(speed_m_s)
+ print("end of acquisitions")
+
+# stop and disconnect lidar and motors
+lidar.stop()
+pwm_steering.stop()
+pwm_drive.start(pwm_stop_drive)
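+# restarting the drive channel at the neutral duty cycle keeps the ESC commanded
+# to stop; pwm_drive.stop() would cut the PWM signal entirely (assumed intent)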
diff --git a/scripts/ToF_print.py b/src/high_level/scripts/ToF_print.py
similarity index 100%
rename from scripts/ToF_print.py
rename to src/high_level/scripts/ToF_print.py
diff --git a/src/HL/animation/Car animation.svg b/src/high_level/scripts/animation/Car animation.svg
similarity index 100%
rename from src/HL/animation/Car animation.svg
rename to src/high_level/scripts/animation/Car animation.svg
diff --git a/src/HL/animation/README.MD b/src/high_level/scripts/animation/README.MD
similarity index 100%
rename from src/HL/animation/README.MD
rename to src/high_level/scripts/animation/README.MD
diff --git a/src/HL/animation/gen_animation.py b/src/high_level/scripts/animation/gen_animation.py
similarity index 100%
rename from src/HL/animation/gen_animation.py
rename to src/high_level/scripts/animation/gen_animation.py
diff --git a/src/HL/animation/gif_animate.py b/src/high_level/scripts/animation/gif_animate.py
similarity index 95%
rename from src/HL/animation/gif_animate.py
rename to src/high_level/scripts/animation/gif_animate.py
index e19e8a1d..11a85276 100644
--- a/src/HL/animation/gif_animate.py
+++ b/src/high_level/scripts/animation/gif_animate.py
@@ -1,9 +1,10 @@
import time
+
from luma.core.interface.serial import i2c
from luma.core.render import canvas
from luma.oled.device import ssd1306
-from PIL import Image, ImageDraw, ImageFont, ImageSequence
+from PIL import Image, ImageFont, ImageSequence
# import smbus #type: ignore #ignore the module could not be resolved error because it is a linux only module
# bus = smbus.SMBus(1) # Create a new I2C bus
diff --git a/src/HL/animation/gif_to_framdata.py b/src/high_level/scripts/animation/gif_to_framdata.py
similarity index 100%
rename from src/HL/animation/gif_to_framdata.py
rename to src/high_level/scripts/animation/gif_to_framdata.py
diff --git a/src/HL/animation/image.jpg b/src/high_level/scripts/animation/image.jpg
similarity index 100%
rename from src/HL/animation/image.jpg
rename to src/high_level/scripts/animation/image.jpg
diff --git a/src/HL/animation/improved_race_car_animation.gif b/src/high_level/scripts/animation/improved_race_car_animation.gif
similarity index 100%
rename from src/HL/animation/improved_race_car_animation.gif
rename to src/high_level/scripts/animation/improved_race_car_animation.gif
diff --git a/src/HL/animation/png_display.py b/src/high_level/scripts/animation/png_display.py
similarity index 92%
rename from src/HL/animation/png_display.py
rename to src/high_level/scripts/animation/png_display.py
index ed5b2cb8..a9cbb83c 100644
--- a/src/HL/animation/png_display.py
+++ b/src/high_level/scripts/animation/png_display.py
@@ -1,8 +1,9 @@
import time
+
from luma.core.interface.serial import i2c
from luma.core.render import canvas
from luma.oled.device import ssd1306
-from PIL import Image, ImageDraw, ImageFont
+from PIL import Image, ImageFont
# I2C configuration
serial = i2c(port=1, address=0x3C)
@@ -12,7 +13,6 @@
font = ImageFont.load_default()
-
# Convert SVG to PNG
png_path = "animation/image.jpg"
diff --git a/scripts/bluetooth_auto/bluethoot_mac.txt b/src/high_level/scripts/bluetooth_auto/bluethoot_mac.txt
similarity index 100%
rename from scripts/bluetooth_auto/bluethoot_mac.txt
rename to src/high_level/scripts/bluetooth_auto/bluethoot_mac.txt
diff --git a/scripts/bluetooth_auto/bluethootconnect.sh b/src/high_level/scripts/bluetooth_auto/bluethootconnect.sh
similarity index 100%
rename from scripts/bluetooth_auto/bluethootconnect.sh
rename to src/high_level/scripts/bluetooth_auto/bluethootconnect.sh
diff --git a/src/high_level/scripts/displayvoltage.py b/src/high_level/scripts/displayvoltage.py
new file mode 100644
index 00000000..0070a699
--- /dev/null
+++ b/src/high_level/scripts/displayvoltage.py
@@ -0,0 +1,59 @@
+import struct
+
+import smbus # type: ignore #ignore the module could not be resolved error because it is a linux only module
+from luma.core.interface.serial import i2c
+from luma.core.render import canvas
+from luma.oled.device import ssd1306
+from PIL import ImageFont
+
+bus = smbus.SMBus(1) # 1 indicates /dev/i2c-1
+
+# I2C address of the slave
+SLAVE_ADDRESS = 0x08
+# I2C configuration
+serial = i2c(port=1, address=0x3C)
+oled_display = ssd1306(serial)
+font = ImageFont.load_default()
+
+
+def write_data(data):
+ # Convert string to list of ASCII values
+ data_list = [ord(char) for char in data]
+ bus.write_i2c_block_data(SLAVE_ADDRESS, 0, data_list)
+
+
+def read_data(num_floats=3):
+
+ # Each float is 4 bytes
+ length = num_floats * 4
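+    # "f" * num_floats builds a native-endian format string such as "ff";
+    # the I2C slave is assumed to pack its floats with the same layout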
+ # Read a block of data from the slave
+ data = bus.read_i2c_block_data(SLAVE_ADDRESS, 0, length)
+ # Convert the byte data to floats
+ if len(data) >= length:
+ float_values = struct.unpack("f" * num_floats, bytes(data[:length]))
+ return list(float_values)
+ else:
+ raise ValueError("Not enough data received from I2C bus")
+
+
+def displayvoltage():
+ received = read_data(2) # Adjust length as needed
+ received = [round(elem, 2) for elem in received]
+ for i in range(len(received)):
+ if received[i] < 6:
+ received[i] = 0.0
+ # print(f"Received from slave: {received}")
+ with canvas(oled_display) as draw:
+ display_height = oled_display.height
+ text = f"LiP:{received[0]:.2f}V|NiH:{received[1]:.2f}V"
+ # _, text_height = draw.textsize(text, font=font) # Get the width and height of the text
+ # text_height = draw.textlength(text, font=font, direction="ttb")
+ text_height = 11
+ print(f"Text height: {text_height}")
+ text_y_position = display_height - text_height # Position text at the bottom
+ draw.text((0, text_y_position), text, fill="white", font=font)
+
+
+if __name__ == "__main__":
+ while True:
+ displayvoltage()
diff --git a/src/high_level/scripts/launch_serveur.py b/src/high_level/scripts/launch_serveur.py
new file mode 100644
index 00000000..9d2e729e
--- /dev/null
+++ b/src/high_level/scripts/launch_serveur.py
@@ -0,0 +1,27 @@
+# ---------------------------------------------------------------------------------------------------
+# main
+# ---------------------------------------------------------------------------------------------------
+import logging
+
+from high_level import Serveur
+
+if __name__ == "__main__":
+ logging.basicConfig(
+ level=logging.INFO,
+ format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
+ handlers=[
+ logging.FileHandler("/home/intech/CoVAPSy/covapsy.log"),
+ logging.StreamHandler(),
+ ],
+ )
+ log_serveur = logging.getLogger("__main__")
+ log_serveur.setLevel(level=logging.DEBUG)
+
+    log_hl = logging.getLogger("high_level")
+    log_hl.setLevel(level=logging.DEBUG)
+
+    log_lidar = logging.getLogger("actionneur_capteur.lidar")
+ log_lidar.setLevel(level=logging.INFO)
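+    # (logger names above are assumed to match the modules' __name__ under the
+    #  new package layout; stale "src.HL" names would silently have no effect)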
+
+ boot = Serveur()
+ boot.main()
diff --git a/scripts/readSPI.py b/src/high_level/scripts/readSPI.py
similarity index 100%
rename from scripts/readSPI.py
rename to src/high_level/scripts/readSPI.py
diff --git a/scripts/remote_control_controller.py b/src/high_level/scripts/remote_control_controller.py
similarity index 64%
rename from scripts/remote_control_controller.py
rename to src/high_level/scripts/remote_control_controller.py
index 9dd2b2e5..6ea12bf5 100644
--- a/scripts/remote_control_controller.py
+++ b/src/high_level/scripts/remote_control_controller.py
@@ -1,47 +1,51 @@
-import pygame
-import zmq
-import time
-from threading import Thread
import socket
import struct
+import time
+from threading import Thread
+
+import pygame
+
###################################################
-# Init ZMQ
+# Init UDP socket
###################################################
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-def envoie_donnee():
- global vitesse_m, direction
+
+def send_data():
+ global target_speed, direction
while True:
- packet = struct.pack("ff", vitesse_m, direction)
+ packet = struct.pack("ff", target_speed, direction)
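+        # native byte order ("ff"); the receiver must unpack with the same struct format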
sock.sendto(packet, ("192.168.1.10", 5556))
time.sleep(0.05)
+
###################################################
-# Paramètres véhicule
+# Vehicule control variables
###################################################
direction = 0
-vitesse_m = 0
+target_speed = 0
-vitesse_max_m_s_soft = 2
-vitesse_min_m_s_soft = -2
+max_target_speed = 2
+min_target_speed = -2
angle_degre_max = 18
+
def map_range(x, in_min, in_max, out_min, out_max):
return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
+
def set_direction_degre(angle_degre):
global direction
direction = angle_degre
- print(direction, vitesse_m)
+ print(direction, target_speed)
-def set_vitesse_m_ms(vit):
- global vitesse_m
- vitesse_m = vit
+def set_target_speed(vit):
+ global target_speed
+ target_speed = vit
-if __name__ == "__main__":
-
+if __name__ == "__main__":
###################################################
# Init pygame + manette
###################################################
@@ -49,17 +53,17 @@ def set_vitesse_m_ms(vit):
pygame.joystick.init()
if pygame.joystick.get_count() == 0:
- print("Aucune manette détectée")
+ print("no joystick detected")
exit(1)
joy = pygame.joystick.Joystick(0)
joy.init()
- print("Manette détectée:", joy.get_name())
+ print("joystick detected:", joy.get_name())
###################################################
# Boucle principale
###################################################
- Thread(target=envoie_donnee, daemon=True).start()
+ Thread(target=send_data, daemon=True).start()
try:
while True:
@@ -71,33 +75,33 @@ def set_vitesse_m_ms(vit):
# R2 = axis 5 (souvent 0..1)
# Stick gauche horizontal = axis 0 (-1..1)
- axis_lx = joy.get_axis(0) # Gauche droite
- axis_l2 = joy.get_axis(2) # Accélération inverse
- axis_r2 = joy.get_axis(5) # Accélération
+        axis_lx = joy.get_axis(0)  # left/right
+        axis_l2 = joy.get_axis(2)  # reverse throttle
+        axis_r2 = joy.get_axis(5)  # forward throttle
# Direction
direction = map_range(axis_lx, -1, 1, -angle_degre_max, angle_degre_max)
set_direction_degre(round(direction))
# Accélération
- accel = (axis_r2 + 1)/2
- brake = (axis_l2 + 1)/2
+ accel = (axis_r2 + 1) / 2
+ brake = (axis_l2 + 1) / 2
# Certaines manettes vont de -1..1, d'autres 0..1
# Avant
if accel > 0.05:
- vit = accel * vitesse_max_m_s_soft * 1000
- set_vitesse_m_ms(round(vit))
+ vit = accel * max_target_speed * 1000
+ set_target_speed(round(vit))
# Arrière
elif brake > 0.05:
- vit = brake * vitesse_min_m_s_soft * 1000
- set_vitesse_m_ms(round(vit))
- else :
- set_vitesse_m_ms(0)
+ vit = brake * min_target_speed * 1000
+ set_target_speed(round(vit))
+ else:
+ set_target_speed(0)
time.sleep(0.01)
except KeyboardInterrupt:
- print("Fin du programme.")
+ print("End of program.")
pygame.quit()
diff --git a/scripts/sender.py b/src/high_level/scripts/sender.py
similarity index 55%
rename from scripts/sender.py
rename to src/high_level/scripts/sender.py
index 3c8752ea..610d3de1 100644
--- a/scripts/sender.py
+++ b/src/high_level/scripts/sender.py
@@ -1,15 +1,17 @@
-from time import sleep
-import smbus #type: ignore #ignore the module could not be resolved error because it is a linux only module
import struct
+from time import sleep
+
+import smbus
SLAVE_ADDRESS = 0x08
# Create an SMBus instance
bus = smbus.SMBus(1) # 1 indicates /dev/i2c-1
+
def write_data(float_data):
# Convert the float to bytes
print(float_data)
- byte_data = struct.pack('f', float_data)
+ byte_data = struct.pack("f", float_data)
# Convert the bytes to a list of integers
int_data = list(byte_data)
print(int_data)
@@ -17,19 +19,7 @@ def write_data(float_data):
# Write the data to the I2C bus
bus.write_i2c_block_data(SLAVE_ADDRESS, int_data[0], int_data[1:4])
-def read_data(num_floats=3):
- # Each float is 4 bytes
- length = num_floats * 4
- # Read a block of data from the slave
- data = bus.read_i2c_block_data(SLAVE_ADDRESS, 0, length)
- # Convert the byte data to floats
- if len(data) >= length:
- float_values = struct.unpack('f' * num_floats, bytes(data[:length]))
- return list(float_values)
- else:
- raise ValueError("Not enough data received from I2C bus")
-
def read_data(num_floats=3):
# Each float is 4 bytes
length = num_floats * 4
@@ -37,14 +27,14 @@ def read_data(num_floats=3):
data = bus.read_i2c_block_data(SLAVE_ADDRESS, 0, length)
# Convert the byte data to floats
if len(data) >= length:
- float_values = struct.unpack('f' * num_floats, bytes(data[:length]))
+ float_values = struct.unpack("f" * num_floats, bytes(data[:length]))
return list(float_values)
else:
raise ValueError("Not enough data received from I2C bus")
-i=0.56
-while True :
+
+i = 0.56
+while True:
write_data(i)
sleep(0.5)
- i+=1
-
+ i += 1
diff --git a/scripts/setup.sh b/src/high_level/scripts/setup.sh
similarity index 100%
rename from scripts/setup.sh
rename to src/high_level/scripts/setup.sh
diff --git a/scripts/startup.sh b/src/high_level/scripts/startup.sh
similarity index 100%
rename from scripts/startup.sh
rename to src/high_level/scripts/startup.sh
diff --git a/scripts/envoie_camera_sur_web.py b/src/high_level/scripts/strream_camera_on_web.py
similarity index 99%
rename from scripts/envoie_camera_sur_web.py
rename to src/high_level/scripts/strream_camera_on_web.py
index b01bc5d0..a3218ce6 100644
--- a/scripts/envoie_camera_sur_web.py
+++ b/src/high_level/scripts/strream_camera_on_web.py
@@ -82,7 +82,7 @@ class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
daemon_threads = True
-if __name__ = "__name__":
+if __name__ == "__main__":
# Create Picamera2 instance and configure it
picam2 = Picamera2()
picam2.configure(picam2.create_video_configuration(main={"size": (640, 480)}))
diff --git a/src/high_level/scripts/systemctl/autotech_init.service b/src/high_level/scripts/systemctl/autotech_init.service
new file mode 100644
index 00000000..d38a227a
--- /dev/null
+++ b/src/high_level/scripts/systemctl/autotech_init.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=Car initialization
+After=network.target
+
+[Service]
+Type=simple
+User=intech
+WorkingDirectory=/home/intech/CoVAPSy/src/high_level
+ExecStart=/home/intech/.local/bin/uv run scripts/launch_serveur.py
+Restart=always
+RestartSec=2
+
+[Install]
+WantedBy=multi-user.target
diff --git a/src/high_level/scripts/systemctl/mediamtx_start.service b/src/high_level/scripts/systemctl/mediamtx_start.service
new file mode 100644
index 00000000..bdef7328
--- /dev/null
+++ b/src/high_level/scripts/systemctl/mediamtx_start.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=Start mediamtx
+After=network.target
+
+[Service]
+Type=simple
+User=intech
+WorkingDirectory=/home/intech/
+ExecStart=/home/intech/mediamtx
+Restart=always
+RestartSec=2
+
+[Install]
+WantedBy=multi-user.target
diff --git a/src/HL/Vision/Analyse_Image.py b/src/high_level/scripts/vision/Analyse_Image.py
similarity index 70%
rename from src/HL/Vision/Analyse_Image.py
rename to src/high_level/scripts/vision/Analyse_Image.py
index 37f64c3d..af6e16d3 100644
--- a/src/HL/Vision/Analyse_Image.py
+++ b/src/high_level/scripts/vision/Analyse_Image.py
@@ -1,11 +1,7 @@
import numpy as np
from PIL import Image
-COLOUR_KEY = {
- "green": 1,
- "red": -1,
- "none": 0
-}
+COLOUR_KEY = {"green": 1, "red": -1, "none": 0}
COLOR_THRESHOLD = 20 # Threshold for color intensity difference
Y_OFFSET = 0.5 # Offset for the y-axis in the image
counter = 1
@@ -15,6 +11,7 @@ def get_intensity(image, color):
"""Calculate the average intensity of a specific color channel in the image."""
return np.mean(image[:, :, color])
+
def is_running_in_reversed(image, LEFT_IS_GREEN=True):
"""
Check if the car is running in reverse.
@@ -22,21 +19,26 @@ def is_running_in_reversed(image, LEFT_IS_GREEN=True):
"""
print(image, type(image))
height, width, _ = image.shape
- left_half = image[:, :width // 2]
- right_half = image[:, width // 2:]
+ left_half = image[:, : width // 2]
+ right_half = image[:, width // 2 :]
left_red_intensity = np.mean(left_half[:, :, 0]) # Red channel in RGB
right_red_intensity = np.mean(right_half[:, :, 0]) # Red channel in RGB
-
+
# Allow to change the color of the left and right side
- return (left_red_intensity <= right_red_intensity) if LEFT_IS_GREEN else (left_red_intensity > right_red_intensity)
+ return (
+ (left_red_intensity <= right_red_intensity)
+ if LEFT_IS_GREEN
+ else (left_red_intensity > right_red_intensity)
+ )
+
def is_green_or_red(image):
"""
Check if the car is facing a green or red wall by analyzing the bottom half of the image.
"""
height, _, _ = image.shape
- bottom_half = image[height // 2:, :, :] # Slice the bottom half of the image
+ bottom_half = image[height // 2 :, :, :] # Slice the bottom half of the image
red_intensity = np.mean(bottom_half[:, :, 0]) # Red channel in RGB
green_intensity = np.mean(bottom_half[:, :, 1]) # Green channel in RGB
@@ -47,8 +49,8 @@ def is_green_or_red(image):
return COLOUR_KEY["red"]
return COLOUR_KEY["none"]
-
-def recreate_image_from_matrix(image, matrix,adjusted_width, vector_size=128):
+
+def recreate_image_from_matrix(image, matrix, adjusted_width, vector_size=128):
"""
Recreate an image from the matrix of -1, 0, and 1 and append it to the bottom of the sliced image.
"""
@@ -79,9 +81,12 @@ def recreate_image_from_matrix(image, matrix,adjusted_width, vector_size=128):
# Append the recreated image to the bottom of the sliced image
combined_image = np.vstack((image, recreated_image_resized))
- Image.fromarray(combined_image).convert("RGB").save(f"debug_combined_image{counter}.jpg")
+ Image.fromarray(combined_image).convert("RGB").save(
+ f"debug_combined_image{counter}.jpg"
+ )
counter += 1
+
def camera_matrix(image, vector_size=128):
"""
Create a matrix of -1, 0, and 1 for a line in the image. The matrix size is 128.
@@ -102,46 +107,59 @@ def camera_matrix(image, vector_size=128):
bucket_size = adjusted_width // vector_size
# Calculate red and green intensities for all segments at once
- reshaped_red = sliced_image[:, :, 0].reshape(sliced_image.shape[0], vector_size, bucket_size)
- reshaped_green = sliced_image[:, :, 1].reshape(sliced_image.shape[0], vector_size, bucket_size)
+ reshaped_red = sliced_image[:, :, 0].reshape(
+ sliced_image.shape[0], vector_size, bucket_size
+ )
+ reshaped_green = sliced_image[:, :, 1].reshape(
+ sliced_image.shape[0], vector_size, bucket_size
+ )
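+    # reshaping to (rows, vector_size, bucket_size) lets a single np.mean over
+    # axes (0, 2) average each of the 128 horizontal buckets at once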
red_intensities = np.mean(reshaped_red, axis=(0, 2))
green_intensities = np.mean(reshaped_green, axis=(0, 2))
# Determine the color for each segment
- output_matrix[red_intensities > green_intensities + COLOR_THRESHOLD] = COLOUR_KEY["red"]
- output_matrix[green_intensities > red_intensities + COLOR_THRESHOLD] = COLOUR_KEY["green"]
- output_matrix[np.abs(red_intensities - green_intensities) <= COLOR_THRESHOLD] = COLOUR_KEY["none"]
+ output_matrix[red_intensities > green_intensities + COLOR_THRESHOLD] = COLOUR_KEY[
+ "red"
+ ]
+ output_matrix[green_intensities > red_intensities + COLOR_THRESHOLD] = COLOUR_KEY[
+ "green"
+ ]
+ output_matrix[np.abs(red_intensities - green_intensities) <= COLOR_THRESHOLD] = (
+ COLOUR_KEY["none"]
+ )
# Recreate the image from the matrix
recreate_image_from_matrix(sliced_image, output_matrix, adjusted_width, vector_size)
return output_matrix
+
if __name__ == "__main__":
# Example usage:
# open image
images_paths = [
- "src\HL\Vision\Image_Piste\image.jpg",
- "src\HL\Vision\Image_Piste\image2.jpg",
- "src\HL\Vision\Image_Piste\image3.jpg",
- "src\HL\Vision\Image_Piste\image4.png",
- "src\HL\Vision\Image_Piste\image5.png",
- "src\HL\Vision\Image_Piste\image6.png",
- "src\HL\Vision\Image_Piste\image7.png",
- "src\HL\Vision\Image_Piste\image8.png",
- "src\HL\Vision\Image_Piste\image9.png",
- "src\HL\Vision\Image_Piste\image10.png",
- "src\HL\Vision\Image_Piste\image11.png",
- "src\HL\Vision\Image_Piste\image12.png",
- "src\HL\Vision\Image_Piste\image13.png"
+        "src/high_level/scripts/vision/Image_Piste/image.jpg",
+        "src/high_level/scripts/vision/Image_Piste/image2.jpg",
+        "src/high_level/scripts/vision/Image_Piste/image3.jpg",
+        "src/high_level/scripts/vision/Image_Piste/image4.png",
+        "src/high_level/scripts/vision/Image_Piste/image5.png",
+        "src/high_level/scripts/vision/Image_Piste/image6.png",
+        "src/high_level/scripts/vision/Image_Piste/image7.png",
+        "src/high_level/scripts/vision/Image_Piste/image8.png",
+        "src/high_level/scripts/vision/Image_Piste/image9.png",
+        "src/high_level/scripts/vision/Image_Piste/image10.png",
+        "src/high_level/scripts/vision/Image_Piste/image11.png",
+        "src/high_level/scripts/vision/Image_Piste/image12.png",
+        "src/high_level/scripts/vision/Image_Piste/image13.png",
]
for image_path in images_paths:
# print(f"Processing image: {image_path}")
- pil_image = Image.open(image_path).convert("RGB") # Open and ensure it's in RGB format
+ pil_image = Image.open(image_path).convert(
+ "RGB"
+ ) # Open and ensure it's in RGB format
image = np.array(pil_image) # Convert to NumPy array
- is_green_or_red_bool= is_green_or_red(image)
+ is_green_or_red_bool = is_green_or_red(image)
is_running_in_reverse = is_running_in_reversed(image)
camera_matrix(image)
- print(f"Image: {image_path}, is_green_or_red: {is_green_or_red_bool}, is_running_in_reverse: {is_running_in_reverse}")
-
-
\ No newline at end of file
+ print(
+ f"Image: {image_path}, is_green_or_red: {is_green_or_red_bool}, is_running_in_reverse: {is_running_in_reverse}"
+ )
diff --git a/src/HL/Vision/Cam.py b/src/high_level/scripts/vision/Cam.py
similarity index 74%
rename from src/HL/Vision/Cam.py
rename to src/high_level/scripts/vision/Cam.py
index e9b61823..971529ab 100644
--- a/src/HL/Vision/Cam.py
+++ b/src/high_level/scripts/vision/Cam.py
@@ -1,8 +1,10 @@
-from picamera2 import Picamera2, Preview # type: ignore
-from PIL import Image # For saving images
-import time
import os
-import shutil
+import shutil
+import time
+
+from picamera2 import Picamera2
+from PIL import Image # For saving images
+
# Initialize the camera
# Set LIBCAMERA_LOG_LEVELS to suppress INFO logs
@@ -11,16 +13,19 @@
picam2 = Picamera2()
+
def run_test(resolution):
# Configure the camera for preview
- config = picam2.create_preview_configuration(main={"size": resolution}) # full sensor resolution (3280, 2464)
+ config = picam2.create_preview_configuration(
+ main={"size": resolution}
+ ) # full sensor resolution (3280, 2464)
picam2.configure(config)
# Start the camera
picam2.start()
# Parameters
-
+
frame_count = 0 # Counter to keep track of saved frames
save_dir = "Captured_Frames" # Directory to save frames
if os.path.exists(save_dir):
@@ -42,27 +47,27 @@ def run_test(resolution):
# Capture the frame
frame = picam2.capture_array()
capture_time = time.time() # Time after capturing the frame
- total_capture_time += (capture_time - init_time)
+ total_capture_time += capture_time - init_time
# Convert the frame to an image
image = Image.fromarray(frame)
array_time = time.time() # Time after converting the frame to an image
- total_array_time += (array_time - capture_time)
+ total_array_time += array_time - capture_time
# Convert the image to RGB
- #image = image.convert("RGB")
+ image = image.convert("RGB")
convert_time = time.time() # Time after converting the image to RGB
- total_convert_time += (convert_time - array_time)
+ total_convert_time += convert_time - array_time
# Create the file path
- #frame_path = os.path.join(save_dir, f"frame_{frame_count:04d}.jpg")
+ # frame_path = os.path.join(save_dir, f"frame_{frame_count:04d}.jpg")
path_creation_time = time.time() # Time after creating the path
- total_path_creation_time += (path_creation_time - convert_time)
+ total_path_creation_time += path_creation_time - convert_time
# Save the image
- #image.save(frame_path)
+ # image.save(frame_path)
save_time = time.time() # Time after saving the image
- total_save_time += (save_time - path_creation_time)
+ total_save_time += save_time - path_creation_time
frame_count += 1
picam2.stop() # Stop the camera
@@ -80,12 +85,14 @@ def run_test(resolution):
print(f"Average Convert time: {average_convert_time:.5f} seconds")
print(f"Average Path creation time: {average_path_creation_time:.5f} seconds")
print(f"Average Save time: {average_save_time:.5f} seconds")
- print(f"Total time for {N} iterations: {total_time:.5f} seconds. FPS: {N / total_time:.2f}")
-
-
+ print(
+ f"Total time for {N} iterations: {total_time:.5f} seconds. FPS: {N / total_time:.2f}"
+ )
+
+
if __name__ == "__main__":
N = 100 # Number of iterations to measure
-
- for resolution in [(3280, 2464), (1280, 720), (1920, 1080),(640, 480), (320, 240)]:
+
+ for resolution in [(3280, 2464), (1280, 720), (1920, 1080), (640, 480), (320, 240)]:
print(f"Testing resolution: {resolution}")
- run_test(resolution)
\ No newline at end of file
+ run_test(resolution)
diff --git a/src/HL/Vision/Image_Piste/image.jpg b/src/high_level/scripts/vision/Image_Piste/image.jpg
similarity index 100%
rename from src/HL/Vision/Image_Piste/image.jpg
rename to src/high_level/scripts/vision/Image_Piste/image.jpg
diff --git a/src/HL/Vision/Image_Piste/image10.png b/src/high_level/scripts/vision/Image_Piste/image10.png
similarity index 100%
rename from src/HL/Vision/Image_Piste/image10.png
rename to src/high_level/scripts/vision/Image_Piste/image10.png
diff --git a/src/HL/Vision/Image_Piste/image11.png b/src/high_level/scripts/vision/Image_Piste/image11.png
similarity index 100%
rename from src/HL/Vision/Image_Piste/image11.png
rename to src/high_level/scripts/vision/Image_Piste/image11.png
diff --git a/src/HL/Vision/Image_Piste/image12.png b/src/high_level/scripts/vision/Image_Piste/image12.png
similarity index 100%
rename from src/HL/Vision/Image_Piste/image12.png
rename to src/high_level/scripts/vision/Image_Piste/image12.png
diff --git a/src/HL/Vision/Image_Piste/image13.png b/src/high_level/scripts/vision/Image_Piste/image13.png
similarity index 100%
rename from src/HL/Vision/Image_Piste/image13.png
rename to src/high_level/scripts/vision/Image_Piste/image13.png
diff --git a/src/HL/Vision/Image_Piste/image2.jpg b/src/high_level/scripts/vision/Image_Piste/image2.jpg
similarity index 100%
rename from src/HL/Vision/Image_Piste/image2.jpg
rename to src/high_level/scripts/vision/Image_Piste/image2.jpg
diff --git a/src/HL/Vision/Image_Piste/image3.jpg b/src/high_level/scripts/vision/Image_Piste/image3.jpg
similarity index 100%
rename from src/HL/Vision/Image_Piste/image3.jpg
rename to src/high_level/scripts/vision/Image_Piste/image3.jpg
diff --git a/src/HL/Vision/Image_Piste/image4.png b/src/high_level/scripts/vision/Image_Piste/image4.png
similarity index 100%
rename from src/HL/Vision/Image_Piste/image4.png
rename to src/high_level/scripts/vision/Image_Piste/image4.png
diff --git a/src/HL/Vision/Image_Piste/image5.png b/src/high_level/scripts/vision/Image_Piste/image5.png
similarity index 100%
rename from src/HL/Vision/Image_Piste/image5.png
rename to src/high_level/scripts/vision/Image_Piste/image5.png
diff --git a/src/HL/Vision/Image_Piste/image6.png b/src/high_level/scripts/vision/Image_Piste/image6.png
similarity index 100%
rename from src/HL/Vision/Image_Piste/image6.png
rename to src/high_level/scripts/vision/Image_Piste/image6.png
diff --git a/src/HL/Vision/Image_Piste/image7.png b/src/high_level/scripts/vision/Image_Piste/image7.png
similarity index 100%
rename from src/HL/Vision/Image_Piste/image7.png
rename to src/high_level/scripts/vision/Image_Piste/image7.png
diff --git a/src/HL/Vision/Image_Piste/image8.png b/src/high_level/scripts/vision/Image_Piste/image8.png
similarity index 100%
rename from src/HL/Vision/Image_Piste/image8.png
rename to src/high_level/scripts/vision/Image_Piste/image8.png
diff --git a/src/HL/Vision/Image_Piste/image9.png b/src/high_level/scripts/vision/Image_Piste/image9.png
similarity index 100%
rename from src/HL/Vision/Image_Piste/image9.png
rename to src/high_level/scripts/vision/Image_Piste/image9.png
diff --git a/src/HL/Vision/Imagestream.py b/src/high_level/scripts/vision/Imagestream.py
similarity index 73%
rename from src/HL/Vision/Imagestream.py
rename to src/high_level/scripts/vision/Imagestream.py
index facd5f98..5c3979f4 100644
--- a/src/HL/Vision/Imagestream.py
+++ b/src/high_level/scripts/vision/Imagestream.py
@@ -1,9 +1,8 @@
-from picamera2 import Picamera2, Preview # type: ignore #ignore the module could not be resolved error because it is a rpi only module
-from PIL import Image # For saving images
-import time
import os
-import threading
+import time
+from picamera2 import Picamera2
+from PIL import Image
def main():
@@ -11,19 +10,20 @@ def main():
picam2 = Picamera2()
# Configure the camera for preview
- config = picam2.create_preview_configuration(main={"size": (1920, 1080)}, format= "BGR888")
+ config = picam2.create_preview_configuration(
+ main={"size": (1920, 1080), "format": "BGR888"}
+ )
picam2.configure(config)
# Start the camera
picam2.start()
-
Start_time = time.time() # Start time for the image stream
try:
frame_count = 0 # Counter to keep track of saved frames
save_dir = "Captured_Frames" # Directory to save frames
os.makedirs(save_dir, exist_ok=True) # Create the directory if it doesn't exist
-
+
while True:
frame = picam2.capture_array()
image = Image.fromarray(frame)
@@ -31,18 +31,21 @@ def main():
frame_path = os.path.join(save_dir, f"frame_{frame_count:04d}.jpg")
image.save(frame_path)
frame_count += 1
-
+
# Display the frame in a window
# cv2.imshow("Image Stream", frame)
-
+
# Exit the stream when 'q' is pressed
if time.time() - Start_time > 1:
- print(f"Captured {frame_count} frames in {time.time() - Start_time:.2f} seconds")
+ print(
+ f"Captured {frame_count} frames in {time.time() - Start_time:.2f} seconds"
+ )
frame_count = 0
Start_time = time.time()
finally:
# Stop the camera and close the window
picam2.stop()
+
if __name__ == "__main__":
- main()
\ No newline at end of file
+ main()
diff --git a/scripts/writeSPI.py b/src/high_level/scripts/writeSPI.py
similarity index 100%
rename from scripts/writeSPI.py
rename to src/high_level/scripts/writeSPI.py
diff --git a/src/high_level/src/actionneur_capteur/__init__.py b/src/high_level/src/actionneur_capteur/__init__.py
new file mode 100644
index 00000000..ff69f327
--- /dev/null
+++ b/src/high_level/src/actionneur_capteur/__init__.py
@@ -0,0 +1,6 @@
+from .camera import Camera
+from .lidar import Lidar
+from .masterI2C import I2c_arduino
+from .tof import ToF
+
+__all__ = ["Camera", "Lidar", "I2c_arduino", "ToF"]
diff --git a/src/HL/actionneur_capteur/Camera.py b/src/high_level/src/actionneur_capteur/camera.py
similarity index 68%
rename from src/HL/actionneur_capteur/Camera.py
rename to src/high_level/src/actionneur_capteur/camera.py
index c561f547..9ff2a0e4 100644
--- a/src/HL/actionneur_capteur/Camera.py
+++ b/src/high_level/src/actionneur_capteur/camera.py
@@ -1,124 +1,122 @@
-import cv2
-from picamera2 import Picamera2 # type: ignore
-from PIL import Image
-import numpy as np
-import os
+import logging
import logging as log
+import os
import threading
-import shutil
-import scipy as sp
import time
-import logging
+from threading import Thread
+import cv2
+import numpy as np
+import scipy as sp
+from cv2.typing import MatLike
from picamera2 import Picamera2
from picamera2.encoders import JpegEncoder
-from picamera2.outputs import FileOutput
+from picamera2.outputs import FileOutput, Output
+from PIL import Image
+
+from high_level.autotech_constant import (
+ CAMERA_QUALITY,
+ CAMERA_STREAM_ON_START,
+ FRAME_RATE,
+ PORT_STREAMING_CAMERA,
+ SIZE_CAMERA_X,
+ SIZE_CAMERA_Y,
+ STREAM_PATH,
+)
+from programs.camera_serv import StreamHandler, StreamOutput, StreamServer, frame_buffer
+from programs.program import Program
+
N_IMAGES = 100 # Number of images to capture
SAVE_DIR = "Captured_Frames" # Directory to save frames
DEBUG_DIR = "Debug" # Directory for debug images
DEBUG_DIR_wayfinding = "Debug_Wayfinding" # Directory for wayfinding debug images
-COLOUR_KEY = {
- "green": 1,
- "red": -1,
- "none": 0
-}
+COLOUR_KEY = {"green": 1, "red": -1, "none": 0}
COLOR_THRESHOLD = 20 # Threshold for color intensity difference
Y_OFFSET = -80 # Offset for the y-axis in the image
-from picamera2.outputs import Output
class JpegCallback(Output):
def __init__(self, parent_cam):
super().__init__()
self.parent = parent_cam
- def outputframe(self, frame, keyframe=True):
+ def outputframe(
+ self, frame, keyframe=True, timestamp=None, packet=None, audio=False
+ ):
# frame = bytes JPEG
self.parent._on_new_frame(frame)
-
-
-from src.HL.programme.Camera_serv import StreamServer, StreamHandler, StreamOutput, frame_buffer
-from src.HL.programme.programme import Program
-from src.HL.Autotech_constant import PORT_STREAMING_CAMERA, SIZE_CAMERA_X, SIZE_CAMERA_Y, FRAME_RATE, CAMERA_QUALITY, STREAM_PATH, CAMERA_STREAM_ON_START
-
class ProgramStreamCamera(Program):
- def __init__(self,serveur):
+ def __init__(self, serveur):
super().__init__()
self.log = logging.getLogger(__name__)
self.serveur = serveur
self.running = False
self.controls_car = False
-
+
if CAMERA_STREAM_ON_START:
self.start()
-
@property
def camera(self):
# accès dynamique
return self.serveur.camera
-
def start(self):
cam = self.camera
if cam is None:
self.log.error("Camera not initialized yet")
return
-
+
self.running = True
self.camera.start_stream()
-
+
def kill(self):
self.running = False
self.camera.stop_stream()
-
-
class Camera:
def __init__(self, size=(SIZE_CAMERA_X, SIZE_CAMERA_Y), port=PORT_STREAMING_CAMERA):
self.size = size
self.port = port
self.streaming = False
- self.stream_thread = None
- self.picam2 = None
+ self.stream_thread: Thread
+ self.picam2: Picamera2
- self.last_frame = None
+ self.last_frame: MatLike
self.debug_counter = 0
self.image_no = 0
-
# Démarrage en mode "acquisition locale sans stream"
self._start_local_capture()
-
# ----------------------------------------------------------
# Capture locale (sans MJPEG server)
# ----------------------------------------------------------
def _start_local_capture(self):
self.picam2 = Picamera2()
config = self.picam2.create_video_configuration(
- main={"size": self.size}, # plus large, moins zoomé
- controls={"FrameRate": FRAME_RATE} # FPS stable
+            main={"size": self.size},  # wider view, less zoomed
+            controls={"FrameRate": FRAME_RATE},  # stable FPS
)
self.picam2.configure(config)
self.output = StreamOutput()
# Qualité JPEG custom
- self.picam2.start_recording(JpegEncoder(q=CAMERA_QUALITY), FileOutput(self.output))
+ self.picam2.start_recording(
+ JpegEncoder(q=CAMERA_QUALITY), FileOutput(self.output)
+ )
# thread lecture last_frame
self.capture_thread = threading.Thread(
- target=self._update_last_frame_loop,
- daemon=True
+ target=self._update_last_frame_loop, daemon=True
)
self.capture_thread.start()
-
def _update_last_frame_loop(self):
"""Récupère en continu la dernière frame JPEG."""
while True:
@@ -129,15 +127,15 @@ def _update_last_frame_loop(self):
self.last_frame = cv2.cvtColor(np_frame, cv2.COLOR_BGR2RGB)
time.sleep(0.01)
-
# ----------------------------------------------------------
# Contrôle streaming MJPEG
# ----------------------------------------------------------
def start_stream(self):
if self.streaming:
return
- import src.HL.programme.Camera_serv
- src.HL.programme.Camera_serv.streaming_enabled = True
+ import programs.camera_serv
+
+ programs.camera_serv.streaming_enabled = True
self.httpd = StreamServer(("", self.port), StreamHandler)
@@ -152,27 +150,24 @@ def run_server():
self.stream_thread.start()
self.streaming = True
-
-
def stop_stream(self):
if not self.streaming:
return
- import src.HL.programme.Camera_serv
- src.HL.programme.Camera_serv.streaming_enabled = False
+ import programs.camera_serv
+
+ programs.camera_serv.streaming_enabled = False
print("[INFO] Shutting down MJPEG server...")
self.httpd.shutdown()
self.httpd.server_close()
- self.stream_thread.join()
+ if self.stream_thread is not None:
+ self.stream_thread.join()
self.streaming = False
print("[INFO] Stream stopped.")
-
-
-
def toggle_stream(self):
if self.streaming:
print("[INFO] Stopping stream")
@@ -181,14 +176,12 @@ def toggle_stream(self):
print("[INFO] Starting stream")
self.start_stream()
-
# ----------------------------------------------------------
# Interface publique
# ----------------------------------------------------------
def get_last_image(self):
return self.last_frame
-
def camera_matrix(self, vector_size=128, image=None):
"""
Create a matrix of -1, 0, and 1 for a line in the image. The matrix size is 128.
@@ -200,7 +193,13 @@ def camera_matrix(self, vector_size=128, image=None):
raise ValueError("Vector size cannot be greater than image width")
# Slice the middle 5% of the image height
- sliced_image = image[height // 2 - height // 40 + Y_OFFSET: height // 2 + height // 40 + Y_OFFSET, :, :]
+ sliced_image = image[
+ height // 2 - height // 40 + Y_OFFSET : height // 2
+ + height // 40
+ + Y_OFFSET,
+ :,
+ :,
+ ]
# Ensure the width of the sliced image is divisible by vector_size
adjusted_width = (width // vector_size) * vector_size
@@ -211,25 +210,40 @@ def camera_matrix(self, vector_size=128, image=None):
bucket_size = adjusted_width // vector_size
# Calculate red and green intensities for all segments at once
- reshaped_red = sliced_image[:, :, 0].reshape(sliced_image.shape[0], vector_size, bucket_size)
- reshaped_green = sliced_image[:, :, 1].reshape(sliced_image.shape[0], vector_size, bucket_size)
+ reshaped_red = sliced_image[:, :, 0].reshape(
+ sliced_image.shape[0], vector_size, bucket_size
+ )
+ reshaped_green = sliced_image[:, :, 1].reshape(
+ sliced_image.shape[0], vector_size, bucket_size
+ )
red_intensities = np.mean(reshaped_red, axis=(0, 2))
green_intensities = np.mean(reshaped_green, axis=(0, 2))
# Determine the color for each segment
- output_matrix[red_intensities > green_intensities + COLOR_THRESHOLD] = COLOUR_KEY["red"]
- output_matrix[green_intensities > red_intensities + COLOR_THRESHOLD] = COLOUR_KEY["green"]
- output_matrix[np.abs(red_intensities - green_intensities) <= COLOR_THRESHOLD] = COLOUR_KEY["none"]
+ output_matrix[red_intensities > green_intensities + COLOR_THRESHOLD] = (
+ COLOUR_KEY["red"]
+ )
+ output_matrix[green_intensities > red_intensities + COLOR_THRESHOLD] = (
+ COLOUR_KEY["green"]
+ )
+ output_matrix[
+ np.abs(red_intensities - green_intensities) <= COLOR_THRESHOLD
+ ] = COLOUR_KEY["none"]
# Recreate the image from the matrix
if log.getLogger().isEnabledFor(log.DEBUG):
- path= os.path.join(DEBUG_DIR, f"debug_combined_image{self.debug_counter}.jpg")
- self.recreate_image_from_matrix(sliced_image, output_matrix, adjusted_width, vector_size).save(path)
-
+ path = os.path.join(
+ DEBUG_DIR, f"debug_combined_image{self.debug_counter}.jpg"
+ )
+ self.recreate_image_from_matrix(
+ sliced_image, output_matrix, adjusted_width, vector_size
+ ).save(path)
return output_matrix
-
- def recreate_image_from_matrix(self, image, matrix, adjusted_width, vector_size=128):
+
+ def recreate_image_from_matrix(
+ self, image, matrix, adjusted_width, vector_size=128
+ ):
"""
Recreate an image from the matrix of -1, 0, and 1 and append it to the bottom of the sliced image.
"""
@@ -261,27 +275,36 @@ def recreate_image_from_matrix(self, image, matrix, adjusted_width, vector_size=
combined_image = np.vstack((image, recreated_image_resized))
self.debug_counter += 1
return Image.fromarray(combined_image).convert("RGB")
-
- def is_green_or_red(self,lidar):
+
+ def is_green_or_red(self, lidar):
"""
Check if the car is facing a green or red wall by analyzing the bottom half of the image.
"""
image = self.get_last_image()
height, _, _ = image.shape
- bottom_half = image[height // 2:, :, :] # Slice the bottom half of the image
- lidar= np.max(sp.ndimage.zoom(lidar[595:855], image.shape[1]/len(lidar[595:855]),mode="nearest")[None,:],0) # Resize lidar data to match the image size
+ bottom_half = image[height // 2 :, :, :] # Slice the bottom half of the image
+ lidar = np.max(
+ sp.ndimage.zoom(
+ lidar[595:855], image.shape[1] / len(lidar[595:855]), mode="nearest"
+ )[None, :],
+ 0,
+ ) # Resize lidar data to match the image size
print((lidar < 0.5).sum())
print(f"min lidar: {lidar.min()}, max lidar: {lidar.max()}")
- red_intensity = np.mean(bottom_half[:, :, 0]*(lidar < 0.5)) # Red channel in RGB
- green_intensity = np.mean(bottom_half[:, :, 1]*(lidar < 0.5)) # Green channel in RGB
+ red_intensity = np.mean(
+ bottom_half[:, :, 0] * (lidar < 0.5)
+ ) # Red channel in RGB
+ green_intensity = np.mean(
+ bottom_half[:, :, 1] * (lidar < 0.5)
+ ) # Green channel in RGB
if green_intensity > red_intensity + COLOR_THRESHOLD:
return COLOUR_KEY["green"]
elif red_intensity > green_intensity + COLOR_THRESHOLD:
return COLOUR_KEY["red"]
return COLOUR_KEY["none"]
-
- def is_running_in_reversed(self, image = None, LEFT_IS_GREEN=True):
+
+ def is_running_in_reversed(self, image=None, LEFT_IS_GREEN=True):
"""
Check if the car is running in reverse.
If the car is in reverse, green will be on the right side of the image and red on the left.
@@ -293,36 +316,58 @@ def is_running_in_reversed(self, image = None, LEFT_IS_GREEN=True):
# If there are no green or no red pixels, return False
return False
green_indices = (matrix == COLOUR_KEY["green"]) * np.arange(1, len(matrix) + 1)
- average_green_index = np.mean(green_indices[green_indices > 0]) # Average index of green
+ average_green_index = np.mean(
+ green_indices[green_indices > 0]
+ ) # Average index of green
red_indices = (matrix == COLOUR_KEY["red"]) * np.arange(1, len(matrix) + 1)
- average_red_index = np.mean(red_indices[red_indices > 0]) # Average index of redcolor is red
-
+ average_red_index = np.mean(
+ red_indices[red_indices > 0]
+        )  # Average index of red
+
if LEFT_IS_GREEN and average_red_index > average_green_index:
if log.getLogger().isEnabledFor(log.DEBUG):
log.debug(f"green: {average_green_index}, red: {average_red_index}")
- vector_size = 128
+ vector_size = 128
self.debug_counter += 1
height, width, _ = image.shape
- sliced_image = image[height // 2 - height // 40 + Y_OFFSET: height // 2 + height // 40 + Y_OFFSET, :, :]
+ sliced_image = image[
+ height // 2 - height // 40 + Y_OFFSET : height // 2
+ + height // 40
+ + Y_OFFSET,
+ :,
+ :,
+ ]
# Ensure the width of the sliced image is divisible by vector_size
adjusted_width = (width // vector_size) * vector_size
sliced_image = sliced_image[:, :adjusted_width, :]
- debug_slice_image=self.recreate_image_from_matrix(sliced_image, matrix, adjusted_width, vector_size)
-
- debug_slice_image.save(os.path.join(DEBUG_DIR_wayfinding, f"wrong_direction_{self.debug_counter}_slice.jpg"))
- Image.fromarray(image).convert("RGB").save(os.path.join(DEBUG_DIR_wayfinding, f"wrong_direction{self.debug_counter}.jpg"))
+ debug_slice_image = self.recreate_image_from_matrix(
+ sliced_image, matrix, adjusted_width, vector_size
+ )
+
+ debug_slice_image.save(
+ os.path.join(
+ DEBUG_DIR_wayfinding,
+ f"wrong_direction_{self.debug_counter}_slice.jpg",
+ )
+ )
+ Image.fromarray(image).convert("RGB").save(
+ os.path.join(
+ DEBUG_DIR_wayfinding, f"wrong_direction{self.debug_counter}.jpg"
+ )
+ )
return True
elif not LEFT_IS_GREEN and average_green_index > average_red_index:
return True
+
if __name__ == "__main__":
log.basicConfig(level=log.DEBUG)
camera = Camera()
- print("Attente frame...")
+    print("Waiting for a frame...")
while camera.get_last_image() is None:
time.sleep(0.05)
@@ -330,7 +375,7 @@ def is_running_in_reversed(self, image = None, LEFT_IS_GREEN=True):
matrix = camera.camera_matrix()
print("camera_matrix OK")
- input("Appuyer pour lancer le stream...")
+    input("Press Enter to start the stream...")
camera.toggle_stream()
while True:
diff --git a/src/HL/actionneur_capteur/Lidar.py b/src/high_level/src/actionneur_capteur/lidar.py
similarity index 75%
rename from src/HL/actionneur_capteur/Lidar.py
rename to src/high_level/src/actionneur_capteur/lidar.py
index 42195572..9c8722e0 100644
--- a/src/HL/actionneur_capteur/Lidar.py
+++ b/src/high_level/src/actionneur_capteur/lidar.py
@@ -21,19 +21,20 @@
# SOFTWARE.
-
-import socket
-import os
import _thread as thread
-import numpy as np
-import matplotlib.pyplot as plt
import logging
+import os
+import socket
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+
-class Lidar():
-    measureMsgHeads = {'ME', 'GE', 'MD', 'GD'}
+class Lidar:
+    measureMsgHeads = {"ME", "GE", "MD", "GD"}
-
def deg2theta(self, deg):
- return deg / 360 * 2 * np.pi
+ return deg / 360 * 2 * np.pi
def makeSocket(self, ip, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
@@ -43,20 +44,20 @@ def makeSocket(self, ip, port):
# decode 3 byte integer data line
def decodeDistance(self, data):
-
+
def partition(n: int, lst):
for i in range(0, len(lst), n):
- yield lst[i:i + n]
+ yield lst[i : i + n]
# remove checksum bytes for every 65 bytes of data
- parts = [''.join(part[:-1]) for part in list(partition(65, data))]
- # repack data
- data = ''.join(parts)
+ parts = ["".join(part[:-1]) for part in list(partition(65, data))]
+ # repack data
+ data = "".join(parts)
# transform 3-byte int
ns = [ord(d) - 0x30 for d in data]
- ns = [f'{d:06b}' for d in ns]
+ ns = [f"{d:06b}" for d in ns]
ns = list(partition(3, ns))
- ns = [int(''.join(c3), 2) for c3 in ns]
+ ns = [int("".join(c3), 2) for c3 in ns]
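+        # (SCIP 2.0 three-character encoding: each byte minus 0x30 carries 6 bits,
+        #  and three 6-bit groups concatenate into an 18-bit distance in mm)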
# set out-of-range value to zero
ns = [0 if n == 65533 else n for n in ns]
@@ -78,41 +79,39 @@ def __init__(self, ip, port, startStep=0):
# Current step id, for decoding polar distance data
self.mStep = startStep
# Array of distance
- self.rDistance = np.zeros(1081-startStep, dtype=int)
+ self.rDistance = np.zeros(1081 - startStep, dtype=int)
# Buffer to receive packets
self.buf = ""
- self.expectedPacketSize = 65*50 + 44 # TODO hardcoded for full range measurement
+ self.expectedPacketSize = (
+ 65 * 50 + 44
+ ) # TODO hardcoded for full range measurement
- ids = np.arange(1081-startStep)
- self.xTheta = self.deg2theta((ids + startStep) * 270.0 / 1080 + 45 - 90)
+ ids = np.arange(1081 - startStep)
+ self.xTheta = self.deg2theta((ids + startStep) * 270.0 / 1080 + 45 - 90)
self.sock = self.makeSocket(ip, port)
self.__startReader__()
-
def send(self, cmd: str):
self.sock.sendall(cmd.encode())
-
def startPlotter(self, autorange=False):
-
-
def toCartesian(xTheta, xR):
X = np.cos(xTheta) * xR
Y = np.sin(xTheta) * xR
- return X,Y
+ return X, Y
plt.show()
- fig = plt.figure()
+ plt.figure()
axc = plt.subplot(121)
- axp = plt.subplot(122, projection='polar')
+ axp = plt.subplot(122, projection="polar")
# axp.set_thetamax(deg2theta(45))
# axp.set_thetamax(deg2theta(270 + 45))
axp.grid(True)
- self.log.info('Plotter started, press any key to exit')
+ self.log.info("Plotter started, press any key to exit")
- self.log.debug(f'{self.xTheta}, {self.rDistance}')
+ self.log.debug(f"{self.xTheta}, {self.rDistance}")
while True:
X, Y = toCartesian(self.xTheta, self.rDistance)
@@ -133,48 +132,44 @@ def toCartesian(xTheta, xR):
if plt.waitforbuttonpress(timeout=0.02):
os._exit(0)
-
-
# Change hokuyo IP address, requires reboot
- def changeIP(self, ip: str, gateway: str, netmask='255.255.255.0'):
+ def changeIP(self, ip: str, gateway: str, netmask="255.255.255.0"):
def formatZeros(addr):
- return ''.join([n.rjust(3, '0') for n in addr.split('.')])
+ return "".join([n.rjust(3, "0") for n in addr.split(".")])
ip = formatZeros(ip)
gateway = formatZeros(gateway)
netmask = formatZeros(netmask)
- cmd = f'$IP{ip}{netmask}{gateway}\r\n'
- self.log.debug(f'ChangeIP cmd: {cmd}')
+ cmd = f"$IP{ip}{netmask}{gateway}\r\n"
+ self.log.debug(f"ChangeIP cmd: {cmd}")
self.send(cmd)
# Start continous read mode
def startContinuous(self, start: int, end: int, withIntensity=False):
- head = 'ME' if withIntensity else 'MD'
- cmd = f'{head}{start:04d}{end:04d}00000\r\n'
+ head = "ME" if withIntensity else "MD"
+ cmd = f"{head}{start:04d}{end:04d}00000\r\n"
self.log.debug(cmd)
self.head = cmd.strip()
self.send(cmd)
-
# Start single read
def singleRead(self, start: int, end: int, withIntensity=False):
- head = 'GE' if withIntensity else 'GD'
- cmd = f'{head}{start:04d}{end:04d}01000\r\n'
- self.send( cmd)
+ head = "GE" if withIntensity else "GD"
+ cmd = f"{head}{start:04d}{end:04d}01000\r\n"
+ self.send(cmd)
def stop(self):
- cmd = 'QT\r\n'
- self.send( cmd)
+ cmd = "QT\r\n"
+ self.send(cmd)
def reboot(self):
- cmd = 'RB\r\n'
+ cmd = "RB\r\n"
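+        # RB is deliberately sent twice: Hokuyo sensors expect the reboot request
+        # to be repeated as confirmation (per the SCIP protocol, to our knowledge)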
self.send(cmd)
self.send(cmd)
-
def handleMsgLine(self, line):
if line == self.head:
- self.measuring = True
+ self.measuring = True
self.skip = 0
self.mStep = self.startStep
return True
@@ -188,7 +183,7 @@ def handleMsgLine(self, line):
# self.log.debug(f'buf size {len(self.buf)}')
if len(self.buf) >= self.expectedPacketSize:
self.decodeDistance(self.buf)
- self.buf = ''
+ self.buf = ""
return True
return False
@@ -198,19 +193,19 @@ def handleMeasuring(msg):
lines = msg.split()
for line in lines:
if not self.handleMsgLine(line):
- self.log.debug(f'ignore {line}')
+ self.log.debug(f"ignore {line}")
def loop():
try:
while True:
try:
- m, _ =self.sock.recvfrom(1024)
+ m, _ = self.sock.recvfrom(1024)
msg = m.decode()
handleMeasuring(msg)
- except socket.timeout as e:
- self.log.error('Read timeout, sensor disconnected?')
+ except socket.timeout:
+ self.log.error("Read timeout, sensor disconnected?")
os._exit(1)
finally:
self.sock.close()
- thread.start_new_thread(loop, ())
\ No newline at end of file
+ thread.start_new_thread(loop, ())
diff --git a/src/HL/actionneur_capteur/masterI2C.py b/src/high_level/src/actionneur_capteur/masterI2C.py
similarity index 69%
rename from src/HL/actionneur_capteur/masterI2C.py
rename to src/high_level/src/actionneur_capteur/masterI2C.py
index ae9f9171..3005211a 100644
--- a/src/HL/actionneur_capteur/masterI2C.py
+++ b/src/high_level/src/actionneur_capteur/masterI2C.py
@@ -1,7 +1,7 @@
import smbus #type: ignore #ignore the module could not be resolved error because it is a linux only module
import time
import struct
-from Autotech_constant import I2C_SLEEP_RECEIVED, I2C_NUMBER_DATA_RECEIVED, I2C_SLEEP_ERROR_LOOP, SLAVE_ADDRESS
+from high_level.autotech_constant import I2C_SLEEP_RECEIVED, I2C_NUMBER_DATA_RECEIVED, I2C_SLEEP_ERROR_LOOP, SLAVE_ADDRESS
import logging
import threading
@@ -13,13 +13,13 @@ def __init__(self,serveur):
self.send_running = True
self.receive_running = True
- #voltage des lipos
+ #battery info
self.voltage_lipo = 0
self.voltage_nimh = 0
- #initialisation du bus i2c
+    # initialization of the I2C bus
self.bus = smbus.SMBus(1) # 1 indicates /dev/i2c-1
- self.log.info("I2C: bus ouvert sur /dev/i2c-1")
+ self.log.info("I2C: bus opened on /dev/i2c-1")
@@ -28,9 +28,9 @@ def __init__(self,serveur):
threading.Thread(target=self.start_received, daemon=True).start()
def start_send(self):
- """Envoie vitesse/direction régulièrement au microcontroleur. (toute les frames actuellement)"""
+        """Send speed and direction to the microcontroller regularly (currently every frame)."""
time.sleep(1) # Give some time for the target_speed and direction to be set
- self.log.info("Thread I2C loop démarré")
+ self.log.info("Thread I2C loop started")
while self.send_running:
try :
data = struct.pack('= length:
float_values = struct.unpack('f' * I2C_NUMBER_DATA_RECEIVED, bytes(data[:length]))
- list_valeur = list(float_values)
+ list_values = list(float_values)
# on enregistre les valeur
- self.voltage_lipo = list_valeur[0]
- self.voltage_nimh = list_valeur[1]
- self.current_speed = list_valeur[2]
+ self.voltage_lipo = list_values[0]
+ self.voltage_nimh = list_values[1]
+ self.current_speed = list_values[2]
else:
- self.log.warning("I2C: taille inattendue (%d au lieu de %d)", len(data), length)
+                    self.log.warning("I2C: unexpected size (%d instead of %d)", len(data), length)
time.sleep(I2C_SLEEP_RECEIVED)
\ No newline at end of file
diff --git a/src/HL/actionneur_capteur/ToF.py b/src/high_level/src/actionneur_capteur/tof.py
similarity index 98%
rename from src/HL/actionneur_capteur/ToF.py
rename to src/high_level/src/actionneur_capteur/tof.py
index 2fc75b5e..a6395ffa 100644
--- a/src/HL/actionneur_capteur/ToF.py
+++ b/src/high_level/src/actionneur_capteur/tof.py
@@ -1,4 +1,3 @@
-import time
import board
import busio
from adafruit_vl53l0x import VL53L0X
diff --git a/src/high_level/src/high_level/__init__.py b/src/high_level/src/high_level/__init__.py
new file mode 100644
index 00000000..95dce6d6
--- /dev/null
+++ b/src/high_level/src/high_level/__init__.py
@@ -0,0 +1,4 @@
+from .serveur import Serveur
+from .backend import BackendAPI
+
+__all__ = ["Serveur", "BackendAPI"]
\ No newline at end of file
diff --git a/src/HL/Autotech_constant.py b/src/high_level/src/high_level/autotech_constant.py
similarity index 100%
rename from src/HL/Autotech_constant.py
rename to src/high_level/src/high_level/autotech_constant.py
diff --git a/src/HL/backend.py b/src/high_level/src/high_level/backend.py
similarity index 79%
rename from src/HL/backend.py
rename to src/high_level/src/high_level/backend.py
index 168bf1d0..0579cf27 100644
--- a/src/HL/backend.py
+++ b/src/high_level/src/high_level/backend.py
@@ -15,18 +15,16 @@
from fastapi import WebSocket
from fastapi.websockets import WebSocketDisconnect
-from src.HL.programme.programme import Program
-from Autotech_constant import (
- LOGGING_LEVEL,
+from programs.program import Program
+from high_level.autotech_constant import (
PORT_STREAMING_CAMERA,
- STREAM_PATH,
BACKEND_ON_START,
)
class BackendAPI(Program):
"""
- Programme backend web de contrôle.
- - Respecte Program: start/kill/running/controls_car
+    Web backend for control and debugging.
+    - Respects Program: start/kill/running/controls_car
- Expose une API REST:
GET /api/status
GET /api/programs
@@ -34,7 +32,7 @@ class BackendAPI(Program):
POST /api/programs/{id}/start
POST /api/programs/{id}/kill
GET /api/stream/camera
- - Sert un frontend statique (optionnel) via / (index.html)
+    - Serves a static frontend via index.html
"""
def __init__(
@@ -51,7 +49,7 @@ def __init__(
self.server = server
self.controls_car = False
self.running = False
- self.lidar_yaw = 0 # rotation fixe pour que "devant" = haut écran
+        self.lidar_yaw = 0  # fixed rotation so "forward" points up on screen
self.host = host
@@ -61,7 +59,7 @@ def __init__(
self.app = FastAPI(title="CoVAPSy Remote Control API", version="1.0.0")
- # CORS: pratique si ton frontend est sur un autre port / autre machine
+ # CORS
if cors_allow_origins is None:
cors_allow_origins = ["*"]
self.app.add_middleware(
@@ -72,15 +70,12 @@ def __init__(
allow_headers=["*"],
)
- # Frontend statique (optionnel)
- # - si site_dir contient un index.html, il sera servi à "/"
if site_dir:
self.app.mount("/static", StaticFiles(directory=site_dir), name="static")
@self.app.get("/", response_class=HTMLResponse)
def index():
- # Le frontend peut être /static/index.html ou /static/controle.html selon ton choix
- # Ici on renvoie juste une page qui redirige vers /static/
+ # the frontend is served at /static/index.html
return """
CoVAPSy
@@ -94,32 +89,31 @@ def index():
self._setup_routes()
- time.sleep(1) # Petit délai pour s'assurer que tout est prêt avant de démarrer
+        time.sleep(1)  # small delay to make sure everything is ready before starting
if BACKEND_ON_START:
self.start()
# ----------------------------
- # Helpers: lecture état voiture
+ # Helpers: reading data from server
# ----------------------------
def _arduino(self):
- # server.arduino_I2C est une property dans ton Serveur
+ # server.arduino_I2C is a property returning the I2C Arduino instance
return getattr(self.server, "arduino_I2C", None)
def _get_telemetry(self) -> Dict[str, Any]:
ard = self._arduino()
- # On protège tout: si pas initialisé, on renvoie 0
voltage_lipo = getattr(ard, "voltage_lipo", 0.0) if ard else 0.0
voltage_nimh = getattr(ard, "voltage_nimh", 0.0) if ard else 0.0
current_speed = getattr(ard, "current_speed", 0.0) if ard else 0.0
- # Programme qui contrôle la voiture actuellement
- last_ctrl = int(getattr(self.server, "last_programme_control", 0))
- programmes = getattr(self.server, "programme", [])
+            # Program currently in control of the car
+ last_ctrl = int(getattr(self.server, "last_program_control", 0))
+ programs = getattr(self.server, "programs", [])
prog_name = None
- if isinstance(programmes, list) and 0 <= last_ctrl < len(programmes):
- prog_name = type(programmes[last_ctrl]).__name__
+ if isinstance(programs, list) and 0 <= last_ctrl < len(programs):
+ prog_name = type(programs[last_ctrl]).__name__
target_speed = float(getattr(self.server, "target_speed", 0.0))
direction = float(getattr(self.server, "direction", 0.0))
@@ -130,22 +124,22 @@ def _get_telemetry(self) -> Dict[str, Any]:
"nimh": voltage_nimh
},
"car": {
- "vitesse_reelle": current_speed,
- "vitesse_demandee": target_speed,
- "direction_demandee": direction,
- "programme_controle": prog_name,
- "programme_id": last_ctrl
+ "current_speed": current_speed,
+ "target_speed": target_speed,
+ "direction": direction,
+ "car_control": prog_name,
+ "program_id": last_ctrl
},
"timestamp": time.time(),
}
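+ # illustrative payload as consumed by app.js (values are made up):
+ # {"battery": {"lipo": 7.4, "nimh": 8.1},
+ # "car": {"current_speed": 0.0, "target_speed": 1.2, "direction": -4.5,
+ # "car_control": "Ai_Programme", "program_id": 2},
+ # "timestamp": 1700000000.0}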
def _list_programs(self) -> List[Dict[str, Any]]:
- programmes = getattr(self.server, "programme", [])
+ programs = getattr(self.server, "programs", [])
out: List[Dict[str, Any]] = []
- if not isinstance(programmes, list):
+ if not isinstance(programs, list):
return out
- for i, p in enumerate(programmes):
+ for i, p in enumerate(programs):
out.append(
{
"id": i,
@@ -173,7 +167,7 @@ def _get_lidar_points_cartesian(self):
r = np.asarray(lidar.rDistance)
n = r.shape[0]
- # angles du lidar (repère capteur)
+ # lidar angles (sensor frame)
theta_lidar = np.linspace(
-3*np.pi/4, # -135°
+3*np.pi/4, # +135°
@@ -181,11 +175,11 @@ def _get_lidar_points_cartesian(self):
endpoint=True
)
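+ # 270° field of view (-135° to +135°), matching the FOV wedge drawn by the frontend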
- # correction d’orientation
+ # orientation correction
theta_world = theta_lidar + self.lidar_yaw
# projection
- # Repère monde: +Y = devant le lidar
+ # Cartesian projection (world frame: +Y = in front of the lidar)
x = -np.sin(theta_world) * r
y = np.cos(theta_world) * r
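+ # with this projection theta = 0 (straight ahead) lands on +Y,
+ # so "front" renders towards the top of the lidar plot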
@@ -217,8 +211,8 @@ def programs():
@self.app.post("/api/programs/{prog_id}/toggle")
def toggle_program(prog_id: int):
- programmes = getattr(self.server, "programme", [])
- if not isinstance(programmes, list) or not (0 <= prog_id < len(programmes)):
+ programs = getattr(self.server, "programs", [])
+ if not isinstance(programs, list) or not (0 <= prog_id < len(programs)):
raise HTTPException(status_code=404, detail="Unknown program id")
self.server.start_process(prog_id)
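+ # e.g. from a dev machine: curl -X POST http://<car-ip>:8001/api/programs/3/toggle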
@@ -227,11 +221,11 @@ def toggle_program(prog_id: int):
@self.app.post("/api/programs/{prog_id}/start")
def start_program(prog_id: int):
- programmes = getattr(self.server, "programme", [])
- if not isinstance(programmes, list) or not (0 <= prog_id < len(programmes)):
+ programs = getattr(self.server, "programs", [])
+ if not isinstance(programs, list) or not (0 <= prog_id < len(programs)):
raise HTTPException(status_code=404, detail="Unknown program id")
- p = programmes[prog_id]
+ p = programs[prog_id]
if getattr(p, "running", False):
return {"status": "already_running", "program_id": prog_id}
@@ -240,11 +234,11 @@ def start_program(prog_id: int):
@self.app.post("/api/programs/{prog_id}/kill")
def kill_program(prog_id: int):
- programmes = getattr(self.server, "programme", [])
- if not isinstance(programmes, list) or not (0 <= prog_id < len(programmes)):
+ programs = getattr(self.server, "programs", [])
+ if not isinstance(programs, list) or not (0 <= prog_id < len(programs)):
raise HTTPException(status_code=404, detail="Unknown program id")
- p = programmes[prog_id]
+ p = programs[prog_id]
if not getattr(p, "running", False):
return {"status": "already_stopped", "program_id": prog_id}
diff --git a/src/high_level/src/high_level/serveur.py b/src/high_level/src/high_level/serveur.py
new file mode 100644
index 00000000..b352115c
--- /dev/null
+++ b/src/high_level/src/high_level/serveur.py
@@ -0,0 +1,246 @@
+import logging
+import textwrap
+import time
+
+import smbus
+from gpiozero import LED, Button, Buzzer
+from luma.core.interface.serial import i2c
+from luma.core.render import canvas
+from luma.oled.device import ssd1306
+from PIL import Image, ImageDraw, ImageFont
+
+from actionneur_capteur.camera import Camera, ProgramStreamCamera
+from actionneur_capteur.lidar import Lidar
+from actionneur_capteur.masterI2C import I2c_arduino
+from actionneur_capteur.tof import ToF
+from high_level.autotech_constant import TEXT_HEIGHT
+from programs.car import Ai_Programme
+from programs.initialisation import Initialisation
+from programs.poweroff import Poweroff
+
+# programs import
+from programs.ps4_controller_program import PS4ControllerProgram
+from programs.remote_control import RemoteControl
+from programs.ssh_programme import SshProgramme
+from programs.utils.ssh import check_ssh_connections
+
+from .backend import BackendAPI
+
+
+class Serveur:
+ def __init__(self):
+ self.log = logging.getLogger(__name__)
+ # initialisation of different modules
+ self.log.info("Server initialization")
+
+ # initialisation of GPIO buttons, LEDs, buzzer
+ self.bp_next = Button("GPIO5", bounce_time=0.1)
+ self.bp_entre = Button("GPIO6", bounce_time=0.1)
+
+ self.led1 = LED("GPIO17")
+ self.led2 = LED("GPIO27")
+ self.buzzer = Buzzer("GPIO26")
+ self.log.info("GPIO: boutons, LEDs, buzzer initialized")
+
+ self.serial = i2c(port=1, address=0x3C)
+ self.device = ssd1306(self.serial)
+ self.bus = smbus.SMBus(1) # 1 indicates /dev/i2c-1
+ self.log.info("I2C: bus open on /dev/i2c-1")
+
+ # initialisation of command timestamps
+ self.initial_time = time.time()
+ self.last_cmd_time = time.time()
+
+ # car state
+ self.process_output = ""
+ self.last_program_control = 0
+ self.process = None
+ self.temp = None
+
+ self.initialisation_module = Initialisation(
+ self, Camera, Lidar, ToF, I2c_arduino
+ )
+
+ self.programs = [
+ SshProgramme(),
+ self.initialisation_module,
+ Ai_Programme(self),
+ PS4ControllerProgram(),
+ RemoteControl(),
+ ProgramStreamCamera(self),
+ BackendAPI(
+ self,
+ host="0.0.0.0",
+ port=8001,
+ site_dir="/home/intech/CoVAPSy/src/high_level/src/site_controle",
+ ),
+ Poweroff(),
+ ]
+ self.log.debug("Programs ready: %s", [type(p).__name__ for p in self.programs])
+
+ # screen state
+ self.Screen = 0
+ self.State = 0
+ self.scroll_offset = 3
+
+ @property
+ def camera(self):
+ return self.initialisation_module.camera
+
+ @property
+ def lidar(self):
+ return self.initialisation_module.lidar
+
+ @property
+ def tof(self):
+ return self.initialisation_module.tof
+
+ @property
+ def arduino_I2C(self):
+ return self.initialisation_module.arduino_I2C
+
+ @property
+ def target_speed(self):
+ return self.programs[self.last_program_control].target_speed
+
+ @property
+ def direction(self):
+ return self.programs[self.last_program_control].direction
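+
+ # target_speed and direction are delegated to whichever program currently
+ # controls the car, so consumers (e.g. the backend telemetry) always read
+ # the live command without knowing which program produced it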
+
+ # -----------------------------------------------------------------------------------------------------
+ # Screen display functions
+ # -----------------------------------------------------------------------------------------------------
+ def affichage_oled(self, selected: int): # experimental, currently unused
+ im = Image.new("1", (128, 64), "black")
+ draw = ImageDraw.Draw(im)
+ font = ImageFont.load_default()
+
+ for num, i in enumerate(
+ range(
+ max(selected - self.scroll_offset, 0),
+ min(len(self.programs), selected + self.scroll_offset),
+ )
+ ):
+ y = num * TEXT_HEIGHT
+
+ if i == selected:
+ draw.rectangle((0, y, 127, y + TEXT_HEIGHT), fill="white")
+ draw.text((3, y), type(self.programs[i]).__name__, fill="black", font=font)
+ else:
+ draw.text((3, y), type(self.programs[i]).__name__, fill="white", font=font)
+
+ with canvas(self.device) as display:
+ display.bitmap((0, 0), im, fill="white")
+
+ def make_voltage_im(self):
+ """cCreate an image showing the battery voltages to be pasted on the main display"""
+ if self.arduino_I2C is not None:
+ received = [self.arduino_I2C.voltage_lipo, self.arduino_I2C.voltage_nimh]
+ else:
+ received = [0.0, 0.0]
+
+ # filter out values below 6V and round to 2 decimal places
+ received = [round(elem, 2) if elem > 6 else 0.0 for elem in received]
+ text = f"LiP:{received[0]:.2f}V|NiH:{received[1]:.2f}V"
+ im = Image.new("1", (128, TEXT_HEIGHT), "black")
+ draw = ImageDraw.Draw(im)
+ font = ImageFont.load_default()
+ draw.text((3, 0), text, fill="white", font=font)
+ return im
+
+ def display_combined_im(self, text: str):
+ """function to display text with battery voltages on the oled screen"""
+ im = Image.new("1", (128, 64), "black")
+ draw = ImageDraw.Draw(im)
+ font = ImageFont.load_default()
+
+ # Wrap the text to fit within the width of the display
+ wrapped_text = textwrap.fill(text, width=20) # Adjust width as needed
+ draw.text((3, 0), wrapped_text, fill="white", font=font)
+
+ voltage_im = self.make_voltage_im()
+ im.paste(voltage_im, (0, 64 - TEXT_HEIGHT))
+
+ with canvas(self.device) as draw:
+ draw.bitmap((0, 0), im, fill="white")
+
+ def Idle(self):
+ """
+ Manages the screen display based on the current or chosen function.
+ Screen changes are managed by the button functions just below.
+ """
+ if check_ssh_connections():
+ self.led1.on()
+ else:
+ self.led1.off()
+
+ if self.Screen < len(self.programs):
+ text = self.programs[self.Screen].display()
+ self.display_combined_im(text)
+
+ def bouton_next(self):
+ """go to next screen on oled display"""
+ self.Screen += 1
+ if self.Screen >= len(self.programs):
+ self.Screen = 0
+
+ def bouton_entre(self, num=None):
+ """take action on the current screen on display and start the program"""
+ if num is not None:
+ self.Screen = num
+ self.State = self.Screen
+ self.start_process(self.Screen)
+
+ # ---------------------------------------------------------------------------------------------------
+ # Processes
+ # ---------------------------------------------------------------------------------------------------
+
+ def start_process(self, number_program):
+ """Starts the program referenced by its number:
+ if it is a program that controls the car, it kills the old program that was controlling,
+ otherwise the program is started or stopped depending on whether it was already running or stopped before"""
+ self.log.info(
+ "User action: program %d (%s)",
+ number_program,
+ type(self.programs[number_program]).__name__,
+ )
+ if self.programs[number_program].running:
+ self.programs[number_program].kill()
+ if self.programs[number_program].controls_car:
+ self.last_program_control = 0
+ self.log.warning(
+ "Car control changed: %s -> %s",
+ type(self.programs[number_program]).__name__,
+ type(self.programs[self.last_program_control]).__name__,
+ )
+
+ self.log.info(
+ "Program %s stopped", type(self.programs[number_program]).__name__
+ )
+
+ elif self.programs[number_program].controls_car:
+ self.programs[self.last_program_control].kill()
+ self.programs[number_program].start()
+ self.log.warning(
+ "Car control changed: %s -> %s",
+ type(self.programs[self.last_program_control]).__name__,
+ type(self.programs[number_program]).__name__,
+ )
+ self.last_program_control = number_program
+
+ else:
+ self.programs[number_program].start()
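+ # e.g. start_process(3) starts PS4ControllerProgram (assuming its
+ # controls_car flag is set) and kills the previous controller; a second
+ # start_process(3) kills it and control falls back to programs[0]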
+
+ # ---------------------------------------------------------------------------------------------------
+ # car function
+ # ---------------------------------------------------------------------------------------------------
+
+ def main(self):
+ self.bp_next.when_pressed = self.bouton_next
+ self.bp_entre.when_pressed = self.bouton_entre
+
+ self.log.info("Server main loop started")
+
+ while True:
+ self.Idle()
+ time.sleep(0.1) # throttle the refresh loop; Idle() shells out to `who` on every pass
diff --git a/src/high_level/src/programs/__init__.py b/src/high_level/src/programs/__init__.py
new file mode 100644
index 00000000..ad3ea422
--- /dev/null
+++ b/src/high_level/src/programs/__init__.py
@@ -0,0 +1,20 @@
+from .camera_serv import StreamHandler, StreamOutput, StreamServer, frame_buffer
+from .car import Car
+from .initialisation import Initialisation
+from .poweroff import Poweroff
+from .program import Program # base class; every other program depends on it
+from .remote_control import RemoteControl
+from .ssh_programme import SshProgramme
+
+__all__ = [
+ "Program",
+ "Car",
+ "RemoteControl",
+ "Initialisation",
+ "Poweroff",
+ "SshProgramme",
+ "StreamServer",
+ "StreamHandler",
+ "StreamOutput",
+ "frame_buffer",
+]
diff --git a/src/HL/programme/Camera_serv.py b/src/high_level/src/programs/camera_serv.py
similarity index 96%
rename from src/HL/programme/Camera_serv.py
rename to src/high_level/src/programs/camera_serv.py
index 447b203d..e84801c3 100644
--- a/src/HL/programme/Camera_serv.py
+++ b/src/high_level/src/programs/camera_serv.py
@@ -5,7 +5,7 @@
import socketserver
from threading import Condition
-from src.HL.Autotech_constant import STREAM_PATH # si tu veux un path configurable
+from high_level.autotech_constant import STREAM_PATH
streaming_enabled = True
diff --git a/src/HL/programme/Car.py b/src/high_level/src/programs/car.py
similarity index 70%
rename from src/HL/programme/Car.py
rename to src/high_level/src/programs/car.py
index afce745d..4ed12ece 100644
--- a/src/HL/programme/Car.py
+++ b/src/high_level/src/programs/car.py
@@ -1,42 +1,45 @@
-import time
-import onnxruntime as ort
-from scipy.special import softmax
-import numpy as np
-from gpiozero import LED, Button
import logging
-import smbus # type: ignore #ignore the module could not be resolved error because it is a linux only module
-import struct
+import time
from threading import Thread
-from src.HL.programme.programme import Program
+
+import numpy as np
+import onnxruntime as ort
# Import constants from HL.Autotech_constant to share them between files and ease of use
-from ..Autotech_constant import MAX_ANGLE, CRASH_DIST, MODEL_PATH, SOCKET_ADRESS, REAR_BACKUP_DIST, LIDAR_DATA_SIGMA, LIDAR_DATA_AMPLITUDE, LIDAR_DATA_OFFSET
-from src.HL.programme.scripts.Driver import Driver
+from high_level.autotech_constant import (
+ CRASH_DIST,
+ LIDAR_DATA_AMPLITUDE,
+ LIDAR_DATA_OFFSET,
+ LIDAR_DATA_SIGMA,
+ MAX_ANGLE,
+ MODEL_PATH,
+ REAR_BACKUP_DIST,
+)
+
+from .program import Program
+from .utils.driver import Driver
class Car:
- def __init__(self,driving_strategy, serveur):
+ def __init__(self, driving_strategy, serveur):
self.log = logging.getLogger(__name__)
-
- """Initialize the car's components."""
self.target_speed = 0 # Speed in millimeters per second
self.direction = 0 # Steering angle in degrees
self.serveur = serveur
self.reverse_count = 0
- def _initialize_ai():
- """Initialize the AI session."""
- try:
- self.ai_session = ort.InferenceSession(MODEL_PATH)
- self.log.info("AI session initialized successfully")
- except Exception as e:
- self.log.error(f"Error initializing AI session: {e}")
- raise
+
# Initialize AI session
- _initialize_ai()
-
+ try:
+ self.ai_session = ort.InferenceSession(MODEL_PATH)
+ self.log.info("AI session initialized successfully")
+ except Exception as e:
+ self.log.error(f"Error initializing AI session: {e}")
+ raise
+
self.driving = driving_strategy
self.log.info("Car initialization complete")
+
# accès dynamique aux capteurs
@property
def camera(self):
@@ -50,16 +53,16 @@ def lidar(self):
def tof(self):
return self.serveur.tof
-
def stop(self):
self.target_speed = 0
self.direction = 0
self.log.info("Arrêt du moteur")
-
def has_Crashed(self):
-
- small_distances = [d for d in self.lidar.rDistance[200:880] if 0 < d < CRASH_DIST] # 360 to 720 is the front of the car. 1/3 of the fov of the lidar
+
+ small_distances = [
+ d for d in self.lidar.rDistance[200:880] if 0 < d < CRASH_DIST
+ ] # 360 to 720 is the front of the car. 1/3 of the fov of the lidar
self.log.debug(f"Distances: {small_distances}")
if len(small_distances) > 2:
# min_index = self.lidar.rDistance.index(min(small_distances))
@@ -73,27 +76,32 @@ def has_Crashed(self):
def turn_around(self):
"""Turn the car around."""
self.log.info("Turning around")
-
+
self.target_speed = 0
self.direction = MAX_ANGLE
- self.target_speed = -2 #blocing call
- time.sleep(1.8) # Wait for the car to turn around
+ self.target_speed = -2 # blocking call
+ time.sleep(1.8) # Wait for the car to turn around
if self.camera.is_running_in_reversed():
self.turn_around()
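+ # recurse until the camera no longer reports the car facing the wrong way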
-
-
def main(self):
# récupération des données du lidar. On ne prend que les 1080 premières valeurs et on ignore la dernière par facilit" pour l'ia
if self.camera is None or self.lidar is None or self.tof is None:
self.log.debug("Capteurs pas encore prêts")
print("Capteurs pas encore prêts")
return
- lidar_data = (self.lidar.rDistance[:1080]/1000)
- lidar_data_ai= (lidar_data-0.5)*(
- LIDAR_DATA_OFFSET + LIDAR_DATA_AMPLITUDE * np.exp(-1/2*((np.arange(1080) - 135) / LIDAR_DATA_SIGMA**2))
- ) #convertir en mètre et ajouter un bruit gaussien #On traffique les données fournit a l'IA
- self.direction, self.target_speed = self.driving(lidar_data_ai) #l'ai prend des distance en mètre et non en mm
+ lidar_data = self.lidar.rDistance[:1080] / 1000
+ lidar_data_ai = (
+ (lidar_data - 0.5)
+ * (
+ LIDAR_DATA_OFFSET
+ + LIDAR_DATA_AMPLITUDE
+ * np.exp(-1 / 2 * ((np.arange(1080) - 135) / LIDAR_DATA_SIGMA**2))
+ )
+ ) # convert to meters and add Gaussian noise; the data fed to the AI is deliberately doctored
+ self.direction, self.target_speed = self.driving(
+ lidar_data_ai
+ ) # the AI expects distances in meters, not millimeters
self.log.debug(f"Min Lidar: {min(lidar_data)}, Max Lidar: {max(lidar_data)}")
if self.camera.is_running_in_reversed():
@@ -105,27 +113,29 @@ def main(self):
self.reverse_count = 0
if self.has_Crashed():
print("Obstacle détecté")
- color= self.camera.is_green_or_red(lidar_data)
+ color = self.camera.is_green_or_red(lidar_data)
if color == 0:
- small_distances = [d for d in self.lidar.rDistance if 0 < d < CRASH_DIST]
+ small_distances = [
+ d for d in self.lidar.rDistance if 0 < d < CRASH_DIST
+ ]
if len(small_distances) == 0:
self.log.info("Aucun obstacle détecté")
return
min_index = np.argmin(small_distances)
- direction = MAX_ANGLE if min_index < 540 else -MAX_ANGLE #540 is the middle of the lidar
- color = direction/direction
+ direction = (
+ MAX_ANGLE if min_index < 540 else -MAX_ANGLE
+ ) # 540 is the middle of the lidar
+ color = direction / MAX_ANGLE # ±1: the sign encodes which side to dodge
self.log.info("Obstacle détecté, Lidar Fallback")
if color == -1:
self.log.info("Obstacle rouge détecté")
if color == 1:
self.log.info("Obstacle vert détecté")
- angle= -color*MAX_ANGLE
+ angle = -color * MAX_ANGLE
self.target_speed = -2
self.direction = angle
-
-
class Ai_Programme(Program):
def __init__(self, serveur):
super().__init__()
@@ -138,29 +148,34 @@ def __init__(self, serveur):
@property
def target_speed(self):
- if self.GR86 == None:
+ if self.GR86 is None:
return 0
return self.GR86.target_speed
-
+
@property
def direction(self):
- if self.GR86 == None:
+ if self.GR86 is None:
return 0
return self.GR86.direction
def run(self):
while self.running:
try:
- self.GR86.main()
+ if self.GR86 is not None:
+ self.GR86.main()
except Exception as e:
self.log.error(f"Erreur IA: {e}")
self.running = False
-
+
def start(self):
if self.running:
return
- if self.serveur.camera is None or self.serveur.lidar is None or self.serveur.tof is None:
+ if (
+ self.serveur.camera is None
+ or self.serveur.lidar is None
+ or self.serveur.tof is None
+ ):
print("Capteurs non initialisés")
return
@@ -177,10 +192,11 @@ def start(self):
self.running = True
Thread(target=self.run, daemon=True).start()
-
def stop(self):
self.running = False
- self.GR86.stop()
+ if self.GR86 is not None:
+ self.GR86.stop()
+
"""
if __name__ == '__main__': # non fonctionnelle
@@ -210,4 +226,4 @@ def stop(self):
GR86.stop()
logging.error("Erreur inconnue")
raise e # re-raise the exception to see the error message
- """
\ No newline at end of file
+ """
diff --git a/src/HL/programme/module_initialisation.py b/src/high_level/src/programs/initialisation.py
similarity index 59%
rename from src/HL/programme/module_initialisation.py
rename to src/high_level/src/programs/initialisation.py
index 80ee032b..32dd8219 100644
--- a/src/HL/programme/module_initialisation.py
+++ b/src/high_level/src/programs/initialisation.py
@@ -1,8 +1,14 @@
-from src.HL.programme.programme import Program
+from .program import Program
import threading
-from ..Autotech_constant import SOCKET_ADRESS
+from high_level.autotech_constant import SOCKET_ADRESS
import logging
+from enum import Enum
+
+class ProgramState(Enum):
+ INITIALIZATION = 1
+ RUNNING = 2
+ STOPPED = 3
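+
+# each init thread moves its peripheral's flag INITIALIZATION -> RUNNING on
+# success or INITIALIZATION -> STOPPED on error; display() renders these states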
class Initialisation(Program):
def __init__(self,server, camera, lidar, tof, I2C):
@@ -13,9 +19,9 @@ def __init__(self,server, camera, lidar, tof, I2C):
self.lidar = None
self.tof = None
self.arduino_I2C_init = 0
- self.camera_init = 0
- self.lidar_init = 0
- self.tof_init = 0
+ self.camera_init = ProgramState.INITIALIZATION
+ self.lidar_init = ProgramState.INITIALIZATION
+ self.tof_init = ProgramState.INITIALIZATION
threading.Thread(target=self.init_camera, args=(camera,), daemon=True).start()
threading.Thread(target=self.init_lidar, args=(lidar,), daemon=True).start()
@@ -25,20 +31,20 @@ def __init__(self,server, camera, lidar, tof, I2C):
def init_I2C_arduino(self,I2C,server):
try:
self.arduino_I2C = I2C(server)
- self.arduino_I2C_init = 1
+ self.arduino_I2C_init = ProgramState.RUNNING
self.log.info("I2C Arduino initialized successfully")
except Exception as e:
- self.arduino_I2C_init = 2
+ self.arduino_I2C_init = ProgramState.STOPPED
self.log.error("I2C Arduino init error : " + str(e))
def init_camera(self,camera):
try:
self.camera = camera()
- self.camera_init = 1
+ self.camera_init = ProgramState.RUNNING
self.log.info("Camera initialized successfully")
except Exception as e:
- self.camera_init = 2
+ self.camera_init = ProgramState.STOPPED
self.log.error("Camera init error : " + str(e))
def init_lidar(self,lidar):
@@ -47,52 +53,41 @@ def init_lidar(self,lidar):
self.lidar.stop()
self.lidar.startContinuous(0, 1080)
self.log.info("Lidar initialized successfully")
- self.lidar_init = 1
+ self.lidar_init = ProgramState.RUNNING
except Exception as e:
- self.lidar_init = 2
+ self.lidar_init = ProgramState.STOPPED
self.log.error("Lidar init error : " + str(e))
def init_tof(self,tof):
try:
self.tof = tof()
- self.tof_init = 1
+ self.tof_init = ProgramState.RUNNING
self.log.info("Camera initialized successfully")
except Exception as e:
- self.tof_init = 2
+ self.tof_init = ProgramState.STOPPED
self.log.error("Tof init error : " + str(e))
def display(self):
- text = "\ncamera: "
- if self.camera_init == 0:
- text += "(en cour)"
- elif self.camera_init == 1:
- text += "ready."
- elif self.camera_init == 2:
- text += "error"
+ def state_to_text(state):
+ match state:
+ case ProgramState.INITIALIZATION:
+ return "(en cour)"
+ case ProgramState.RUNNING:
+ return "ready."
+ case ProgramState.STOPPED:
+ return "error"
+
+ return "unknown"
+
+ text = "\ncamera: "
+ text += state_to_text(self.camera_init)
text+= "\n lidar: "
- if self.lidar_init == 0:
- text += "(en cour)"
- elif self.lidar_init == 1:
- text += "ready."
- elif self.lidar_init == 2:
- text += "error"
-
+ text += state_to_text(self.lidar_init)
text+= "\n tof:"
- if self.tof_init == 0:
- text += "(en cour)"
- elif self.tof_init == 1:
- text += "ready."
- elif self.tof_init == 2:
- text += "error"
-
+ text += state_to_text(self.tof_init)
text+= "\n Arduino:"
- if self.arduino_I2C_init == 0:
- text += "(en cour)"
- elif self.arduino_I2C_init == 1:
- text += "ready."
- elif self.arduino_I2C_init == 2:
- text += "error"
-
+ text += state_to_text(self.arduino_I2C_init)
+
return text
\ No newline at end of file
diff --git a/src/HL/programme/Poweroff.py b/src/high_level/src/programs/poweroff.py
similarity index 92%
rename from src/HL/programme/Poweroff.py
rename to src/high_level/src/programs/poweroff.py
index 19abd3b9..7d54e33a 100644
--- a/src/HL/programme/Poweroff.py
+++ b/src/high_level/src/programs/poweroff.py
@@ -1,6 +1,6 @@
import subprocess
import os
-from src.HL.programme.programme import Program
+from programs.program import Program
import logging
class Poweroff(Program):
diff --git a/src/high_level/src/programs/program.py b/src/high_level/src/programs/program.py
new file mode 100644
index 00000000..4ef4f532
--- /dev/null
+++ b/src/high_level/src/programs/program.py
@@ -0,0 +1,33 @@
+import logging
+
+from high_level.autotech_constant import LOGGING_LEVEL
+
+""" Base class for all programs """
+
+
+class Program:
+ controls_car: bool # whether the program controls the car or not to know if we need to stop the old program that controls the car
+ running: bool # base state of the program (on car startup)
+
+ @property
+ def target_speed(self) -> float: ...
+
+ @property
+ def direction(self) -> float: ...
+
+ def __init__(self):
+ self.logger = logging.getLogger(__name__)
+ self.logger.setLevel(LOGGING_LEVEL)
+
+ def kill(self):
+ pass
+
+ def start(self):
+ pass
+
+ def display(self):
+ name = self.__class__.__name__
+ if self.running:
+ return f"{name} \n (running)"
+ else:
+ return name
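+
+
+# a minimal subclass sketch (illustrative, not part of the codebase):
+#
+# class Blinker(Program):
+#     controls_car = False
+#     running = False
+#
+#     def start(self):
+#         self.running = True
+#
+#     def kill(self):
+#         self.running = False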
diff --git a/scripts/commande_PS4.py b/src/high_level/src/programs/ps4_controller_program.py
similarity index 70%
rename from scripts/commande_PS4.py
rename to src/high_level/src/programs/ps4_controller_program.py
index a6a0428a..01759138 100644
--- a/scripts/commande_PS4.py
+++ b/src/high_level/src/programs/ps4_controller_program.py
@@ -1,24 +1,21 @@
from pyPS4Controller.controller import Controller
import time
-import os
from threading import Thread
-from src.HL.programme.programme import Program
-from src.HL.Autotech_constant import MAX_ANGLE
+from .program import Program
+from high_level.autotech_constant import MAX_ANGLE, MAX_CONTROL_SPEED, MIN_CONTROL_SPEED
import logging
-###################################################
-#Intialisation du protocole zmq
-##################################################
-def envoie_donnee(Voiture): #si utilisation de la voiture directement
+
+def envoie_donnee(Voiture): # used when driving the car directly (standalone run)
print("lancement de l'i2c")
import smbus
import struct
- from src.HL.Autotech_constant import SLAVE_ADDRESS
+ from high_level.autotech_constant import SLAVE_ADDRESS
bus = smbus.SMBus(1)
while True:
try :
- data = struct.pack('…')
if (vit > 0):
- self.vitesse_mms = 0
+ self.speed_mms = 0
else:
- self.vitesse_mms = vit
+ self.speed_mms = vit
- def on_L2_release(self): #arrete la voiture lorsque L2 est arrété d'étre préssé.
- self.vitesse_mms = 0
+ def on_L2_release(self): # Stop the car when L2 is released
+ self.speed_mms = 0
def on_L3_up(self,value):
pass
diff --git a/src/HL/programme/RemoteControl.py b/src/high_level/src/programs/remote_control.py
similarity index 65%
rename from src/HL/programme/RemoteControl.py
rename to src/high_level/src/programs/remote_control.py
index f0ebdee2..d058282f 100644
--- a/src/HL/programme/RemoteControl.py
+++ b/src/high_level/src/programs/remote_control.py
@@ -1,13 +1,13 @@
-from src.HL.programme.programme import Program
+from programs.program import Program
import struct
import socket
import threading
-import time
import logging
-from src.HL.Autotech_constant import PORT_REMOTE_CONTROL
+from high_level.autotech_constant import PORT_REMOTE_CONTROL
+
class RemoteControl(Program):
- """ ce programme permet de prendre le control de la voiture à distance en utilsant des packet udp
- on peut prendre le controle avec le script remote_controle_controller.py"""
+ """ This program allows remote control of the car using UDP packets.
+ You can take control with the script remote_control_controller.py"""
def __init__(self):
super().__init__()
self.log = logging.getLogger(__name__)
@@ -16,14 +16,14 @@ def __init__(self):
self.target_speed = 0
self.direction = 0
- #initialisation
+ # Initialization
self.public = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.public.bind(("0.0.0.0", PORT_REMOTE_CONTROL))
- self.log.info("Initialisation de Remote control fini")
+ self.log.info("Remote control initialization finished")
def car_controle(self,sock):
- """lance le controle depuis le pc."""
+ """Starts control from the PC."""
sock.settimeout(0.1)
while self.running:
@@ -38,5 +38,5 @@ def start(self):
threading.Thread(target=self.car_controle, args=(self.public,), daemon=True).start()
def kill(self):
- """fait sortir le thread de sa boucle"""
+ """Exits the thread from its loop"""
self.running = False
\ No newline at end of file
diff --git a/src/HL/programme/SshProgramme.py b/src/high_level/src/programs/ssh_programme.py
similarity index 85%
rename from src/HL/programme/SshProgramme.py
rename to src/high_level/src/programs/ssh_programme.py
index f08381f7..ad220a98 100644
--- a/src/HL/programme/SshProgramme.py
+++ b/src/high_level/src/programs/ssh_programme.py
@@ -1,10 +1,10 @@
-from src.HL.programme.programme import Program
-from src.HL.programme.scripts.get_ip import check_ssh_connections
+from programs.program import Program
+from programs.utils.ssh import check_ssh_connections
import socket
import time
class SshProgramme(Program):
- """Montre le menu SSH de la voiture et force vitesse/direction à 0"""
+ """Give information about SSH connections and IP address and if the car is in standby mode (when this program is running)"""
def __init__(self):
super().__init__()
diff --git a/src/high_level/src/programs/utils/__init__.py b/src/high_level/src/programs/utils/__init__.py
new file mode 100644
index 00000000..bcbcc503
--- /dev/null
+++ b/src/high_level/src/programs/utils/__init__.py
@@ -0,0 +1,4 @@
+from .driver import Driver
+from .ssh import check_ssh_connections
+
+__all__ = ['Driver', 'check_ssh_connections']
\ No newline at end of file
diff --git a/src/HL/programme/scripts/Driver.py b/src/high_level/src/programs/utils/driver.py
similarity index 70%
rename from src/HL/programme/scripts/Driver.py
rename to src/high_level/src/programs/utils/driver.py
index 375eb5ba..22a1465c 100644
--- a/src/HL/programme/scripts/Driver.py
+++ b/src/high_level/src/programs/utils/driver.py
@@ -1,13 +1,18 @@
+import logging
import math
-from matplotlib import pyplot as plt
-import scipy as sp
-from scipy.special import softmax
+
import numpy as np
import onnxruntime as ort
-import logging
-
-from src.HL.Autotech_constant import SPEED_LOOKUP, ANGLE_LOOKUP, MODEL_PATH, Temperature
+import scipy as sp
+from matplotlib import pyplot as plt
+from scipy.special import softmax
+from high_level.autotech_constant import (
+ ANGLE_LOOKUP,
+ MODEL_PATH,
+ SPEED_LOOKUP,
+ Temperature,
+)
class Driver:
@@ -21,45 +26,51 @@ def __init__(self, context_size=0, horizontal_size=0):
if self.log.getLogger().isEnabledFor(self.log.DEBUG):
self.fig, self.ax = plt.subplots(4, 1, figsize=(10, 8))
- self.steering_bars = self.ax[0].bar(range(16), np.zeros(16), color='blue')
+ self.steering_bars = self.ax[0].bar(range(16), np.zeros(16), color="blue")
self.steering_avg = [
- self.ax[0].plot([0, 0], [0, 1], color=(i/3, 1 - i/3, 0), label='Average')[0]
+ self.ax[0].plot(
+ [0, 0], [0, 1], color=(i / 3, 1 - i / 3, 0), label="Average"
+ )[0]
for i in range(4)
]
- self.ax[0].set_ylim(0, 1) # Probabilities range from 0 to 1
- self.ax[0].set_title('Steering Action Probabilities')
+ self.ax[0].set_ylim(0, 1) # Probabilities range from 0 to 1
+ self.ax[0].set_title("Steering Action Probabilities")
# Speed bars
- self.speed_bars = self.ax[1].bar(range(16), np.zeros(16), color='blue')
- self.speed_avg = self.ax[1].plot([0, 0], [0, 1], color='red', label='Average')[0]
+ self.speed_bars = self.ax[1].bar(range(16), np.zeros(16), color="blue")
+ self.speed_avg = self.ax[1].plot(
+ [0, 0], [0, 1], color="red", label="Average"
+ )[0]
self.ax[1].set_ylim(0, 1) # Probabilities range from 0 to 1
- self.ax[1].set_title('Speed Action Probabilities')
+ self.ax[1].set_title("Speed Action Probabilities")
# LiDAR img
self.lidar_img = self.ax[2].imshow(
- np.zeros((128, 128)),
- cmap='gray', vmin=0, vmax=np.log(31)
+ np.zeros((128, 128)), cmap="gray", vmin=0, vmax=np.log(31)
)
- self.ax[2].set_title('LiDAR Image')
+ self.ax[2].set_title("LiDAR Image")
# Camera img
self.camera_img = self.ax[3].imshow(
- np.zeros((128, 128, 3)),
- cmap='RdYlGn', vmin=-1, vmax=1
+ np.zeros((128, 128, 3)), cmap="RdYlGn", vmin=-1, vmax=1
)
- self.ax[3].set_title('Camera Image')
+ self.ax[3].set_title("Camera Image")
def load_model(self):
if self._loaded:
return
self.log.info("Chargement du modèle IA...")
self.ai_session = ort.InferenceSession(MODEL_PATH)
- self.context = np.zeros([2, self.context_size, self.horizontal_size], dtype=np.float32 )
+ self.context = np.zeros(
+ [2, self.context_size, self.horizontal_size], dtype=np.float32
+ )
self._loaded = True
- self.log.info("Modèle IA chargé")
+ self.log.info("Modèle IA chargé")
def reset(self):
- self.context = np.zeros([2, self.context_size, self.horizontal_size], dtype=np.float32)
+ self.context = np.zeros(
+ [2, self.context_size, self.horizontal_size], dtype=np.float32
+ )
def omniscent(self, lidar_data, camera_data):
return self.ai_update_lidar_camera(lidar_data, camera_data)
@@ -75,22 +86,23 @@ def ai_update_lidar_camera(self, lidar_data, camera_data):
raise RuntimeError("Driver non initialisé (modèle IA non chargé)")
self.log.info(f"MIN MAX lidar_data: {(min(lidar_data), max(lidar_data))}")
- lidar_data = sp.ndimage.zoom(
- np.array(lidar_data, dtype=np.float32),
- 128/len(lidar_data)
- ) / 1000 * 0.8
+ lidar_data = (
+ sp.ndimage.zoom(
+ np.array(lidar_data, dtype=np.float32), 128 / len(lidar_data)
+ )
+ / 1000
+ * 0.8
+ )
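+ # resample the scan to 128 points and convert mm -> m; the extra 0.8
+ # factor presumably matches the training-time normalisation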
camera_data = sp.ndimage.zoom(
- np.array(camera_data, dtype=np.float32),
- 128/len(camera_data)
+ np.array(camera_data, dtype=np.float32), 128 / len(camera_data)
)
- self.context = np.concatenate([
- self.context[:, 1:],
- [lidar_data[None], camera_data[None]]
- ], axis=1)
+ self.context = np.concatenate(
+ [self.context[:, 1:], [lidar_data[None], camera_data[None]]], axis=1
+ )
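+ # sliding window: drop the oldest frame and append the newest, so the model
+ # always sees the most recent context_size lidar/camera frames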
# 2 vectors direction and speed. direction is between hard left at index 0 and hard right at index 1. speed is between min speed at index 0 and max speed at index 1
- vect = self.ai_session.run(None, {'input': self.context[None]})[0][0]
+ vect = self.ai_session.run(None, {"input": self.context[None]})[0][0]
vect_dir, vect_prop = vect[:16], vect[16:] # split the vector in 2
vect_dir = softmax(vect_dir) # distribution de probabilité
@@ -117,13 +129,12 @@ def ai_update_lidar_camera(self, lidar_data, camera_data):
plt.draw()
plt.pause(1e-8)
-
print(" ".join([f"{x:.1f}" for x in vect_dir]))
print(" ".join([f"{x:.1f}" for x in vect_prop]), flush=True)
- angle = sum(ANGLE_LOOKUP*vect_dir) # moyenne pondérée des angles
+ angle = sum(ANGLE_LOOKUP * vect_dir) # weighted average of the angles
# moyenne pondérée des vitesses
- vitesse = sum(SPEED_LOOKUP*vect_prop)
+ vitesse = sum(SPEED_LOOKUP * vect_prop)
return angle, vitesse
@@ -132,40 +143,40 @@ def ai_update_lidar(self, lidar_data):
raise RuntimeError("Driver non initialisé (modèle IA non chargé)")
lidar_data = np.array(lidar_data, dtype=np.float32) * 1.6
# 2 vectors direction and speed. direction is between hard left at index 0 and hard right at index 1. speed is between min speed at index 0 and max speed at index 1
- vect = self.ai_session.run(None, {'input': lidar_data[None]})[0][0]
+ vect = self.ai_session.run(None, {"input": lidar_data[None]})[0][0]
vect_dir, vect_prop = vect[:16], vect[16:] # split the vector in 2
- vect_dir = softmax(vect_dir/Temperature) # distribution de probabilité
+ vect_dir = softmax(vect_dir / Temperature) # probability distribution
vect_prop = softmax(vect_prop)
- angle = sum(ANGLE_LOOKUP*vect_dir) # moyenne pondérée des angles
+ angle = sum(ANGLE_LOOKUP * vect_dir) # weighted average of the angles
# moyenne pondérée des vitesses
- vitesse = sum(SPEED_LOOKUP*vect_prop)
+ vitesse = sum(SPEED_LOOKUP * vect_prop)
return angle, vitesse
-
-
def farthest_distants(self, lidar_data):
# Initialize variables
lidar_data_mm = [0] * 360 # Assuming 360 degrees for the lidar data
filter_size = 15
max_value = 0
max_index = 0
- closest_distance = float('inf')
+ closest_distance = float("inf")
average_distance = 0
valid_distance_count = 0
# Translate lidar angles to table angles
for angle in range(len(lidar_data_mm)):
if 135 < angle < 225:
- lidar_data_mm[angle] = float('nan')
+ lidar_data_mm[angle] = float("nan")
else:
lidar_data_mm[angle] = lidar_data[540 + (-angle * 4)]
# Find the maximum value in the lidar data
for i in range(-45, 45):
- sum_values = sum(lidar_data_mm[n] for n in range(i - filter_size, i + filter_size))
+ sum_values = sum(
+ lidar_data_mm[n] for n in range(i - filter_size, i + filter_size)
+ )
if sum_values > max_value:
max_value = sum_values
max_index = i
@@ -179,7 +190,9 @@ def farthest_distants(self, lidar_data):
valid_distance_count += 1
if current_distance < closest_distance:
closest_distance = current_distance
- average_distance = average_distance / valid_distance_count if valid_distance_count != 0 else 0
+ average_distance = (
+ average_distance / valid_distance_count if valid_distance_count != 0 else 0
+ )
speed = average_distance * 0.002
print("speed =", speed)
diff --git a/src/high_level/src/programs/utils/ssh.py b/src/high_level/src/programs/utils/ssh.py
new file mode 100644
index 00000000..1a76dcbb
--- /dev/null
+++ b/src/high_level/src/programs/utils/ssh.py
@@ -0,0 +1,15 @@
+import subprocess
+
+
+def check_ssh_connections():
+ result = subprocess.run(["who"], stdout=subprocess.PIPE)
+ output = result.stdout.decode("utf-8")
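+ # remote (SSH) sessions appear on pseudo-terminals, e.g.:
+ # intech pts/0 2025-01-01 12:00 (192.168.1.10)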
+ ssh_connections = [line for line in output.split("\n") if "pts/" in line]
+ return len(ssh_connections) > 0 # bool
+
+
+if __name__ == "__main__":
+ if check_ssh_connections():
+ print("There are active SSH connections.")
+ else:
+ print("No active SSH connections.")
diff --git a/src/HL/site_controle/app.js b/src/high_level/src/site_controle/app.js
similarity index 87%
rename from src/HL/site_controle/app.js
rename to src/high_level/src/site_controle/app.js
index 575b3332..dc85ed51 100644
--- a/src/HL/site_controle/app.js
+++ b/src/high_level/src/site_controle/app.js
@@ -1,4 +1,4 @@
-const API = ""; // même origine (FastAPI)
+const API = ""; // same origin (FastAPI)
const speedHistory = {
real: [],
demand: [],
@@ -15,7 +15,7 @@ function drawSpeedChart() {
ctx.clearRect(0, 0, w, h);
- // axes
+ // chart axes
ctx.strokeStyle = "#444";
ctx.beginPath();
ctx.moveTo(40, 10);
@@ -32,7 +32,7 @@ function drawSpeedChart() {
);
const yZero = h / 2;
const scaleY = (h - 40) / (2 * maxAbs);
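+ // scaleY maps the largest |speed| in the window to the plot's half-height,
+ // keeping ~20 px of margin at top and bottom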
- // ligne y=0
+ // y=0 line
ctx.strokeStyle = "#666";
ctx.setLineDash([5, 5]);
ctx.beginPath();
@@ -56,15 +56,15 @@ function drawSpeedChart() {
ctx.stroke();
}
- drawCurve(speedHistory.demand, "#ffaa00"); // consigne
- drawCurve(speedHistory.real, "#00ff88"); // réel
+ drawCurve(speedHistory.demand, "#ffaa00"); // target
+ drawCurve(speedHistory.real, "#00ff88"); // actual
- // légende
+ // legend
ctx.fillStyle = "#ffaa00";
- ctx.fillText("Consigne", w - 100, 20);
+ ctx.fillText("Target", w - 100, 20);
ctx.fillStyle = "#00ff88";
- ctx.fillText("Réelle", w - 100, 35);
+ ctx.fillText("Actual", w - 100, 35);
ctx.font = "10px monospace";
}
@@ -83,7 +83,7 @@ function drawSteering(directionDeg) {
const centerY = h * 0.85;
const radius = h * 0.65;
- /* -------- Rapporteur (-18° à +18°) -------- */
+ /* -------- Protractor (-18° to +18°) -------- */
ctx.strokeStyle = "#444";
ctx.lineWidth = 1;
@@ -99,7 +99,7 @@ function drawSteering(directionDeg) {
);
ctx.stroke();
- /* -------- Graduations -------- */
+ /* -------- Tick marks -------- */
for (let d = -18; d <= 18; d += 6) {
const a = (d - 90) * Math.PI / 180;
@@ -123,7 +123,7 @@ function drawSteering(directionDeg) {
);
}
- /* -------- Tige (direction demandée) -------- */
+ /* -------- Needle (target direction) -------- */
const angleRad = (directionDeg - 90) * Math.PI / 180;
ctx.strokeStyle = "#00ff88";
@@ -136,7 +136,7 @@ function drawSteering(directionDeg) {
);
ctx.stroke();
- /* -------- Centre -------- */
+ /* -------- Center -------- */
ctx.fillStyle = "#00ff88";
ctx.beginPath();
ctx.arc(centerX, centerY, 5, 0, Math.PI * 2);
@@ -176,17 +176,17 @@ function updateTelemetry(t) {
document.getElementById("lipo").textContent = t.battery.lipo.toFixed(2);
document.getElementById("nimh").textContent = t.battery.nimh.toFixed(2);
document.getElementById("vitesse").textContent =
- t.car.vitesse_reelle.toFixed(2);
+ t.car.current_speed.toFixed(2);
document.getElementById("target_speed").textContent =
- t.car.vitesse_demandee.toFixed(2);
+ t.car.target_speed.toFixed(2);
document.getElementById("direction").textContent =
- t.car.direction_demandee.toFixed(2);
+ t.car.direction.toFixed(2);
document.getElementById("active_program").textContent =
- t.car.programme_controle ?? "Aucun";
- speedHistory.real.push(t.car.vitesse_reelle);
- speedHistory.demand.push(t.car.vitesse_demandee);
+ t.car.car_control ?? "None";
+ speedHistory.real.push(t.car.current_speed);
+ speedHistory.demand.push(t.car.target_speed);
if (speedHistory.real.length > speedHistory.maxPoints) {
speedHistory.real.shift();
@@ -194,7 +194,7 @@ function updateTelemetry(t) {
}
drawSpeedChart();
- drawSteering(t.car.direction_demandee);
+ drawSteering(t.car.direction);
}
@@ -249,7 +249,7 @@ function initLidar(retryDelay = 1000) {
ctx.save();
ctx.translate(canvas.width / 2, canvas.height / 2);
- /* ---------- Grille ---------- */
+ /* ---------- Grid ---------- */
const circleStepMM = 100; // 10 cm
const circleCount = 15;
@@ -263,13 +263,13 @@ function initLidar(retryDelay = 1000) {
ctx.stroke();
if (i % (circleCount / 3) === 0){
- // label distance
+ // distance label
ctx.fillStyle = "#777";
ctx.font = "10px monospace";
ctx.fillText(`${i * 10} cm`, r + 2, 0);
}
}
- // FOV du lidar (270°)
+ // Lidar FOV (270°)
ctx.strokeStyle = "#444";
ctx.lineWidth = 1;
@@ -278,7 +278,7 @@ function initLidar(retryDelay = 1000) {
const fovRadius = 1500 * scale;
ctx.beginPath();
- // ligne gauche
+ // left line
ctx.moveTo(0, 0);
ctx.lineTo(
Math.sin(fovMin) * fovRadius,
@@ -286,7 +286,7 @@ function initLidar(retryDelay = 1000) {
);
ctx.stroke();
- // ligne droite
+ // right line
ctx.beginPath();
ctx.moveTo(0, 0);
ctx.lineTo(
@@ -294,7 +294,7 @@ function initLidar(retryDelay = 1000) {
-Math.cos(fovMax) * fovRadius
);
ctx.stroke();
- /* ---------- Axes ---------- */
+ /* ---------- Axis lines ---------- */
ctx.strokeStyle = "#555";
ctx.beginPath();
ctx.moveTo(-canvas.width / 2, 0);
@@ -306,7 +306,7 @@ function initLidar(retryDelay = 1000) {
ctx.lineTo(0, canvas.height / 2);
ctx.stroke();
- /* ---------- Points LIDAR ---------- */
+ /* ---------- LIDAR Points ---------- */
ctx.fillStyle = "#00ff88";
for (let i = 0; i < data.x.length; i++) {
ctx.fillRect(
@@ -319,7 +319,7 @@ function initLidar(retryDelay = 1000) {
ctx.restore();
};
}catch(e){
- console.error("Erreur dans LIDAR WS onmessage:", e);
+ console.error("Error in LIDAR WS onmessage:", e);
}
ws.onclose = () => {
@@ -339,7 +339,7 @@ function initTelemetryWS() {
updateTelemetry(data);
};
}catch(e){
- console.error("Erreur dans Telemetry WS onmessage:", e);
+ console.error("Error in Telemetry WS onmessage:", e);
}
ws.onclose = () => {
console.warn("Telemetry WS disconnected, retrying...");
@@ -372,14 +372,14 @@ async function init() {
camLink.href = camUrl;
} else {
- console.warn("Element #camera introuvable au moment d'init");
+ console.warn("Element #camera not found at initialization");
}
initLidar();
initTelemetryWS();
loadProgramsOnce();
} catch (e) {
- console.error("Erreur dans init:", e);
+ console.error("Error in init:", e);
}
}
diff --git a/src/HL/site_controle/index.html b/src/high_level/src/site_controle/index.html
similarity index 100%
rename from src/HL/site_controle/index.html
rename to src/high_level/src/site_controle/index.html
diff --git a/src/HL/site_controle/lidar.html b/src/high_level/src/site_controle/lidar.html
similarity index 92%
rename from src/HL/site_controle/lidar.html
rename to src/high_level/src/site_controle/lidar.html
index 2cfc0256..0f7eb0cc 100644
--- a/src/HL/site_controle/lidar.html
+++ b/src/high_level/src/site_controle/lidar.html
@@ -9,7 +9,7 @@
-
+