creazione esperimento molle

This commit is contained in:
2026-04-30 13:53:00 +02:00
commit 8621fb1736
16 changed files with 34399 additions and 0 deletions

283
.gitignore vendored Normal file
View File

@@ -0,0 +1,283 @@
# L'inizio di questo file è specifico per ognuno di noi
Funzioni\ matlab.txt
prova*
test*
__pycache*
## Core latex/pdflatex auxiliary files:
*.aux
*.lof
*.log
*.lot
*.fls
*.out
*.toc
*.fmt
*.fot
*.cb
*.cb2
.*.lb
## Intermediate documents:
*.dvi
*.xdv
*-converted-to.*
## Bibliography auxiliary files (bibtex/biblatex/biber):
*.bbl
*.bcf
*.blg
*-blx.aux
*-blx.bib
*.run.xml
## Build tool auxiliary files:
*.fdb_latexmk
*.synctex
*.synctex(busy)
*.synctex.gz
*.synctex.gz(busy)
*.pdfsync
## Build tool directories for auxiliary files
# latexrun
latex.out/
## Auxiliary and intermediate files from other packages:
# algorithms
*.alg
*.loa
# achemso
acs-*.bib
# amsthm
*.thm
# beamer
*.nav
*.pre
*.snm
*.vrb
# changes
*.soc
# comment
*.cut
# cprotect
*.cpt
# elsarticle (documentclass of Elsevier journals)
*.spl
# endnotes
*.ent
# fixme
*.lox
# feynmf/feynmp
*.mf
*.mp
*.t[1-9]
*.t[1-9][0-9]
*.tfm
#(r)(e)ledmac/(r)(e)ledpar
*.end
*.?end
*.[1-9]
*.[1-9][0-9]
*.[1-9][0-9][0-9]
*.[1-9]R
*.[1-9][0-9]R
*.[1-9][0-9][0-9]R
*.eledsec[1-9]
*.eledsec[1-9]R
*.eledsec[1-9][0-9]
*.eledsec[1-9][0-9]R
*.eledsec[1-9][0-9][0-9]
*.eledsec[1-9][0-9][0-9]R
# glossaries
*.acn
*.acr
*.glg
*.glo
*.gls
*.glsdefs
*.lzo
*.lzs
# uncomment this for glossaries-extra (will ignore makeindex's style files!)
# *.ist
# gnuplottex
*-gnuplottex-*
# gregoriotex
*.gaux
*.glog
*.gtex
# htlatex
*.4ct
*.4tc
*.idv
*.lg
*.trc
*.xref
# hyperref
*.brf
# knitr
*-concordance.tex
# TODO Uncomment the next line if you use knitr and want to ignore its generated tikz files
# *.tikz
*-tikzDictionary
# listings
*.lol
# luatexja-ruby
*.ltjruby
# makeidx
*.idx
*.ilg
*.ind
# minitoc
*.maf
*.mlf
*.mlt
*.mtc[0-9]*
*.slf[0-9]*
*.slt[0-9]*
*.stc[0-9]*
# minted
_minted*
*.pyg
# morewrites
*.mw
# newpax
*.newpax
# nomencl
*.nlg
*.nlo
*.nls
# pax
*.pax
# pdfpcnotes
*.pdfpc
# sagetex
*.sagetex.sage
*.sagetex.py
*.sagetex.scmd
# scrwfile
*.wrt
# sympy
*.sout
*.sympy
sympy-plots-for-*.tex/
# pdfcomment
*.upa
*.upb
# pythontex
*.pytxcode
pythontex-files-*/
# tcolorbox
*.listing
# thmtools
*.loe
# TikZ & PGF
*.dpth
*.md5
*.auxlock
# todonotes
*.tdo
# vhistory
*.hst
*.ver
# easy-todo
*.lod
# xcolor
*.xcp
# xmpincl
*.xmpi
# xindy
*.xdy
# xypic precompiled matrices and outlines
*.xyc
*.xyd
# endfloat
*.ttt
*.fff
# Latexian
TSWLatexianTemp*
## Editors:
# WinEdt
*.bak
*.sav
# Texpad
.texpadtmp
# LyX
*.lyx~
# Kile
*.backup
# gummi
.*.swp
# KBibTeX
*~[0-9]*
# TeXnicCenter
*.tps
# auto folder when using emacs and auctex
./auto/*
*.el
# expex forward references with \gathertags
*-tags.tex
# standalone packages
*.sta
# Makeindex log files
*.lpz
# xwatermark package
*.xwm

603
UI.ipynb Normal file

File diff suppressed because one or more lines are too long

265
calc_k.ipynb Normal file

File diff suppressed because one or more lines are too long

3695
dati/molla1leggeraRete.csv Normal file

File diff suppressed because it is too large Load Diff

4725
dati/molla1pesanteCD.csv Normal file

File diff suppressed because it is too large Load Diff

4597
dati/molla1pesanteRete.csv Normal file

File diff suppressed because it is too large Load Diff

4526
dati/molla2leggeraCD50Hz.csv Normal file

File diff suppressed because it is too large Load Diff

4704
dati/molla2leggeraRete.csv Normal file

File diff suppressed because it is too large Load Diff

4886
dati/molla2pesanteCD.csv Normal file

File diff suppressed because it is too large Load Diff

4665
dati/molla2pesanteRete.csv Normal file

File diff suppressed because it is too large Load Diff

35
dati_picchi.ipynb Normal file
View File

@@ -0,0 +1,35 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "e0a45f01",
"metadata": {},
"source": [
"# Import dati sonar"
]
},
{
"cell_type": "markdown",
"id": "b5198c57",
"metadata": {},
"source": [
"# Visualizzazione"
]
},
{
"cell_type": "markdown",
"id": "b3b88e57",
"metadata": {},
"source": [
"# Selezione picchi e creazione df"
]
}
],
"metadata": {
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

7
k1.csv Normal file
View File

@@ -0,0 +1,7 @@
m,h,uh
67.97,277.473,0.014
87.94,269.256,0.012
107.76,260.878,0.012
127.69,252.804,0.012
147.36,244.570,0.014
167.25,236.235,0.015
1 m h uh
2 67.97 277.473 0.014
3 87.94 269.256 0.012
4 107.76 260.878 0.012
5 127.69 252.804 0.012
6 147.36 244.570 0.014
7 167.25 236.235 0.015

6
k2.csv Normal file
View File

@@ -0,0 +1,6 @@
m,h,uh
28.12,446.392,0.012
48.06,385.616,0.017
67.97,324.984,0.015
87.94,263.3196,0.0010
107.76,202.291,0.026
1 m h uh
2 28.12 446.392 0.012
3 48.06 385.616 0.017
4 67.97 324.984 0.015
5 87.94 263.3196 0.0010
6 107.76 202.291 0.026

283
osc_attr.ipynb Normal file
View File

@@ -0,0 +1,283 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "9c25e271",
"metadata": {},
"outputs": [],
"source": [
"import importlib\n",
"from statlib import *\n",
"\n",
"g = 9.806\n",
"ug = 0.004"
]
},
{
"cell_type": "markdown",
"id": "233e717f",
"metadata": {},
"source": [
"# Calcolo w0"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e1efad4a",
"metadata": {},
"outputs": [],
"source": [
"# k e m varie configurazioni\n",
"\n",
"# k [N/m]\n",
"k1 = 23.631\n",
"uk1 = 0.017\n",
"\n",
"k2 = 3.2053\n",
"uk2 = 0.0013\n",
"\n",
"# m [g]\n",
"m_CD = [63.13, 83.04, 103.02, 122.83, 142.77, 162.44, 182.33]\n",
"m_rete = [58.96, 78.86, 98.85, 118.66, 138.61, 158.26, 178.16]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "81d935ac",
"metadata": {},
"outputs": [],
"source": [
"# Creazione Data\n",
"\n",
"df = pd.DataFrame()\n",
"df[\"m_CD\"] = m_CD\n",
"df[\"um_CD\"] = 0.01/np.sqrt(12)\n",
"df[\"m_rete\"] = m_rete\n",
"df[\"um_rete\"] = 0.01/np.sqrt(12)\n",
"df[\"k1\"] = k1\n",
"df[\"uk1\"] = uk1\n",
"df[\"k2\"] = k2\n",
"df[\"uk2\"] = uk2\n",
"\n",
"param_sistema = Data(df)\n",
"param_sistema.analisi_stat = df"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b2c2af7",
"metadata": {},
"outputs": [],
"source": [
"# Calcolo w0 varie configurazioni\n",
"\n",
"m_CD, m_rete, k1, k2 = sp.symbols('m_CD, m_rete, k1, k2', positive=True)\n",
"\n",
"w0_1_CD = sp.sqrt(k1/(m_CD/1000))\n",
"w0_1_rete = sp.sqrt(k1/(m_rete/1000))\n",
"w0_2_CD = sp.sqrt(k2/(m_CD/1000))\n",
"w0_2_rete = sp.sqrt(k2/(m_rete/1000))\n",
"\n",
"param_sistema = param_sistema.calc_var(w0_1_CD, \"w0_1_CD\")\n",
"param_sistema = param_sistema.calc_var(w0_1_rete, \"w0_1_rete\")\n",
"param_sistema = param_sistema.calc_var(w0_2_CD, \"w0_2_CD\")\n",
"param_sistema = param_sistema.calc_var(w0_2_rete, \"w0_2_rete\")"
]
},
{
"cell_type": "markdown",
"id": "ecf23a81",
"metadata": {},
"source": [
"# Creazione classi dati\n",
"\n",
"Selezione picchi delle oscillazioni"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c040f35e",
"metadata": {},
"outputs": [],
"source": [
"config = [molla1_leggera_rete_inizio, molla1_leggera_rete_fine, molla1_leggera_rete_mid,\n",
" molla1_pesante_rete_inizio, molla1_pesante_rete_fine, molla1_pesante_rete_mid,\n",
" molla1_leggera_CD_inizio, molla1_leggera_CD_fine, molla1_leggera_CD_mid,\n",
" molla1_pesante_CD_inizio, molla1_pesante_CD_fine, molla1_pesante_CD_mid,\n",
" molla2_leggera_rete_inizio, molla2_leggera_rete_fine, molla2_leggera_rete_mid,\n",
" molla2_pesante_rete_inizio, molla2_pesante_rete_fine, molla2_pesante_rete_mid,\n",
" molla2_leggera_CD_inizio, molla2_leggera_CD_fine, molla2_leggera_CD_mid,\n",
" molla2_pesante_CD_inizio, molla2_pesante_CD_fine, molla2_pesante_CD_mid]"
]
},
{
"cell_type": "markdown",
"id": "3b522bfe",
"metadata": {},
"source": [
"# Regressione lineare"
]
},
{
"cell_type": "markdown",
"id": "30f903b5",
"metadata": {},
"source": [
"## Attrito proporzionale a v"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "64e483e1",
"metadata": {},
"outputs": [],
"source": [
"# Calcolo simbolico x e y\n",
"\n",
"# x = n-1\n",
"n = sp.Symbol('n', positive=True)\n",
"x = n-1\n",
"\n",
"# y = ln(h1/hn)\n",
"h1, hn = sp.symbols('h1 hn', positive=True)\n",
"y = sp.ln(h1/hn)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b0df03fc",
"metadata": {},
"outputs": [],
"source": [
"# Calcolo numerico x e y\n",
"\n",
"for data in config:\n",
" data = data.calc_var(x, \"x\")\n",
" data = data.calc_var(y, \"y\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "497ed583",
"metadata": {},
"outputs": [],
"source": [
"# Regressione\n",
"\n",
"for data in config:\n",
" data = data.reg_lin(stampa_param=True, plot_regressione=True, calc_residui=True,\n",
" x_label=\"\", y_label=\"\", r_label=\"\",\n",
" titolo_reg=\"\", titolo_residui=\"\",\n",
" cd_A=4, cd_B=4, scala_barre=1)"
]
},
{
"cell_type": "markdown",
"id": "453d5b53",
"metadata": {},
"source": [
"## Attrito proporzionale a v^2"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cdb8d336",
"metadata": {},
"outputs": [],
"source": [
"# Calcolo simbolico x e y\n",
"\n",
"# x = n-1\n",
"n = sp.Symbol('n', positive=True)\n",
"x = n-1\n",
"\n",
"# y = 1/hn - 1/h1\n",
"h1, hn = sp.symbols('h1 hn', positive=True)\n",
"y = 1/hn - 1/h1"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "84ad3d64",
"metadata": {},
"outputs": [],
"source": [
"# Calcolo numerico x e y\n",
"\n",
"for data in config:\n",
" data = data.calc_var(x, \"x\")\n",
" data = data.calc_var(y, \"y\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8fee50b3",
"metadata": {},
"outputs": [],
"source": [
"# Regressione\n",
"\n",
"for data in config:\n",
" data = data.reg_lin(stampa_param=True, plot_regressione=True, calc_residui=True,\n",
" x_label=\"\", y_label=\"\", r_label=\"\",\n",
" titolo_reg=\"\", titolo_residui=\"\",\n",
" cd_A=4, cd_B=4, scala_barre=1)"
]
},
{
"cell_type": "markdown",
"id": "dc3ce36f",
"metadata": {},
"source": [
"# Stima errore campionamento"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "513736c3",
"metadata": {},
"outputs": [],
"source": [
"A_max =\n",
"w0_max =\n",
"fc = 50 # [Hz]\n",
"\n",
"err_picco_max = A_max * (1 - np.cos(w0_max / (2*fc)))\n",
"print(f\"errore massimo individuazione picco > {err_picco_max}\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "base",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

706
statistica.ipynb Normal file
View File

@@ -0,0 +1,706 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "e1f82c70",
"metadata": {},
"source": [
"# Import librerie"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "b2b806a7",
"metadata": {},
"outputs": [],
"source": [
"# Heavy lifting\n",
"import numpy as np\n",
"import pandas as pd\n",
"from scipy import stats\n",
"\n",
"# Mostrare i dati\n",
"import ipysheet\n",
"from IPython.display import display\n",
"import matplotlib.pyplot as plt\n",
"import seaborn as sns\n",
"\n",
"# Calcolo simbolico\n",
"import sympy as sp"
]
},
{
"cell_type": "markdown",
"id": "96b0e93d",
"metadata": {},
"source": [
"# Compatibilità"
]
},
{
"cell_type": "markdown",
"id": "e50c6d18",
"metadata": {},
"source": [
"**compat( x1, x2, u1, u2 )**\n",
"\n",
"*input:*\n",
" - x1, x2: quantità\n",
" - u1, u2: incertezze associate\n",
"\n",
"*output:*\n",
" - k: fattore di compatibilità (scarto normalizzato)\n",
" - diff: differenza x1 - x2"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d07a554d",
"metadata": {},
"outputs": [],
"source": [
"def compat(x1, x2, u1, u2):\n",
" \n",
" diff = x1 - x2\n",
" k = np.abs(diff) / np.sqrt(u1**2 + u2**2)\n",
" return k, diff"
]
},
{
"cell_type": "markdown",
"id": "bc3e1539",
"metadata": {},
"source": [
"# Classe Data\n",
"\n",
"*attributi:*\n",
"\n",
"- campione: dati grezzi\n",
"- analisi_stat:\n",
" - valori medi\n",
" - incertezze strumentali, statistiche e complessive\n",
" - lunghezza campione\n",
" - outlier\n",
" - residui con incertezza\n",
"- param_reg:\n",
" - A, B\n",
" - uA, uB, covAB\n",
" - chi², P"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "2649eb4e",
"metadata": {},
"outputs": [],
"source": [
"class Data:\n",
"\n",
" def __init__(self,\n",
" campione,\n",
" analisi_stat=None, param_reg=None):\n",
" \n",
" self.campione = campione\n",
" self.analisi_stat = analisi_stat\n",
" self.param_reg = param_reg"
]
},
{
"cell_type": "markdown",
"id": "a21e942b",
"metadata": {},
"source": [
"## Analisi con criterio di Chauvenet"
]
},
{
"cell_type": "markdown",
"id": "050285f6",
"metadata": {},
"source": [
"**data.stat_chauv( prefissi, err_strumentale )**\n",
"\n",
"*input:*\n",
" - prefix: nome/lista di nomi delle variabili da analizzare\n",
" - err_strumentale: incertezza/lista di incertezze da risoluzione strumentale\n",
"\n",
"*output data.analisi_stat:* \n",
" - \\<prefix>: media del campione\n",
" - u\\<prefix>_strum: incertezza da risoluzione strumentale\n",
" - u\\<prefix>_stat: incertezza statistica (s/sqrt(N))\n",
" - u\\<prefix>: incertezza complessiva\n",
" - n\\<prefix>: lunghezza del campione\n",
" - out\\<prefix>: lista outlier"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "91931bb9",
"metadata": {},
"outputs": [],
"source": [
"# Probabilità\n",
"def p_t_student(valori, err_strumentale):\n",
"\n",
" n = len(valori)\n",
" GdL = n - 1\n",
"\n",
" media = valori.mean()\n",
" s = valori.std(ddof=1)\n",
"\n",
" s = np.sqrt(s**2 + err_strumentale**2)\n",
" return 2 * (1 - stats.t.cdf(np.abs(valori - media) / s, df=GdL))\n",
"\n",
"\n",
"# Indice del peggiore outlier o None\n",
"def trova_outlier(valori, err_strumentale):\n",
"\n",
" n = len(valori)\n",
" soglia = 1 / (2 * n)\n",
" p = p_t_student(valori, err_strumentale)\n",
" idx_min = np.argmin(p)\n",
"\n",
" if p[idx_min] < soglia:\n",
" return idx_min\n",
" \n",
" return None\n",
"\n",
"\n",
"# Rimozione outlier\n",
"def rimuovi_outlier(valori, err_strumentale):\n",
" \n",
" rimossi = []\n",
" campione = valori.copy()\n",
"\n",
" while len(campione) > 2:\n",
" idx = trova_outlier(campione, err_strumentale)\n",
"\n",
" if idx is None: # nessun outlier: stop\n",
" break\n",
"\n",
" rimossi.append(campione[idx])\n",
" campione = np.delete(campione, idx)\n",
"\n",
" return campione, rimossi"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "70c37b24",
"metadata": {},
"outputs": [],
"source": [
"# Analisi statistica con criterio di Chauvenet\n",
"def stat_chauv(self, prefissi, err_strumentale):\n",
"\n",
" self.analisi_stat = pd.DataFrame()\n",
" campione = self.campione\n",
" \n",
" \n",
" if isinstance(prefissi, str):\n",
" prefissi = [prefissi]\n",
" if isinstance(err_strumentale, (int, float)):\n",
" err_strumentale = [err_strumentale] * len(prefissi)\n",
"\n",
" for prefix, err_strum in zip(prefissi, err_strumentale):\n",
" cols = [col for col in campione.columns if col.startswith(prefix)]\n",
"\n",
" for i, row in campione.iterrows():\n",
" valori = row[cols].dropna().values.astype(float)\n",
" campione, rimossi = rimuovi_outlier(valori, err_strum)\n",
"\n",
" n = len(campione)\n",
" s = campione.std(ddof=1)\n",
" s = s / np.sqrt(n)\n",
" u = np.sqrt(s**2 + err_strum**2)\n",
"\n",
" self.analisi_stat.at[i, prefix] = campione.mean()\n",
" self.analisi_stat.at[i, f\"u{prefix}_strum\"] = err_strum\n",
" self.analisi_stat.at[i, f\"u{prefix}_stat\"] = s\n",
" self.analisi_stat.at[i, f\"u{prefix}\"] = u\n",
" self.analisi_stat.at[i, f\"n{prefix}\"] = n\n",
" self.analisi_stat.at[i, f\"out{prefix}\"] = rimossi\n",
"\n",
" return self\n",
"\n",
"\n",
"# Aggiunta a classe Data\n",
"Data.stat_chauv = stat_chauv"
]
},
{
"cell_type": "markdown",
"id": "79742eb5",
"metadata": {},
"source": [
"## Calcolo variabile\n",
"Calcola una nuova variabile a partire dai valori medi e propagando le incertezze in data.analisi_stat\n",
"\n",
"**calc_var( funz, nome_risultato )**\n",
"\n",
"*input:*\n",
" - funz: funzione simbolica della variabile da calcolare\n",
" - nome_risultato: stringa con nome della variabile da calcolare\n",
"\n",
"*output data.analisi_stat:* \n",
" - \\<nome_risultato>: valore della variabile in ogni punto sperimentale\n",
" - u\\<nome_risultato>: incertezza\n",
" - %u\\<nome_risultato>_\\<var>: contributo percentuale di ogni incertezza propagata"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dc54a677",
"metadata": {},
"outputs": [],
"source": [
"def calc_var(self, funz, nome_risultato):\n",
"\n",
" # Estrazione e ordinamento variabili simboliche\n",
" vars_list = sorted(list(funz.free_symbols), key=lambda s: s.name)\n",
" \n",
" # Creazione incertezze e contributi simbolici\n",
" u = {var: sp.Symbol(f'u{var.name}', positive=True) for var in vars_list}\n",
" contr = [sp.diff(funz, var)*u[var] for var in vars_list]\n",
" \n",
" # Calcolo simbolico incertezza propagata\n",
" u_prop = 0\n",
" for contributo in contr:\n",
" u_prop += contributo**2 \n",
" u_prop = sp.sqrt(u_prop)\n",
"\n",
" # Creazione lista incertezze simboliche\n",
" u_list = [u[var] for var in vars_list]\n",
" \n",
" # Funzioni numeriche\n",
" funz_fn = sp.lambdify(vars_list, funz, 'numpy')\n",
" u_prop_fn = sp.lambdify((vars_list, u_list), u_prop, 'numpy')\n",
" contr_fn = sp.lambdify((vars_list, u_list), contr, 'numpy')\n",
" \n",
" # Estrazione dati da analisi_stat\n",
" vars_num = [self.analisi_stat[var.name].values.astype(float) for var in vars_list]\n",
" u_num = [self.analisi_stat[f'u{var.name}'].values.astype(float) for var in vars_list]\n",
"\n",
" # Calcolo numerico risultati\n",
" risultato_val = funz_fn(*vars_num)\n",
" risultato_u = u_prop_fn(vars_num, u_num)\n",
" risultato_contr = (np.array(contr_fn(vars_num, u_num)) / risultato_u)**2 * 100\n",
"\n",
" # Aggiunta risultati ad analisi_stat\n",
" self.analisi_stat[nome_risultato] = risultato_val\n",
" self.analisi_stat[f\"u{nome_risultato}\"] = risultato_u\n",
" for i, var in enumerate(vars_list):\n",
" self.analisi_stat[f\"%u{nome_risultato}_{var.name}\"] = risultato_contr[i]\n",
" \n",
" return self\n",
"\n",
"# Aggiunta a classe Data\n",
"Data.calc_var = calc_var"
]
},
{
"cell_type": "markdown",
"id": "86fcfbe5",
"metadata": {},
"source": [
"## Regressione lineare\n",
"y = Ax + B"
]
},
{
"cell_type": "markdown",
"id": "8e6a2c2e",
"metadata": {},
"source": [
"**data.reg_lin( stampa_param=True, plot_regressione=True, calc_residui=True,** \n",
"&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&nbsp;**x_label=\"\", y_label=\"\", r_label=\"\",** \n",
"&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&nbsp;**titolo_reg=\"\", titolo_residui=\"\",** \n",
"&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&nbsp;**cd_A=4, cd_B=4, scala_barre=1 )** \n",
"\n",
"*input data.analisi_stat:*\n",
" - x, y: campioni x e y\n",
" - ux, uy: relative incertezze\n",
"\n",
"*input:*\n",
" - stampa_param: opzione stampa parametri regressione\n",
" - plot_regressione: opzione plot regressione lineare\n",
" - residui: opzione calcolo e plot residui\n",
" - x_label, y_label, r_label: etichette assi plot regressione e residui\n",
" - titolo_reg, titolo_residui: titolo plot regressione e residui\n",
" - cd_A, cd_B: cifre decimali visualizzazione parametri\n",
" - scala barre: scala ingrandimento barre di errore nella regressione\n",
"\n",
"*output param_reg:*\n",
" - A, B: parametri regressione\n",
" - uA, uB, covAB: relative incertezze e covarianze\n",
" - chi², P: chi quadro e relativa probabilità\n",
"\n",
"*output analisi_stat:*\n",
" - r, ur: residui e relativa incertezza"
]
},
{
"cell_type": "markdown",
"id": "6ccd1e25",
"metadata": {},
"source": [
"### Residui"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "7c815197",
"metadata": {},
"outputs": [],
"source": [
"# Plot residui\n",
" \n",
"def plot_residui(x, r,\n",
" ux, ur,\n",
" x_label, r_label, titolo):\n",
" \n",
" fig, ax = plt.subplots(figsize=(8, 5))\n",
"\n",
" # Residui con barre d'errore\n",
" ax.errorbar(\n",
" x, r,\n",
" xerr=ux, yerr=ur,\n",
" fmt='o', color=sns.color_palette()[0],\n",
" ecolor='gray', elinewidth=1, capsize=3,\n",
" markersize=5, label=\"Residui\"\n",
" )\n",
"\n",
" # Linea dello zero\n",
" ax.axhline(0, color='red', linestyle='--', linewidth=1)\n",
"\n",
" # Estetica\n",
" sns.despine(ax=ax)\n",
" ax.set_xlabel(x_label)\n",
" ax.set_ylabel(r_label)\n",
" ax.set_title(titolo)\n",
" ax.legend()\n",
" ax.grid(True, linestyle=':', linewidth=0.5, alpha=0.7)\n",
"\n",
" plt.tight_layout()\n",
" plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "53ba0123",
"metadata": {},
"outputs": [],
"source": [
"# Calcolo residui\n",
"\n",
"def residui(analisi_stat,\n",
" A_num, B_num,\n",
" uA_num, uB_num, covAB_num):\n",
"\n",
" # Variabili simboliche\n",
" A, B, x, y = sp.symbols('A B x y', real=True)\n",
" uA, uB, ux, uy = sp.symbols('uA uB ux uy', positive=True)\n",
" covAB = sp.symbols('covAB', real=True)\n",
"\n",
" # Residuo: r = y - (Ax + B)\n",
" r = y - (A*x + B)\n",
"\n",
" # Propagazione errore (con covarianza A,B)\n",
" dr_dA = sp.diff(r, A)\n",
" dr_dB = sp.diff(r, B)\n",
" dr_dx = sp.diff(r, x)\n",
" dr_dy = sp.diff(r, y)\n",
"\n",
" u_r = sp.sqrt(\n",
" (dr_dA * uA)**2 +\n",
" (dr_dB * uB)**2 +\n",
" (dr_dx * ux)**2 +\n",
" (dr_dy * uy)**2 +\n",
" 2 * dr_dA * dr_dB * covAB\n",
" )\n",
"\n",
" # Funzioni numeriche\n",
" r_fn = sp.lambdify((x , y , A , B ), r, 'numpy')\n",
" u_r_fn = sp.lambdify(\n",
" (x , y , ux , uy , A , B , uA , uB , covAB ),\n",
" u_r , 'numpy'\n",
" )\n",
"\n",
" # Calcolo numerico\n",
" analisi_stat[\"r\"] = r_fn(\n",
" analisi_stat[\"x\"],\n",
" analisi_stat[\"y\"],\n",
" A_num,\n",
" B_num\n",
" )\n",
"\n",
" analisi_stat[\"ur\"] = u_r_fn(\n",
" analisi_stat[\"x\"], analisi_stat[\"y\"],\n",
" analisi_stat[\"ux\"], analisi_stat[\"uy\"],\n",
" A_num, B_num,\n",
" uA_num, uB_num, covAB_num\n",
" )\n",
"\n",
" return analisi_stat"
]
},
{
"cell_type": "markdown",
"id": "20b61f26",
"metadata": {},
"source": [
"### Regressione"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "299faa92",
"metadata": {},
"outputs": [],
"source": [
"# Plot regressione\n",
"\n",
"def plot_reg(x, y, ux, uy,\n",
" A, B, uA, uB, P,\n",
" x_label, y_label, titolo,\n",
" cd_A=4, cd_B=4, scala_barre=1):\n",
"\n",
" fig, ax = plt.subplots(figsize=(8, 5))\n",
"\n",
" x_fit = np.linspace(x.min(), x.max(), 300)\n",
" y_fit = A * x_fit + B\n",
"\n",
" ax.errorbar(\n",
" x, y,\n",
" xerr = scala_barre * ux,\n",
" yerr = scala_barre * uy,\n",
" fmt='o', color=sns.color_palette()[0],\n",
" ecolor='gray', elinewidth=1, capsize=3,\n",
" markersize=5, label=f\"Dati (barre errore x{scala_barre})\"\n",
" )\n",
" ax.plot(\n",
" x_fit, y_fit,\n",
" color='red', linewidth=1.5,\n",
" label = f\"$A={A:.{cd_A}f}\\\\pm{uA:.{cd_A}f}$\\n\"\n",
" f\"$B={B:.{cd_B}f}\\\\pm{uB:.{cd_B}f}$\\n\"\n",
" f\"$P(chi², ∞)={P:.4f}$\\n\"\n",
" )\n",
"\n",
" sns.despine(ax=ax)\n",
" ax.set_xlabel(x_label)\n",
" ax.set_ylabel(y_label)\n",
" ax.set_title(titolo)\n",
" ax.legend(fontsize=9)\n",
" ax.grid(True, linestyle=':', linewidth=0.5, alpha=0.7)\n",
"\n",
" plt.tight_layout()\n",
" plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9d3f0378",
"metadata": {},
"outputs": [],
"source": [
"# Funzioni regressione\n",
"\n",
"def uy_equiv(x, y, ux, uy):\n",
" \n",
" # Stima iniziale di A con sola uy\n",
" sy2 = uy**2\n",
" Sw = np.sum(1 / sy2)\n",
" Sx = np.sum(x / sy2)\n",
" Sxx = np.sum(x**2 / sy2)\n",
" Sy = np.sum(y / sy2)\n",
" Sxy = np.sum(x * y / sy2)\n",
" delta = Sxx * Sw - Sx**2\n",
" A_est = (Sxy * Sw - Sx * Sy) / delta\n",
"\n",
" # Propagazione\n",
" u_eq = np.sqrt(uy**2 + A_est**2 * ux**2)\n",
" return u_eq\n",
"\n",
"\n",
"def reg_lin(self,\n",
" stampa_param=True, plot_regressione=True, calc_residui=True,\n",
" x_label=\"\", y_label=\"\", r_label=\"\",\n",
" titolo_reg=\"\", titolo_residui=\"\",\n",
" cd_A=4, cd_B=4, scala_barre=1):\n",
" \n",
" x = np.asarray(self.analisi_stat[\"x\"], dtype=float)\n",
" y = np.asarray(self.analisi_stat[\"y\"], dtype=float)\n",
" ux = np.asarray(self.analisi_stat[\"ux\"], dtype=float)\n",
" uy = np.asarray(self.analisi_stat[\"uy\"], dtype=float)\n",
"\n",
" # Propagazione errore x → y\n",
" uy_eq = uy_equiv(x, y, ux, uy)\n",
"\n",
" # Somme pesate\n",
" w = 1.0 / uy_eq**2\n",
" Sw = np.sum(w)\n",
" Sx = np.sum(w * x)\n",
" Sxx = np.sum(w * x**2)\n",
" Sy = np.sum(w * y)\n",
" Sxy = np.sum(w * x * y)\n",
" delta = Sxx * Sw - Sx**2\n",
"\n",
" # Parametri\n",
" A = (Sxy * Sw - Sx * Sy) / delta\n",
" B = (Sxx * Sy - Sxy * Sx) / delta\n",
" uA = np.sqrt(Sw / delta)\n",
" uB = np.sqrt(Sxx / delta)\n",
" covAB = -Sx / delta\n",
"\n",
" # Chi quadro\n",
" x2 = np.sum((y - A * x - B)**2 / uy_eq**2)\n",
" dof = len(x) - 2\n",
" P = stats.chi2.sf(x2, dof)\n",
"\n",
" # Raccolta parametri\n",
" self.param_reg = pd.DataFrame()\n",
" self.param_reg[\"A\"] = A\n",
" self.param_reg[\"B\"] = B\n",
" self.param_reg[\"uA\"] = uA\n",
" self.param_reg[\"uB\"] = uB\n",
" self.param_reg[\"covAB\"] = covAB\n",
" self.param_reg[\"x2\"] = x2\n",
" self.param_reg[\"P\"] = P\n",
"\n",
" # Stampa\n",
" if stampa_param == True:\n",
" print(\"Ax + B : \")\n",
" print(f\"A = {A:.{cd_A}f} ± {uA:.{cd_A}f}\")\n",
" print(f\"B = {B:.{cd_B}f} ± {uB:.{cd_B}f}\")\n",
" print(f\"covAB = {covAB:.6f}\")\n",
" print(f\"chi² = {x2:.2f}\")\n",
" print(f\"P(chi², ∞) = {P:.2f}\")\n",
" \n",
" # Plot\n",
" if plot_regressione == True:\n",
" plot_reg(x, y, ux, uy,\n",
" A, B, uA, uB, P,\n",
" x_label, y_label, titolo_reg,\n",
" cd_A, cd_B, scala_barre)\n",
" \n",
" # Residui\n",
" if calc_residui == True:\n",
" self.analisi_stat = residui(self.analisi_stat, A, B, uA, uB, covAB)\n",
" plot_residui(x, self.analisi_stat[\"r\"],\n",
" ux, self.analisi_stat[\"ur\"],\n",
" x_label, r_label, titolo_residui)\n",
"\n",
" return self\n",
"\n",
"\n",
"# Aggiunta a classe Data\n",
"Data.reg_lin = reg_lin"
]
},
{
"cell_type": "markdown",
"id": "b311d9e2",
"metadata": {},
"source": [
"# Plot gaussiane"
]
},
{
"cell_type": "markdown",
"id": "170849f1",
"metadata": {},
"source": [
"**plot_gauss( gaussiane )**\n",
"\n",
"*input gaussiane:* \n",
"[ (mi, sigma, colore, label),... ]\n",
" - mi: valore medio (misura)\n",
" - sigma: deviazione standard (incertezza)\n",
" - colore: indice colore nella palette\n",
" - label: etichetta per la didascalia"
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "f515a2bb",
"metadata": {},
"outputs": [],
"source": [
"def plot_gauss(gaussiane):\n",
" \n",
" # Creazione figura\n",
" plt.figure(figsize=(12, 7))\n",
"\n",
" # Creazione asse x\n",
" xMin = float('inf')\n",
" xMax = float('-inf')\n",
"\n",
" for mu, sigma, _, _ in gaussiane:\n",
" minimoLocale = mu - 4 * sigma\n",
" massimoLocale = mu + 4 * sigma\n",
" \n",
" if minimoLocale < xMin:\n",
" xMin = minimoLocale\n",
" \n",
" if massimoLocale > xMax:\n",
" xMax = massimoLocale\n",
" \n",
" x = np.linspace(xMin, xMax, 1000)\n",
"\n",
" # Ciclo gaussiane\n",
" for mu, sigma, colore, etichetta in gaussiane:\n",
" y = stats.norm.pdf(x, mu, sigma)\n",
" plt.plot(x, y, color=sns.color_palette()[colore], linewidth=1, label=etichetta)\n",
" \n",
" puntiLinee = [mu - sigma, mu, mu + sigma]\n",
" for px in puntiLinee:\n",
" py = stats.norm.pdf(px, mu, sigma) \n",
" plt.vlines(x=px,\n",
" ymin=0, ymax=py,\n",
" colors=sns.color_palette()[colore],\n",
" linestyles='dashed', linewidth=1)\n",
" \n",
" # Dettagli estetici finali\n",
" plt.ylim(bottom=0)\n",
" plt.title('Confronto dati')\n",
" plt.xlabel('k')\n",
" plt.legend()\n",
"\n",
" # Mostriamo il grafico\n",
" plt.show()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "base",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

413
statlib.py Normal file
View File

@@ -0,0 +1,413 @@
# Heavy lifting
import numpy as np
import pandas as pd
from scipy import stats
# Mostrare i dati
import ipysheet
from IPython.display import display
import matplotlib.pyplot as plt
import seaborn as sns
# Calcolo simbolico
import sympy as sp
def compat(x1, x2, u1, u2):
    """Compatibility check between two measurements.

    Parameters:
        x1, x2: measured values to compare.
        u1, u2: their associated uncertainties.

    Returns:
        (k, diff): the normalized discrepancy |x1 - x2| / sqrt(u1^2 + u2^2)
        and the raw difference x1 - x2.
    """
    diff = x1 - x2
    u_comb = np.sqrt(u1 ** 2 + u2 ** 2)
    return np.abs(diff) / u_comb, diff
class Data:
    """Container for one experimental data set.

    # campione: raw sample (typically a pandas DataFrame)
    # analisi_stat: statistical-analysis results (means, uncertainties, ...)
    # param_reg: linear-regression parameters (A, B, uA, uB, covAB, chi2, P)
    """

    def __init__(self, campione, analisi_stat=None, param_reg=None):
        self.campione = campione
        self.analisi_stat = analisi_stat
        self.param_reg = param_reg
# Probabilità (two-sided Student-t tail)
def p_t_student(valori, err_strumentale):
    """Two-sided Student-t probability of each value's deviation from the
    sample mean, with the instrumental error added in quadrature to the
    sample standard deviation.

    Parameters:
        valori: 1-D numpy array of repeated measurements.
        err_strumentale: instrumental uncertainty (scalar).

    Returns:
        Array of two-sided tail probabilities, one per value.
    """
    dof = len(valori) - 1  # degrees of freedom for the t distribution
    scarti = np.abs(valori - valori.mean())
    s_tot = np.sqrt(valori.std(ddof=1) ** 2 + err_strumentale ** 2)
    return 2 * (1 - stats.t.cdf(scarti / s_tot, df=dof))
# Indice del peggiore outlier o None
def trova_outlier(valori, err_strumentale):
    """Index of the worst outlier by Chauvenet's criterion, or None.

    A point is an outlier when its two-sided t probability falls below
    the Chauvenet threshold 1/(2N).
    """
    soglia = 1.0 / (2 * len(valori))
    p = p_t_student(valori, err_strumentale)
    peggiore = np.argmin(p)
    return peggiore if p[peggiore] < soglia else None
# Rimozione outlier
def rimuovi_outlier(valori, err_strumentale):
    """Iteratively strip Chauvenet outliers from a copy of ``valori``.

    Stops when no outlier is found or when only two points remain.

    Returns:
        (campione, rimossi): the cleaned sample and the list of removed
        values, in removal order.
    """
    rimossi = []
    campione = valori.copy()

    # never reduce the sample below two points
    while len(campione) > 2:
        sospetto = trova_outlier(campione, err_strumentale)
        if sospetto is None:  # nessun outlier: stop
            break
        rimossi.append(campione[sospetto])
        campione = np.delete(campione, sospetto)

    return campione, rimossi
# Analisi statistica con criterio di Chauvenet
def stat_chauv(self, prefissi, err_strumentale):
    """Statistical analysis of the raw sample with Chauvenet's criterion.

    For each prefix, the columns of ``self.campione`` whose names start
    with that prefix are treated as repeated measurements per row:
    outliers are removed, then mean, instrumental/statistical/combined
    uncertainties, sample size and removed values are stored in
    ``self.analisi_stat``.

    Parameters:
        prefissi: column-name prefix (str) or list of prefixes.
        err_strumentale: instrumental uncertainty — a scalar applied to
            every prefix, or one value per prefix.

    Returns:
        self, with ``analisi_stat`` populated.
    """
    self.analisi_stat = pd.DataFrame()
    campione = self.campione

    if isinstance(prefissi, str):
        prefissi = [prefissi]
    if isinstance(err_strumentale, (int, float)):
        err_strumentale = [err_strumentale] * len(prefissi)

    for prefix, err_strum in zip(prefissi, err_strumentale):
        cols = [col for col in campione.columns if col.startswith(prefix)]

        for i, row in campione.iterrows():
            valori = row[cols].dropna().values.astype(float)
            # BUG FIX: the cleaned array used to be assigned back to
            # `campione`, clobbering the source DataFrame — with more
            # than one prefix the next iteration's `campione.columns`
            # raised AttributeError on a numpy array.
            puliti, rimossi = rimuovi_outlier(valori, err_strum)

            n = len(puliti)
            s = puliti.std(ddof=1) / np.sqrt(n)   # standard error of the mean
            u = np.sqrt(s ** 2 + err_strum ** 2)  # combined uncertainty

            self.analisi_stat.at[i, prefix] = puliti.mean()
            self.analisi_stat.at[i, f"u{prefix}_strum"] = err_strum
            self.analisi_stat.at[i, f"u{prefix}_stat"] = s
            self.analisi_stat.at[i, f"u{prefix}"] = u
            self.analisi_stat.at[i, f"n{prefix}"] = n
            self.analisi_stat.at[i, f"out{prefix}"] = rimossi

    return self


# Aggiunta a classe Data
Data.stat_chauv = stat_chauv
def calc_var(self, funz, nome_risultato):
    """Evaluate a symbolic expression on ``analisi_stat`` and propagate
    the uncertainties of its variables.

    Parameters:
        funz: sympy expression of the quantity to compute; its free
            symbols must match columns of ``self.analisi_stat`` (values
            in ``<name>``, uncertainties in ``u<name>``).
        nome_risultato: name of the new result column.

    Output columns added to ``analisi_stat``:
        <nome_risultato>: value at each experimental point.
        u<nome_risultato>: propagated uncertainty.
        %u<nome_risultato>_<var>: percentage contribution of each input.
    """
    # Free symbols of the expression, in a stable (alphabetical) order
    simboli = sorted(funz.free_symbols, key=lambda s: s.name)

    # One uncertainty symbol per variable and one propagation term each
    incertezze = {v: sp.Symbol(f'u{v.name}', positive=True) for v in simboli}
    termini = [sp.diff(funz, v) * incertezze[v] for v in simboli]

    # Propagated uncertainty: quadrature sum of the contributions
    u_prop = sp.sqrt(sum(t ** 2 for t in termini))

    u_simboli = [incertezze[v] for v in simboli]

    # Numeric callables
    funz_fn = sp.lambdify(simboli, funz, 'numpy')
    u_prop_fn = sp.lambdify((simboli, u_simboli), u_prop, 'numpy')
    contr_fn = sp.lambdify((simboli, u_simboli), termini, 'numpy')

    # Pull values and uncertainties out of analisi_stat
    vars_num = [self.analisi_stat[v.name].values.astype(float) for v in simboli]
    u_num = [self.analisi_stat[f'u{v.name}'].values.astype(float) for v in simboli]

    # Numeric evaluation
    risultato_val = funz_fn(*vars_num)
    risultato_u = u_prop_fn(vars_num, u_num)
    risultato_contr = (np.array(contr_fn(vars_num, u_num)) / risultato_u) ** 2 * 100

    # Store the results
    self.analisi_stat[nome_risultato] = risultato_val
    self.analisi_stat[f"u{nome_risultato}"] = risultato_u
    for idx, v in enumerate(simboli):
        self.analisi_stat[f"%u{nome_risultato}_{v.name}"] = risultato_contr[idx]

    return self


# Aggiunta a classe Data
Data.calc_var = calc_var
# Plot residui
def plot_residui(x, r,
                 ux, ur,
                 x_label, r_label, titolo):
    """Scatter plot of regression residuals with error bars and a
    dashed reference line at zero. Shows the figure; returns nothing.
    """
    fig, ax = plt.subplots(figsize=(8, 5))

    # Residuals with error bars
    stile_punti = dict(fmt='o', color=sns.color_palette()[0],
                       ecolor='gray', elinewidth=1, capsize=3,
                       markersize=5, label="Residui")
    ax.errorbar(x, r, xerr=ux, yerr=ur, **stile_punti)

    # Zero reference line
    ax.axhline(0, color='red', linestyle='--', linewidth=1)

    # Cosmetics
    sns.despine(ax=ax)
    ax.set_xlabel(x_label)
    ax.set_ylabel(r_label)
    ax.set_title(titolo)
    ax.legend()
    ax.grid(True, linestyle=':', linewidth=0.5, alpha=0.7)

    plt.tight_layout()
    plt.show()
# Residuals computation
def residui(analisi_stat,
            A_num, B_num,
            uA_num, uB_num, covAB_num):
    """Compute linear-fit residuals r = y - (A*x + B) and their uncertainty.

    The uncertainty is propagated from ux, uy, uA, uB and includes the
    A-B covariance term.  Adds columns "r" and "ur" to ``analisi_stat``
    (reading columns "x", "y", "ux", "uy") and returns the DataFrame.
    """
    # Symbolic variables for the residual and its propagation
    A, B, x, y = sp.symbols('A B x y', real=True)
    uA, uB, ux, uy = sp.symbols('uA uB ux uy', positive=True)
    covAB = sp.symbols('covAB', real=True)
    residuo = y - (A*x + B)
    # Partial derivatives feeding the propagation formula
    derivate = {v: sp.diff(residuo, v) for v in (A, B, x, y)}
    # First-order propagation, with the covariance cross-term for (A, B)
    u_residuo = sp.sqrt(
        (derivate[A] * uA)**2 +
        (derivate[B] * uB)**2 +
        (derivate[x] * ux)**2 +
        (derivate[y] * uy)**2 +
        2 * derivate[A] * derivate[B] * covAB
    )
    # Vectorized numeric callables
    r_fn = sp.lambdify((x, y, A, B), residuo, 'numpy')
    u_r_fn = sp.lambdify(
        (x, y, ux, uy, A, B, uA, uB, covAB),
        u_residuo, 'numpy'
    )
    # Numeric evaluation over the DataFrame columns
    analisi_stat["r"] = r_fn(
        analisi_stat["x"], analisi_stat["y"],
        A_num, B_num
    )
    analisi_stat["ur"] = u_r_fn(
        analisi_stat["x"], analisi_stat["y"],
        analisi_stat["ux"], analisi_stat["uy"],
        A_num, B_num,
        uA_num, uB_num, covAB_num
    )
    return analisi_stat
# Regression plot
def plot_reg(x, y, ux, uy,
             A, B, uA, uB, P,
             x_label, y_label, titolo,
             cd_A=4, cd_B=4, scala_barre=1):
    """Plot the data with (optionally magnified) error bars and the line A*x + B.

    cd_A / cd_B set the decimals shown for the parameters in the legend;
    scala_barre multiplies the error bars for visibility (the data label
    records the factor used).
    """
    fig, ax = plt.subplots(figsize=(8, 5))
    # Fitted line sampled densely over the data range
    ascisse_fit = np.linspace(x.min(), x.max(), 300)
    ordinate_fit = A * ascisse_fit + B
    # Data points with magnified error bars
    ax.errorbar(x, y,
                xerr=scala_barre * ux,
                yerr=scala_barre * uy,
                fmt='o', color=sns.color_palette()[0],
                ecolor='gray', elinewidth=1, capsize=3,
                markersize=5, label=f"Dati (barre errore x{scala_barre})")
    # Fit line annotated with parameters and chi-square probability
    etichetta = (f"$A={A:.{cd_A}f}\\pm{uA:.{cd_A}f}$\n"
                 f"$B={B:.{cd_B}f}\\pm{uB:.{cd_B}f}$\n"
                 f"$P(chi², ∞)={P:.4f}$\n")
    ax.plot(ascisse_fit, ordinate_fit,
            color='red', linewidth=1.5, label=etichetta)
    sns.despine(ax=ax)
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    ax.set_title(titolo)
    ax.legend(fontsize=9)
    ax.grid(True, linestyle=':', linewidth=0.5, alpha=0.7)
    plt.tight_layout()
    plt.show()
# Regression functions
def uy_equiv(x, y, ux, uy):
    """Return the equivalent y uncertainty that also accounts for ux.

    A first-pass weighted least-squares slope estimate A_est (weights
    1/uy**2) is used to project the x uncertainty onto y:

        u_eq = sqrt(uy**2 + A_est**2 * ux**2)

    All parameters are equal-length 1-D float arrays; the result has the
    same shape as uy.
    """
    # Weighted sums for the preliminary fit using only uy
    sy2 = uy**2
    Sw = np.sum(1 / sy2)
    Sx = np.sum(x / sy2)
    Sxx = np.sum(x**2 / sy2)
    Sy = np.sum(y / sy2)
    Sxy = np.sum(x * y / sy2)
    delta = Sxx * Sw - Sx**2
    A_est = (Sxy * Sw - Sx * Sy) / delta
    # Fold the x uncertainty into y through the estimated slope
    return np.sqrt(uy**2 + A_est**2 * ux**2)
def reg_lin(self,
            stampa_param=True, plot_regressione=True, calc_residui=True,
            x_label="", y_label="", r_label="",
            titolo_reg="", titolo_residui="",
            cd_A=4, cd_B=4, scala_barre=1):
    """Weighted linear regression y = A*x + B on self.analisi_stat.

    Reads columns "x", "y", "ux", "uy".  Stores the fit results in
    ``self.param_reg`` as a single-row DataFrame with columns
    A, B, uA, uB, covAB, x2 (chi-square) and P (chi-square survival
    probability).  Optionally prints the parameters, plots the fit
    (``plot_reg``) and computes/plots the residuals (``residui`` /
    ``plot_residui``, adding "r" and "ur" columns to analisi_stat).

    Returns ``self`` to allow chaining.
    """
    x = np.asarray(self.analisi_stat["x"], dtype=float)
    y = np.asarray(self.analisi_stat["y"], dtype=float)
    ux = np.asarray(self.analisi_stat["ux"], dtype=float)
    uy = np.asarray(self.analisi_stat["uy"], dtype=float)
    # Fold the x uncertainty into an equivalent y uncertainty
    uy_eq = uy_equiv(x, y, ux, uy)
    # Weighted sums
    w = 1.0 / uy_eq**2
    Sw = np.sum(w)
    Sx = np.sum(w * x)
    Sxx = np.sum(w * x**2)
    Sy = np.sum(w * y)
    Sxy = np.sum(w * x * y)
    delta = Sxx * Sw - Sx**2
    # Best-fit parameters, their uncertainties and covariance
    A = (Sxy * Sw - Sx * Sy) / delta
    B = (Sxx * Sy - Sxy * Sx) / delta
    uA = np.sqrt(Sw / delta)
    uB = np.sqrt(Sxx / delta)
    covAB = -Sx / delta
    # Chi-square goodness of fit
    x2 = np.sum((y - A * x - B)**2 / uy_eq**2)
    dof = len(x) - 2
    P = stats.chi2.sf(x2, dof)
    # Parameter table built as a single-row DataFrame.
    # BUG FIX: the previous code assigned scalars column-by-column to an
    # empty DataFrame, which yields zero-row columns — param_reg ended up
    # empty and the fit results were silently lost.
    self.param_reg = pd.DataFrame([{
        "A": A, "B": B, "uA": uA, "uB": uB,
        "covAB": covAB, "x2": x2, "P": P,
    }])
    # Optional report of the parameters
    if stampa_param:
        print("Ax + B : ")
        print(f"A = {A:.{cd_A}f} ± {uA:.{cd_A}f}")
        print(f"B = {B:.{cd_B}f} ± {uB:.{cd_B}f}")
        print(f"covAB = {covAB:.6f}")
        print(f"chi² = {x2:.2f}")
        print(f"P(chi², ∞) = {P:.2f}")
    # Optional plot of data + fitted line
    if plot_regressione:
        plot_reg(x, y, ux, uy,
                 A, B, uA, uB, P,
                 x_label, y_label, titolo_reg,
                 cd_A, cd_B, scala_barre)
    # Optional residual computation and plot
    if calc_residui:
        self.analisi_stat = residui(self.analisi_stat, A, B, uA, uB, covAB)
        plot_residui(x, self.analisi_stat["r"],
                     ux, self.analisi_stat["ur"],
                     x_label, r_label, titolo_residui)
    return self
# Attach reg_lin as a method of the Data class (same monkey-patching
# convention used for the other analysis helpers in this file).
Data.reg_lin = reg_lin
def plot_gauss(gaussiane):
    """Plot a family of Gaussian pdfs on a shared x axis.

    ``gaussiane`` is an iterable of (mu, sigma, colore, etichetta) tuples,
    where ``colore`` indexes the seaborn palette and ``etichetta`` is the
    legend label.  Dashed vertical lines mark mu and mu ± sigma for each
    curve, drawn up to the pdf value.
    """
    plt.figure(figsize=(12, 7))
    # Shared x axis spanning mu ± 4σ of every curve
    # (default= keeps the original behaviour on an empty input)
    x_min = min((mu - 4 * sigma for mu, sigma, _, _ in gaussiane),
                default=float('inf'))
    x_max = max((mu + 4 * sigma for mu, sigma, _, _ in gaussiane),
                default=float('-inf'))
    x = np.linspace(x_min, x_max, 1000)
    # One curve per tuple
    for mu, sigma, colore, etichetta in gaussiane:
        tinta = sns.color_palette()[colore]
        plt.plot(x, stats.norm.pdf(x, mu, sigma),
                 color=tinta, linewidth=1, label=etichetta)
        # Dashed markers at mu - sigma, mu, mu + sigma
        for px in (mu - sigma, mu, mu + sigma):
            plt.vlines(x=px,
                       ymin=0, ymax=stats.norm.pdf(px, mu, sigma),
                       colors=tinta,
                       linestyles='dashed', linewidth=1)
    # Final cosmetics
    plt.ylim(bottom=0)
    plt.title('Confronto dati')
    plt.xlabel('k')
    plt.legend()
    # Show the figure
    plt.show()