
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import HTML, display
from matplotlib.animation import FuncAnimation
import pywt
import coherence_utils as c_utils

ticksize = 12
fontsize = 15

Load shot gathers from the two different velocity models

gather_vm1 = np.load('data/shot35_Vmodel1.npy')
gather_vm2 = np.load('data/shot35_Vmodel2.npy')

Display shot gathers

plt.figure(figsize=(10, 8))
plt.subplot(211)
plt.imshow(gather_vm1, cmap='gray', aspect='auto', vmin=-500, vmax=500)
plt.title('Shot gather (Model 1)')
plt.colorbar()
plt.subplot(212)
plt.imshow(gather_vm2, cmap='gray', aspect='auto', vmin=-500, vmax=500)
plt.title('Shot gather (Model 2)')
plt.colorbar()

Do a wavelet decomposition and inspect how the various decomposition levels look for the two shot gathers

# Define the wavelet and decomposition level
wavelet = 'haar'  # you can choose a different wavelet (e.g. haar)
level = 3         # number of decomposition levels

# Data to use
pdata = gather_vm1

# Perform 2D wavelet decomposition
coeffs = pywt.wavedec2(pdata, wavelet, level=level)

color_map = "turbo"

def plot_wavedec2(coeffs, level, title, color_map=color_map):
    """Display a 2D wavelet decomposition as the usual nested sub-band mosaic."""
    level_rows = int(2**level)
    plt.figure(figsize=(level_rows, level_rows))
    plt.title(title)
    # Approximation coefficients
    ax1 = plt.subplot(level_rows, level_rows, level_rows)
    ax1.axis('equal')
    plt.imshow(coeffs[0], cmap=color_map,
               vmin=-np.percentile(np.absolute(coeffs[0]), 90),
               vmax=np.percentile(np.absolute(coeffs[0]), 90),
               aspect='auto', interpolation='none')
    ax1.axis("off")
    # Detail coefficients at each level
    for i in range(1, level + 1):
        ax1 = plt.subplot(level_rows, level_rows, level_rows - 1)
        ax1.axis('equal')
        plt.imshow(coeffs[i][0], cmap=color_map,
                   vmin=-np.percentile(np.absolute(coeffs[i][0]), 90),
                   vmax=np.percentile(np.absolute(coeffs[i][0]), 90),
                   aspect='auto', interpolation='none')
        if i != level:
            ax1.axis('off')
        else:
            ax1.xaxis.set_visible(False)
        ax1 = plt.subplot(level_rows, level_rows, 2*level_rows - 1)
        ax1.axis('equal')
        plt.imshow(coeffs[i][2], cmap=color_map,
                   vmin=-np.percentile(np.absolute(coeffs[i][2]), 90),
                   vmax=np.percentile(np.absolute(coeffs[i][2]), 90),
                   aspect='auto', interpolation='none')
        if i != level:
            ax1.axis('off')
        ax1 = plt.subplot(level_rows, level_rows, 2*level_rows)
        ax1.axis('equal')
        plt.imshow(coeffs[i][1], cmap=color_map,
                   vmin=-np.percentile(np.absolute(coeffs[i][1]), 90),
                   vmax=np.percentile(np.absolute(coeffs[i][1]), 90),
                   aspect='auto', interpolation='none')
        # extent=(0, len(pdata[0])/samples_per_sec, start_ch, start_ch + nchannels)
        if i != level:
            ax1.axis('off')
        else:
            ax1.yaxis.set_visible(False)
        level_rows = int(level_rows / 2)
    plt.subplots_adjust(wspace=0.01, hspace=0.01)

plot_wavedec2(coeffs, level, "Shot gather (Model 1)")

# Data to use
pdata = gather_vm2

# Perform 2D wavelet decomposition
coeffs = pywt.wavedec2(pdata, wavelet, level=level)

plot_wavedec2(coeffs, level, "Shot gather (Model 2)")

# Data to use
pdata = gather_vm2 - gather_vm1

# Perform 2D wavelet decomposition
coeffs = pywt.wavedec2(pdata, wavelet, level=level)

plot_wavedec2(coeffs, level, "Difference in shot gathers")

plt.imshow(pdata, aspect='auto')
plt.title("Difference in shot gathers")

# Define the wavelet and decomposition level
wavelet = 'bior1.1'  # you can choose a different wavelet: haar, bior1.1, db3
level = 3            # number of decomposition levels

# Data to use
pdata1 = gather_vm1
pdata2 = gather_vm2

# Perform 2D wavelet decomposition
coeffs1 = pywt.wavedec2(pdata1, wavelet, level=level)
coeffs2 = pywt.wavedec2(pdata2, wavelet, level=level)

print(gather_vm1.shape)
coeffs1[0].shape

(500, 37)

plt.figure(figsize=(10, 5))
plt.subplot(131)
plt.imshow(coeffs1[0], cmap=color_map,
           vmin=-np.percentile(np.absolute(coeffs1[0]), 90),
           vmax=np.percentile(np.absolute(coeffs1[0]), 90),
           aspect='auto', interpolation='none')
plt.subplot(132)
plt.imshow(coeffs2[0], cmap=color_map,
           vmin=-np.percentile(np.absolute(coeffs2[0]), 90),
           vmax=np.percentile(np.absolute(coeffs2[0]), 90),
           aspect='auto', interpolation='none')
plt.subplot(133)
plt.imshow(coeffs1[0] - coeffs2[0], cmap=color_map,
           vmin=-np.percentile(np.absolute(coeffs1[0]), 90),
           vmax=np.percentile(np.absolute(coeffs1[0]), 90),
           aspect='auto', interpolation='none')
plt.colorbar()

Investigate how well we can use coherence analysis to summarize this data

data = gather_vm1.T
sample_interval = 0.001
overlap = 0
subwindow_len = 0.5
coherence_mat, frequencies = c_utils.welch_coherence(data, subwindow_len=subwindow_len,
                                                     overlap=overlap,
                                                     sample_interval=sample_interval)
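The welch_coherence helper lives in the accompanying coherence_utils module, which is not shown here. As a rough sketch of what it is assumed to compute — pairwise magnitude-squared coherence between all traces via Welch's method, stacked into one matrix per frequency — an illustrative scipy version could look like this (welch_coherence_sketch and its exact output layout are assumptions, not the module's actual code):

# Illustrative sketch only: pairwise magnitude-squared coherence between all
# traces, one (nsensors x nsensors) matrix per frequency. Not the code
# inside coherence_utils.
import numpy as np
from scipy.signal import coherence

def welch_coherence_sketch(data, subwindow_len=0.5, overlap=0.0, sample_interval=0.001):
    fs = 1.0 / sample_interval
    nperseg = int(subwindow_len * fs)
    noverlap = int(overlap * nperseg)
    n_traces = data.shape[0]
    # Frequency axis returned by scipy for these segment settings
    freqs, _ = coherence(data[0], data[0], fs=fs, nperseg=nperseg, noverlap=noverlap)
    coh = np.ones((len(freqs), n_traces, n_traces))
    for i in range(n_traces):
        for j in range(i + 1, n_traces):
            _, cxy = coherence(data[i], data[j], fs=fs, nperseg=nperseg, noverlap=noverlap)
            coh[:, i, j] = coh[:, j, i] = cxy
    return coh, freqs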

plt.imshow(coherence_mat[10])
# coherence_mat.shape

# Animate the coherence matrices, one frame per frequency
num_frames = len(coherence_mat)
data1 = coherence_mat[:]  # 3D array: (frequency, sensor, sensor)
nsensors = data1[0].shape[0]  # number of sensors along each axis of the matrix

fig = plt.figure(figsize=(6, 6))

def update(frame):
    fig.clear()
    ax = fig.add_subplot(111)
    ax.imshow(data1[frame], cmap=plt.cm.viridis, animated=True,
              extent=[0, nsensors, 0, nsensors])
    ax.set_title("Frequency = " + str(frequencies[frame]), size=fontsize)
    plt.xticks(fontsize=ticksize)
    plt.yticks(fontsize=ticksize)
    plt.tight_layout()

# Create and display the animation
ani = FuncAnimation(fig, update, frames=num_frames, repeat=False)
display(HTML(ani.to_jshtml()))

Zhihu academic consulting:

https://www.zhihu.com/consult/people/792359672131756032?isMe=1

Reviewer for journals including Mechanical Systems and Signal Processing and Proceedings of the CSEE. Areas of expertise: signal filtering/denoising, machine learning/deep learning, time-series analysis/forecasting, and equipment fault diagnosis/defect detection/anomaly detection.


Bitcoin price forecasting with time-series analysis methods (dense neural networks, convolutional networks, LSTM networks, etc.) (Python, ipynb file)
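The project's code is not reproduced here; as a minimal sketch of the sliding-window framing these models share, here is an illustrative dense-network variant in Keras (the synthetic price series, window length, and layer sizes are placeholders, not the project's settings):

# Illustrative only: frame a price series as supervised windows and fit a
# small dense network. Window length and layer sizes are assumptions.
import numpy as np
from keras import models, layers

def make_windows(series, window=30):
    X = np.stack([series[i:i + window] for i in range(len(series) - window)])
    y = series[window:]
    return X, y

prices = np.cumsum(np.random.randn(1000)) + 100.0  # stand-in for BTC closes
X, y = make_windows(prices, window=30)
model = models.Sequential([
    layers.Dense(64, activation='relu', input_shape=(30,)),
    layers.Dense(32, activation='relu'),
    layers.Dense(1),  # next-step price
])
model.compile(optimizer='adam', loss='mse')
model.fit(X, y, epochs=5, verbose=0)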

Full code:

https://mbd.pub/o/bread/mbd-ZpmVmZls

Health assessment and prediction based on a multi-scale ensemble extreme learning machine (MATLAB R2018a)
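The MATLAB code itself is not shown; the core idea of an extreme learning machine — a fixed random hidden layer with a closed-form least-squares readout — can be sketched in a few lines of NumPy (illustrative only, and single-scale rather than the multi-scale ensemble):

# Sketch of a basic extreme learning machine (ELM): random hidden weights,
# closed-form least-squares output layer. Illustrative, not the MATLAB code.
import numpy as np

def elm_fit(X, y, n_hidden=100, seed=0):
    rng = np.random.default_rng(seed)
    W = rng.normal(size=(X.shape[1], n_hidden))  # fixed random input weights
    b = rng.normal(size=n_hidden)
    H = np.tanh(X @ W + b)                       # hidden-layer activations
    beta = np.linalg.pinv(H) @ y                 # least-squares readout
    return W, b, beta

def elm_predict(X, W, b, beta):
    return np.tanh(X @ W + b) @ beta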

Full code:

https://mbd.pub/o/bread/mbd-Zp2YlJty

A multi-step water-quality forecasting model based on Savitzky-Golay filtering and an optimized Transformer network (Python)
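As a minimal sketch of the Savitzky-Golay stage assumed here — smoothing the raw series before the forecasting network sees it — scipy provides a direct implementation (the window length and polynomial order are placeholder choices):

# Savitzky-Golay smoothing sketch; window_length and polyorder are assumed
# values, not the project's tuned settings.
import numpy as np
from scipy.signal import savgol_filter

raw = np.sin(np.linspace(0, 10, 500)) + 0.3 * np.random.randn(500)  # stand-in series
smoothed = savgol_filter(raw, window_length=21, polyorder=3)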

Full code:

https://mbd.pub/o/bread/mbd-ZJyZl5xr

Forex and stock price forecasting based on Transformers and time embeddings (Python, ipynb file)
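The time-embedding idea can be sketched independently of the full model: map each time index to one linear (trend) component plus several periodic components, Time2Vec-style. The version below uses fixed random frequencies purely for illustration; in the project they would be learned parameters:

# Time2Vec-style embedding sketch: one linear component plus k periodic
# components. Random frequencies/phases here stand in for learned parameters.
import numpy as np

def time_embedding(t, k=8, seed=0):
    """t: 1D array of time indices; returns a (len(t), k + 1) embedding."""
    rng = np.random.default_rng(seed)
    w = rng.normal(size=k)                 # frequencies (learned in practice)
    b = rng.normal(size=k)                 # phases (learned in practice)
    linear = t[:, None]                    # trend component
    periodic = np.sin(t[:, None] * w + b)  # seasonal components
    return np.concatenate([linear, periodic], axis=1)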

Full code:

https://mbd.pub/o/bread/mbd-ZpyXmptq

Time-series analysis and forecasting based on machine learning and deep learning (Python)

Main code:

import os
import numpy as np
import pandas as pd
import seaborn as sns
from ARX_Model import arx
import statsmodels.api as sm
from AR_Model import ar_model
import matplotlib.pyplot as plt
from ARIMA_Model import arima_model
from Plot_Models import plot_models
from Least_Squares import lest_squares
from Normalize_Regression import normalize_regression
from Sequences_Data import sequences_data
from Test_Stationary import test_stationary
from Auto_Correlation import auto_correlation
from Linear_Regression import linear_regression
from Xgboost_Regression import xgboost_regression
from keras import models, layers
from Random_Forest_Regression import random_forest_regression
from Tree_Decision_Regression import tree_decision_regression

# ======================================== Step 1: Load Data ==================================================
os.system('cls')
data = sm.datasets.sunspots.load_pandas()
# df = pd.read_csv('monthly_milk_production.csv'), df.info(), X = df["Value"].values
data = data.data["SUNACTIVITY"]
# print('Shape of data \t', data.shape)
# print('Original Dataset:\n', data.head())
# print('Values:\n', data)

# ================================ Step 2.1: Normalize Data (0-1) =============================================
# data, normalize_modele = normalize_regression(data, type_normalize='MinMaxScaler', display_figure='on')

# ================================ Step 2.2: Check Stationary Time Series =====================================
# data = test_stationary(data, window=20)

# ==================================== Step 3: Find the lags of AR etc. models ================================
# auto_correlation(data, nLags=10)

# =========================== Step 4: Split Dataset into Train and Test =======================================
nLags = 3
num_sample = 300
mu = 0.000001
Data_Lags = pd.DataFrame(np.zeros((len(data), nLags)))
for i in range(0, nLags):
    Data_Lags[i] = data.shift(i + 1)
Data_Lags = Data_Lags[nLags:]
data = data[nLags:]
Data_Lags.index = np.arange(0, len(Data_Lags), 1, dtype=int)
data.index = np.arange(0, len(data), 1, dtype=int)
train_size = int(len(data) * 0.8)

# ================================= Step 5: Autoregressive and Automated Methods ==============================
sns.set(style='white')
fig, axs = plt.subplots(nrows=4, ncols=1, sharey='row', figsize=(16, 10))
plot_models(data, [], [], axs, nLags, train_size, num_sample=num_sample, type_model='Actual_Data')

# ------------------------------------------- Least Squares ---------------------------------------------------
lest_squares(data, Data_Lags, train_size, axs, num_sample=num_sample)

# -------------------------------------------- Auto-Regressive (AR) model -------------------------------------
ar_model(data, train_size, axs, n_lags=nLags, num_sample=num_sample)

# ------------------------------------------------ ARX --------------------------------------------------------
arx(data, Data_Lags, train_size, axs, mu=mu, num_sample=num_sample)

# ----------------------------- Auto-Regressive Integrated Moving Average (ARIMA) -----------------------------
arima_model(data, train_size, axs, order=(5, 1, (1, 1, 1, 1)), seasonal_order=(0, 0, 2, 12), num_sample=num_sample)

# ======================================= Step 6: Machine Learning Models =====================================
# ------------------------------------------- Linear Regression Model -----------------------------------------
linear_regression(data, Data_Lags, train_size, axs, num_sample=num_sample)

# ------------------------------------------ RandomForestRegressor Model --------------------------------------
random_forest_regression(data, Data_Lags, train_size, axs, n_estimators=100, max_features=nLags, num_sample=num_sample)

# -------------------------------------------- Decision Tree Model --------------------------------------------
tree_decision_regression(data, Data_Lags, train_size, axs, max_depth=2, num_sample=num_sample)

# ---------------------------------------------- XGBoost ------------------------------------------------------
xgboost_regression(data, Data_Lags, train_size, axs, n_estimators=1000, num_sample=num_sample)

# ----------------------------------------------- LSTM model --------------------------------------------------
train_x, train_y = sequences_data(np.array(data[:train_size]), nLags)  # time-series shape: [samples, nLags, n_features]
test_x, test_y = sequences_data(np.array(data[train_size:]), nLags)
mod = models.Sequential()  # build the model
# mod.add(layers.ConvLSTM2D(filters=64, kernel_size=(1, 1), activation='relu', input_shape=(None, nLags)))  # ConvLSTM2D
# mod.add(layers.Flatten())
mod.add(layers.LSTM(units=100, activation='tanh', input_shape=(None, nLags)))
mod.add(layers.Dropout(rate=0.2))
# mod.add(layers.LSTM(units=100, activation='tanh'))  # stacked LSTM
# mod.add(layers.Bidirectional(layers.LSTM(units=100, activation='tanh'), input_shape=(None, 1)))  # bidirectional LSTM
mod.add(layers.Dense(32))
mod.add(layers.Dense(1))  # one output node to predict the next value
mod.compile(optimizer='adam', loss='mse')
mod.fit(train_x, train_y, validation_data=(test_x, test_y), verbose=2, epochs=100)
y_train_pred = pd.Series(mod.predict(train_x).ravel())
y_test_pred = pd.Series(mod.predict(test_x).ravel())
y_train_pred.index = np.arange(nLags, len(y_train_pred) + nLags, 1, dtype=int)
y_test_pred.index = np.arange(train_size + nLags, len(data), 1, dtype=int)
plot_models(data, y_train_pred, y_test_pred, axs, nLags, train_size, num_sample=num_sample, type_model='LSTM')
# data_train = normalize.inverse_transform((np.array(data_train)).reshape(-1, 1))
mod.summary()
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.2)
plt.show()

Full code:

https://mbd.pub/o/bread/mbd-ZpmWl5hx

Energy consumption forecasting based on machine learning (Python)

Tools and Libraries

The following libraries and tools were used in this project:

Project Workflow

1. Data Loading and Visualization

2. Outlier Analysis and Removal

3. Train-Test Split

4. Time Series Cross-Validation

5. Feature Engineering

6. Model Training and Evaluation

7. Future Predictions

8. Model Saving and Loading

Results and Conclusion

This project demonstrates the application of machine learning techniques to time series forecasting in the energy sector. The use of XGBoost and careful feature engineering resulted in a model capable of predicting future energy consumption with reasonable accuracy. Further improvements could be made by exploring additional features or different model architectures.
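As a minimal sketch of steps 4-6 above — calendar features from a DatetimeIndex, expanding-window cross-validation, and an XGBoost regressor — the skeleton might look like this (the stand-in data, column names, and hyperparameters are illustrative, not the project's):

# Sketch of steps 4-6: calendar feature engineering, time-series CV, XGBoost.
# Data, column names and hyperparameters are placeholder assumptions.
import numpy as np
import pandas as pd
from sklearn.model_selection import TimeSeriesSplit
from xgboost import XGBRegressor

idx = pd.date_range('2015-01-01', periods=5000, freq='h')
df = pd.DataFrame({'load': np.random.rand(5000)}, index=idx)  # stand-in data
df['hour'] = df.index.hour            # feature engineering (step 5)
df['dayofweek'] = df.index.dayofweek
df['month'] = df.index.month
X, y = df[['hour', 'dayofweek', 'month']], df['load']

for train_idx, test_idx in TimeSeriesSplit(n_splits=5).split(X):  # step 4
    model = XGBRegressor(n_estimators=300, learning_rate=0.05)    # step 6
    model.fit(X.iloc[train_idx], y.iloc[train_idx])
    print(model.score(X.iloc[test_idx], y.iloc[test_idx]))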

Full code:

https://mbd.pub/o/bread/mbd-ZpmVmp9y

General Electric stock price forecasting with an attention-based long short-term memory (LSTM) network (Python, Keras)

Runs as an ipynb file; the modules used are as follows:

import numpy as np
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense, Dropout, Lambda, Dot, Activation, Concatenate, Layer
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
import seaborn as sns
from datetime import datetime
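The cell above only lists the imports; a minimal sketch of attention over LSTM time steps, assembled from those layers (plus Input, Flatten, and Model), might look like the following. The window length, feature count, and unit sizes are assumptions, not the project's exact architecture:

# Sketch of attention over LSTM time steps. Sizes are placeholder assumptions.
from keras.models import Model
from keras.layers import Input, LSTM, Dense, Dot, Activation, Flatten

timesteps, n_features, units = 30, 5, 64
inp = Input(shape=(timesteps, n_features))
h = LSTM(units, return_sequences=True)(inp)  # hidden state at every step: (batch, T, units)
score = Dense(1)(h)                          # one relevance score per time step
score = Flatten()(score)                     # (batch, T)
weights = Activation('softmax')(score)       # attention weights over time
context = Dot(axes=(1, 1))([weights, h])     # weighted sum of states: (batch, units)
out = Dense(1)(context)                      # next-step price
model = Model(inp, out)
model.compile(optimizer='adam', loss='mse')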

Full code:

https://mbd.pub/o/bread/mbd-ZpmUmJ9x

Energy consumption forecasting based on ARIMA and Transformer models (Python)

The code runs in Jupyter Notebook; the modules used are as follows:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression, SelectKBest
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.callbacks import ModelCheckpoint
import keras.backend as K
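The imports above cover the neural half of the comparison; for the ARIMA half, a minimal statsmodels sketch is enough to show the framing (the (p, d, q) order and the stand-in series are placeholders, not the project's tuned settings):

# Minimal ARIMA sketch; order and data are placeholders.
import numpy as np
import pandas as pd
from statsmodels.tsa.arima.model import ARIMA

energy = pd.Series(np.random.rand(200))  # stand-in for the consumption series
fit = ARIMA(energy, order=(2, 1, 2)).fit()
forecast = fit.forecast(steps=24)        # next 24 periods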

Some of the figures produced by the program are shown below.

Full code:

https://mbd.pub/o/bread/mbd-ZZibm5Zs

Signal filtering based on the continuous wavelet transform (Python, ipynb file)

Filtering by continuous wavelet transform

%matplotlib inline
import numpy as np
from waveletFunctions import wavelet, wave_signif
import matplotlib.pyplot as plt

sst = np.loadtxt('sst_nino3.dat')
n = len(sst)
dt = 0.25
time = np.arange(len(sst)) * dt + 1871.0  # time epochs
xlim = [1870, 2000]

plt.figure(figsize=(12, 4))
plt.plot(time, sst)
plt.xlim(xlim[:])
plt.xlabel('time (year)')
plt.ylabel('NINO3 SST (°C)')
plt.title('NINO3 sea surface temperature (seasonal)')
plt.show()

Let us filter this signal so that we keep components with periods between 2 and 8 years.

Let us again use the inverse wavelet transform function written earlier.

def icwt(W, sj, dt, dj=1/8, mother='morlet'):
    """
    Inverse continuous wavelet transform.

    Parameters
    ----------
    W : wavelet transform
    sj : vector of scale indices
    dt : sampling interval
    dj : discrete scale interval, default 0.125
    mother : mother wavelet (default: Morlet)

    Returns
    -------
    iW : inverse wavelet transform
    """
    mother = mother.upper()
    if mother == 'MORLET':
        Cd = 0.7785
        psi0 = 0.751126
    elif mother == 'PAUL':  # Paul, m=4
        Cd = 1.132
        psi0 = 1.07894
    elif mother == 'DOG':   # DOG, m=2
        Cd = 3.541
        psi0 = 0.86733
    else:
        raise ValueError('Mother must be one of Morlet, Paul, DOG')

    a, b = W.shape
    c = sj.size
    if a == c:
        sj = (np.ones([b, 1]) * sj).transpose()
    elif b == c:
        sj = np.ones([a, 1]) * sj
    else:
        raise ValueError('Input array dimensions do not match.')

    iW = dj * np.sqrt(dt) / (Cd * psi0) * (np.real(W) / np.sqrt(sj)).sum(axis=0)
    return iW

pad = 1       # zero padding (recommended)
dj = 0.125    # 8 sub-octaves per octave
s0 = 2 * dt   # 6-month initial scale
j1 = 7 / dj   # 7 octaves
mother = 'MORLET'

Calculate the CWT, determine the corrected signal power, filter for periods of 2-8 years, and reconstruct the filtered signal with the inverse CWT.

wave, period, scale, coi = wavelet(sst, dt, pad, dj, s0, j1, mother)
scale_ext = np.outer(scale, np.ones(n))
power = (np.abs(wave))**2 / scale_ext  # power spectrum (corrected)

# Filtering for periods between 2 and 8 years
zmask = np.logical_or(scale_ext < 2, scale_ext >= 8)  # zero here
power[zmask] = 0.0
wavethr = np.copy(wave)
wavethr[zmask] = 0.0

# Reconstruction by inverse wavelet transform
x = icwt(wavethr, scale, dt, dj, 'morlet')

Plot wavelet map and filtered signal:

import matplotlib

plt.figure(figsize=(10, 6))
levels = np.array([2**i for i in range(-15, 5)])
with np.errstate(divide='ignore'):
    CS = plt.contourf(time, period, np.log2(power), len(levels))
im = plt.contourf(CS, levels=np.log2(levels))
plt.xlabel('time (year)')
plt.ylabel('period (year)')
plt.title('NINO3 SST Morlet wavelet filtered power spectrum')
plt.xlim(xlim[:])
plt.yscale('log', base=2, subs=None)
plt.ylim([np.min(period), np.max(period)])
ax = plt.gca().yaxis
ax.set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.ticklabel_format(axis='y', style='plain')
plt.gca().invert_yaxis()
plt.show()

plt.figure(figsize=(12, 4))
plt.plot(time, x)
plt.xlim(xlim[:])
plt.xlabel('time (year)')
plt.ylabel('SST (°C)')
plt.title('NINO3 SST inverse CWT filtered (2-8 year period)')
plt.show()

Filtering wheel accelerometry with CWT

aint = np.loadtxt('aint.dat')
tint = aint[:, 0]
axi = aint[:, 1]
ayi = aint[:, 2]
n = len(tint)
dt = 0.01
xlim = [32, 48]

plt.figure(figsize=(12, 8))
plt.plot(tint, axi, label='ax')
plt.plot(tint, ayi, label='ay')
plt.xlim(xlim[:])
plt.xlabel('time (s)')
plt.ylabel('m/s^2')
plt.title('wheel accelerations (Á. Vinkó)')
plt.legend()
plt.show()

pad = 1
dj = 0.125
s0 = 2 * dt
j1 = 10 / dj
mother = 'MORLET'
wave, period, scale, coi = wavelet(axi, dt, pad, dj, s0, j1, mother)

# Power spectrum (Compo)
power = (np.abs(wave)) ** 2

# Filtering by thresholding
pvar = np.std(power)**2  # signal variance
lvl = 0.15               # threshold (relative to signal variance)
thr = lvl * pvar
zmask = power <= thr     # set zero here
powerthr = np.copy(power)
powerthr[zmask] = 0.0
wavethr = np.copy(wave)
wavethr[zmask] = 0.0

# Reconstruction by inverse wavelet transform
axr = icwt(wavethr, scale, dt, dj, 'morlet')

Plot wavelet map of the tangential acceleration component:

plt.figure(figsize=(12, 8))
levels = np.array([2**i for i in range(-15, 5)])
CS = plt.contourf(tint, period, np.log2(power), len(levels))
im = plt.contourf(CS, levels=np.log2(levels))
plt.xlabel('time (s)')
plt.ylabel('period (s)')
plt.title('tangential acceleration, wavelet power spectrum')
plt.xlim(xlim[:])
plt.yscale('log', base=2, subs=None)
plt.ylim([np.min(period), np.max(period)])
ax = plt.gca().yaxis
ax.set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.ticklabel_format(axis='y', style='plain')
plt.gca().invert_yaxis()
plt.show()

Plot filtered signal and its wavelet map

plt.figure(figsize=(12, 8))
levels = np.array([2**i for i in range(-15, 5)])
with np.errstate(divide='ignore'):
    CS = plt.contourf(tint, period, np.log2(powerthr), len(levels))
im = plt.contourf(CS, levels=np.log2(levels))
plt.xlabel('time (s)')
plt.ylabel('period (s)')
plt.title('tangential acceleration, filtered power spectrum')
plt.xlim(xlim[:])
plt.yscale('log', base=2, subs=None)
plt.ylim([np.min(period), np.max(period)])
ax = plt.gca().yaxis
ax.set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.ticklabel_format(axis='y', style='plain')
plt.gca().invert_yaxis()
plt.show()

plt.figure(figsize=(12, 8))
plt.plot(tint, axr)
plt.xlim(xlim[:])
plt.xlabel('time (s)')
plt.ylabel('a_x (m/s^2)')
plt.title('tangential acceleration, inverse WT filtered')
plt.show()

Periodic acceleration transients at 33 s and 36 s are probably due to slipping of the driven wheel. This is confirmed by zooming in on these locations:

plt.figure(figsize=(12, 6))
plt.subplot(121)
plt.plot(tint, axr)
plt.xlim([33.2, 33.7])
plt.xlabel('time (s)')
plt.ylabel('a_x (m/s^2)')
plt.title('1st transient')
plt.subplot(122)
plt.plot(tint, axr)
plt.xlim([35.8, 36.3])
plt.xlabel('time (s)')
plt.ylabel('a_x (m/s^2)')
plt.title('2nd transient')
plt.show()

# 95% significance test against a red-noise background; the lag-1
# autocorrelation value used here is an assumed placeholder
variance = np.std(axi, ddof=1)**2
signif = wave_signif([variance], dt=dt, sigtest=0, scale=scale, lag1=0.72, mother=mother)
sig95 = signif[:, np.newaxis].dot(np.ones(n)[np.newaxis, :])
sig95 = power / sig95  # where the ratio exceeds 1, the power is significant

plt.figure(figsize=(12, 8))
levels = np.array([2**i for i in range(-15, 5)])
CS = plt.contourf(tint, period, np.log2(power), len(levels))
im = plt.contourf(CS, levels=np.log2(levels))
plt.xlabel('time (s)')
plt.ylabel('period (s)')
plt.title('tangential acceleration, wavelet power spectrum')
plt.xlim(xlim[:])
plt.yscale('log', base=2, subs=None)
plt.ylim([np.min(period), np.max(period)])
ax = plt.gca().yaxis
ax.set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.ticklabel_format(axis='y', style='plain')
plt.gca().invert_yaxis()
# 95% significance level
plt.contour(tint, period, sig95, [-99, 1], colors='k')
plt.show()

Full code:

https://mbd.pub/o/bread/mbd-Z5yZlJpx

Time-series denoising based on wavelet analysis (Python, ipynb file)

Part of the code is shown below:

import os
import numpy as np
from pywt import cwt, wavedec, swt, WaveletPacket
from scipy.fft import rfft, irfft, rfftfreq
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

plt.rcParams.update({"font.size": 12, "font.weight": 'bold', "lines.linewidth": 1.5})

# Load the data
pds = np.loadtxt(r"Data\pds_sim.txt")[:832, :]
pds30 = np.loadtxt(r"Data\pds_sim30.txt")[:832]

# Number of sampling points and the sampling interval
N, deltaT = len(pds[:, 0]), (pds[1, 0] - pds[0, 0])

# Take the Fourier transform of the data
fft_pds, fft_pds30 = rfft(pds[:, 1]), rfft(pds30)
fq = rfftfreq(N, deltaT)

# Take the short-time Fourier transform of the data
seg = [0, 208, 416, 624]
stft_pds = np.empty((len(seg), len(fft_pds)), dtype=np.complex64)
stft_pds30 = np.empty((len(seg), len(fft_pds)), dtype=np.complex64)
for i in range(len(seg)):
    tempd1, tempd2 = np.zeros((N)), np.zeros((N))
    tempd1[:208], tempd2[:208] = pds[seg[i]:seg[i] + 208, 1], pds30[seg[i]:seg[i] + 208]
    stft_pds[i, :], stft_pds30[i, :] = rfft(tempd1), rfft(tempd2)

# Frequency filtering: copy the data
fftbp_pds, fftbp_pds30 = np.copy(fft_pds), np.copy(fft_pds30)
fftgp_pds, fftgp_pds30 = np.copy(fft_pds), np.copy(fft_pds30)

# Perform low/high frequency filtering
fftbp_pds30[(fq > (np.max(fq)) / 2)] = 0

# Generate a Gaussian bandpass function
gaussp = np.exp(-((fq - min(fq)) / (2 * 16))**2) + np.exp(-((fq - max(fq)) / (2 * 16))**2)
gaussf = gaussp  # only positive frequencies are present, so filter them only
fftgp_pds30 = fftgp_pds30 * gaussf

# Reconstruct the data using the inverse Fourier transform
databp_pds, databp_pds30 = irfft(fftbp_pds), irfft(fftbp_pds30)
datagp_pds, datagp_pds30 = irfft(fftgp_pds), irfft(fftgp_pds30)

# Take the FT of the denoised data
ftbp_pds30, ftgp_pds30 = rfft(databp_pds30), rfft(datagp_pds30)

# Time-domain plot of the data
plt.figure(figsize=(4.5, 3), dpi=100, layout='constrained')
plt.plot(pds[:, 0], pds30, '-r', label='Noisy')
plt.plot(pds[:, 0], pds[:, 1], '-b', label='Noise-free')
plt.legend(frameon=False, fontsize=10, loc='upper right')
plt.xlim([(pds[0, 0]), (pds[831, 0])])
plt.xlabel('Time ($\\mu s$)', fontsize=14, fontweight='bold')
plt.ylabel('Magnitude', fontsize=14, fontweight='bold')
# plt.savefig('pds.png', dpi=400, bbox_inches='tight', pad_inches=0.02)
plt.show()

# Fourier-transform plot
arprop, alp = dict(facecolor='black', shrink=0.005, width=2.5, alpha=0.7), 0.7  # arrow properties
plt.figure(figsize=(4.5, 3), dpi=100, layout='constrained')
plt.plot(fq, abs(fftgp_pds30), '-r', label='Noisy')
plt.plot(fq, abs(fftgp_pds), '-b', label='Noise-free')
plt.legend(frameon=False, fontsize=10, loc='upper right')
plt.xlim([fq[0], fq[len(fq) - 1]])
plt.ylim([-0.1, 9])
plt.xlabel('Frequency ($MHz$)', fontsize=14, fontweight='bold')
plt.ylabel('Magnitude', fontsize=14, fontweight='bold')
plt.annotate('Signal \n Distortion', xy=(3.5, 6), xytext=(3.5, 8), xycoords='data',
             arrowprops=arprop, alpha=alp, ha='left', fontsize=10)
plt.annotate('Unable to Remove all Noise', xy=(38, 3), xytext=(38, 6.0), xycoords='data',
             arrowprops=arprop, alpha=alp, ha='center', fontsize=10)
# plt.savefig('ftpdsgp.png', dpi=400, bbox_inches='tight', pad_inches=0.02)
plt.show()

# Short-time Fourier transform plot
arprop, alp = dict(facecolor='black', shrink=0.005, width=2.5, alpha=0.7), 0.7  # arrow properties
plt.figure(figsize=(4.5, 3), dpi=100, layout='constrained')
plt.plot(fq, abs(stft_pds30[3, :]), '-r', label='Noisy')
plt.plot(fq, abs(stft_pds[3, :]), '-b', label='Noise-free')
plt.legend(frameon=False, fontsize=10, loc='upper right')
plt.xlim([fq[0], fq[len(fq) - 1]])
plt.ylim([-0.01, 0.35])
plt.xlabel('Frequency ($MHz$)', fontsize=14, fontweight='bold')
plt.ylabel('Magnitude', fontsize=14, fontweight='bold')
plt.annotate('Noise Only', xy=(5, 0.26), xytext=(5, 0.33), xycoords='data',
             arrowprops=arprop, alpha=alp, ha='left', fontsize=10)
# plt.savefig('stftpds4.png', dpi=400, bbox_inches='tight', pad_inches=0.02)
plt.show()

# Continuous wavelet transform of the data
scale = np.arange(2, 45, 1)
cwt_pds, cfq_pds = cwt(data=pds[:, 1], scales=scale, wavelet='gaus3')
cwt_pds30, cfq_pds30 = cwt(data=pds30, scales=scale, wavelet='gaus3')

plt.figure(figsize=(4, 3), dpi=100, layout='constrained')
plt.contourf(pds[:416, 0], cfq_pds, abs(cwt_pds[:, :416])**0.5, cmap='hot')
plt.xlabel('Time ($\\mu s$)', fontsize=14, fontweight='bold')
plt.ylabel('Frequency', fontsize=14, fontweight='bold')
plt.show()

plt.figure(figsize=(4, 3), dpi=100, layout='constrained')
plt.contourf(pds[:416, 0], cfq_pds30, abs(cwt_pds30[:, :416])**0.5, cmap='hot')
plt.xlabel('Time ($\\mu s$)', fontsize=14, fontweight='bold')
plt.ylabel('Frequency', fontsize=14, fontweight='bold')
plt.show()

# Stationary wavelet transform
dl = 4
cf_db6, cf30_db6 = swt(data=pds[:, 1], wavelet='db6', level=dl), swt(data=pds30, wavelet='db6', level=dl)
cf_coif3, cf30_coif3 = swt(data=pds[:, 1], wavelet='coif3', level=dl), swt(data=pds30, wavelet='coif3', level=dl)
cf_sym10, cf30_sym10 = swt(data=pds[:, 1], wavelet='sym10', level=dl), swt(data=pds30, wavelet='sym10', level=dl)
cf_dmey, cf30_dmey = swt(data=pds[:, 1], wavelet='dmey', level=dl), swt(data=pds30, wavelet='dmey', level=dl)

dt, dt30 = cf_db6, cf30_db6
fig = plt.figure(figsize=(15, 4), dpi=100, layout='constrained')
gs = fig.add_gridspec(2, 4, hspace=0, wspace=0)
ax = gs.subplots(sharex=False, sharey=False)
for i in range(1, 5):
    # Row 0: approximation coefficients; row 1: detail coefficients
    ax[0, i-1].plot(dt30[-i][0], '-r', label='Noisy')
    ax[0, i-1].plot(dt[-i][0], '-b', label='Noise-free')
    ax[0, i-1].set_xlim(0, 832)
    ax[0, i-1].legend(frameon=False, fontsize=10, loc='upper right')
    ax[1, i-1].plot(dt30[-i][1], '-r', label='Noisy')
    ax[1, i-1].plot(dt[-i][1], '-b', label='Noise-free')
    ax[1, i-1].set_xlim(0, 832)
    ax[1, i-1].legend(frameon=False, fontsize=10, loc='upper right')
plt.show()

# Save wavelet coefficients
dt, dt30 = cf_coif3, cf30_coif3
for i in range(1, dl + 1):
    plt.figure(figsize=(4.5, 3), dpi=100, layout='constrained')
    plt.plot(dt30[-i][1], '-r', label='Noisy')
    plt.plot(dt[-i][1], '-b', label='Noise-free')
    plt.legend(frameon=False, loc='upper right')
    plt.xlim([0, len(pds[:, 1]) - 1])
    plt.xlabel('Index', fontsize=14, fontweight='bold')
    plt.ylabel('Magnitude', fontsize=14, fontweight='bold')
    plt.ylim([min(dt[-i][0]), max(dt[-i][0])])
    # plt.savefig('pdsdet_{i}.png'.format(i=i), dpi=400, bbox_inches='tight', pad_inches=0.02)
    plt.show()

# Wavelet packet decomposition
wp_db6, wp_coif3 = WaveletPacket(data=pds[:, 1], wavelet='db6', maxlevel=3), WaveletPacket(data=pds[:, 1], wavelet='coif3', maxlevel=3)
wp30_db6, wp30_coif3 = WaveletPacket(data=pds30, wavelet='db6', maxlevel=3), WaveletPacket(data=pds30, wavelet='coif3', maxlevel=3)

dt = wp30_coif3
fig = plt.figure(figsize=(17, 4), dpi=100, constrained_layout=True)
gs = fig.add_gridspec(3, 8, hspace=0, wspace=0)
ax1, ax2 = fig.add_subplot(gs[0, 0:4]), fig.add_subplot(gs[0, 4:])
ax3, ax4, ax5, ax6 = fig.add_subplot(gs[1, :2]), fig.add_subplot(gs[1, 2:4]), fig.add_subplot(gs[1, 4:6]), fig.add_subplot(gs[1, 6:])
for i in range(7, 15):
    exec(f'ax{i} = fig.add_subplot(gs[2, i-7])')
ax1.plot(dt['a'].data, '-b'), ax2.plot(dt['d'].data, '-r')
ax3.plot(dt['aa'].data, '-b'), ax4.plot(dt['ad'].data, '-r'), ax5.plot(dt['da'].data, '-b'), ax6.plot(dt['dd'].data, '-r')
ax7.plot(dt['aaa'].data, '-b'), ax8.plot(dt['aad'].data, '-r'), ax9.plot(dt['ada'].data, '-b'), ax10.plot(dt['add'].data, '-r')
ax11.plot(dt['daa'].data, '-b'), ax12.plot(dt['dad'].data, '-r'), ax13.plot(dt['dda'].data, '-b'), ax14.plot(dt['ddd'].data, '-r')
for i in range(1, 15):
    exec(f'ax{i}.set_xticks([])')
    exec(f'ax{i}.set_yticks([])')
plt.show()
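The excerpt stops at inspecting the coefficients; the denoising step it leads up to — thresholding the SWT detail coefficients and reconstructing — can be sketched with pywt as below. The universal threshold with a median-based noise estimate is an assumed choice, not necessarily the scheme in the full code:

# Denoising sketch: soft-threshold the SWT detail coefficients from cf30_db6
# above and reconstruct. The universal threshold is an assumed choice.
import pywt

sigma = np.median(np.abs(cf30_db6[-1][1])) / 0.6745  # noise level from the finest details
thr = sigma * np.sqrt(2 * np.log(len(pds30)))        # universal threshold
den = [(cA, pywt.threshold(cD, thr, mode='soft')) for cA, cD in cf30_db6]
pds30_denoised = pywt.iswt(den, wavelet='db6')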

Full code:

https://mbd.pub/o/bread/mbd-Z5yZkppv
