Demo entry 6354102

py

   

Submitted by anonymous on Apr 05, 2017 at 09:25
Language: Python. Code size: 3.2 kB.

# This script is used to analyze the haze dataset of North of china.
import os
import numpy as np
import calendar
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from sklearn.linear_model import LinearRegression

os.chdir('I:\hazedata') # all of the data files live in this folder

# Read one monthly txt data file and compute the station-mean number of haze days.
def readtxt(filename,idx,num_site):
    """Read one monthly haze data file and return the mean number of
    haze days per selected station.

    Parameters
    ----------
    filename : str
        File named 'YYYYMM.txt'. Rows are station-days (each station
        contributes one row per day of the month, in station order);
        column 5 holds the haze indicator — non-zero means a haze day.
    idx : list
        Selected station indices, a list of 1-element sequences as
        produced by ``np.argwhere`` (hence the ``idx[i][0]`` access).
    num_site : int
        Number of stations in ``idx``.

    Returns
    -------
    float
        Total haze days across the selected stations divided by the
        station count.
    """
    data = np.loadtxt(filename)
    # The year and month are encoded in the file name itself.
    year = int(filename[0:4])
    month = int(filename[4:6])
    weekday, days = calendar.monthrange(year, month)  # number of days in this month
    # Count every station-day whose haze indicator (column 5) is non-zero.
    s = 0
    for i in range(0, num_site):
        a = idx[i][0]  # station row index (idx entries are 1-element arrays)
        for j in range(0, days):
            if data[a * days + j, 5] != 0:
                s = s + 1
    # BUG FIX: the original 's/num_site' truncates under Python 2
    # integer division; force true division so the fraction survives.
    haze_num = s / float(num_site)
    return haze_num
    
# Select the stations belonging to the Beijing-Tianjin-Hebei region.
# BUG FIX: the original bound the path to the name 'file', shadowing the
# Python 2 builtin 'file'; renamed to 'station_file'.
station_file = 'I:\hazedata\zhandian.txt'
site = np.loadtxt(station_file)
# Latitude (column 1) restricted to 36N-42N.
idx1 = list(np.argwhere((site[:, 1] > 36) & (site[:, 1] < 42)))
# NOTE(review): a longitude filter (column 2, 113E-120E) was planned as an
# intersection but is currently disabled:
# idx2 = list(np.argwhere((site[:, 2] > 113) & (site[:, 2] < 120)))
idx = list(idx1)  # station row indices: a list of 1-element arrays
num_site = len(idx)  # total number of selected stations
# Build the list of monthly file names, then process them in batch with the
# helper above. Per year the winter files are 'YYYY01', 'YYYY02', 'YYYY12'.
year=['1982','1983','1984','1985','1986','1987','1988','1989','1990',
        '1991','1992','1993','1994','1995','1996','1997','1998','1999',
        '2000','2001','2002','2003','2004','2005','2006','2007','2008',
        '2009','2010','2011','2012','2013','2014']
month=['01','02','12']
m=len(year)
n=len(month)
# BUG FIX: the original pre-allocated with map(str, np.zeros(m*n)) and
# assigned by index — a non-subscriptable map object under Python 3.
# Build the list directly instead (same ordering: year-major, month-minor).
filename = [y + mo + '.txt' for y in year for mo in month]
# The records run from 198212 to 201402, so drop 198201, 198202 and 201412
# (equivalent to the original three pop() calls).
filename = filename[2:-1]
# One station-averaged haze-day figure per monthly file.
haze_day = np.zeros(len(filename))
for pos, fname in enumerate(filename):
    haze_day[pos] = readtxt(fname, idx, num_site)


# Aggregate the monthly values into one figure per winter (DJF) and express
# them as anomalies about the long-term mean.
n_winter = len(filename) // 3  # floor division: stays an int under Python 3
haze_month = np.zeros(n_winter)
for i in range(0, n_winter):
    # BUG FIX: the original slice i*3:i*3+2 dropped the third month of
    # every winter; i*3:i*3+3 sums all of Dec, Jan and Feb.
    haze_month[i] = np.sum(haze_day[i * 3:i * 3 + 3])
# BUG FIX: the original referenced the undefined name 'haze_month_ave'
# (NameError); subtract the mean from haze_month itself to get anomalies.
haze_month = haze_month - np.average(haze_month)

# Detrended data analysis: remove the linear trend fitted by OLS.
model = LinearRegression()
t = np.arange(1982, 2014, 1)  # one entry per winter, 1982..2013
t = np.reshape(t, (len(t), 1))
haze_month = np.reshape(haze_month, (len(haze_month), 1))
model.fit(t, haze_month)
trend = model.predict(t)
haze_detrended = haze_month - trend  # vectorized; replaces the per-element list comp
np.savetxt("haze_day.txt", haze_detrended, fmt='%4.1f')  # save the txt file
print(haze_month)
# Figure: raw winter anomalies (blue) vs their detrended counterpart (red).
plt.figure()
plt.plot(t, np.ones(len(t)), 'k', linewidth=0.5)  # thin reference line at y = 1
detrended_flat = haze_detrended.squeeze()
plt.plot(t, detrended_flat, 'r*')
plt.plot(t, detrended_flat, 'r--', linewidth=2)
plt.plot(t, haze_month, 'b*')
plt.plot(t, haze_month, 'b', linewidth=2)
plt.xlabel('Year', fontsize=14)
plt.ylabel("Days", fontsize=14)
plt.xlim(1982, 2013)
plt.ylim(-10, 12)
plt.title('Winter monthly anomalously average haze days in the North China', fontsize=10)
# Proxy artists so the legend shows exactly one entry per series.
series_raw = mlines.Line2D([], [], linestyle='-', color='blue', markersize=2, label='Haze days(DJF)')
series_detrended = mlines.Line2D([], [], linestyle='--', color='red', markersize=2, label='Detrended haze days(DJF)')
plt.legend(handles=[series_raw, series_detrended], loc='upper left')
plt.show()

This snippet took 0.01 seconds to highlight.

Back to the Entry List or Home.

Delete this entry (admin only).