The Cesium samples include a New York 3D Tiles dataset, and the official site provides a download link, but the files you get back look like garbage.
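The "garbage" is in fact a raw gzip stream: the server sends each .b3dm tile gzip-compressed, and nothing decompresses it when you save the response yourself. A minimal sketch of the check (the file name tile.b3dm is hypothetical):

import zlib

with open('tile.b3dm', 'rb') as f:  # hypothetical downloaded tile
    data = f.read()

# gzip streams start with the magic bytes 0x1f 0x8b
if data[:2] == b'\x1f\x8b':
    data = zlib.decompress(data, 16 + zlib.MAX_WBITS)

# a valid Batched 3D Model payload starts with the ASCII magic "b3dm"
print(data[:4])  # expect b'b3dm'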
So I looked into it and wrote a small crawler that downloads the tiles and decompresses them. Usage: install Python (the script targets 3.7) and run it directly. The code is as follows:
#coding=utf-8
from urllib import request
import os
import socket
import zlib

# Python 3.7
# global socket timeout so a stalled request cannot hang the crawl
socket.setdefaulttimeout(60)

def mkdir(path):
    """Create the directory if it does not exist yet."""
    # strip surrounding whitespace and any trailing backslash
    path = path.strip().rstrip("\\")
    if not os.path.exists(path):
        os.makedirs(path)
        print('path create success!')
        return True
    else:
        print('path already exist!')
        return False

# root directory that receives the downloaded tiles
mkpath = "F:\\python\\cesiumData\\"

# walk the z/x/y tile pyramid and fetch every tile
def getDataByUrl():
    str1 = "https://beta.cesium.com/api/assets/1461/"
    str2 = ".b3dm?access_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJqdGkiOiJiMTBjN2E3Mi03ZGZkLTRhYmItOWEzNC1iOTdjODEzMzM5MzgiLCJpZCI6NDQsImlhdCI6MTQ4NjQ4NDM0M30.B3C7Noey3ZPXcf7_FXBEYwirct23fsUecRnS12FltN8&v=1.0"
    for z in range(0, 8):
        for x in range(0, 2**z):
            # one directory per z\x tile column
            temppath = mkpath + str(z) + "\\" + str(x)
            mkdir(temppath)
            for y in range(0, 2**z):
                url = str(z) + '/' + str(x) + '/' + str(y)
                str3 = str1 + url + str2
                try:
                    req = request.Request(str3)
                    req.add_header('Host', 'beta.cesium.com')
                    req.add_header('Connection', 'keep-alive')
                    req.add_header('Origin', 'https://cesiumjs.org')
                    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36')
                    req.add_header('Accept', '*/*')
                    req.add_header('Referer', 'https://cesiumjs.org/NewYork/?view=-74.02572034279622%2C40.669539917125135%2C1364.6164107825127%2C21.27406391595569%2C-21.3627766554608%2C0.0706585523215407')
                    # advertise only gzip/deflate: the zlib call below cannot decode brotli ('br')
                    req.add_header('Accept-Encoding', 'gzip, deflate')
                    req.add_header('Accept-Language', 'zh-CN,zh;q=0.9')
                    with request.urlopen(req) as f:
                        data = f.read()
                        # the body is a gzip stream; 16+MAX_WBITS tells zlib to expect a gzip header
                        tile = zlib.decompress(data, 16 + zlib.MAX_WBITS)
                        # write into the directory created above, not the working directory
                        with open(temppath + "\\" + str(y) + '.b3dm', 'wb') as f2:
                            f2.write(tile)
                except Exception as e:
                    print(e)

getDataByUrl()
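After the script finishes, the tiles sit under F:\python\cesiumData\ in a z\x\y.b3dm layout. A quick sanity check, a sketch assuming that layout, walks the tree and confirms every file carries the b3dm magic:

import os

root = "F:\\python\\cesiumData\\"  # the directory the script writes to
bad = []
for dirpath, dirnames, filenames in os.walk(root):
    for name in filenames:
        full = os.path.join(dirpath, name)
        with open(full, 'rb') as f:
            # every Batched 3D Model file must begin with the four bytes b'b3dm'
            if f.read(4) != b'b3dm':
                bad.append(full)

print('suspect files:', bad if bad else 'none')

Note that the script only fetches the .b3dm payloads; to actually load the data in Cesium you also need the tileset.json describing the tile tree, which is presumably served by the same asset endpoint and can be fetched the same way.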