A quick post linking to a Jupyter notebook that demonstrates how to download a whole dataset. Here’s the code. ThingSpeak returns at most 8000 records per request, so the script hops 7999 entries at a time and downloads all the records whose timestamps fall between the two ends of each step.
"""Download an entire ThingSpeak channel by stepping 7999 entries at a time.

The feeds.json endpoint returns at most 8000 records per request, so we look
up the timestamp of every 7999th entry and fetch the records that fall
between consecutive timestamps.
"""
import json
from datetime import datetime, timedelta

import requests

API_URL = 'http://thingspeak.com/channels/241694'
TIME_FMT = '%Y-%m-%dT%H:%M:%SZ'
STEP = 7999  # download is capped at 8000 records, not 8000 fields

next_id = 1
all_data = []
end_time = None
while True:
    print(next_id)
    # GET, not POST: these endpoints only read data.
    result = json.loads(
        requests.get(API_URL + '/feeds/entry/%d.json' % next_id).content)
    start_time = end_time
    # ThingSpeak answers -1 (a JSON number) when the entry id is past the end
    # of the channel.  The original compared against the STRING '-1', which
    # json.loads never returns, so the loop could not terminate cleanly.
    past_end = result == -1 or result == '-1'
    if past_end:
        end_time = datetime.now()
    else:
        end_time = datetime.strptime(result['created_at'], TIME_FMT)
    if next_id == 1:
        # First probe only establishes the starting timestamp; nothing to
        # download yet.
        start_time = end_time
    else:
        start = datetime.strftime(start_time, TIME_FMT)
        # Subtract one second so this window does not re-download the entry
        # at end_time — it becomes the first entry of the next window.
        end = datetime.strftime(end_time - timedelta(seconds=1), TIME_FMT)
        data = json.loads(requests.get(
            API_URL + '/feeds.json?start=%s&end=%s' % (start, end)).content)
        print(next_id, len(data['feeds']))
        all_data.extend(data['feeds'])
    if past_end:
        # Final (partial) window has just been fetched; we're done.
        break
    next_id += STEP