start = time.monotonic()
cur.execute("select name, count(name) from student group by name")
# Optional CSV export of the aggregated rows (left disabled in the post):
# with open("res.csv", "w", newline="") as f:
#     writer = csv.writer(f)
#     for row in cur.fetchall():
#         writer.writerow(row)
print(f"耗时:{time.monotonic() - start} 秒")
耗时:6.037000000011176 秒
建索引后 耗时 : 0.0秒 但是 数据库由原来的 324MB 扩大到 459MB
建索引代码:
[Python] 代码如下:
# Create a single-column index on student.name. Per the measurements quoted
# above, this cut the GROUP BY query from ~6 s to ~0 s, at the cost of the
# database file growing from 324 MB to 459 MB.
cur.execute("create index name_index on student (name)")
刚学习数据库,这个速度算快吗(Intel(R) Celeron(R) CPU G1840 @ 2.80GHz)!或者还有更快的统计方法?
测试代码:
[Python] 代码如下:
import sqlite3
import datetime
import random
import csv
import time
# Open (or create) the benchmark database and grab a cursor.
con = sqlite3.connect("big_table.db")
cur = con.cursor()
# Create the table (one-off; kept commented out after the first run).
# cur.execute("""create table student(
# number integer primary key autoincrement,
# name char(5) not null,
# local char(5),
# yuwen float,
# shuxue float,
# english float)""")
# 324  -- NOTE(review): presumably the resulting DB file size in MB (the post
# mentions 324 MB before indexing) — confirm.
# Character pools consumed by name() below: one uppercase letter + four digits.
x = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
x_n = "0123456789"
# Class names ("Grade 3, Class 1..5") used by the commented-out insert loop.
local = ["三年一班","三年二班","三年三班","三年四班","三年五班"]
def name():
    """Generate a random 5-character student name.

    Format: one uppercase ASCII letter followed by four decimal digits
    (e.g. "A1234"), matching the char(5) ``name`` column of the schema.

    Returns:
        str: the generated name.
    """
    # Self-contained pools (same characters as the module-level x / x_n)
    # so the generator does not depend on module globals.
    letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    digits = "0123456789"
    # random.choice / random.choices over a pool is the idiomatic (and
    # distribution-identical) equivalent of indexing with
    # random.randint(0, len(pool) - 1); join avoids repeated string +=.
    return random.choice(letters) + "".join(random.choices(digits, k=4))
# Bulk-insert 10,000,000 random rows (one-off seeding; commented out after use).
# for n in range(10000000):
# cur.execute("insert into student(name, local, yuwen, shuxue, english) values(?,?,?,?,?)",
# (name(),local[random.randint(0,4)],random.randint(0,100),random.randint(0,100),random.randint(0,100)))
# Build the index (one-off; the post reports the DB growing 324 MB -> 459 MB).
# cur.execute("create index name_index on student (name)")
# Time the per-name count aggregation.
begin = time.monotonic()
cur.execute("select name, count(name) from student group by name")
# NOTE(review): sqlite3's Cursor.execute only prepares/starts the statement;
# rows are produced as they are fetched. With every fetchall() below commented
# out, the timing may understate the full cost of the aggregation — confirm.
# for n in cur.fetchall():
# print(n)
# with open("res.csv", "w", newline="") as f:
# writer = csv.writer(f)
# for n in cur.fetchall():
# writer.writerow(n)
# Prints "耗时:<elapsed> 秒" (elapsed seconds).
print(f"耗时:{time.monotonic() - begin} 秒", )
# Commit (a no-op here: the timed statement is read-only) and close.
con.commit()
con.close()