Title: Python Learning Notes
Author: 专注的一批    Time: 2019-12-11 15:45
# Python 3
python -m http.server [<portNo>]
# Python 2
python -m SimpleHTTPServer [<portNo>]
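If <portNo> is omitted, both servers default to port 8000 and serve files from the current working directory.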
def fun(a, *args, **kwargs):
    print("a = " + str(a))
    for i in args:
        print('===================')
        print(i)
        print('===================')
    for key, val in kwargs.items():
        print(["key : " + str(key), " val : " + str(val)])

if __name__ == "__main__":
    fun(1, 'a', 'b', 'c', *('t', 2, 3), **{'c': 1, 'b': 2}, s=5, u=6)
# output (the '===' separator lines are omitted here for brevity)
a = 1
a
b
c
t
2
3
['key : c', ' val : 1']
['key : b', ' val : 2']
['key : s', ' val : 5']
['key : u', ' val : 6']
# pip install 2to3   (a 2to3 script also ships with CPython itself)
2to3 -w example.py   # convert Python 2 code to Python 3 in place
# pip install autopep8
autopep8.exe --in-place --aggressive --aggressive test.py   # reformat to PEP 8 (drop the .exe suffix on Linux/macOS)
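For instance, 2to3 rewrites Python 2 idioms in place: the statement form print "hello" becomes the call print("hello"), and renamed modules such as SimpleHTTPServer are mapped to http.server. With -w the changes are written back, and the original file is kept as example.py.bak unless -n is passed.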
import asyncio
import time
import concurrent.futures as cf

import requests
from bs4 import BeautifulSoup

def get_title(i):
    url = 'https://movie.douban.com/top250?start={}&filter='.format(i * 25)
    headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1.6) ",
               "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
               "Accept-Language": "en-us",
               "Connection": "keep-alive",
               "Accept-Charset": "GB2312,utf-8;q=0.7,*;q=0.7"}
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.content, 'html.parser')  # name a parser explicitly
    lis = soup.find('ol', class_='grid_view').find_all('li')
    for li in lis:
        title = li.find("span", class_="title").text
        print(title)
async def title():
    # Run the blocking scraper in a thread pool; await all ten pages at once.
    with cf.ThreadPoolExecutor(max_workers=10) as executor:
        loop = asyncio.get_event_loop()
        futures = (loop.run_in_executor(executor, get_title, i) for i in range(10))
        for result in await asyncio.gather(*futures):
            pass
def myfunc(i):
    print("start {}th".format(i))
    time.sleep(1)
    print("finish {}th".format(i))

async def main():
    with cf.ThreadPoolExecutor(max_workers=10) as executor:
        loop = asyncio.get_event_loop()
        futures = (loop.run_in_executor(executor, myfunc, i) for i in range(10))
        for result in await asyncio.gather(*futures):
            pass
if __name__ == "__main__":
    time1 = time.time()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(title())
    # Serial version below, for a speed comparison:
    # for i in range(10):
    #     get_title(i)
    print("Elapsed: " + str(time.time() - time1) + "s")
# Ascii85 encode/decode
import base64
s = "Hello World!"
b = s.encode("UTF-8")
e = base64.a85encode(b)
s1 = e.decode("UTF-8")
print("ASCII85 Encoded:", s1)
b1 = s1.encode("UTF-8")
d = base64.a85decode(b1)
s2 = d.decode("UTF-8")
print(s2)
# Base64 encode/decode
import base64
s = "Hello World!"
b = s.encode("UTF-8")
e = base64.b64encode(b)
s1 = e.decode("UTF-8")
print("Base64 Encoded:", s1)
d = base64.b64decode(s1.encode("UTF-8"))
print(d.decode("UTF-8"))
# Base85 encode/decode
import base64
# Creating a string
s = "Hello World!"
# Encoding the string into bytes
b = s.encode("UTF-8")
# Base85 Encode the bytes
e = base64.b85encode(b)
# Decoding the Base85 bytes to string
s1 = e.decode("UTF-8")
# Printing Base85 encoded string
print("Base85 Encoded:", s1)
# Encoding the Base85 encoded string into bytes
b1 = s1.encode("UTF-8")
# Decoding the Base85 bytes
d = base64.b85decode(b1)
# Decoding the bytes to string
s2 = d.decode("UTF-8")
print(s2)
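Note that a85encode and b85encode use different 85-character alphabets (Ascii85 as used by Adobe products vs. the alphabet used by git-style binary diffs), so their outputs are not interchangeable even though both carry less overhead than Base64.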
import configparser

config = configparser.ConfigParser()
# config['settings'] = {'email': "2561908792@qq.com", 'phone': '15827993562'}
# with open('config.txt', 'w') as configfile:
#     config.write(configfile)

if __name__ == "__main__":
    config.read("config.txt")
    for key, val in config['settings'].items():
        print("key : " + key + " val : " + val)
    # for key, val in config['host'].items():
    #     print("key : " + key + " val : " + val)
Create an empty deque (deque lives in the collections module):
from collections import deque
dl = deque()  # deque([]) creating empty deque
Create a deque from some elements:
dl = deque([1, 2, 3, 4])  # deque([1, 2, 3, 4])
Append an element on the right:
dl.append(5)  # deque([1, 2, 3, 4, 5])
Append an element on the left:
dl.appendleft(0)  # deque([0, 1, 2, 3, 4, 5])
Extend the deque with a list of elements:
dl.extend([6, 7])  # deque([0, 1, 2, 3, 4, 5, 6, 7])
Extend from the left (each element is prepended in turn, so their order reverses):
dl.extendleft([-2, -1])  # deque([-1, -2, 0, 1, 2, 3, 4, 5, 6, 7])
.pop() removes and returns an item from the right:
dl.pop()  # 7 => deque([-1, -2, 0, 1, 2, 3, 4, 5, 6])
.popleft() removes and returns an item from the left:
dl.popleft()  # -1 => deque([-2, 0, 1, 2, 3, 4, 5, 6])
Remove an element by value (first occurrence):
dl.remove(1)  # deque([-2, 0, 2, 3, 4, 5, 6])
Reverse the order of elements in place:
dl.reverse()  # deque([6, 5, 4, 3, 2, 0, -2])
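A related deque feature: constructing with maxlen gives a fixed-size buffer that silently discards from the opposite end when full, handy for keeping only the last N items:

from collections import deque

last3 = deque(maxlen=3)
for i in range(5):
    last3.append(i)  # once full, the oldest item falls off the left
print(last3)         # deque([2, 3, 4], maxlen=3)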
# dis: disassemble bytecode (a Python 2 session; PRINT_ITEM/PRINT_NEWLINE are Python 2-only opcodes)
>>> import dis
>>> def hello():
...     print "Hello, World"
...
>>> dis.dis(hello)
  2           0 LOAD_CONST               1 ('Hello, World')
              3 PRINT_ITEM
              4 PRINT_NEWLINE
              5 LOAD_CONST               0 (None)
              8 RETURN_VALUE
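Under Python 3, print is a function, so the same code disassembles differently. The output below is from CPython 3.8; exact opcodes and offsets vary across versions:

>>> import dis
>>> def hello():
...     print("Hello, World")
...
>>> dis.dis(hello)
  2           0 LOAD_GLOBAL              0 (print)
              2 LOAD_CONST               1 ('Hello, World')
              4 CALL_FUNCTION            1
              6 POP_TOP
              8 LOAD_CONST               0 (None)
             10 RETURN_VALUE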
# Generator expressions
a = (x * 2 for x in range(10))  # <generator object <genexpr> at 0x000001A3ACC7CF48>
next(a)                         # consumes the first value, 0
print([i for i in a])           # remaining values: [2, 4, 6, 8, 10, 12, 14, 16, 18]
b = [x * 2 for x in range(10)]  # a list comprehension builds the whole list up front
# Generators
def fib(n):
    prev, curr = 0, 1
    while n > 0:
        n -= 1
        yield curr
        prev, curr = curr, curr + prev

print(list(fib(10)))  # [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
# Lambda functions
s = lambda x: x * x
s(2)  # 4
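Lambdas shine as inline key functions; for example, sorting a list of pairs by their second field:

pairs = [(1, 'b'), (3, 'a'), (2, 'c')]
print(sorted(pairs, key=lambda p: p[1]))  # [(3, 'a'), (1, 'b'), (2, 'c')]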
# Formatted output
a = "this {} a new {}".format("is", "start")
b = "this %s a new %s" % ("is", "start")
import time
import requests
from bs4 import BeautifulSoup
def timer(info):
    def decorator(func):
        def wrapper(*args, **kwargs):
            start = time.time()
            result = func(*args, **kwargs)
            elapsed = time.time() - start
            if info == "m":
                print(elapsed / 60)  # report in minutes
            if info == "s":
                print(elapsed)       # report in seconds
            return result            # pass the wrapped function's result through
        return wrapper
    return decorator
@timer('s')
def get_title(s):
    for i in range(s):
        url = 'https://movie.douban.com/top250?start={}&filter='.format(i * 25)
        headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1.6) ",
                   "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                   "Accept-Language": "en-us",
                   "Connection": "keep-alive",
                   "Accept-Charset": "GB2312,utf-8;q=0.7,*;q=0.7"}
        r = requests.get(url, headers=headers)
        soup = BeautifulSoup(r.content, 'html.parser')
        lis = soup.find('ol', class_='grid_view').find_all('li')
        for li in lis:
            title = li.find("span", class_="title").text
            print(title)
class Timer:
    def __init__(self, func):
        self._func = func
    def __call__(self, *args, **kwargs):
        start = time.time()
        result = self._func(*args, **kwargs)
        end = time.time()
        print("time : " + str(end - start))
        return result
@Timer
def get_title1(s):
    for i in range(s):
        url = 'https://movie.douban.com/top250?start={}&filter='.format(i * 25)
        headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1.6) ",
                   "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                   "Accept-Language": "en-us",
                   "Connection": "keep-alive",
                   "Accept-Charset": "GB2312,utf-8;q=0.7,*;q=0.7"}
        r = requests.get(url, headers=headers)
        soup = BeautifulSoup(r.content, 'html.parser')
        lis = soup.find('ol', class_='grid_view').find_all('li')
        for li in lis:
            title = li.find("span", class_="title").text
            print(title)
if __name__ == "__main__":
    get_title1(10)
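One refinement to the function-based timer decorator above: without functools.wraps, the decorated function's __name__ and docstring are replaced by the wrapper's. A minimal sketch of the seconds-only case (timer_s is just an illustrative name):

import functools
import time

def timer_s(func):
    @functools.wraps(func)  # preserve func.__name__, __doc__, etc.
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        print(time.time() - start)
        return result
    return wrapper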