廖雪峰Python学习笔记day12

骑猪看日落 2022-12-26 10:10 167阅读 0赞

学习笔记day11

  1. # python study day12
  2. # itertools 模块提供迭代功能处理函数,返回值是Iterator,用循环迭代计算输出
  3. # 1、count() 无限自然数序列迭代器
  4. # import itertools
  5. # ct = itertools.count(33)
  6. # for per in ct:
  7. # print(per) # 33 34 …
  8. # 2、cycle() 固定序列无限重复
  9. # import itertools
  10. # cc = itertools.cycle('abc')
  11. # for per in cc:
  12. # print(per) # a b c a b …
  13. # 3、repeat 元素无限重复,提供第二个参数限定重复次数
  14. # import itertools
  15. # re = itertools.repeat('ac', 3)
  16. # for per in re:
  17. # print(per) # ac ac ac
  18. # 4、chain() 串联迭代对象
  19. # import itertools
  20. # for per in itertools.chain('abc', 'xyz'):
  21. # print(per) # a b c x y z
  22. # 5、groupby() 将重复元素分组, 也可添加类似map中间处理函数
  23. # import itertools
  24. # for key, group in itertools.groupby('aAbBBcc', lambda c: c.lower()):
  25. # print(key, list(group))
  26. # # a ['a', 'A']
  27. # # b ['b', 'B', 'B']
  28. # # c ['c', 'c']
  29. # ' 计算pi的值 '
  30. # # step 1: 创建一个奇数序列: 1, 3, 5, 7, 9, ...
  31. # # step 2: 取该序列的前N项: 1, 3, 5, 7, 9, ..., 2*N-1.
  32. # # step 3: 添加正负符号并用4除: 4/1, -4/3, 4/5, -4/7, 4/9, ...
  33. # # step 4: 求和:
  34. # import itertools
  35. # # from functools import reduce
  36. # cc = itertools.count(1, 2)
  37. # List = []
  38. # for c in cc:
  39. # l = len(List)
  40. # if l > 1001:
  41. # break
  42. # List.append((4 * pow(-1, l) / c))
  43. # # result = reduce(lambda x, y: x+y, List)
  44. # result = sum(List)
  45. # print(result)
  46. # with open('/path/file', 'r') as f:… 语句,
  47. # 需要实现上下文__enter__和__exit__方法才能使用
  48. # class Query(object):
  49. # def __init__(self, name):
  50. # self.name = name
  51. # def __enter__(self):
  52. # print('Begin')
  53. # return self
  54. # def __exit__(self, exc_type, exc_value, traceback):
  55. # if exc_type:
  56. # print('error')
  57. # else:
  58. # print('end')
  59. # def query(self):
  60. # print('query info about %s ' % self.name)
  61. # with Query('Alice') as q:
  62. # q.query()
  63. # # Begin
  64. # # query info about Alice
  65. # # end
  66. # contextlib标准库提供了更简便的写法 @contextmanager
  67. # from contextlib import contextmanager
  68. # class Query(object):
  69. # def __init__(self, name):
  70. # self.name = name
  71. # def query(self):
  72. # print('query info about %s ' % self.name)
  73. # @contextmanager
  74. # def create_query(name):
  75. # print('begin')
  76. # q = Query(name)
  77. # yield q # yield上半部分是__enter__,下半部分是__exit__
  78. # print('end')
  79. # from contextlib import contextmanager
  80. # @contextmanager
  81. # def tag(name):
  82. # print('<%s>' % name)
  83. # yield
  84. # print('</%s>' % name)
  85. # with tag('h1'):
  86. # print('\thello')
  87. # print('\tworld')
  88. # # <h1>
  89. # # hello
  90. # # world
  91. # # </h1>
  92. # @closing 可以使用closing()把对象变成上下文对象
  93. # from contextlib import closing
  94. # from urllib.request import urlopen
  95. # with closing(urlopen('https://www.python.org')) as page:
  96. # for line in page:
  97. # print(line)
  98. # urllib 提供各种请求URL操作,可以通过请求头伪装成浏览器发出请求
  99. # GET 请求
  100. # from urllib import request
  101. # with request.urlopen('http://localhost:8888/maven-ssm/newShow') as f:
  102. # data = f.read()
  103. # print('status:', f.status, f.reason)
  104. # for k, v in f.getheaders():
  105. # print('%s %s' % (k, v))
  106. # print('data:', data.decode('utf-8'))
  107. # status: 200
  108. # Content-Type application/json;charset=utf-8
  109. # Transfer-Encoding chunked
  110. # Date Fri, 04 Dec 2020 03:50:59 GMT
  111. # Connection close
  112. # data: "{\"code\":0,\"sta……\"}"
  113. # 在Request对象添加HTTP头,模拟iPhone6发送请求
  114. # from urllib import request
  115. # req = request.Request('http://www.douban.com/')
  116. # req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0'+
  117. # ' like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 '+
  118. # 'Mobile/10A5376e Safari/8536.25')
  119. # with request.urlopen(req) as f:
  120. # print('status:', f.status, f.reason)
  121. # for k, v in f.getheaders():
  122. # print('%s %s' % (k, v))
  123. # print('data:', f.read().decode('utf-8'))
  124. # # <meta charset="UTF-8">
  125. # # <title>豆瓣(手机版)</title>
  126. # # <meta name="google-site-verification" content="ok0wCgT……
  127. # Post 请求,把参数data以bytes形式传入
  128. # from urllib import request, parse
  129. # data = parse.urlencode([
  130. # ('username', 'email'),
  131. # ('password', 'passwd')
  132. # ])
  133. # req = request.Request('https://passport.weibo.cn/sso/login')
  134. # req.add_header('Origin', 'https://passport.weibo.cn')
  135. # req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 '+
  136. # 'like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 '+
  137. # 'Mobile/10A5376e Safari/8536.25')
  138. # req.add_header('Referer', 'https://passport.weibo.cn/signin/login?'+
  139. # 'entry=mweibo&res=wel&wm=3349&r=http%3A%2F%2Fm.weibo.cn%2F')
  140. # with request.urlopen(req, data=data.encode('utf-8')) as f:
  141. # print('Status:', f.status, f.reason)
  142. # for k, v in f.getheaders():
  143. # print('%s: %s' % (k, v))
  144. # print('Data:', f.read().decode('utf-8'))
  145. # Handler 利用ProxyHandler 通过Proxy 访问网站
  146. # proxies = {'http': 'http://www.example.com:3128/'}
  147. # proxy_handler = urllib.request.ProxyHandler(proxies)
  148. # proxy_auth_handler = urllib.request.ProxyBasicAuthHandler()
  149. # proxy_auth_handler.add_password('realm', 'host', 'username', 'password')
  150. # opener = urllib.request.build_opener(proxy_handler, proxy_auth_handler)
  151. # with opener.open('http://www.example.com/login.html') as f:
  152. # pass
  153. # from urllib import request
  154. # import json
  155. # def fetch_data(url):
  156. # req = request.Request(url)
  157. # with request.urlopen(req) as f:
  158. # data = f.read()
  159. # return json.loads(data.decode('utf-8'))
  160. # URL = 'http://www.httpbin.org/get'
  161. # data = fetch_data(URL)
  162. # print(data)
  163. # XML解析。DOM:将整个XML读入内存、SAX:流模式,边读边解析(推荐)
  164. # SAX通过 start_element、end_element、char_data 函数解析xml
  165. # from xml.parsers.expat import ParserCreate
  166. # class DefaultSaxHandler(object):
  167. # def start_element(self, name, attrs):
  168. # print('sax:start_element: %s, attrs: %s' % (name, str(attrs)))
  169. # def end_element(self, name):
  170. # print('sax:end_element: %s' % name)
  171. # def char_data(self, text):
  172. # print('sax:char_data: %s' % text)
  173. # xml = r'''<?xml version="1.0"?>
  174. # <ol>
  175. # <li><a href="/python">Python</a></li>
  176. # <li><a href="/ruby">Ruby</a></li>
  177. # </ol>
  178. # '''
  179. # handler = DefaultSaxHandler()
  180. # parser = ParserCreate()
  181. # parser.StartElementHandler = handler.start_element
  182. # parser.EndElementHandler = handler.end_element
  183. # parser.CharacterDataHandler = handler.char_data
  184. # parser.Parse(xml)
  185. # HTMLParser 可以将html页面中的文本、图像等解析出来
  186. # from html.parser import HTMLParser
  187. # from html.entities import name2codepoint
  188. # class MyHTMLParser(HTMLParser):
  189. # def handle_starttag(self, tag, attrs):
  190. # print('<%s>' % tag)
  191. # def handle_endtag(self, tag):
  192. # print('</%s>' % tag)
  193. # def handle_startendtag(self, tag, attrs):
  194. # print('<%s/>' % tag)
  195. # def handle_data(self, data):
  196. # print(data)
  197. # def handle_comment(self, data):
  198. # print('<!--', data, '-->')
  199. # def handle_entityref(self, name):
  200. # print('&%s;' % name)
  201. # def handle_charref(self, name):
  202. # print('&#%s;' % name)
  203. # parser = MyHTMLParser()
  204. # parser.feed('''<html>
  205. # <head></head>
  206. # <body>
  207. # <!-- test html parser -->
  208. # <p>Some <a href=\"#\">html</a> HTML tutorial...<br>END</p>
  209. # </body></html>''')

在这里插入图片描述
学习笔记day13

发表评论

表情:
评论列表 (有 0 条评论,167人围观)

还没有评论,来说两句吧...

相关阅读