root 6 years ago
parent 65cc36ec20
commit acfd1d4075

@ -8,6 +8,7 @@
## 选项
```md
echo [SHORT-OPTION] ... [STRING]
echo LONG-OPTION
-n 不换行输出内容
@ -28,6 +29,7 @@ echo LONG-OPTION
* \0NNN 八进制值NNN(1-3位)的字节
* \xHH 十六进制值HH(1-2位)的字节
* -E 取消-e的效果(默认即为此行为)
```
## 实例
@ -36,6 +38,10 @@ echo "I live in `locale territory`" # 从locale数据库中展开信息
echo "$((0x2dec))" # 十六进制转十进制
for i in {0..255}; do echo -e "\e[38;05;${i}m${i}"; done | column -c 80 -s ' '; echo -e "\e[m" # 输出256中全部色彩
# md5加密字符串
echo -n xxx | md5sum # 方法一
echo -n xxx | openssl md5 # 方法二
# 获取8位随机字符串
echo $RANDOM | md5sum | cut -c 1-8 # 方法一
openssl rand -base64 4 # 方法二
@ -63,7 +69,7 @@ echo -e "\v\v\v" # 用-e选项echo会打印出转义字符
echo -e "\042" # 根据"引号字符的八进制ASCII码打印出字符
echo $'\042' # 版本2开始bash允许使用$'\nnn'结构,这里'\nnn'表示一个八进制的值
echo $'\x22' # 使用$'\xnnn'结构也可以使用十六进制来转义
# 当使用像$'\x'的结构时,-e的选项是多余的
echo "NEW LINE and BEEP"
echo $'\n' # 新行
@ -89,4 +95,4 @@ echo "\"escape\" echoes as $escape" # 不可见的输出
转义符也提供了写一个多行命令的手段。一般地,每个单独的行有一个不同的命令,而在一行末尾的转义符转义新行符,命令序列则由下
一行继续
comment
```
```

@ -36,4 +36,25 @@ g++ -g hello.cpp -o hello # 对C/C++程序的调试,需要在编译前就加
gdb hello # 调试可执行文件hello
gdb --args 程序或命令 # 在gdb下启动程序
# 常用操作如下(core dump生成需要在编译程序时加入-g参数)
gdb 进程文件 core dump文件
//This will switch the disassembly listing to intel format.
(gdb) set disassembly-flavor intel
//To view the stack trace and see where the program crashed.
(gdb) bt full
//To disassemble the instructions around the address where the crash happened.
(gdb) disas 0x<addr>
查看寄存器值
(gdb) i r
退出
(gdb) q
```

@ -7,7 +7,7 @@
## 选项
```markdown
-g<网关> 设置路由器跃程通信网关,最丢哦可设置8个
-g<网关> 设置路由器跃程通信网关,最多可设置8个
-G<指向器数目> 设置来源路由指向器,其数值为4的倍数
-h 在线帮助
-i<延迟秒数> 设置时间间隔,以便传送信息及扫描通信端口

@ -0,0 +1,76 @@
from elasticsearch import Elasticsearch
import json
# 如有报错Result window is too large, from + size must be less than or equal to: [10000]
# 执行以下修改【不再使用此方式防止内存溢出使用如下的scroll api处理】
# curl -XPUT "http://192.168.2.15:9200/props_change_log/_settings" -d '{ "index" : { "max_result_window" : 1000000 } }'
# 定义数据写入的文件路径
root_path = "D:/xxx.json"
def record_docs(root_path, record):
    """Append one serialized record to the file at `root_path` (UTF-8).

    NOTE(review): no separator is written between records, so consecutive
    JSON objects end up concatenated — downstream consumers must cope with
    that; preserved here to keep the output format unchanged.
    """
    with open(root_path, "a", encoding="utf-8") as file:
        file.write(record)
        # Removed the redundant explicit close(): the `with` block already
        # closes the file on exit.
# --- Connection / query configuration ---
host = "192.168.2.15:9200"
# index = "props_change_log"
index = "time_limited_props_log"
# Keep each scroll context alive for one minute between fetches.
scroll = "1m"
# Page size per search/scroll request.
size = 1000
body = {
    "query": {"match_all": {}},
}
es = Elasticsearch(hosts=host)
# es.indices.refresh(index="by_petskill_log")
# json.dumps serializes each hit's _source to a str before writing it out.
def process_hits(hits):
    """Append the `_source` payload of every hit to the output file as JSON."""
    for hit in hits:
        serialized = json.dumps(hit["_source"])
        record_docs(root_path, serialized)
# Verify the index exists before querying.
if not es.indices.exists(index=index):
    print("index" + index + "不存在")
    exit()
# Scroll-API search, used when the total hit count is >= 1000.
data_scroll = es.search(index=index, scroll=scroll, size=size, body=body)
# Plain search, used when the total hit count is below 1000.
data = es.search(index=index, size=size, body=body)
# Cursor id for subsequent scroll calls.
scroll_id = data_scroll["_scroll_id"]
# Total number of matching documents.
# NOTE(review): on Elasticsearch >= 7, hits.total is a dict
# ({"value": N, "relation": ...}), which would break the numeric
# comparisons below — confirm the server runs ES 6.x or earlier.
scroll_size = data["hits"]["total"]
print("匹配到文档总数为:" + str(scroll_size) + "\n")
if scroll_size > 0 and scroll_size < 1000:
    process_hits(data["hits"]["hits"])
elif scroll_size >= 1000:
    while scroll_size > 0:
        # Process the current page before fetching the next scroll batch.
        process_hits(data_scroll["hits"]["hits"])
        data_scroll = es.scroll(scroll_id=scroll_id, scroll=scroll)
        # Refresh the scroll id returned by the server.
        scroll_id = data_scroll["_scroll_id"]
        # The loop ends when a scroll page comes back empty.
        scroll_size = len(data_scroll["hits"]["hits"])

@ -1,23 +1,21 @@
# 整合官方和常用示例
# https://api.mongodb.com/python/current/py-modindex.html
from pymongo import MongoClient
from pymongo import InsertOne, DeleteMany, ReplaceOne, UpdateOne
from bson.objectid import ObjectId
from bson.son import SON
from bson import json_util, CodecOptions
import datetime
import multiprocessing
import ssl
import urllib.parse
from pprint import pprint
import gridfs
import pymongo
from bson.code import Code
import urllib.parse
import ssl
from pymongo import errors
from pymongo import WriteConcern
import pytz
import gridfs
import multiprocessing
from bson import CodecOptions, json_util
from bson.code import Code
from bson.objectid import ObjectId
from bson.son import SON
from pymongo import (DeleteMany, InsertOne, MongoClient, ReplaceOne, UpdateOne,
WriteConcern, errors)
# Connect to the MongoDB instance used by the examples below.
client = MongoClient(host="192.168.2.15", port=27017)

@ -0,0 +1,22 @@
import requests
from datetime import datetime, date, timedelta
# 20161228
def gen_dates(init_date, days):
    """Yield `days` consecutive dates, starting at `init_date` inclusive."""
    for offset in range(days):
        yield init_date + timedelta(days=offset)
# Probe date-stamped URLs and print the ones that serve the PDF.
# (The original comment here was mojibake; intent reconstructed from the
# code — it builds one URL per day and reports HTTP 200 responses.)
init_date = date(2016, 12, 28)
for url_date in gen_dates(init_date, 1000):
    # Format the date as YYYYMMDD, the path segment used by the site.
    url_date2 = datetime.strftime(url_date, '%Y%m%d')
    url1 = "http://www.xxx.com/"
    url2 = "/Zabbix_3_training_day.pdf"
    down_url = url1 + url_date2 + url2
    request = requests.get(down_url)
    # Only print URLs that actually exist.
    if request.status_code == 200:
        print(down_url)

@ -0,0 +1,37 @@
import os
import paramiko
import subprocess
from shutil import copy2
# Local directory tree to upload (placeholder path).
xxxpath="xxx/xxx"
# The private key must be in a format paramiko accepts; convert with:
#   ssh-keygen -p -m PEM -f ~/.ssh/copyid_rsa
key_path = 'xxx/.ssh/copyid_rsa'
# Upload helper below connects passwordless via the public/private key pair.
def upload(root_path, key_private_path):
    """Mirror the local tree under `root_path` to the remote host over SFTP.

    Connects with a private key (no password), recreates the directory
    structure remotely with `mkdir -p` (paramiko's SFTP mkdir cannot create
    nested directories), then uploads every file.

    Fixes vs. the original: the `root_path` parameter is now actually used
    (the old code walked the global `xxxpath` instead), the pointless
    `''.join([...])` wrapper is gone, and the channels are closed in the
    right order (SFTP before SSH) inside a `finally` block.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname='服务器ip', port='端口',
                username='用户', key_filename=key_private_path, allow_agent=True)
    sftp = ssh.open_sftp()
    try:
        # First pass: create every remote directory up front.
        for root, dirs, files in os.walk(root_path):
            root_linux = root.replace('\\', '/')
            # NOTE(review): the [29:] slice strips a machine-specific local
            # prefix — confirm it matches the actual length of root_path.
            remote_path = os.path.join('xxx/path', root_linux[29:])
            ssh.exec_command('mkdir -p ' + remote_path)
        # Second pass: upload the files into the directories created above.
        for root, dirs, files in os.walk(root_path):
            root_linux = root.replace('\\', '/')
            remote_path = os.path.join('xxx/path/', root_linux[29:])
            for file in files:
                upload_file = os.path.join(root_linux, file).replace('\\', '/')
                print(u'Put files...' + upload_file)
                sftp.put(upload_file,
                         os.path.join(remote_path, file).replace('\\', '/'))
    finally:
        # Close the SFTP channel before the transport that carries it.
        sftp.close()
        ssh.close()
# Entry point: mirror the configured local tree to the remote host.
upload(xxxpath, key_path)

@ -0,0 +1,95 @@
# -*- coding: UTF-8 -*-
import os
import sys
import click
import tinify
"""
根据https://github.com/GcsSloop/TinyPng修改为python3版本并修改部分逻辑
shell版https://github.com/ameer1234567890/tinifier
golang版https://github.com/gwpp/tinify-go
"""
tinify.key = 'xxx'  # TinyPNG API key (placeholder — supply a real key)
version = "1.5.1"  # tool version string printed at startup
# Core compression step: send one image through the TinyPNG API.
def compress_core(inputFile, outputFile, img_width):
    """Compress `inputFile` to `outputFile`, optionally scaling to `img_width`.

    img_width == -1 (the callers' default) means "keep the original size".
    """
    source = tinify.from_file(inputFile)
    # Fixed: `is not -1` compared object identity with an int literal
    # (SyntaxWarning on Python 3.8+, CPython-cache dependent); value
    # comparison is the correct test.
    if img_width != -1:
        resized = source.resize(method="scale", width=img_width)
        resized.to_file(outputFile)
    else:
        source.to_file(outputFile)
# Compress every image directly inside a directory (output to <dir>/tiny).
def compress_path(path, width):
    """Compress .png/.jpg/.jpeg files in `path`, writing results to `path`/tiny.

    Files smaller than 100 KB are skipped — the original "< 100" check was
    clearly meant to do this, but its control flow compressed them anyway
    and could crash when the output directory had not been created yet.
    Only the top level of `path` is scanned, not subdirectories.
    """
    print("compress_path----------")
    if not os.path.isdir(path):
        print("这不是一个文件夹,请输入文件夹的正确路径!")
        return
    fromFilePath = path  # source directory
    toFilePath = path + "/tiny"  # output directory
    print("fromFilePath=%s" % fromFilePath)
    print("toFilePath=%s" % toFilePath)
    for root, dirs, files in os.walk(fromFilePath):
        print("root = %s" % root)
        print("dirs = %s" % dirs)
        print("files= %s" % files)
        for name in files:
            fileName, fileSuffix = os.path.splitext(name)
            if fileSuffix in ('.png', '.jpg', '.jpeg'):
                src_image_path = root + '/' + name
                src_image_size = float(
                    os.path.getsize(src_image_path)) / 1024
                # Skip small files: compressing them gains little.
                if src_image_size < 100:
                    continue
                # Create the output directory on demand; makedirs with
                # exist_ok avoids the old mkdir/exists race and the crash
                # when the directory was missing.
                os.makedirs(toFilePath, exist_ok=True)
                compress_core(src_image_path, toFilePath + '/' + name, width)
        break  # only walk the top-level directory
# Compress a single image file (output alongside it, prefixed "tiny_").
def compress_file(inputFile, width):
    """Compress one .png/.jpg/.jpeg file into <dir>/tiny_<name>."""
    print("compress_file---------")
    if not os.path.isfile(inputFile):
        print("这不是一个文件,请输入文件的正确路径!")
        return
    print("file = %s" % inputFile)
    dirname = os.path.dirname(inputFile)
    basename = os.path.basename(inputFile)
    fileName, fileSuffix = os.path.splitext(basename)
    # Guard clause: bail out on unsupported extensions.
    if fileSuffix not in ('.png', '.jpg', '.jpeg'):
        print("不支持该文件类型!")
        return
    compress_core(inputFile, dirname + "/tiny_" + basename, width)
@click.command()
@click.option('-f', "--file", type=str, default=None, help="单个文件压缩")
@click.option('-d', "--dir", type=str, default=None, help="被压缩的文件夹")
@click.option('-w', "--width", type=int, default=-1, help="图片宽度,默认不变")
def run(file, dir, width):
    """CLI entry point: dispatch to single-file, directory, or CWD compression.

    NOTE(review): `file` and `dir` shadow builtins, but renaming them would
    change the click option-to-parameter mapping, so they stay as-is.
    """
    print("GcsSloop TinyPng V%s" % (version))
    if file is not None:
        compress_file(file, width)  # compress only the given file
        pass
    elif dir is not None:
        compress_path(dir, width)  # compress files under the given directory
        pass
    else:
        compress_path(os.getcwd(), width)  # compress files in the current directory
    print("压缩完成!")
if __name__ == "__main__":
    run()

@ -7,7 +7,7 @@ host = 'https://jisuwnl.market.alicloudapi.com'
path = '/calendar/query'
method = 'GET'
# SECURITY(review): hard-coded API credential committed to source — rotate
# this appcode and load it from the environment or a config file instead.
appcode = '32394ce559ff4551936f79a7ea8237f0'
querys = 'date=2019-08-28'
# The later assignment wins: the request actually sent uses this date.
querys = 'date=2020-03-12'
bodys = {}
# url = host + path
url = host + path + '?' + querys
@ -26,13 +26,12 @@ print(content_dict['result'])
"""
输出格式
{'status': 0, 'msg': 'ok',
'result': {'2018-12-30': {'name': '元旦', 'content': '12月30日至1月1日放假共三天与周末连休。'},
'2019-02-04': {'name': '春节', 'content': '2月04日至2月10日放假调休共7天。2月2日周六、2月3日周日上班。'},
'2019-04-05': {'name': '清明节', 'content': '4月5日至7日放假调休共3天与周末连休。'},
'2019-05-01': {'name': '劳动节', 'content': '无调休共1天。'},
'2019-06-07': {'name': '端午节', 'content': '6月07日至09日放假共3天与周末连休。'},
'2019-09-13': {'name': '中秋节', 'content': '9月13日至15日放假共3天与周末连休。'},
{'status': 0, 'msg': 'ok',
'result': {'2018-12-30': {'name': '元旦', 'content': '12月30日至1月1日放假共三天与周末连休。'},
'2019-02-04': {'name': '春节', 'content': '2月04日至2月10日放假调休共7天。2月2日周六、2月3日周日上班。'},
'2019-04-05': {'name': '清明节', 'content': '4月5日至7日放假调休共3天与周末连休。'},
'2019-05-01': {'name': '劳动节', 'content': '无调休共1天。'},
'2019-06-07': {'name': '端午节', 'content': '6月07日至09日放假共3天与周末连休。'},
'2019-09-13': {'name': '中秋节', 'content': '9月13日至15日放假共3天与周末连休。'},
'2019-10-01': {'name': '国庆节', 'content': '10月1日至7日放假调休共7天。9月29日周日、10月12日周六上班。'}}}
"""

@ -13,7 +13,7 @@ def get_mfw_news(url):
# 定义http head伪装成curl浏览器获取IP数据
headers = {'User-Agent': "curl/10.0",
"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"}
"Accept": "text/json"}
request = requests.get(url, headers=headers)
response = eval(request.text)
print(response)

Loading…
Cancel
Save