From cb1cd822a44d06c55b88ba11d0a8a19c4548e884 Mon Sep 17 00:00:00 2001 From: root Date: Sun, 29 Dec 2019 18:45:42 +0800 Subject: [PATCH] --- Linux_man_cn/find.md | 5 +- Linux_man_cn/grep.md | 4 +- Linux_man_cn/head.md | 6 +- Linux_man_cn/ls.md | 3 +- Linux_man_cn/lsof.md | 1 - Linux_man_cn/make.md | 27 +- Linux_man_cn/pr.md | 7 +- Linux_man_cn/restorecon.md | 12 +- Linux_man_cn/sed.md | 8 +- Linux_man_cn/split.md | 4 +- Linux_man_cn/stat.md | 42 +- Linux_man_cn/tar.md | 53 +- Linux_man_cn/umask.md | 3 - Linux_man_cn/wget.md | 4 +- Py3Scripts/AwesomeMath.ipynb | 1 - Py3Scripts/AwesomePython.ipynb | 1 + Py3Scripts/pymongo_example.py | 260 +++++ Py3Scripts/secret.py | 2 + Py3Scripts/tower.py | 1749 ++++++++++++++++++++++++++++++++ 19 files changed, 2070 insertions(+), 122 deletions(-) delete mode 100644 Py3Scripts/AwesomeMath.ipynb create mode 100644 Py3Scripts/AwesomePython.ipynb create mode 100644 Py3Scripts/pymongo_example.py create mode 100644 Py3Scripts/secret.py create mode 100644 Py3Scripts/tower.py diff --git a/Linux_man_cn/find.md b/Linux_man_cn/find.md index 2af4ffb..5acdfcb 100644 --- a/Linux_man_cn/find.md +++ b/Linux_man_cn/find.md @@ -131,9 +131,12 @@ find /usr/ -path "*local*" -print # 匹配文件路径或者文件,path后路 # -prune使用这一选项可以使find命令不在当前指定的目录中查找,如果同时使用-depth选项,那么-prune将被find命令忽略 find . -path "./sk" -prune -o -name "*.txt" -print # 查找当前目录或者子目录下所有.txt文件,但是跳过子目录sk -find /usr/sam \( -path /usr/sam/dir1 -o -path /usr/sam/file1 \) -prune -o -print # 排除多个目录,-path必须在-prune之前 find /usr/sam ! -path /usr/sam/dir1 # !也可以用来排除目录 +# 排除当前目录下A和B子目录,找出后缀为log的文件,-path必须在-prune之前 +find . -type f -name "*.log" -not -path "*/A*" -not -path "*/B*" +find . -type d \( -path ./A -o -path ./B \) -prune -o -print -type f -name "*.log" + # 当前目录及子目录下查找所有以.txt和.pdf结尾的文件 find . -name "*.txt" -o -name "*.pdf" -print find . 
\( -name "*.txt" -o -name "*.pdf" \) -print diff --git a/Linux_man_cn/grep.md b/Linux_man_cn/grep.md index 569680b..063b608 100644 --- a/Linux_man_cn/grep.md +++ b/Linux_man_cn/grep.md @@ -111,7 +111,7 @@ x\{m,\} # 重复字符x,至少m次,如:'o\{5,\}'匹配至少有5个o的行 x\{m,n\} # 重复字符x,至少m次,不多于n次,如:'o\{5,10\}'匹配5--10个o的行 \w # 匹配文字和数字字符,即[A-Za-z0-9],如:'G\w*p'匹配以G后跟零个或多个文字或数字字符,然后是p \W # \w的反置形式,匹配一个或多个非单词字符,如点号句号等 -\b # 单词锁定符,如: '\bgrep\b'只匹配grep。 +\b # 单词锁定符,如: '\bgrep\b'只匹配grep。 # POSIX字符类 @@ -196,4 +196,4 @@ grep -A 3 -i "example" demo_text seq 10 | grep "5" -B 3 # 显示匹配某个结果之前的3行,使用 -B 选项 seq 10 | grep "5" -C 3 # 显示匹配某个结果的前三行和后三行,使用 -C 选项 echo -e "a\nb\nc\na\nb\nc" | grep a -A 1 # 如果匹配结果有多个,会用“--”作为各匹配结果之间的分隔符 -``` \ No newline at end of file +``` diff --git a/Linux_man_cn/head.md b/Linux_man_cn/head.md index d6b7331..a5eb5e2 100644 --- a/Linux_man_cn/head.md +++ b/Linux_man_cn/head.md @@ -8,8 +8,8 @@ With no FILE, or when FILE is -, read standard input -c, --bytes=[-]K 打印每个文件的前K个字节;以'-'开头,打印每个文件中除最后K个字节以外的所有字节 -n, --lines=[-]K 打印前K行而不是前10行;带前缀'-'并打印每个文件中除最后K行外的所有行 --q, --quiet, --silent 不显示包含给定文件名的文件头 --v, --verbose 总是显示包含给定文件名的文件头 +-q, --quiet, --silent 不显示包含给定文件名的文件头 +-v, --verbose 总是显示包含给定文件名的文件头 K 后面可以跟乘号: b 512, kB 1000, K 1024, MB 1000*1000, M 1024*1024, @@ -21,4 +21,4 @@ GB 1000*1000*1000, G 1024*1024*1024, 对于T, P, E, Z, Y 同样适用 ```bash head -1 file # 显示file文件第一行 head -n1 /etc/issue # 查看操作系统版本,不一定每个系统都能看到 -``` \ No newline at end of file +``` diff --git a/Linux_man_cn/ls.md b/Linux_man_cn/ls.md index 3e57ba6..d831735 100644 --- a/Linux_man_cn/ls.md +++ b/Linux_man_cn/ls.md @@ -51,8 +51,7 @@ ## 实例 ```bash -ls -lrt # 列出当前目录可见文件详细信息并以时间倒序排列 -ls -hl # 列出详细信息并以可读大小显示文件大小 +ls -hlrt --time-style="+%Y-%m-%d %H:%M" # 列出当前目录可见文件详细信息并以格式化的时间倒序排列 ls -al # 列出所有文件(包括隐藏)的详细信息 ls ??R* # 列出任意两个字符开始,接着跟R,后面任何字符的文件 ls log.[0-9]* # 匹配log+任何数字+任意字符的文件 diff --git a/Linux_man_cn/lsof.md b/Linux_man_cn/lsof.md index 6846a52..6456c1d 100644 --- a/Linux_man_cn/lsof.md +++ b/Linux_man_cn/lsof.md @@ -54,7 +54,6 @@ Defaults in parentheses; comma-separated set (s) items; dash-separated ranges. names select named files or files on named file systems Anyone can list all files; /dev warnings disabled; kernel ID check disabled. - ``` ## 详解 diff --git a/Linux_man_cn/make.md b/Linux_man_cn/make.md index b68bf12..8511258 100644 --- a/Linux_man_cn/make.md +++ b/Linux_man_cn/make.md @@ -1,7 +1,4 @@ -make -=== - -GNU的工程化编译工具 +# **make** ## 说明 @@ -9,13 +6,7 @@ GNU的工程化编译工具 ## 选项 -``` -make(选项)(参数) -``` - - - -``` +```markdown -f:指定“makefile”文件 -i:忽略命令执行返回的出错信息 -s:沉默模式,在执行之前不输出相应的命令行信息 @@ -25,27 +16,15 @@ make(选项)(参数) -q:make操作将根据目标文件是否已经更新返回"0"或非"0"的状态信息 -p:输出所有宏定义和目标文件描述 -d:Debug模式,输出有关文件和检测时间的详细信息 -``` Linux下常用选项与Unix系统中稍有不同,下面是不同的部分: -``` -c dir:在读取 makefile 之前改变到指定的目录dir -I dir:当包含其他 makefile文件时,利用该选项指定搜索目录 -h:help文挡,显示所有的make选项 -w:在处理 makefile 之前和之后,都显示工作目录 ``` -### 参数 - -目标:指定编译目标 - -### 知识扩展 - -无论是在linux 还是在Unix环境 中,make都是一个非常重要的编译命令。不管是自己进行项目开发还是安装应用软件,我们都经常要用到make或make install。利用make工具,我们可以将大型的开发项目分解成为多个更易于管理的模块,对于一个包括几百个源文件的应用程序,使用make和 makefile工具就可以简洁明快地理顺各个源文件之间纷繁复杂的相互关系 - -而且如此多的源文件,如果每次都要键入gcc命令进行编译的话,那对程序员 来说简直就是一场灾难。而make工具则可自动完成编译工作,并且可以只对程序员在上次编译后修改过的部分进行编译 - -因此,有效的利用make和 makefile工具可以大大提高项目开发的效率。同时掌握make和makefile之后,您也不会再面对着Linux下的应用软件手足无措了 +## 知识扩展 diff --git a/Linux_man_cn/pr.md b/Linux_man_cn/pr.md index a4b5e23..454c817 100644 --- a/Linux_man_cn/pr.md +++ b/Linux_man_cn/pr.md @@ -10,11 +10,8 @@ 用法:pr [选项] [文件] Mandatory arguments to long options are mandatory for short options too. 
-+首页[:末页], --pages=首页[:末页] - 在指定的首页/末页处开始/停止打印 --列数, --columns=列数 - 输出指定的列数。如果指定了-a 选项,则从上到下列印 - 程序会自动在每一页均衡每列占用的行数 ++首页[:末页], --pages=首页[:末页] 在指定的首页/末页处开始/停止打印 +-列数, --columns=列数 输出指定的列数。如果指定了-a 选项,则从上到下列印 程序会自动在每一页均衡每列占用的行数 -a, --across 设置每列从上到下输出,配合"-列数"选项一起使用 -c, --show-control-chars 使用头标(^G)和八进制反斜杠标记 diff --git a/Linux_man_cn/restorecon.md b/Linux_man_cn/restorecon.md index f5f5d53..e40b130 100644 --- a/Linux_man_cn/restorecon.md +++ b/Linux_man_cn/restorecon.md @@ -1,7 +1,4 @@ -restorecon -=== - -恢复文件的安全上下文 +# restorecon ## 说明 @@ -13,7 +10,7 @@ restorecon restorecon [-iFnrRv] [-e excludedir ] [-o filename ] [-f filename | pathname...] ``` - + ``` -i:忽略不存在的文件 @@ -63,6 +60,7 @@ type=AVC msg=audit(1378974214.610:465): avc: denied { open } for pid=2359 com /*使用restorecon来恢复网页主目录中所有文件的SELinux配置信息(如果目标为一个目录,可以添加-R参数递归)*/ [root@jsdig.com html]# restorecon -R /var/www/html/ -``` - +# 测试publickeys免密登录权限authorized_keys文件 +restorecon -r -vv .ssh/authorized_keys +``` diff --git a/Linux_man_cn/sed.md b/Linux_man_cn/sed.md index 0ea0c1f..0724826 100644 --- a/Linux_man_cn/sed.md +++ b/Linux_man_cn/sed.md @@ -11,10 +11,10 @@ ```markdown 用法: sed [选项] {脚本(如果没有其他脚本)} [输入文件] --n, --quiet, --silent 取消自动打印模式空间 --e 脚本, --expression=脚本 添加“脚本”到程序的运行列表,指定script处理文本 --f 脚本文件, --file=脚本文件 添加“脚本文件”到程序的运行列表,指定脚本文件处理文本 ---follow-symlinks 直接修改文件时跟随软链接 +-n, --quiet, --silent 取消自动打印模式空间 +-e 脚本, --expression=脚本 添加“脚本”到程序的运行列表,指定script处理文本 +-f 脚本文件, --file=脚本文件 添加“脚本文件”到程序的运行列表,指定脚本文件处理文本 +--follow-symlinks 直接修改文件时跟随软链接 -i[SUFFIX], --in-place[=SUFFIX] edit files in place (makes backup if SUFFIX supplied) -c, --copy use copy instead of rename when shuffling files in -i mode -b, --binary 什么也不做,用于与WIN32/CYGWIN/MSDOS/EMX兼容(二进制模式下的打开文件(CR+LF换行符未被特殊对待)) diff --git a/Linux_man_cn/split.md b/Linux_man_cn/split.md index b28ab19..6f697d7 100644 --- a/Linux_man_cn/split.md +++ b/Linux_man_cn/split.md @@ -47,6 +47,8 @@ dd if=/dev/zero bs=100k count=1 of=date.file # 生成一个大小为100KB的 split -b 10k date.file # 使用split命令将上面创建的date.file文件分割成大小为10KB的小文件 split -b 10k date.file -d -a 3 # file分割成多个后缀文件,若想用数字后缀可使用-d参数,同时可使用-a length来指定后缀长度 split -b 10k date.file -d -a 3 split_file # 为分割后的文件指定文件名的前缀 -split -l 10 date.file # 使用-l选项根据文件的行数来分割文件,例如把文件分割成每个包含10行的小文件 + +# 使用-l选项根据文件的行数来分割文件,例如把文件分割成每个包含100行的小文件 +split -l 100 test.sql -d -a 2 --additional-suffix=.sql test ``` diff --git a/Linux_man_cn/stat.md b/Linux_man_cn/stat.md index 8892b73..b845851 100644 --- a/Linux_man_cn/stat.md +++ b/Linux_man_cn/stat.md @@ -1,7 +1,4 @@ -stat -=== - -用于显示文件的状态信息 +# **stat** ## 说明 @@ -9,13 +6,7 @@ stat ## 选项 -``` -stat(选项)(参数) -``` - - - -``` +```markdown -L:支持符号连接 -f:显示文件系统状态而非文件状态 -t:以简洁方式输出信息 @@ -23,35 +14,8 @@ stat(选项)(参数) --version:显示指令的版本信息 ``` -### 参数 - -文件:指定要显示信息的普通文件或者文件系统对应的设备文件名 - ## 实例 -``` -[root@localhost ~]# ls -l myfile --rw-r--r-- 1 root root 0 2010-10-09 myfile - -[root@localhost ~]# stat myfile -file: “myfile” -Size: 0 Blocks: 8 IO Block: 4096 一般空文件 -Device: fd00h/64768d Inode: 194805815 Links: 1 -Access: (0644/-rw-r--r--) Uid: ( 0/ root) Gid: ( 0/ root) -Access: 2010-12-12 12:22:35.000000000 +0800 -Modify: 2010-10-09 20:44:21.000000000 +0800 -Change: 2010-10-09 20:44:21.000000000 +0800 - -[root@localhost ~]# stat -f myfile -File: "myfile" -id: 0 Namelen: 255 type: ext2/ext3 -Block size: 4096 Fundamental block size: 4096 -Blocks: Total: 241555461 free: 232910771 Available: 220442547 -Inodes: Total: 249364480 Free: 249139691 - -[root@localhost ~]# stat -t myfile -myfile 0 8 81a4 0 0 fd00 194805815 1 0 0 1292127755 1286628261 1286628261 4096 
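+一个最小示例(根据上面被移除的旧示例重建,假设当前目录存在普通文件 myfile):
+
+```bash
+stat myfile     # 显示文件状态:大小、块数、inode、权限及 Access/Modify/Change 三个时间
+stat -f myfile  # 显示文件所在文件系统的状态而非文件状态
+stat -t myfile  # 以简洁方式在一行内输出全部字段
+```
+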
+```bash ``` - - diff --git a/Linux_man_cn/tar.md b/Linux_man_cn/tar.md index de31fe6..fca6c03 100644 --- a/Linux_man_cn/tar.md +++ b/Linux_man_cn/tar.md @@ -268,6 +268,32 @@ tar -cf all.tar *.jpg # 这条命令是将所有.jpg的文件打成一个 tar -rf all.tar *.gif # 这条命令是将所有.gif的文件增加到all.tar的包里面去。-r是表示增加文件的意思 tar -uf all.tar logo.gif # 这条命令是更新原来tar包all.tar中logo.gif文件,-u是表示更新文件的意思 tar -tf all.tar # 这条命令是列出all.tar包中所有文件,-t是列出文件的意思 + +# 将文件全部打包成tar包 +tar -cvf log.tar log2012.log # 仅打包,不压缩! +tar -zcvf log.tar.gz log2012.log # 打包后,以 gzip 压缩 +tar -jcvf log.tar.bz2 log2012.log # 打包后,以 bzip2 压缩 +tar -cf - foo/ | xz -9 -c - > foo.tar.xz # 目录foo打包后以xz压缩 +tar -zxvf log.tar.gz # 将tar包解压缩 +tar -ztvf log.tar.gz # 查阅上述tar包内有哪些文件,选项z表示由gzip压缩的 +tar -zxvf /opt/soft/test/log30.tar.gz log2013.log # 只将tar内的部分文件解压出来 +tar -zcvpf log.tar.gz archive-$(date +%Y%m%d).log # 文件备份下来,并且保存其权限;这个`-p`的属性 +tar -c dir/ | gzip | gpg -c | ssh user@remote 'dd of=dir.tar.gz.gpg' # 将目录dir/压缩打包并放到远程机器上 +tar -c /dir/to/copy | cd /where/to/ && tar -x -p # 拷贝目录copy/到目录/where/to/并保持文件属性 +tar -c /dir/to/copy | ssh -C user@remote 'cd /where/to/ && tar -x -p' # 拷贝目录copy/到远程目录/where/to/并保持文件属性 +find dir/ -name '*.txt' | tar -c --files-from=- | bzip2 > dir_txt.tar.bz2 # 将目录dir及其子目录所有txt文件打包并用bzip2压缩 +tar --exclude=scf/service -zcvf scf.tar.gz scf/* # 备份文件夹内容时排除部分文件 +tar -xvf log.tar.gz --wildcards "*.txt" # 解压以txt结尾的文件 +tar -czwf log.tar.gz /dir/* # 将dir目录下所有文件压缩,w选项表示每个文件添加到存档之前要求确认 +tar -cvWf log.tar /dir/ # 验证压缩包中的文件,W选项表示验证 +tar -rvf log.tar.gz test.log # 将test.log文件添加到已存在的压缩包内 + +tar -N "2012/11/13" -zcvf log17.tar.gz test + +# 其实最简单的使用 tar就只要记忆底下的方式即可 +tar -jcvf filename.tar.bz2 # 要被压缩的文件或目录名称 +tar -jtvf filename.tar.bz2 # 要查询的压缩文件 +tar -jxvf filename.tar.bz2 -C dir # 解压缩到dir目录 ``` ```markdown @@ -346,30 +372,3 @@ jar -cvfm [目标文件名].jar META-INF/MANIFEST.MF [原文件名/目录名] 注:这个7z解压命令支持rar格式,即: 7z x [原文件名].rar ``` - -```bash -# 将文件全部打包成tar包 -tar -cvf log.tar log2012.log # 仅打包,不压缩! 
-tar -zcvf log.tar.gz log2012.log # 打包后,以 gzip 压缩 -tar -jcvf log.tar.bz2 log2012.log # 打包后,以 bzip2 压缩 -tar -zxvf log.tar.gz # 将tar包解压缩 -tar -ztvf log.tar.gz # 查阅上述tar包内有哪些文件,选项z表示由gzip压缩的 -tar -zxvf /opt/soft/test/log30.tar.gz log2013.log # 只将tar内的部分文件解压出来 -tar -zcvpf log.tar.gz archive-$(date +%Y%m%d).log # 文件备份下来,并且保存其权限;这个`-p`的属性 -tar -c dir/ | gzip | gpg -c | ssh user@remote 'dd of=dir.tar.gz.gpg' # 将目录dir/压缩打包并放到远程机器上 -tar -c /dir/to/copy | cd /where/to/ && tar -x -p # 拷贝目录copy/到目录/where/to/并保持文件属性 -tar -c /dir/to/copy | ssh -C user@remote 'cd /where/to/ && tar -x -p' # 拷贝目录copy/到远程目录/where/to/并保持文件属性 -find dir/ -name '*.txt' | tar -c --files-from=- | bzip2 > dir_txt.tar.bz2 # 将目录dir及其子目录所有txt文件打包并用bzip2压缩 -tar --exclude=scf/service -zcvf scf.tar.gz scf/* # 备份文件夹内容时排除部分文件 -tar -xvf log.tar.gz --wildcards "*.txt" # 解压以txt结尾的文件 -tar -czwf log.tar.gz /dir/* # 将dir目录下所有文件压缩,w选项表示每个文件添加到存档之前要求确认 -tar -cvWf log.tar /dir/ # 验证压缩包中的文件,W选项表示验证 -tar -rvf log.tar.gz test.log # 将test.log文件添加到已存在的压缩包内 - -tar -N "2012/11/13" -zcvf log17.tar.gz test - -# 其实最简单的使用 tar就只要记忆底下的方式即可 -tar -jcvf filename.tar.bz2 # 要被压缩的文件或目录名称 -tar -jtvf filename.tar.bz2 # 要查询的压缩文件 -tar -jxvf filename.tar.bz2 -C dir # 解压缩到dir目录 -``` diff --git a/Linux_man_cn/umask.md b/Linux_man_cn/umask.md index d09cae7..8d2edae 100644 --- a/Linux_man_cn/umask.md +++ b/Linux_man_cn/umask.md @@ -25,6 +25,3 @@ umask u=, g=w, o=rwx umask -s # 检查新创建文件的默认权限 ``` - - - diff --git a/Linux_man_cn/wget.md b/Linux_man_cn/wget.md index b39e0c8..77aadee 100644 --- a/Linux_man_cn/wget.md +++ b/Linux_man_cn/wget.md @@ -83,7 +83,7 @@ IP) --prefer-family=地址族 首先连接至指定家族(IPv6,IPv4 的地址 --user=用户 将 ftp 和 http 的用户名均设置为 - --password=密码 将 ftp 和 http 的密码均设置为 < + --password=密码 将 ftp 和 http 的密码均设置为 --ask-password 提示输入密码 --use-askpass=命令 指定用于请求用户名和密码的凭据管 如果没有提供指定命令,程序将使 @@ -181,7 +181,7 @@ WARC 选项: 递归下载: -r, --recursive 指定递归下载 - -l, --level=数字 最大递归深度 (inf 或 0 代表无限制,即全部下载) + -l, --level=数字 最大递归深度 (inf 或 0 代表无限制,即全部下载) --delete-after 下载完成后删除本地文件 -k, --convert-links 让下载得到的 HTML 或 CSS 中的链接指向本地文件 --convert-file-only 只转换 URL 的文件部分(一般叫做“基础名”/basename) diff --git a/Py3Scripts/AwesomeMath.ipynb b/Py3Scripts/AwesomeMath.ipynb deleted file mode 100644 index a7c6bb7..0000000 --- a/Py3Scripts/AwesomeMath.ipynb +++ /dev/null @@ -1 +0,0 @@ -{"cells":[{"cell_type":"code","execution_count":5,"metadata":{},"outputs":[],"source":"# 百钱白鸡问题:1只公鸡5元,1只母鸡3元,3只小鸡1元,100元买100只鸡,问:公鸡母鸡小鸡各有多少?\n# 经典三元一次方程求解,设各有x,y,z只\n\n# 解法一:推断每种鸡花费依次轮询,运行时间最短,2019-7-24最优方案\n# import time\n# start = time.perf_counter_ns() # 用自带time函数统计运行时长\nfor x in range(0, 101, 5): # 公鸡花费x元在0-100范围包括100,步长为5\n for y in range(0, 101 - x, 3): # 母鸡花费y元在0到100元减去公鸡花费钱数,步长为3\n z = 100 - x - y # 小鸡花费z元为100元减去x和y\n if x / 5 + y / 3 + z * 3 == 100:\n print(\"公鸡:%d只,母鸡:%d只,小鸡:%d只\" % (x / 5, y / 3, z * 3))\n # pass\n# end = time.perf_counter_ns()\n# time1 = end - start\n# print(\"解法一花费时间:\", time1)\n\n# 解法二:解法和解法一类似\n# 解题思路:买一只公鸡花费5元,剩余95元(注意考虑到不买公鸡的情况),再买一只母鸡花费3元剩余92元,依次轮询下去,钱数不断减\n# 少,100元不再是固定的。假设花费钱数依次为x、y、z元\nfor x in range(0, 101, 5): # 公鸡花费x元在0-100范围包括100,步长为5\n for y in range(0, 101 - x, 3): # 母鸡花费y元在0到100元减去公鸡花费钱数,步长为3\n for z in range(0, 101 - x - y):\n if x / 5 + y / 3 + z * 3 == 100 and x + y + z == 100: # 花费和鸡数都是100\n print(\"公鸡:%d只,母鸡:%d只,小鸡:%d只\" % (x / 5, y / 3, z * 3))\n\n# 解法三:枚举法\n# 解题思路:若只买公鸡最多20只,但要买100只,固公鸡在0-20之间不包括20;若只买母鸡则在0-33之间不包括33;若只买小鸡则在0-100\n# 之间不包括100\nfor x in range(0, 20):\n for y in range(0, 33):\n z = 100 - x - y # 小鸡个数z等于100只减去公鸡x只加母鸡y只\n if 5 * x + 3 * y + z / 3 == 100: # 钱数相加等于100元\n 
print(\"公鸡:%d只,母鸡:%d只,小鸡:%d只\" % (x, y, z))"},{"cell_type":"code","execution_count":2,"metadata":{},"outputs":[],"source":"# 经典斐波那契数列\n# 定义:https://wikimedia.org/api/rest_v1/media/math/render/svg/c374ba08c140de90c6cbb4c9b9fcd26e3f99ef56\n# 用文字来说,就是斐波那契数列由0和1开始,之后的斐波那契系数就是由之前的两数相加而得出\n\n# 方法一:使用递归\ndef fib1(n):\n if n<0:\n print(\"Incorrect input\")\n elif n==1:\n return 0 # 第一个斐波那契数是0\n elif n==2:\n return 1 # 第二斐波那契数是1\n else:\n return fib1(n-1)+fib1(n-2)\n\nprint(fib1(2))\n\n\n# 方法二:使用动态编程\nFibArray = [0, 1]\n\n\ndef fib2(n):\n if n < 0:\n print(\"Incorrect input\")\n elif n <= len(FibArray):\n return FibArray[n - 1]\n else:\n temp_fib = fib2(n - 1) + fib2(n - 2)\n FibArray.append(temp_fib)\n return temp_fib\n\n# 方法三:空间优化\ndef fibonacci(n):\n a = 0\n b = 1\n if n < 0:\n print(\"Incorrect input\")\n elif n == 0:\n return a\n elif n == 1:\n return b\n else:\n for i in range(2,n):\n c = a + b\n a = b\n b = c\n return b"},{"cell_type":"code","execution_count":3,"metadata":{},"outputs":[],"source":"# 水仙花数:水仙花数即此数字是各位立方和等于这个数本身的数。例:153 = 1**3 + 5**3 + 3**3\n# 找出1-1000之间的水仙花数\n# 分别四个数字:1,2,3,4,组成不重复的三位数。问题扩展:对于给定数字或给定范围的数字,组成不重复的n位数\n\n# 方法一:解答四个数组成不重复三位数(暂未想到更优方法)\nfor x in range(1, 5):\n for y in range(1, 5):\n for z in range(1, 5):\n if (x != y) and (x != z) and (z != y):\n print(x, y, z)"},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":"# 计算pi小数点任意位数\nfrom __future__ import division\nimport math\nfrom time import time\ntime1 = time()\nnumber = int(input('输入计算的位数:'))\nnumber1 = number + 10 # 多计算十位方式尾数取舍影响\nb = 10 ** number1\n# 求含4/5的首项\nx1 = b * 4 // 5\n# 求含1/239的首项\nx2 = b // -239\n\n# 求第一大项\nhe = x1 + x2\n# 设置下面循环的终点,即共计算n项\nnumber *= 2\n\n# 循环初值=3,末值2n,步长=2\nfor i in range(3, number, 2):\n # 求每个含1/5的项及符号\n x1 //= -25\n # 求每个含1/239的项及符号\n x2 //= -57121\n # 求两项之和\n x = (x1 + x2) // i\n # 求总和\n he += x\n\n# 求出π\npi = he * 4\n# 舍掉后十位\npi //= 10 ** 10\n\n# 输出圆周率π的值\npi_string = str(pi)\nresult = pi_string[0] + str('.') + pi_string[1:len(pi_string)]\nprint(result)\n\ntime2 = time()\n\nprint(u'耗时:' + str(time2 - time1) + 's')\n\n\n# 使用chudnovsky算法计算\n# 参考链接:https://www.craig-wood.com/nick/articles/pi-chudnovsky/\n\n\"\"\"\nPython3 program to calculate Pi using python long integers, BINARY\nsplitting and the Chudnovsky algorithm\n\n\"\"\"\n\nimport math\nfrom gmpy2 import mpz\nfrom time import time\n\ndef pi_chudnovsky_bs(digits):\n \"\"\"\n Compute int(pi * 10**digits)\n\n This is done using Chudnovsky's series with BINARY splitting\n \"\"\"\n C = 640320\n C3_OVER_24 = C**3 // 24\n def bs(a, b):\n \"\"\"\n Computes the terms for binary splitting the Chudnovsky infinite series\n\n a(a) = +/- (13591409 + 545140134*a)\n p(a) = (6*a-5)*(2*a-1)*(6*a-1)\n b(a) = 1\n q(a) = a*a*a*C3_OVER_24\n\n returns P(a,b), Q(a,b) and T(a,b)\n \"\"\"\n if b - a == 1:\n # Directly compute P(a,a+1), Q(a,a+1) and T(a,a+1)\n if a == 0:\n Pab = Qab = mpz(1)\n else:\n Pab = mpz((6*a-5)*(2*a-1)*(6*a-1))\n Qab = mpz(a*a*a*C3_OVER_24)\n Tab = Pab * (13591409 + 545140134*a) # a(a) * p(a)\n if a & 1:\n Tab = -Tab\n else:\n # Recursively compute P(a,b), Q(a,b) and T(a,b)\n # m is the midpoint of a and b\n m = (a + b) // 2\n # Recursively calculate P(a,m), Q(a,m) and T(a,m)\n Pam, Qam, Tam = bs(a, m)\n # Recursively calculate P(m,b), Q(m,b) and T(m,b)\n Pmb, Qmb, Tmb = bs(m, b)\n # Now combine\n Pab = Pam * Pmb\n Qab = Qam * Qmb\n Tab = Qmb * Tam + Pam * Tmb\n return Pab, Qab, Tab\n # how many terms to compute\n DIGITS_PER_TERM = math.log10(C3_OVER_24/6/2/6)\n N = int(digits/DIGITS_PER_TERM + 1)\n 
# Calclate P(0,N) and Q(0,N)\n P, Q, T = bs(0, N)\n one_squared = mpz(10)**(2*digits)\n sqrtC = (10005*one_squared).sqrt()\n return (Q*426880*sqrtC) // T\n\n# The last 5 digits or pi for various numbers of digits\ncheck_digits = {\n 100 : 70679,\n 1000 : 1989,\n 10000 : 75678,\n 100000 : 24646,\n 1000000 : 58151,\n 10000000 : 55897,\n}\n\nif __name__ == \"__main__\":\n digits = 100\n pi = pi_chudnovsky_bs(digits)\n print(pi)\n #raise SystemExit\n for log10_digits in range(1,9):\n digits = 10**log10_digits\n start =time()\n pi = pi_chudnovsky_bs(digits)\n print(\"chudnovsky_gmpy_mpz_bs: digits\",digits,\"time\",time()-start)\n if digits in check_digits:\n last_five_digits = pi % 100000\n if check_digits[digits] == last_five_digits:\n print(\"Last 5 digits %05d OK\" % last_five_digits)\n else:\n print(\"Last 5 digits %05d wrong should be %05d\" % (last_five_digits, check_digits[digits]))\n"},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":""}],"nbformat":4,"nbformat_minor":2,"metadata":{"language_info":{"name":"python","codemirror_mode":{"name":"ipython","version":3}},"orig_nbformat":2,"file_extension":".py","mimetype":"text/x-python","name":"python","npconvert_exporter":"python","pygments_lexer":"ipython3","version":3}} \ No newline at end of file diff --git a/Py3Scripts/AwesomePython.ipynb b/Py3Scripts/AwesomePython.ipynb new file mode 100644 index 0000000..91c4f35 --- /dev/null +++ b/Py3Scripts/AwesomePython.ipynb @@ -0,0 +1 @@ +{"cells":[{"cell_type":"code","metadata":{},"outputs":[],"source":["# 百钱白鸡问题:1只公鸡5元,1只母鸡3元,3只小鸡1元,100元买100只鸡,问:公鸡母鸡小鸡各有多少?\n","# 经典三元一次方程求解,设各有x,y,z只\n","\n","# 解法一:推断每种鸡花费依次轮询,运行时间最短,2019-7-24最优方案\n","# import time\n","# start = time.perf_counter_ns() # 用自带time函数统计运行时长\n","for x in range(0, 101, 5): # 公鸡花费x元在0-100范围包括100,步长为5\n"," for y in range(0, 101 - x, 3): # 母鸡花费y元在0到100元减去公鸡花费钱数,步长为3\n"," z = 100 - x - y # 小鸡花费z元为100元减去x和y\n"," if x / 5 + y / 3 + z * 3 == 100:\n"," print(\"公鸡:%d只,母鸡:%d只,小鸡:%d只\" % (x / 5, y / 3, z * 3))\n"," # pass\n","# end = time.perf_counter_ns()\n","# time1 = end - start\n","# print(\"解法一花费时间:\", time1)\n","\n","# 解法二:解法和解法一类似\n","# 解题思路:买一只公鸡花费5元,剩余95元(注意考虑到不买公鸡的情况),再买一只母鸡花费3元剩余92元,依次轮询下去,钱数不断减\n","# 少,100元不再是固定的。假设花费钱数依次为x、y、z元\n","for x in range(0, 101, 5): # 公鸡花费x元在0-100范围包括100,步长为5\n"," for y in range(0, 101 - x, 3): # 母鸡花费y元在0到100元减去公鸡花费钱数,步长为3\n"," for z in range(0, 101 - x - y):\n"," if x / 5 + y / 3 + z * 3 == 100 and x + y + z == 100: # 花费和鸡数都是100\n"," print(\"公鸡:%d只,母鸡:%d只,小鸡:%d只\" % (x / 5, y / 3, z * 3))\n","\n","# 解法三:枚举法\n","# 解题思路:若只买公鸡最多20只,但要买100只,固公鸡在0-20之间不包括20;若只买母鸡则在0-33之间不包括33;若只买小鸡则在0-100\n","# 之间不包括100\n","for x in range(0, 20):\n"," for y in range(0, 33):\n"," z = 100 - x - y # 小鸡个数z等于100只减去公鸡x只加母鸡y只\n"," if 5 * x + 3 * y + z / 3 == 100: # 钱数相加等于100元\n"," print(\"公鸡:%d只,母鸡:%d只,小鸡:%d只\" % (x, y, z))"]},{"cell_type":"code","metadata":{},"outputs":[],"source":["# 经典斐波那契数列\n","# 定义:https://wikimedia.org/api/rest_v1/media/math/render/svg/c374ba08c140de90c6cbb4c9b9fcd26e3f99ef56\n","# 用文字来说,就是斐波那契数列由0和1开始,之后的斐波那契系数就是由之前的两数相加而得出\n","\n","# 方法一:使用递归\n","def fib1(n):\n"," if n<0:\n"," print(\"Incorrect input\")\n"," elif n==1:\n"," return 0 # 第一个斐波那契数是0\n"," elif n==2:\n"," return 1 # 第二斐波那契数是1\n"," else:\n"," return fib1(n-1)+fib1(n-2)\n","\n","print(fib1(2))\n","\n","\n","# 方法二:使用动态编程\n","FibArray = [0, 1]\n","\n","\n","def fib2(n):\n"," if n < 0:\n"," print(\"Incorrect input\")\n"," elif n <= len(FibArray):\n"," return FibArray[n - 1]\n"," else:\n"," temp_fib = fib2(n - 1) + fib2(n - 
2)\n"," FibArray.append(temp_fib)\n"," return temp_fib\n","\n","# 方法三:空间优化\n","def fibonacci(n):\n"," a = 0\n"," b = 1\n"," if n < 0:\n"," print(\"Incorrect input\")\n"," elif n == 0:\n"," return a\n"," elif n == 1:\n"," return b\n"," else:\n"," for i in range(2,n):\n"," c = a + b\n"," a = b\n"," b = c\n"," return b"]},{"cell_type":"code","metadata":{},"outputs":[],"source":["# 水仙花数:水仙花数即此数字是各位立方和等于这个数本身的数。例:153 = 1**3 + 5**3 + 3**3\n","# 找出1-1000之间的水仙花数\n","# 分别四个数字:1,2,3,4,组成不重复的三位数。问题扩展:对于给定数字或给定范围的数字,组成不重复的n位数\n","\n","# 方法一:解答四个数组成不重复三位数(暂未想到更优方法)\n","for x in range(1, 5):\n"," for y in range(1, 5):\n"," for z in range(1, 5):\n"," if (x != y) and (x != z) and (z != y):\n"," print(x, y, z)"]},{"cell_type":"code","metadata":{},"outputs":[],"source":["# 计算pi小数点任意位数\n","from __future__ import division\n","import math\n","from time import time\n","time1 = time()\n","number = int(input('输入计算的位数:'))\n","number1 = number + 10 # 多计算十位方式尾数取舍影响\n","b = 10 ** number1\n","# 求含4/5的首项\n","x1 = b * 4 // 5\n","# 求含1/239的首项\n","x2 = b // -239\n","\n","# 求第一大项\n","he = x1 + x2\n","# 设置下面循环的终点,即共计算n项\n","number *= 2\n","\n","# 循环初值=3,末值2n,步长=2\n","for i in range(3, number, 2):\n"," # 求每个含1/5的项及符号\n"," x1 //= -25\n"," # 求每个含1/239的项及符号\n"," x2 //= -57121\n"," # 求两项之和\n"," x = (x1 + x2) // i\n"," # 求总和\n"," he += x\n","\n","# 求出π\n","pi = he * 4\n","# 舍掉后十位\n","pi //= 10 ** 10\n","\n","# 输出圆周率π的值\n","pi_string = str(pi)\n","result = pi_string[0] + str('.') + pi_string[1:len(pi_string)]\n","print(result)\n","\n","time2 = time()\n","\n","print(u'耗时:' + str(time2 - time1) + 's')\n","\n","\n","# 使用chudnovsky算法计算\n","# 参考链接:https://www.craig-wood.com/nick/articles/pi-chudnovsky/\n","\n","\"\"\"\n","Python3 program to calculate Pi using python long integers, BINARY\n","splitting and the Chudnovsky algorithm\n","\n","\"\"\"\n","\n","import math\n","from gmpy2 import mpz\n","from time import time\n","\n","def pi_chudnovsky_bs(digits):\n"," \"\"\"\n"," Compute int(pi * 10**digits)\n","\n"," This is done using Chudnovsky's series with BINARY splitting\n"," \"\"\"\n"," C = 640320\n"," C3_OVER_24 = C**3 // 24\n"," def bs(a, b):\n"," \"\"\"\n"," Computes the terms for binary splitting the Chudnovsky infinite series\n","\n"," a(a) = +/- (13591409 + 545140134*a)\n"," p(a) = (6*a-5)*(2*a-1)*(6*a-1)\n"," b(a) = 1\n"," q(a) = a*a*a*C3_OVER_24\n","\n"," returns P(a,b), Q(a,b) and T(a,b)\n"," \"\"\"\n"," if b - a == 1:\n"," # Directly compute P(a,a+1), Q(a,a+1) and T(a,a+1)\n"," if a == 0:\n"," Pab = Qab = mpz(1)\n"," else:\n"," Pab = mpz((6*a-5)*(2*a-1)*(6*a-1))\n"," Qab = mpz(a*a*a*C3_OVER_24)\n"," Tab = Pab * (13591409 + 545140134*a) # a(a) * p(a)\n"," if a & 1:\n"," Tab = -Tab\n"," else:\n"," # Recursively compute P(a,b), Q(a,b) and T(a,b)\n"," # m is the midpoint of a and b\n"," m = (a + b) // 2\n"," # Recursively calculate P(a,m), Q(a,m) and T(a,m)\n"," Pam, Qam, Tam = bs(a, m)\n"," # Recursively calculate P(m,b), Q(m,b) and T(m,b)\n"," Pmb, Qmb, Tmb = bs(m, b)\n"," # Now combine\n"," Pab = Pam * Pmb\n"," Qab = Qam * Qmb\n"," Tab = Qmb * Tam + Pam * Tmb\n"," return Pab, Qab, Tab\n"," # how many terms to compute\n"," DIGITS_PER_TERM = math.log10(C3_OVER_24/6/2/6)\n"," N = int(digits/DIGITS_PER_TERM + 1)\n"," # Calclate P(0,N) and Q(0,N)\n"," P, Q, T = bs(0, N)\n"," one_squared = mpz(10)**(2*digits)\n"," sqrtC = (10005*one_squared).sqrt()\n"," return (Q*426880*sqrtC) // T\n","\n","# The last 5 digits or pi for various numbers of digits\n","check_digits = {\n"," 100 : 70679,\n"," 1000 : 1989,\n"," 10000 : 75678,\n"," 100000 
: 24646,\n"," 1000000 : 58151,\n"," 10000000 : 55897,\n","}\n","\n","if __name__ == \"__main__\":\n"," digits = 100\n"," pi = pi_chudnovsky_bs(digits)\n"," print(pi)\n"," #raise SystemExit\n"," for log10_digits in range(1,9):\n"," digits = 10**log10_digits\n"," start =time()\n"," pi = pi_chudnovsky_bs(digits)\n"," print(\"chudnovsky_gmpy_mpz_bs: digits\",digits,\"time\",time()-start)\n"," if digits in check_digits:\n"," last_five_digits = pi % 100000\n"," if check_digits[digits] == last_five_digits:\n"," print(\"Last 5 digits %05d OK\" % last_five_digits)\n"," else:\n"," print(\"Last 5 digits %05d wrong should be %05d\" % (last_five_digits, check_digits[digits]))\n"]},{"cell_type":"code","metadata":{},"outputs":[],"source":["import ast\n","import json\n","from urllib.request import urlopen, Request\n","\n","\n","# urllib请求解析,json\n","# url = 'https://ip8.com/ajax/resolve.php'\n","# request = Request(url)\n","# response = urlopen(request)\n","# content_dict = json.loads(response.read().decode('utf-8'))\n","\n","# eval\n","# url = 'https://ip8.com/ajax/resolve.php'\n","# request = Request(url)\n","# response = urlopen(request)\n","# content = eval(response.read().decode('utf-8'))\n","\n","# ast\n","url = 'https://ip8.com/ajax/resolve.php'\n","request = Request(url)\n","response = urlopen(request)\n","content_dict = ast.literal_eval(response.read().decode('utf-8'))\n","print(content_dict['resolved'])"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":[]}],"nbformat":4,"nbformat_minor":2,"metadata":{"language_info":{"name":"python","codemirror_mode":{"name":"ipython","version":3},"version":"3.7.3"},"orig_nbformat":2,"file_extension":".py","mimetype":"text/x-python","name":"python","npconvert_exporter":"python","pygments_lexer":"ipython3","version":3}} \ No newline at end of file diff --git a/Py3Scripts/pymongo_example.py b/Py3Scripts/pymongo_example.py new file mode 100644 index 0000000..6fccbd1 --- /dev/null +++ b/Py3Scripts/pymongo_example.py @@ -0,0 +1,260 @@ +# 整合官方和常用示例 +# https://api.mongodb.com/python/current/py-modindex.html + +from pymongo import MongoClient +from pymongo import InsertOne, DeleteMany, ReplaceOne, UpdateOne +from bson.objectid import ObjectId +from bson.son import SON +from bson import json_util, CodecOptions +import datetime +from pprint import pprint +import pymongo +from bson.code import Code +import urllib.parse +import ssl +from pymongo import errors +from pymongo import WriteConcern +import pytz +import gridfs +import multiprocessing + + +client = MongoClient(host="192.168.2.15", port=27017) + +all_databases = client.list_database_names() +pprint(all_databases) +# using dictionary style access +db = client["AdminConfigDB"] + +all_collections = db.collection_names() + +pprint(all_collections) +# using nomal style +collection = db.arc_AdminConf + +# pprint(collection.find_one({})) + + +# for collection in collection.find({"flush":False}).sort("productId"): +# pprint(collection) + +# 以product_id升序创建索引 +# create_index = collection.create_index([('product_id', pymongo.ASCENDING)], unique=True) + +# 打印集合索引信息 +# pprint(sorted(list(collection.index_information()))) + +db2 = client.TestData +collection2 = db2.things +# result = collection2.insert_many([{"x": 1, "tags": ["dog", "cat"]}, +# {"x": 2, "tags": ["cat"]}, +# {"x": 2, "tags": ["mouse", "cat", "dog"]}, +# {"x": 3, "tags": []}]) +# pprint(result.inserted_ids) + +# Aggregation Framework示例 +# pipeline = [ +# {"$unwind": "$tags"}, +# {"$group": {"_id": "$tags", "count": {"$sum": 1}}}, +# 
{"$sort": SON([("count", -1), ("_id", -1)])}] + +# pprint(list(collection2.aggregate(pipeline))) + +# Map/Reduce示例 +# mapper = Code( +# """ +# function () { +# this.tags.forEach(function(z) { +# emit(z, 1); +# }); +# } +# """ +# ) + +# reducer = Code( +# """ +# function (key, values) { +# var total = 0; +# for (var i = 0; i < values.length; i++) { +# total += values[i]; +# } +# return total; +# } +# """ +# ) + +# result = collection2.map_reduce(mapper, reducer, "map_reduce_result") +# for doc in result.find(): +# pprint(doc) + +# results = collection2.map_reduce( +# mapper, reducer, "myresults", query={"x": {"$lt": 2}}) +# for doc in results.find(): +# pprint(doc) + +# 认证示例 +# username = urllib.parse.quote_plus('user') +# password = urllib.parse.quote_plus('pass/word') +# client = MongoClient('mongodb://%s:%s@127.0.0.1' % (username, password)) + +# version3.7支持SCRAM-SHA-256 +# client = MongoClient('example.com', +# username='user', +# password='password', +# authSource='the_database', +# authMechanism='SCRAM-SHA-256') +# mongodb uri连接方式 +# uri = "mongodb://user:password@example.com/?authSource=the_database&authMechanism=SCRAM-SHA-256" +# client = MongoClient(uri) +# mongodb-x509认证 +# client = MongoClient('example.com', +# username="", +# authMechanism="MONGODB-X509", +# ssl=True, +# ssl_certfile='/path/to/client.pem', +# ssl_cert_reqs=ssl.CERT_REQUIRED, +# ssl_ca_certs='/path/to/ca.pem') + +# 复制一个数据库 +# client.admin.command('copydb', fromdb='src_db_name', todb='dst_db_name', fromhost='src_host_ip') + +# 批量插入 +# _id对于大多数高写入量的应用程序而言,对于插入的文档本身没有_id字段时,在插入时自动创建代价较高。inserted_ids表示按提供_id的顺序插入文档 +# collection2.insert_many([{'x': i} for i in range(10000)]).inserted_ids +# print(collection2.count_documents({})) + +# 批量删除 +# collection2.delete_many({'x':{"$gte": 3}}) + +# bulk write,混合批量写入 +# 添加write_concern 写关注 +# collection2 = db2.get_collection('things', write_concern=WriteConcern(w=2, wtimeout=10)) +# try: +# result = collection2.bulk_write([ +# DeleteMany({}), # Remove all documents from the previous example. 
+# InsertOne({'_id': 1}), +# InsertOne({'_id': 2}), +# InsertOne({'_id': 3}), +# UpdateOne({'_id': 1}, {'$set': {'foo': 'bar'}}), +# UpdateOne({'_id': 4}, {'$inc': {'j': 1}}, upsert=True), +# ReplaceOne({'j': 1}, {'j': 2})]) +# except errors.BulkWriteError as bwe: +# pprint(bwe.details) +# pprint(result.bulk_api_result) + + +# 日期时间和时区(mongodb默认假定时间以UTC) +# result = db2.objects.insert_one({"last_modified": datetime.datetime.utcnow()}) + +# tz_aware选项,该选项启用“感知” datetime.datetime对象.即知道其所在时区的日期时间 +# result = db2.demo.insert_one( {'date': datetime.datetime(2019, 11, 28, 14, 0, 0)}) +# db2.demo.find_one()['date'] +# datetime.datetime(2019, 11, 28, 14, 0) +# options = CodecOptions(tz_aware=True) +# db2.get_collection('demo', codec_options=options).find_one()['date'] + +# 使用时区保存日期时间 +# 存储datetime.datetime指定时区的对象,即tzinfo属性不是None时,PyMongo会将这些日期时间自动转换为UTC +# pacific = pytz.timezone('Asia/Shanghai') +# aware_datetime = pacific.localize( datetime.datetime(2019, 11, 28, 14, 0, 0)) +# result = db2.demo.insert_one({"date_tz": aware_datetime}) +# datetime.datetime(2019, 11, 28, 14, 0) + +# 地理空间索引示例 +# https://api.mongodb.com/python/current/examples/geo.html + +# GridFS示例 +# 每个GridFS实例都是使用特定Database实例创建的,并将在特定实例上运行 +# db = MongoClient().gridfs_example +# fs = gridfs.GridFS(db) +# 将数据写入gridfs,put()在GridFS中创建一个新文件,并返回文件文档"_id"密钥的值 +# data = fs.put(b"hello world") +# get()方法取回文件内容,get()返回类似文件对象,调用read()方法获取文件内容 +# content = fs.get(data).read() +# 除了将str作为GridFS文件放置外,还可以放置任何类似文件的对象(带有read() 方法的对象)。GridFS将自动处理按块大小的段读取文件。还可以将其他属性作为关键字参数添加到文件中 +# b = fs.put(fs.get(a), filename="foo", bar="baz") +# out = fs.get(b) +# out.read() +# out.filename +# out.bar +# out.upload_date + +# 可拖尾游标,客户端用尽游标中所有结果后自动关闭游标,但对于上限集合(copped集合)可以使用可拖尾的游标 +# https://api.mongodb.com/python/current/examples/tailable.html + + +# 自定义类型 +""" +https://api.mongodb.com/python/current/examples/custom_type.html + +为了编码自定义类型,必须首先为该类型定义类型编解码器 +用户在定义类型编解码器时必须从以下基类中进行选择: +* TypeEncoder:将其子类化以定义将自定义Python类型编码为已知BSON类型的编解码器。用户必须实现 python_type属性/属性和transform_python方法。 + +* TypeDecoder:将其子类化以定义将特定BSON类型解码为自定义Python类型的编解码器。用户必须实现bson_type属性/属性和transform_bson方法。 + +* TypeCodec:此方法的子类以定义可以对自定义类型进行编码和解码的编解码器。用户必须实现 python_type和bson_type属性/属性以及 transform_python和transform_bson方法。 + +自定义类型的类型编解码器仅需要定义如何将 Decimal实例转换为 Decimal128实例,反之亦然 + +from bson.decimal128 import Decimal128 +from bson.codec_options import TypeCodec +class DecimalCodec(TypeCodec): + python_type = Decimal # the Python type acted upon by this type codec + bson_type = Decimal128 # the BSON type acted upon by this type codec + def transform_python(self, value): + # Function that transforms a custom type value into a type that BSON can encode + return Decimal128(value) + def transform_bson(self, value): + # Function that transforms a vanilla BSON type value into our custom type + return value.to_decimal() +decimal_codec = DecimalCodec() + +# 开始对自定义类型对象进行编码和解码之前,我们必须首先将相应的编解码器告知PyMongo。这是通过创建一个TypeRegistry实例来完成 +# 以使用任意数量的类型编解码器实例化类型注册表。一旦实例化,注册表是不可变的,将编解码器添加到注册表的唯一方法是创建一个新的注册表 +from bson.codec_options import TypeRegistry +type_registry = TypeRegistry([decimal_codec]) + +# 使用CodecOptions实例定义一个实例,type_registry并使用它来获取一个Collection理解Decimal数据类型的对象 + +未完待续...... 
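+
+补全最后一步(示例;用法与上文链接的官方 custom_type 文档一致,Decimal 即 decimal.Decimal):
+
+from decimal import Decimal
+from bson.codec_options import CodecOptions
+
+codec_options = CodecOptions(type_registry=type_registry)
+collection = db.get_collection('test', codec_options=codec_options)
+
+# 写入时 DecimalCodec 把 Decimal 编码为 Decimal128,读取时自动解码回 Decimal
+collection.insert_one({'num': Decimal('45.321')})
+print(collection.find_one()['num'])  # Decimal('45.321')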
+ +""" + + +# mongodb跨数据库查询、跨表(集合)、跨服务器查询都可根据以下方式修改 +# 查询data下product集合以条件gaId为1不重复的paId +# 使用此paId作为查询pa下pa_info集合以条件pa_id等于paId且v为1的文档 +# data = client.data +# product = data.product + +# pa = client.pa +# pa_info = pa.pa_info + +# pipeline = [ +# {"$match": {"gaId": 1}}, +# {"$sort": {"paId": -1}}, +# {"$group": {"_id": "$paId"}}, +# {"$project": {"paId": 1.0}}, +# ] + +# cursor = pa_info.aggregate(pipeline, allowDiskUse=False) +# try: +# for doc in cursor: +# doc_value = doc['_id'] +# pa_result = pa_info.find({"pa_id": doc_value, "v":1}) +# for pa_doc in pa_result: +# # 查询到的结果写入到其他集合 +# result_insert = collection2.insert_many([pa_doc]) +# # pass +# finally: +# client.close() + +# 父进程和每个子进程必须创建自己的MongoClient实例 +# Each process creates its own instance of MongoClient. +# def func(): +# db = pymongo.MongoClient().mydb +# # Do something with db. + +# proc = multiprocessing.Process(target=func) +# proc.start() diff --git a/Py3Scripts/secret.py b/Py3Scripts/secret.py new file mode 100644 index 0000000..9cea80e --- /dev/null +++ b/Py3Scripts/secret.py @@ -0,0 +1,2 @@ +username = 'admin' +secret = 'admin' diff --git a/Py3Scripts/tower.py b/Py3Scripts/tower.py new file mode 100644 index 0000000..379d979 --- /dev/null +++ b/Py3Scripts/tower.py @@ -0,0 +1,1749 @@ +#!/usr/bin/env python3 + +import sys +import csv +import json +import os.path +import argparse +import logging +import logging.config +import time +import re +from math import ceil + +from secret import username, secret +import yaml +import requests + +class ObjectNotFound(Exception): + pass + +class TooManyResults(Exception): + pass + +class NoLastExecutionFound(Exception): + pass + +class ActionFailure(Exception): + pass + +class AuthenticationFailure(Exception): + pass + +try: + FileNotFoundError +except NameError: + FileNotFoundError = IOError + +#Static vars +tower_url = 'http://192.168.2.129:8052' +#tower_url = 'http://192.168.2.128' +api_url = tower_url + '/api/v2/' +here = os.path.dirname(__file__) +yaml_launch_folder = os.path.join(here, 'yaml_launch') +MAX_CONCURRENT_JOBS = 3 +COOLDOWN = 90 + +# return absolute path for file. 
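+# e.g. list(absoluteFilePaths(yaml_launch_folder)) returns every file under
+# yaml_launch/ as an absolute path; it is a generator and walks subdirectories.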
+def absoluteFilePaths(directory): + for dirpath, _, filenames in os.walk(directory): + for f in filenames: + yield os.path.abspath(os.path.join(dirpath, f)) + +# delete double entries in list +def skip_duplicates(iterable, key=lambda x: x): + fingerprints = set() + for x in iterable: + fingerprint = key(x) + if fingerprint not in fingerprints: + yield x + fingerprints.add(fingerprint) + +#Authentication +def authentication(username, password): + url = tower_url + '/api/login/' + session = requests.session() + r = session.get(url) + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise AuthenticationFailure("%d : Authentication Failed : %s" % (r.status_code, r.text)) + + headers = {'Referer':url} + csrf_token = r.cookies['csrftoken'] + payload = {'csrfmiddlewaretoken':csrf_token, 'next':'/api/', 'username':username, 'password':password} + r = session.post(url, headers=headers, data=payload) + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise AuthenticationFailure("%d : Authentication Failed : %s" % (r.status_code, r.text)) + return session + +#Search one host and return its id +def return_host_id(session, fqdn): + url = api_url + 'hosts/' + payload = {'name':fqdn} + r = session.get(url, params=payload) + + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise ObjectNotFound("%d : Failed to search host %s" % (r.status_code, fqdn)) + + search_response = json.loads(r.text) + count = search_response['count'] + + if not count: + raise ObjectNotFound("Host %s doesn't exist" % (fqdn)) + elif count > 1: + raise TooManyResults("The search return %d results, this isn't something we want", count) + else: + host_id = search_response['results'][0]['id'] + + logging.debug("Host id: %d", host_id) + return host_id + +#Search one group from inventory and return its id +def return_group_id(session, group_name, inventory_name): + return_inventory_id(session, inventory_name) + + url = api_url + 'groups/' + payload = {'name':group_name} + r = session.get(url, params=payload) + + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise ObjectNotFound("%d : Failed to search group %s" % (r.status_code, group_name)) + + search_response = json.loads(r.text) + if not search_response['count']: + raise ObjectNotFound("Group %s doesn't exist" % (group_name)) + else: + for result in search_response['results']: + if result['summary_fields']['inventory']['name'] == inventory_name: + group_id = result['id'] + break + else: + raise ObjectNotFound("Group %s doesn't exist in inventory '%s'" % (group_name, inventory_name)) + + logging.debug("Group id: %d", group_id) + return group_id + +#Search one project and return its id +def return_project_id(session, project_name): + url = api_url + 'projects/' + payload = {'name':project_name} + r = session.get(url, params=payload) + + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise ObjectNotFound("%d : Failed to search project %s" % (r.status_code, project_name)) + + search_response = json.loads(r.text) + if not search_response['count']: + raise ObjectNotFound("Project %s doesn't exist" % (project_name)) + else: + project_id = search_response['results'][0]['id'] + + logging.debug("Project id: %d", project_id) + return project_id + +#Search one group from inventory and return its name +def return_group_name(session, group_id): + url = api_url + 'groups/' + 
payload = {'id':group_id} + r = session.get(url, params=payload) + + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise ObjectNotFound("%d : Failed to search group %d" % (r.status_code, group_id)) + + search_response = json.loads(r.text) + if not search_response['count']: + raise ObjectNotFound("Group %d doesn't exist" % (group_id)) + else: + group_name = search_response['results'][0]['name'] + + logging.debug("Group name: %s", group_name) + return group_name + +#Search one inventory and return its id +def return_inventory_id(session, inventory_name): + url = api_url + 'inventories/' + payload = {'name':inventory_name} + r = session.get(url, params=payload) + + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise ObjectNotFound("%d : Failed to search inventory %s" % (r.status_code, inventory_name)) + + search_response = json.loads(r.text) + count = search_response['count'] + + if not count: + raise ObjectNotFound("Inventory %s doesn't exist" % (inventory_name)) + else: + inventory_id = search_response['results'][0]['id'] + + logging.debug("Inventory id: %d", inventory_id) + return inventory_id + +#Search one job_template and return its id +def return_job_template_id(session, job_template_name): + url = api_url + 'job_templates/' + payload = {'name':job_template_name} + r = session.get(url, params=payload) + + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise ObjectNotFound("%d : Failed to search job template '%s'" % (r.status_code, job_template_name)) + + search_response = json.loads(r.text) + count = search_response['count'] + + if not count: + raise ObjectNotFound("Job template '%s' doesn't exist" % (job_template_name)) + else: + job_template_id = search_response['results'][0]['id'] + + logging.debug("Job template id: %d", job_template_id) + return job_template_id + +#Search one organization and return its id +def return_organization_id(session, organization_name): + url = api_url + 'organizations/' + payload = {'name':organization_name} + r = session.get(url, params=payload) + + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise ObjectNotFound("%d : Failed to search organization %s" % (r.status_code, organization_name)) + + search_response = json.loads(r.text) + count = search_response['count'] + + if not count: + raise ObjectNotFound("Organization %s doesn't exist" % (organization_name)) + else: + organization_id = search_response['results'][0]['id'] + + logging.debug("Organization id: %d", organization_id) + return organization_id + +#Search a group id and return the name of its inventory +def return_inventory_name_from_group(session, group_id): + url = api_url + 'groups/' + payload = {'id':group_id} + r = session.get(url, params=payload) + + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise ObjectNotFound("%d : Failed to search inventory of group number %d" % (r.status_code, group_id)) + + search_response = json.loads(r.text) + if not search_response['count']: + raise ObjectNotFound("Group %d doesn't exist" % (group_id)) + else: + inventory_name = search_response['results'][0]['summary_fields']['inventory']['name'] + + logging.debug("Inventory name: %s", inventory_name) + return inventory_name + +#Search an inventory id and return its name +def return_inventory_name(session, inventory_id): + url = api_url + 'inventories/' + str(inventory_id) + r = 
session.get(url) + + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise ObjectNotFound("%d : Failed to search inventory %d" % (r.status_code, inventory_id)) + + search_response = json.loads(r.text) + inventory_name = search_response['name'] + + logging.debug("Inventory name: %s", inventory_name) + return inventory_name + +#Search one credential and return its id +def return_credential_id(session, user_name): + url = api_url + 'credentials/' + payload = {'name':user_name} + r = session.get(url, params=payload) + + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise ObjectNotFound("%d : Failed to search user %s" % (r.status_code, user_name)) + + search_response = json.loads(r.text) + count = search_response['count'] + + if not count: + raise ObjectNotFound("User " + user_name + " doesn't exist") + else: + credential_id = search_response['results'][0]['id'] + + logging.debug("Credential id: %d", credential_id) + return credential_id + +#Search one job template and return its name +def return_job_template_name(session, job_template_id): + url = api_url + 'job_templates/' + str(job_template_id) + '/' + r = session.get(url) + + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code == 404): + raise ObjectNotFound("%d : Job template %d doesn't exist" % (r.status_code, job_template_id)) + else: + if (r.status_code != requests.codes.ok): + raise ObjectNotFound("%d : Failed to search job template %d" % (r.status_code, job_template_id)) + + search_response = json.loads(r.text) + job_template_name = search_response['name'] + + logging.debug("Job Template name: %s", job_template_name) + return job_template_name + +#Search one project and return its name +def return_project_name(session, project_id): + url = api_url + 'projects/' + payload = {'id':project_id} + r = session.get(url, params=payload) + + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise ObjectNotFound("%d : Failed to search project %d" % (r.status_code, project_id)) + + search_response = json.loads(r.text) + if not search_response['count']: + raise ObjectNotFound("Project %d doesn't exist" % (project_id)) + else: + project_name = search_response['results'][0]['name'] + + logging.debug("Project name: %s", project_name) + return project_name + +#Return a json dump of all something +def return_all(session, search, custom=""): + url = api_url + search + '/' + if custom: + payload = {'order_by':'id', 'page_size':'200', custom['key']:custom['value']} + else: + payload = {'order_by':'id', 'page_size':'200'} + r = session.get(url, params=payload) + + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise ActionFailure("%d : Failed to search all %s : %s" % (r.status_code, search, r.text)) + + result = json.loads(r.text) + nb_search = result['count'] + logging.debug("Tower inventory contains %d %s", nb_search, search) + + full_list = result['results'] + if nb_search > 200: + page = ceil(nb_search / 200) + for i in range(2, page+1): + payload = {'order_by':'id', 'page_size':'200', 'page':i} + r2 = session.get(url, params=payload) + logging.debug("%s %s", r.request.method, r.url) + full_list = full_list + json.loads(r2.text)['results'] + logging.debug("Function 'return_all' found %d %s in %d page(s)", len(full_list), search, page) + + return full_list + +#Add one host to inventory +def add_host(session, fqdn, inventory_name, delete=False, variables=''): 
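+    # Create host 'fqdn' in 'inventory_name' and return its id. If the host
+    # already exists only an error is logged (pass delete=True to remove and
+    # re-create it); 'variables' is forwarded to the API as a JSON/YAML string.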
+ inventory_id = return_inventory_id(session, inventory_name) + try: + if delete: + delete_host(session, fqdn) + host_id = return_host_id(session, fqdn) + logging.error("The host %s already exist in %s with ID %d", fqdn, inventory_name, host_id) + except ObjectNotFound: + url = api_url + 'hosts/' + payload = {'name':fqdn, 'inventory':inventory_id, 'variables':variables} + r = session.post(url, json=payload) + + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != 201): + raise ActionFailure("%d : Failed to add host %s %s" % (r.status_code, fqdn, r.text)) + + search_response = json.loads(r.text) + host_id = search_response['id'] + + logging.info("Host %s added with ID %s", fqdn, host_id) + return host_id + +#Add one group to inventory +def add_group(session, group_name, inventory_name, delete=False): + inventory_id = return_inventory_id(session, inventory_name) + try: + if delete: + delete_group(session, group_name, inventory_name) + group_id = return_group_id(session, group_name, inventory_name) + logging.error("The group %s already exist in %s with ID %d", group_name, inventory_name, group_id) + except ObjectNotFound: + url = api_url + 'groups/' + payload = {'name':group_name, 'inventory':inventory_id} + r = session.post(url, json=payload) + + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != 201): + raise ActionFailure("%d : Failed to add group %s %s" % (r.status_code, group_name, r.text)) + + search_response = json.loads(r.text) + group_id = search_response['id'] + + logging.info("Group %s added with ID %s", group_name, group_id) + return group_id + +#Import all hosts listed in txtfile +def mass_import_host(session, txtfile): + with open(txtfile, newline='') as csvfile: + reader = csv.reader(csvfile, delimiter=';') + count = 0 + for row in reader: + add_host(session, row[0], row[1]) + count = count + 1 + logging.info("%d host(s) imported", count) + +#Enable or disable one host +def change_host_status(session, fqdn, status): + try: + host_id = return_host_id(session, fqdn) + url = api_url + 'hosts/' + str(host_id) + '/' + payload = {'name':fqdn, 'enabled':status} + r = session.patch(url, json=payload) + + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise ActionFailure("%d : Failed to update host status %s %s" % (r.status_code, fqdn, r.text)) + + search_response = json.loads(r.text) + host_id = search_response['id'] + + if status == "True" or status == "true": + modification = "enabled" + else: + modification = "disabled" + + logging.info("Host %s with ID %d has been %s", fqdn, host_id, modification) + return host_id + except ObjectNotFound: + logging.error("The host %s doesn't exist", fqdn) + +#Change status of all hosts listed in txtfile +def mass_change_host_status(session, txtfile): + with open(txtfile, newline='') as csvfile: + reader = csv.reader(csvfile, delimiter=';') + count = 0 + for row in reader: + change_host_status(session, row[0], row[1]) + count = count + 1 + logging.info("%d host(s) updated", count) + +#Remove one host from inventory +def delete_host(session, fqdn): + host_id = return_host_id(session, fqdn) + + url = api_url + 'hosts/' + str(host_id) + '/' + r = session.delete(url) + + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != 204): + raise ActionFailure("%d : Failed to remove host %s %s" % (r.status_code, fqdn, r.text)) + + logging.info("Host %s deleted", fqdn) + return host_id + +#Remove one group from inventory +def delete_group(session, group_name, 
inventory): + group_id = return_group_id(session, group_name, inventory) + + url = api_url + 'groups/' + str(group_id) + '/' + r = session.delete(url) + + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != 204): + raise ActionFailure("%d : Failed to remove group %s %s" % (r.status_code, group_name, r.text)) + + logging.info("Group %s deleted", group_name) + return group_id + +#Remove one project +def delete_project(session, project_name): + project_id = return_project_id(session, project_name) + + url = api_url + 'projects/' + str(project_id) + '/' + r = session.delete(url) + + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != 204): + raise ActionFailure("%d : Failed to remove project %s %s" % (r.status_code, project_name, r.text)) + + logging.info("Project %s deleted", project_name) + return project_id + +#Remove all hosts listed in txtfile +def mass_delete_host(session, txtfile): + with open(txtfile, newline='') as csvfile: + reader = csv.reader(csvfile, delimiter=';') + count = 0 + for row in reader: + delete_host(session, row[0]) + count = count + 1 + logging.info("%d host(s) removed", count) + +#Add one host to a group in inventory +def associate_to_group(session, fqdn, group_name, inventory_name): + host_id = return_host_id(session, fqdn) + return_inventory_id(session, inventory_name) + group_id = return_group_id(session, group_name, inventory_name) + + url = api_url + 'hosts/' + str(host_id) + '/groups/' + payload = {'associate':True, 'id':group_id} + r = session.post(url, json=payload) + + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != 204): + raise ActionFailure("%d : Failed to add host %s to group %s %s" % (r.status_code, fqdn, group_name, r.text)) + + logging.info("%s added to %s in %s", fqdn, group_name, inventory_name) + +# associate variable to group +def associate_variable(session, type_var, name, key, value, inventory_name): + if type_var == "groups": + id_var = return_group_id(session, name, inventory_name) + elif type_var == "hosts": + id_var = return_host_id(session, name) + else: + raise ActionFailure("Failed to add variable '%s : %s' to %s '%s' : Type not found (groups or hosts)." 
+ % (key, value, type_var, name)) + url = api_url + type_var +'/' + str(id_var) + '/variable_data/' + r = session.get(url) + + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise ActionFailure("%d : Failed to get variale for %s %d" % (r.status_code, type_var, id_var)) + + data = json.loads(r.text) + data[key] = value + logging.debug(data) + + r = session.put(url, json=data) + + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != 200): + raise ActionFailure("%d : Failed to add variable '%s : %s' to %s '%s' : %s" % + (r.status_code, key, value, type_var, name, r.text)) + + logging.info("%s : %s added to %s in %s", key, value, name, inventory_name) + +#Add one children to a group in inventory +def associate_children_to_group(session, group_parent_name, group_child_name, inventory_name): + group_child_id = return_group_id(session, group_child_name, inventory_name) + return_inventory_id(session, inventory_name) + group_parent_id = return_group_id(session, group_parent_name, inventory_name) + + url = api_url + 'groups/' + str(group_parent_id) + '/children/' + payload = {'associate':True, 'id':group_child_id} + r = session.post(url, json=payload) + + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != 204): + raise ActionFailure("%d : Failed to add group (child) %s to group (parent) %s %s" + % (r.status_code, group_child_name, group_parent_name, r.text)) + + logging.info("%s added to %s in %s", group_child_name, group_parent_name, inventory_name) + +#Associate every host with respective groups from txtfile +def mass_associate(session, txtfile): + with open(txtfile, newline='') as csvfile: + reader = csv.reader(csvfile, delimiter=';') + count = 0 + for row in reader: + associate_to_group(session, row[0], row[1], row[2]) + count = count + 1 + logging.info("%d host(s) associated", count) + +#Remove one host from a group in inventory +def disassociate_from_group(session, fqdn, group_name, inventory_name): + host_id = return_host_id(session, fqdn) + return_inventory_id(session, inventory_name) + group_id = return_group_id(session, group_name, inventory_name) + + url = api_url + 'hosts/' + str(host_id) + '/groups/' + payload = {'disassociate':True, 'id':group_id} + r = session.post(url, json=payload) + + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != 204): + raise ActionFailure("%d : Failed to remove host %s from group %s %s" % (r.status_code, fqdn, group_name, r.text)) + + logging.info("%s removed from %s in %s", fqdn, group_name, inventory_name) + +#Print all host's groups +def host_groups(session, fqdn, manual=True): + host_id = return_host_id(session, fqdn) + + url = api_url + 'hosts/' + str(host_id) + '/all_groups/' + payload = {'page_size':'100'} + r = session.get(url, params=payload) + + if manual: + logging.info("%s %s", r.request.method, r.url) + + if (r.status_code != requests.codes.ok): + raise ActionFailure("%d : Failed to search all groups of host %s : %s" % (r.status_code, fqdn, r.text)) + + host_groups_list = json.loads(r.text) + + if manual: + if host_groups_list['count']: + logging.info("Host %s (ID %d) has %d group(s):", fqdn, host_id, host_groups_list['count']) + for result in host_groups_list['results']: + logging.info("%s", result['name']) + else: + logging.error("Host %s (ID %d) has no group", fqdn, host_id) + else: + return host_groups_list['results'] + +#Return all hosts in inventory +def return_all_hosts_from_inventory(session, inventory_name): + inventory_id = 
return_inventory_id(session, inventory_name) + + url = api_url + 'hosts/' + payload = {'order_by':'id', 'inventory':inventory_id, 'page_size':'200'} + r = session.get(url, params=payload) + + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise ObjectNotFound("%d : Failed to search first 200 hosts" % r.status_code) + + result = json.loads(r.text) + nb_search = result['count'] + logging.debug("Tower inventory '%s' contains %d hosts", inventory_name, nb_search) + + full_list = result['results'] + if nb_search > 200: + page = ceil(nb_search / 200) + for i in range(2, page+1): + payload = {'order_by':'id', 'inventory':inventory_id, 'page_size':'200', 'page':i} + r2 = session.get(url, params=payload) + logging.debug("%s %s", r.request.method, r.url) + full_list = full_list + json.loads(r2.text)['results'] + logging.debug("Function 'return_all_hosts' found %d host(s) in inventory %s in %d page(s)", + len(full_list), inventory_name, page) + + if nb_search: + return full_list + else: + raise ObjectNotFound("Inventory %s is empty" % (inventory_name)) + +#Print all members of several groups +def groupList_members(session, inventory_name, inverse, grouplist): + all_members = [] + for group_name in grouplist: + group_id = return_group_id(session, group_name, inventory_name) + + url = api_url + 'groups/' + str(group_id) + '/hosts' + payload = {'page_size':'200'} + r = session.get(url, params=payload) + + logging.debug("%s %s", r.request.method, r.url) + + if (r.status_code != requests.codes.ok): + raise ObjectNotFound("%d : Failed to search group members for %s %s" % (r.status_code, group_name, r.text)) + + members = json.loads(r.text) + if not members['count']: + #TODO: Test children + logging.info("Group %s (ID %d) in inventory '%s' has no member", group_name, group_id, inventory_name) + else: + logging.info("Group %s (ID %d) in inventory '%s' has %d member(s)", group_name, group_id, inventory_name, members['count']) + all_members.extend(members['results']) + + if not len(all_members): + logging.error("No host found in your group list in inventory '%s'", inventory_name) + return 0 + else: + if inverse: + all_hosts_from_inventory = return_all_hosts_from_inventory(session, inventory_name) + inverse_list = [item for item in all_hosts_from_inventory if item not in all_members] + for member in inverse_list: + logging.info("%s", member['name']) + return len(inverse_list) + else: + for member in all_members: + logging.info("%s", member['name']) + return len(all_members) + +#Print all group members in inventory and return their number +def group_members(session, group_name=None, inventory_name=None, export=False, group_id=None): + manual = False + if group_name is None: + group_name = return_group_name(session, group_id) + if inventory_name is None: + inventory_name = return_inventory_name_from_group(session, group_id) + if group_id is None: + group_id = return_group_id(session, group_name, inventory_name) + manual = True + + url = api_url + 'groups/' + str(group_id) + '/hosts' + payload = {'page_size':'200'} + r = session.get(url, params=payload) + + logging.debug("%s %s", r.request.method, r.url) + + if (r.status_code != requests.codes.ok): + raise ObjectNotFound("%d : Failed to search group members for %s %s" % (r.status_code, group_name, r.text)) + + members = json.loads(r.text) + if not members['count']: + #TODO: Test children + logging.error("Group %s (ID %d) in inventory '%s' has no member", group_name, group_id, inventory_name) + return 0 + else: + if 
manual: + logging.info("Group %s (ID %d) in inventory '%s' has %d member(s)", group_name, group_id, inventory_name, members['count']) + for member in members['results']: + logging.info("%s", member['name']) + if export: + with open('export_group_vars', 'w') as stream: + for member in members['results']: + stream.write(member['name'] + "\n") + return members['count'] + +#Print or return all group vars in inventory +def group_vars(session, group_name=None, inventory_name=None, group_id=None): + manual = False + if group_id is None: + group_id = return_group_id(session, group_name, inventory_name) + manual = True + + url = api_url + 'groups/' + str(group_id) + '/' + r = session.get(url) + + if manual: + logging.info("%s %s", r.request.method, r.url) + + if (r.status_code != requests.codes.ok): + raise ObjectNotFound("%d : Failed to search group variables for %s %s" % (r.status_code, group_name, r.text)) + + search_response = json.loads(r.text) + gvars = re.sub('---\n', '', search_response['variables']) + + if manual: + if gvars == '' or gvars == '{}': + logging.error("Group %s (ID %d) has no var", group_name, group_id) + else: + logging.info('\n' + '='*15 + ' GROUP ' + group_name + ' ' + '='*15) + logging.info("%s", gvars) + else: + if gvars == '' or gvars == '{}': + return "" + else: + return gvars + +#Return the last job template execution status +def last_execution_status(session, quiet, job_template_id): + job_template_name = return_job_template_name(session, job_template_id) + + url = api_url + 'job_templates/' + str(job_template_id) + '/jobs/' + payload = {'started__isnull':'False', 'order_by':'-started', 'page_size':'1'} + r = session.get(url, params=payload) + + if not quiet: + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise NoLastExecutionFound("Search of last execution status failed : status code %d" % (r.status_code)) + + search_response = json.loads(r.text) + if not search_response['count']: + raise NoLastExecutionFound("No launch history for %s" % (job_template_name)) + + status = search_response['results'][0]['status'] + job_id = search_response['results'][0]['id'] + + logging.info("Last execution of '%s' was '%s' - Job ID : %d", job_template_name, status, job_id) + return (status, job_id) + +#Show if last job template execution change anything +def last_execution_change(session, quiet, job_template_id): + status, job_id = last_execution_status(session, quiet, job_template_id) + logging.info("Full output : %s/#/jobs/%d", tower_url, job_id) + + if (status != "successful"): + logging.error("Last execution status was '%s' please relaunch the job", status) + sys.exit(99) + else: + url = api_url + 'jobs/' + str(job_id) + '/job_events/' + payload = {'changed':'true', 'page_size':'200'} + r = session.get(url, params=payload) + + if not quiet: + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise NoLastExecutionFound("Search of last execution change failed : status code %d" % (r.status_code)) + + search_response = json.loads(r.text) + count = 0 + for result in search_response['results']: + if result['event_level'] == 3: + count += 1 + logging.info("%d : %s : %s", result['id'], result['event_data']['task'], result['event_data']['host']) + logging.info("%d task(s) have a 'changed' status in job ID %d ", count, job_id) + sys.exit(count) + +#Print all vars in all tower groups +def all_group_vars(session): + groups = return_all(session, 'groups') + + #for idx, group in 
enumerate(groups['results']):
+    for group in groups:
+        group_id = group['id']
+        group_name = group['name']
+        group_inventory = group['summary_fields']['inventory']['name']
+
+        variables = group_vars(session, group_id=group_id)
+
+        if variables != '{}' and variables != '':
+            logging.info("%d/%s/%s : %s", group_id, group_inventory, group_name, variables)
+        #if idx == 5:
+        #    break
+
+#Print all vars in all hosts
+def all_host_vars(session):
+    hosts = return_all(session, 'hosts')
+
+    for result in hosts:
+        host_id = result['id']
+        host_name = result['name']
+        host_inventory = result['summary_fields']['inventory']['name']
+
+        url = api_url + 'hosts/' + str(host_id) + '/variable_data/'
+        r = session.get(url)
+
+        logging.debug("%s %s", r.request.method, r.url)
+        if (r.status_code != requests.codes.ok):
+            raise ActionFailure("%d : Failed to search all vars of host %s : %s" % (r.status_code, host_name, r.text))
+
+        if r.text != '{}' and r.text != '':
+            variables = json.loads(r.text)
+            logging.info("%d/%s/%s : %s", host_id, host_inventory, host_name, str(variables))
+
+#Print all hosts that aren't in any group
+def all_lonely_hosts(session):
+    hosts = return_all(session, 'hosts')
+
+    for host in hosts:
+        host_id = host['id']
+        host_name = host['name']
+        host_inventory = host['summary_fields']['inventory']['name']
+
+        url = api_url + 'hosts/' + str(host_id) + '/all_groups/'
+        r = session.get(url)
+
+        logging.debug("%s %s", r.request.method, r.url)
+        if (r.status_code != requests.codes.ok):
+            raise ActionFailure("%d : Failed to search all groups of host %s : %s" % (r.status_code, host_name, r.text))
+
+        host_groups_list = json.loads(r.text)
+
+        if not host_groups_list['count']:
+            logging.error("Host %s (ID %d) in inventory '%s' has no group", host_name, host_id, host_inventory)
+
+#Print all groups that don't have any member
+def all_lonely_groups(session):
+    groups = return_all(session, 'groups')
+
+    count = 0
+    for group in groups:
+        group_nb_member = group_members(session, group_id=group['id'])
+
+        if not group_nb_member:
+            count += 1
+    logging.info("There are %d lonely group(s)", count)
+
+#Print all templates not using the Default project
+def all_not_default_project(session):
+    templates = return_all(session, 'job_templates')
+
+    count = 0
+    for template in templates:
+        try:
+            project_id = int(template['related']['project'].split('/')[-2])
+            if project_id not in [6, 7, 12, 16]:
+                project_name = return_project_name(session, project_id)
+                logging.info("(id:{:5d}) {:60s} is using (id:{:5d}) {:30s}".format(
+                    template['id'], template['name'], project_id, project_name))
+                count += 1
+        except KeyError:
+            logging.info("(id:{:5d}) {:60s} is NOT using ANY project".format(template['id'], template['name']))
+            count += 1
+    logging.info("There are %d template(s) using non-default projects", count)
+
+def all_projects_with_old_branch(session):
+    projects = return_all(session, 'projects')
+    count = 0
+    with open('branchList', 'r') as stream:
+        branchList = stream.read().splitlines()
+    sortedProjects = sorted(projects, key=lambda k: k['name'])
+
+    for project in sortedProjects:
+        if project['scm_url'] == 'git@gitlab.example.com:infra/ansible.git' and project['scm_branch']:
+            if project['scm_branch'] not in branchList:
+                count += 1
+                logging.info("Project {:30s} is using nonexistent branch {:30s}".format(project['name'], project['scm_branch']))
+    logging.info("There are %d project(s) using an obsolete branch", count)
+
+#Print all job templates using surveys
+def all_templates_with_survey(session):
+    templates = return_all(session, 'job_templates')
+
+    count = 0
+    for template in templates:
+        if template['survey_enabled']:
+            logging.info("%d : %s", template['id'], template['name'])
+            count += 1
+    logging.info("There are %d template(s) with survey", count)
+
+# Print host variables
+def host_vars(session, fqdn, nested=False):
+    host_id = return_host_id(session, fqdn)
+
+    url = api_url + 'hosts/' + str(host_id)
+    r = session.get(url)
+
+    logging.debug("%s %s", r.request.method, r.url)
+
+    if (r.status_code != requests.codes.ok):
+        raise ActionFailure("%d : Failed to search host vars for %s : %s" % (r.status_code, fqdn, r.text))
+
+    search_response = json.loads(r.text)
+    hvars = search_response['variables']
+
+    if hvars == '' or hvars == '{}':
+        logging.error("Host %s (ID %d) has no var", fqdn, host_id)
+    else:
+        logging.info('\n' + '='*15 + ' HOST ' + fqdn + ' ' + '='*15)
+        logging.info(re.sub('---\n', '', hvars))
+
+    if nested:
+        group_list = host_groups(session, fqdn, False)
+        for group in group_list:
+            inventory_name = return_inventory_name(session, search_response['inventory'])
+            group_id = return_group_id(session, group['name'], inventory_name)
+            gvars = group_vars(session, group['name'], inventory_name, group_id)
+            if gvars:
+                logging.info('\n' + '='*15 + ' GROUP ' + group['name'] + ' ' + '='*15)
+                logging.info(gvars)
+
+# Cancel a job in progress
+def stop_job(session, job_id):
+    url = api_url + 'jobs/' + str(job_id) + '/cancel/'
+    json_payload = {'can_cancel': 'false'}
+    r = session.post(url, json=json_payload)
+    logging.info("%s %s", r.request.method, r.url)
+
+    if (r.status_code != 202):
+        raise ActionFailure("%d : Failed to stop Job %s : %s" % (r.status_code, job_id, r.text))
+
+    logging.info("Stop successful : %s", job_id)
+
+def launch_job(session, login, password, jsonfile, tags, inventory, limit, si_version, job_type, disable_cooldown, returnId=False):
+    global MAX_CONCURRENT_JOBS  # pylint: disable=global-statement
+    with open("%s/%s.yaml" % (yaml_launch_folder, jsonfile), 'r') as stream:
+        exec_settings = yaml.safe_load(stream)  # safe_load, consistent with browse_yaml_inventory
+
+    if disable_cooldown:
+        MAX_CONCURRENT_JOBS = 50
+
+    launchCount = 0
+    for job_template_id in exec_settings['job_template_id']:
+        launchCount += 1
+        if launchCount > MAX_CONCURRENT_JOBS:
+            logging.info("%d jobs are now running, waiting %d seconds for performance saving", MAX_CONCURRENT_JOBS, COOLDOWN)
+            time.sleep(COOLDOWN)
+            launchCount = 1
+        url = api_url + 'job_templates/' + str(job_template_id) + '/launch/'
+        r = session.get(url)
+
+        logging.debug("%s %s", r.request.method, r.url)
+        if (r.status_code != requests.codes.ok):
+            raise ActionFailure("%d : Failed to launch Job Template %d" % (r.status_code, job_template_id))
+
+        prelaunch_result = json.loads(r.text)
+        logging.debug(prelaunch_result)
+        for variable in prelaunch_result['variables_needed_to_start']:
+            if variable not in exec_settings['survey']:
+                raise ActionFailure("Failed to launch Job Template %d : Missing mandatory var %s" % (job_template_id, variable))
+
+        #extra_vars = yaml.load(prelaunch_result['defaults']['extra_vars'])
+        extra_vars = {}
+        try:
+            if exec_settings['survey']:
+                for k, v in exec_settings['survey'].items():
+                    extra_vars[k] = v
+        except KeyError:
+            pass
+
+        credential_id = return_credential_id(session, login)
+        json_payload = {'credential': credential_id,
+                        'ssh_password': password,
+                        'ssh_key_unlock': password,
+                        'become_password': password,
+                        'extra_vars':extra_vars
+                       }
+        if tags:
+            json_payload['job_tags'] = tags
+        if inventory:
+            json_payload['inventory'] = inventory
+        if
limit: + json_payload['limit'] = limit + elif 'limit' in exec_settings: + json_payload['limit'] = exec_settings['limit'] + if si_version: + json_payload['extra_vars']['si_version'] = si_version + if job_type: + json_payload['job_type'] = job_type + elif 'job_type' in exec_settings: + json_payload['job_type'] = exec_settings['job_type'] + + logging.debug(json_payload) + + r = session.post(url, json=json_payload) + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != 201): + raise ActionFailure("%d : Failed to launch Job Template %d : %s" % (r.status_code, job_template_id, r.text)) + + launch_result = json.loads(r.text) + logging.info("Launch successful : %s/#/jobs/%d", tower_url, launch_result['id']) + logging.debug(launch_result) + if returnId: + logging.info("IDJOB:%s", launch_result['id']) + +# Function to import ansible inventory +def import_ansible_inventory(session, filename, inventory, organization='Default', + export_host_file=False, group_var_directory_path=None, host_var_directory_path=None): + add_inventory(session, inventory, organization, False) + with open(filename, 'r') as stream: + groupChildren = group = None + list_hosts = [] + for line in stream.readlines(): + line = line.replace('\n', '') + if line and not line.isspace(): + if line[0] == '[': + if ':children' in line: + groupChildren = line.replace(':children', '') + groupChildren = groupChildren[1:-1] + add_group(session, groupChildren, inventory) + if group is not None: + group = None + else: + group = line + group = group[1:-1] + add_group(session, group, inventory) + if groupChildren is not None: + groupChildren = None + elif 'ip' in line and '#' not in line and groupChildren is None and group is None: + if 'ansible_host' in line: + line = line.split() + fqdn = str(line[1].replace('ansible_host=', '')) + del line[1], line[0] + else: + line = line.split() + fqdn = str(line[0]) + del line[0] + list_hosts.append(fqdn) + variables = '\n'.join(line) + variables = variables.replace('=', ': ') + add_host(session, fqdn, inventory, True, variables) + elif groupChildren != None: + associate_children_to_group(session, groupChildren, line, inventory) + elif group != None: + for host in list_hosts: + if line in host: + associate_to_group(session, host, group, inventory) + if group_var_directory_path != None: + browse_yaml_inventory(session, 'groups', group_var_directory_path, inventory) + if host_var_directory_path != None: + browse_yaml_inventory(session, 'hosts', host_var_directory_path, inventory) + if export_host_file: + list_hosts = list(skip_duplicates(list_hosts)) + with open('export_host_list', 'w') as stream: + for host in list_hosts: + stream.write(host + '\n') + +# Function to export ansible inventory +def export_ansible_inventory(session, jsonfile, inventory_name, bash=False): + inventory_id = return_inventory_id(session, inventory_name) + + url = api_url + 'inventories/' + str(inventory_id) + '/script' + payload = {'hostvars':'1', 'all':'1'} + r = session.get(url, params=payload) + export_result = json.loads(r.text) + logging.debug(export_result) + with open(jsonfile, 'w') as stream: + if bash: + stream.writelines(['#!/bin/bash\n', + 'if [ "$1" == "--list" ] ; then\n', + 'cat << "EOF"\n']) + stream.write(r.text) + if bash: + stream.writelines(['\nEOF\n', + 'elif [ "$1" == "--host" ]; then\n', + ' echo \'{"_meta": {"hostvars": {}}}\'\n', + 'else\n', + ' echo "{ }"\n', + 'fi\n']) + logging.info("Inventory '%s' exported into %s file", inventory_name, jsonfile) + +#Add one schedule +def 
add_schedule(session, job_template_id, payload): + url = api_url + 'job_templates/' + str(job_template_id) + '/schedules/' + r = session.post(url, json=payload) + + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != 201): + raise ActionFailure(">>> %d : Failed to add schedule %s %s" % (r.status_code, payload['name'], r.text)) + + result = json.loads(r.text) + schedule_id = result['id'] + + logging.info("Schedule '%s' added with ID %s", payload['name'], schedule_id) + return schedule_id + +#Add one job_template +def add_job_template(session, data): + url = api_url + 'job_templates/' + payload = {'name':data['name'], + 'description':data['description'], + 'job_type':data['job_type'], + 'inventory':data['inventory'], + 'project':data['project'], + 'playbook':data['playbook'], + 'credential':data['credential'], + 'vault_credential':data['vault_credential'], + 'forks':data['forks'], + 'limit':data['limit'], + 'verbosity':data['verbosity'], + 'extra_vars':data['extra_vars'], + 'job_tags':data['job_tags'], + 'force_handlers':data['force_handlers'], + 'skip_tags':data['skip_tags'], + 'start_at_task':data['start_at_task'], + 'timeout':data['timeout'], + 'use_fact_cache':data['use_fact_cache'], + 'host_config_key':data['host_config_key'], + 'ask_diff_mode_on_launch':data['ask_diff_mode_on_launch'], + 'ask_variables_on_launch':data['ask_variables_on_launch'], + 'ask_limit_on_launch':data['ask_limit_on_launch'], + 'ask_tags_on_launch':data['ask_tags_on_launch'], + 'ask_skip_tags_on_launch':data['ask_skip_tags_on_launch'], + 'ask_job_type_on_launch':data['ask_job_type_on_launch'], + 'ask_verbosity_on_launch':data['ask_verbosity_on_launch'], + 'ask_inventory_on_launch':data['ask_inventory_on_launch'], + 'ask_credential_on_launch':data['ask_credential_on_launch'], + 'survey_enabled':data['survey_enabled'], + 'become_enabled':data['become_enabled'], + 'diff_mode':data['diff_mode'], + 'allow_simultaneous':data['allow_simultaneous']} + + r = session.post(url, json=payload) + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != 201): + if r.status_code == 400 and "already exists" in r.text: + logging.info("Job Template '%s' already exist", data['name']) + return -1 + if r.status_code == 400 and "Playbook not found for project" in r.text: + logging.info(">>> %d : Failed to add job template '%s': Playbook not found for project", r.status_code, data['name']) + return -1 + raise ActionFailure(">>> %d : Failed to add job template '%s': %s" % (r.status_code, data['name'], r.text)) + response = json.loads(r.text) + logging.debug(response) + + job_template_id = response['id'] + logging.info("Job template '%s' added with ID %s", data['name'], job_template_id) + + if 'extra_credentials' in data: + url = api_url + 'job_templates/' + str(job_template_id) + '/extra_credentials/' + for cred_id in data['extra_credentials']: + payload = {'id':cred_id} + r = session.post(url, json=payload) + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != 204): + raise ActionFailure(">>> %d : Failed to add extra credential to job template '%s': %s" % + (r.status_code, data['name'], r.text)) + logging.info("%d extra credential(s) added to job template '%s'", len(data['extra_credentials']), data['name']) + + if 'schedule' in data: + for schedule in data['schedule']['results']: + payload = {'name':schedule['name'], + 'description':schedule['description'], + 'enabled':schedule['enabled'], + 'rrule':schedule['rrule'], + 'extra_data':schedule['extra_data']} + add_schedule(session, 
job_template_id, payload) + logging.info("%d schedule(s) added to job template '%s'", data['schedule']['count'], data['name']) + + if 'survey' in data: + url = api_url + 'job_templates/' + str(job_template_id) + '/survey_spec/' + payload = {'name':data['survey']['name'], + 'description':data['survey']['description'], + 'spec':data['survey']['spec']} + + r = session.post(url, json=payload) + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != requests.codes.ok): + raise ActionFailure(">>> %d : Failed to add survey to job template '%s': %s" % (r.status_code, data['name'], r.text)) + logging.info("Survey added to job template '%s'", data['name']) + + return job_template_id + +# Function to import job templates from file +def import_job_templates(session, jsonfile): + with open(jsonfile, 'r') as stream: + job_templates = json.loads(stream.read().replace('\n', '')) + logging.info("%d job template(s) loaded from json file", len(job_templates)) + + for job_template in job_templates: + add_job_template(session, job_template) + + logging.info("All job templates successfully imported") + +# Function to export all job templates +def export_all_job_templates(session, jsonfile): + templates = return_all(session, 'job_templates') + + countJob = countSurvey = countSchedule = countExtraCredential = 0 + for template in templates: + countJob += 1 + + url = api_url + 'job_templates/' + str(template['id']) + '/extra_credentials/' + r = session.get(url) + logging.debug("%s %s", r.request.method, r.url) + request_result = json.loads(r.text) + logging.debug(request_result) + if request_result['count']: + countExtraCredential += 1 + cred_list = [] + for result in request_result['results']: + cred_list.append(result['id']) + template['extra_credentials'] = cred_list + + if template['survey_enabled']: + countSurvey += 1 + url = api_url + 'job_templates/' + str(template['id']) + '/survey_spec/' + r = session.get(url) + logging.debug("%s %s", r.request.method, r.url) + request_result = json.loads(r.text) + logging.debug(request_result) + template['survey'] = request_result + + if 'next_schedule' in template['related']: + countSchedule += 1 + url = api_url + 'job_templates/' + str(template['id']) + '/schedules/' + r = session.get(url) + logging.debug("%s %s", r.request.method, r.url) + request_result = json.loads(r.text) + logging.debug(request_result) + template['schedule'] = request_result + + with open(jsonfile, 'w') as stream: + stream.write(json.dumps(templates)) + logging.info("All %d job templates have been exported, with %d surveys, %d schedules and %d extra_credential", + countJob, countSurvey, countSchedule, countExtraCredential) + +# Function to display credentials used by job templates +def display_job_templates_credentials(session): + templates = return_all(session, 'job_templates') + + logging.info("name,id,machine,vault,extra") + for template in templates: + url = api_url + 'job_templates/' + str(template['id']) + '/extra_credentials/' + r = session.get(url) + logging.debug("%s %s", r.request.method, r.url) + request_result = json.loads(r.text) + logging.debug(request_result) + if request_result['count']: + cred_list = [] + for result in request_result['results']: + cred_list.append(result['id']) + template['extra_credentials'] = cred_list + logging.info("%s,%d,%s,%s,%s", template['name'], template['id'], + template['credential'], template['vault_credential'], str(template.get('extra_credentials'))) + +#Add one credential +def add_credential(session, data, delete=False): + if 
data['organization'] is None: + data['organization'] = return_organization_id(session, 'Default') + + if 'password' in data['inputs']: + data['inputs']['password'] = 'ASK' + if 'become_password' in data['inputs']: + data['inputs']['become_password'] = 'ASK' + if 'ssh_key_data' in data['inputs']: + data['inputs']['ssh_key_data'] = '' + if 'ssh_key_unlock' in data['inputs']: + data['inputs']['ssh_key_unlock'] = '' + if 'vault_password' in data['inputs']: + data['inputs']['vault_password'] = 'password' + + url = api_url + 'credentials/' + payload = {'name':data['name'], + 'description':data['description'], + 'organization':data['organization'], + 'credential_type':data['credential_type'], + 'inputs':data['inputs']} + + r = session.post(url, json=payload) + logging.debug("%s %s", r.request.method, r.url) + if (r.status_code != 201): + if r.status_code == 400 and "already exists" in r.text: + logging.info("Credential '%s' already exist in organization %s", data['name'], data['organization']) + return -1 + raise ActionFailure("%d : Failed to add credential '%s': %s" % (r.status_code, data['name'], r.text)) + response = json.loads(r.text) + logging.debug(response) + + credential_id = response['id'] + logging.info("Credential '%s' added with ID %s", data['name'], credential_id) + + return credential_id + +#Add one project +def add_project(session, data, delete=False): + url = api_url + 'projects/' + payload = {'name':data['name'], + 'description':data['description'], + 'organization':data['organization'], + 'scm_type':data['scm_type'], + 'scm_url':data['scm_url'], + 'local_path':data['local_path'], + 'scm_branch':data['scm_branch'], + 'credential':data['credential'], + 'scm_clean':data['scm_clean'], + 'scm_delete_on_update':data['scm_delete_on_update'], + 'scm_update_on_launch':data['scm_update_on_launch'], + 'scm_update_cache_timeout':data['scm_update_cache_timeout'], + 'timeout':data['timeout']} + + r = session.post(url, json=payload) + logging.info("%s %s", r.request.method, r.url) + if r.status_code == 400 and "already exists" in r.text and delete: + delete_project(session, data['name']) + r = session.post(url, json=payload) + if (r.status_code != 201): + raise ActionFailure("%d : Failed to add project '%s': %s" % (r.status_code, data['name'], r.text)) + response = json.loads(r.text) + logging.debug(response) + + project_id = response['id'] + logging.info("Project '%s' added with ID %s", data['name'], project_id) + + return project_id + +# Function to import something from file +def import_all(session, jsonfile, object_type, delete=False): + with open(jsonfile, 'r') as stream: + results = json.loads(stream.read().replace('\n', '')) + logging.info("%d %s loaded from json file", len(results), object_type) + + command = 'add_' + object_type[:-1] + for result in results: + globals()[command](session, result, delete) + + logging.info("All %s successfully imported", object_type) + +# Function to export all something +def export_all(session, jsonfile, object_type): + if object_type == 'disabledHosts': + results = return_all(session, 'hosts', {"key":"enabled", "value":"false"}) + else: + results = return_all(session, object_type) + + with open(jsonfile, 'w') as stream: + stream.write(json.dumps(results)) + logging.info("All %d %s have been exported", len(results), object_type) + +# Create tower inventory +def add_inventory(session, inventory_name, organization_name, delete=False, variables=""): + organization_id = return_organization_id(session, organization_name) + try: + if delete: + 
delete_inventory(session, inventory_name) + inventory_id = return_inventory_id(session, inventory_name) + logging.error("The inventory %s already exist with ID %d", inventory_name, inventory_id) + except ObjectNotFound: + url = api_url + 'inventories/' + payload = {'name':inventory_name, 'organization':organization_id, 'variables':variables} + r = session.post(url, json=payload) + + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != 201): + raise ActionFailure("%d : Failed to add inventory %s %s" % (r.status_code, inventory_name, r.text)) + + search_response = json.loads(r.text) + inventory_id = search_response['id'] + + logging.info("Inventory %s added with ID %s", inventory_name, inventory_id) + return inventory_id + +#Import all inventory in txtfile +def mass_import_inventory(session, txtfile): + with open(txtfile, newline='') as csvfile: + reader = csv.reader(csvfile, delimiter=';') + count = 0 + for row in reader: + add_inventory(session, row[0], row[1]) + count = count + 1 + logging.info("%d inventory(ies) imported", count) + +#Remove inventory +def delete_inventory(session, inventory): + inventory_id = return_inventory_id(session, inventory) + + url = api_url + 'inventories/' + str(inventory_id) + '/' + r = session.delete(url) + + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != 204): + raise ActionFailure("%d : Failed to remove inventory %s %s" % (r.status_code, inventory, r.text)) + + logging.info("Inventory %s deleted", inventory) + return inventory_id + +#Remove job template +def delete_job_template(session, job_template): + job_template_id = return_job_template_id(session, job_template) + + url = api_url + 'job_templates/' + str(job_template_id) + '/' + r = session.delete(url) + + logging.info("%s %s", r.request.method, r.url) + if (r.status_code != 204): + raise ActionFailure("%d : Failed to remove job_template %s %s" % (r.status_code, job_template, r.text)) + + logging.info("Job Template '%s' deleted", job_template) + return job_template_id + +# browse yaml inventory (group_vars and host_vars) +def browse_yaml_inventory(session, type_var, path, inventory): + list_file = absoluteFilePaths(path) + for f in list_file: + if '.yaml' in f or '.yml' in f: + filename = f.split("/") + filename = filename[-1].replace(".yaml", "").replace('.yml', '') + with open(f, 'r') as stream: + doc = yaml.safe_load(stream) + for variable in doc: + if type_var == 'groups' or type_var == 'hosts': + associate_variable(session, type_var, filename, variable, doc[variable], inventory) + else: + raise ActionFailure("Error (Browse_yaml_inventory) : Type not found") + +def main(): + parser = argparse.ArgumentParser() + verbosity_choices = ['INFO', 'ERROR', 'DEBUG'] + parser.add_argument('--verbosity', action='store', choices=verbosity_choices, type=str, default='INFO') + parser.add_argument('--username', action='store', type=str, default=username) + parser.add_argument('--password', action='store', type=str, default=secret) + subparsers = parser.add_subparsers(dest='subcommand') + subparsers.required = True + + parser_addHost = subparsers.add_parser('addHost') + parser_addHost.add_argument('fqdn', action="store", default=None) + parser_addHost.add_argument('inventory_name', action="store", default=None) + + parser_massImportHost = subparsers.add_parser('massImportHost') + parser_massImportHost.add_argument('txtfile', action="store", default=None) + + parser_massImportInventory = subparsers.add_parser('massImportInventory') + 
parser_massImportInventory.add_argument('txtfile', action="store", default=None) + + parser_deleteHost = subparsers.add_parser('deleteHost') + parser_deleteHost.add_argument('fqdn', action="store", default=None) + + parser_massDeleteHost = subparsers.add_parser('massDeleteHost') + parser_massDeleteHost.add_argument('txtfile', action="store", default=None) + + parser_massChangeHostStatus = subparsers.add_parser('massChangeHostStatus') + parser_massChangeHostStatus.add_argument('txtfile', action="store", default=None) + + parser_addGroup = subparsers.add_parser('addGroup') + parser_addGroup.add_argument('group_name', action="store", default=None) + parser_addGroup.add_argument('inventory_name', action="store", default=None) + parser_addGroup.add_argument('--force-create', dest='force_create', action='store_true') + parser_addGroup.set_defaults(disable_cooldown=False) + + parser_deleteGroup = subparsers.add_parser('deleteGroup') + parser_deleteGroup.add_argument('group_name', action="store", default=None) + parser_deleteGroup.add_argument('inventory_name', action="store", default=None) + + parser_addInventory = subparsers.add_parser('addInventory') + parser_addInventory.add_argument('inventory_name', action="store", default=None) + parser_addInventory.add_argument('--org', dest='organization_name', type=str, nargs='?', action="store", default="Default") + parser_addInventory.add_argument('--force-create', dest='force_create', action='store_true') + parser_addInventory.set_defaults(disable_cooldown=False) + + parser_deleteInventory = subparsers.add_parser('deleteInventory') + parser_deleteInventory.add_argument('inventory_name', action="store", default=None) + + parser_deleteJobTemplate = subparsers.add_parser('deleteJobTemplate') + parser_deleteJobTemplate.add_argument('job_template_name', action="store", default=None) + + parser_associate = subparsers.add_parser('associate') + parser_associate.add_argument('fqdn', action="store", default=None) + parser_associate.add_argument('group_name', action="store", default=None) + parser_associate.add_argument('inventory_name', action="store", default=None) + + parser_associateVariable = subparsers.add_parser('associateVariable') + type_choices = ['hosts', 'groups'] + parser_associateVariable.add_argument('type', action="store", choices=type_choices, type=str, default=None) + parser_associateVariable.add_argument('name', action="store", type=str, default=None) + parser_associateVariable.add_argument('key', action="store", type=str, default=None) + parser_associateVariable.add_argument('value', action="store", type=str, default=None) + parser_associateVariable.add_argument('inventory_name', action="store", type=str, default=None) + + parser_associateChildren = subparsers.add_parser('associateChildren') + parser_associateChildren.add_argument('group_parent_name', action="store", default=None) + parser_associateChildren.add_argument('group_child_name', action="store", default=None) + parser_associateChildren.add_argument('inventory_name', action="store", default=None) + + parser_massAssociate = subparsers.add_parser('massAssociate') + parser_massAssociate.add_argument('txtfile', action="store", default=None) + + parser_disassociate = subparsers.add_parser('disassociate') + parser_disassociate.add_argument('fqdn', action="store", default=None) + parser_disassociate.add_argument('group_name', action="store", default=None) + parser_disassociate.add_argument('inventory_name', action="store", default=None) + + parser_hostGroups = 
subparsers.add_parser('hostGroups') + parser_hostGroups.add_argument('fqdn', action="store", default=None) + + parser_groupMembers = subparsers.add_parser('groupMembers') + parser_groupMembers.add_argument('group_name', action="store", default=None) + parser_groupMembers.add_argument('inventory_name', action="store", default=None) + parser_groupMembers.add_argument('--export', dest='export', action="store_true") + parser_groupMembers.set_defaults(export=False) + + parser_groupListMembers = subparsers.add_parser('groupListMembers') + parser_groupListMembers.add_argument('inventory_name', action="store", default=None) + parser_groupListMembers.add_argument('group_list', nargs='+', action="store", default=None) + parser_groupListMembers.add_argument('--inverse', dest='inverse', action="store_true") + parser_groupListMembers.set_defaults(export=False) + + parser_groupVars = subparsers.add_parser('groupVars') + parser_groupVars.add_argument('group_name', action="store", default=None) + parser_groupVars.add_argument('inventory_name', action="store", default=None) + + parser_hostVars = subparsers.add_parser('hostVars') + parser_hostVars.add_argument('fqdn', action="store", default=None) + parser_hostVars.add_argument('--nested', action="store_true") + + parser_lastExecutionStatus = subparsers.add_parser('lastExecutionStatus') + parser_lastExecutionStatus.add_argument('id', type=int, action="store", default=None) + parser_lastExecutionStatus.add_argument('-q', '--quiet', dest='quiet', action="store_true") + parser_lastExecutionStatus.set_defaults(quiet=False) + + parser_lastExecutionChange = subparsers.add_parser('lastExecutionChange') + parser_lastExecutionChange.add_argument('id', type=int, action="store", default=None) + parser_lastExecutionChange.add_argument('-q', '--quiet', dest='quiet', action="store_true") + parser_lastExecutionChange.set_defaults(quiet=False) + + subparsers.add_parser('getAllGroupVars') + subparsers.add_parser('getAllHostVars') + subparsers.add_parser('getLonelyHosts') + subparsers.add_parser('getLonelyGroups') + subparsers.add_parser('getTemplatesWithSurvey') + subparsers.add_parser('getTemplatesNotUsingDefault') + subparsers.add_parser('getProjectsWithOldBranch') + subparsers.add_parser('displayJobTemplatesCredentials') + + parser_launchJob = subparsers.add_parser('launchJob') + parser_launchJob.add_argument('jsonfile', action="store", default=None) + parser_launchJob.add_argument('--remote_username', action='store', type=str, default="") + parser_launchJob.add_argument('--remote_password', action='store', type=str, default="") + parser_launchJob.add_argument('--tags', action="store", default="") + parser_launchJob.add_argument('--inventory', action="store", default="") + parser_launchJob.add_argument('--limit', action="store", default="") + parser_launchJob.add_argument('--si_version', action="store", default="") + parser_launchJob.add_argument('--job_type', action="store", default="") + parser_launchJob.add_argument('--disable_cooldown', dest='disable_cooldown', action='store_true') + parser_launchJob.set_defaults(disable_cooldown=False) + parser_launchJob.add_argument('--return_id', dest='return_id', action='store_true') + parser_launchJob.set_defaults(return_id=False) + + parser_stopJob = subparsers.add_parser('stopJob') + parser_stopJob.add_argument('job_id', action="store", default=None) + + parser_importAnsibleInventory = subparsers.add_parser('importAnsibleInventory') + parser_importAnsibleInventory.add_argument('file', type=str, action="store", default=None) + 
parser_importAnsibleInventory.add_argument('inventory', type=str, action="store", default=None) + parser_importAnsibleInventory.add_argument('--export', dest='export_host_file', action="store_true") + parser_importAnsibleInventory.set_defaults(export_host_file=False) + parser_importAnsibleInventory.add_argument('--org', dest='organization_name', type=str, + nargs='?', action="store", default="Default") + parser_importAnsibleInventory.add_argument('--groupvars', dest='group_var_directory_path', type=str, + nargs='?', action="store", default=None) + parser_importAnsibleInventory.add_argument('--hostvars', dest='host_var_directory_path', type=str, + nargs='?', action="store", default=None) + + parser_exportAnsibleInventory = subparsers.add_parser('exportAnsibleInventory') + parser_exportAnsibleInventory.add_argument('jsonfile', type=str, action="store", default=None) + parser_exportAnsibleInventory.add_argument('inventory_name', type=str, action="store", default=None) + parser_exportAnsibleInventory.add_argument('--bash', action="store_true") + + parser_importJobTemplates = subparsers.add_parser('importJobTemplates') + parser_importJobTemplates.add_argument('jsonfile', type=str, action="store", default=None) + + parser_exportAllJobTemplates = subparsers.add_parser('exportAllJobTemplates') + parser_exportAllJobTemplates.add_argument('jsonfile', type=str, action="store", default=None) + + import_object_type = ['credentials', 'projects'] + parser_importAll = subparsers.add_parser('importAll') + parser_importAll.add_argument('jsonfile', type=str, action="store", default=None) + parser_importAll.add_argument('object_type', type=str, choices=import_object_type, action="store", default=None) + parser_importAll.add_argument('--delete', dest='delete', action='store_true') + + export_object_type = ['credentials', 'projects', 'disabledHosts'] + parser_exportAll = subparsers.add_parser('exportAll') + parser_exportAll.add_argument('jsonfile', type=str, action="store", default=None) + parser_exportAll.add_argument('object_type', type=str, choices=export_object_type, action="store", default=None) + + args = parser.parse_args() + + logging.getLogger("requests").setLevel(logging.CRITICAL) + + logging.config.dictConfig({ + "version": 1, + "handlers": { + "console": { + "level": args.verbosity, + "class": "logging.StreamHandler", + "stream": "ext://sys.stdout", + }, + }, + "root": { + "level": args.verbosity, + "handlers": ["console"], + }, + }) + + mapp_call = { + 'addHost': add_host, + 'massImportHost': mass_import_host, + 'massImportInventory': mass_import_inventory, + 'deleteHost': delete_host, + 'massDeleteHost': mass_delete_host, + 'massChangeHostStatus': mass_change_host_status, + 'addGroup': add_group, + 'deleteGroup': delete_group, + 'addInventory': add_inventory, + 'deleteInventory': delete_inventory, + 'deleteJobTemplate': delete_job_template, + 'associate': associate_to_group, + 'associateVariable': associate_variable, + 'associateChildren': associate_children_to_group, + 'massAssociate': mass_associate, + 'disassociate': disassociate_from_group, + 'hostGroups': host_groups, + 'groupMembers': group_members, + 'groupListMembers': groupList_members, + 'groupVars': group_vars, + 'hostVars': host_vars, + 'lastExecutionStatus': last_execution_status, + 'lastExecutionChange': last_execution_change, + 'getAllGroupVars': all_group_vars, + 'getAllHostVars': all_host_vars, + 'getLonelyHosts': all_lonely_hosts, + 'getLonelyGroups': all_lonely_groups, + 'getTemplatesWithSurvey': all_templates_with_survey, + 
'getTemplatesNotUsingDefault': all_not_default_project, + 'getProjectsWithOldBranch': all_projects_with_old_branch, + 'displayJobTemplatesCredentials': display_job_templates_credentials, + 'launchJob': launch_job, + 'stopJob': stop_job, + 'importAnsibleInventory': import_ansible_inventory, + 'exportAnsibleInventory': export_ansible_inventory, + 'importJobTemplates': import_job_templates, + 'exportAllJobTemplates': export_all_job_templates, + 'importAll': import_all, + 'exportAll': export_all, + } + + if args.subcommand == "lastExecutionStatus" or args.subcommand == "lastExecutionChange": + myargs = (args.quiet, args.id,) + elif args.subcommand == "hostVars": + myargs = (args.fqdn, args.nested) + elif args.subcommand in {"deleteHost", "hostGroups"}: + myargs = (args.fqdn,) + elif args.subcommand == "addHost": + myargs = (args.fqdn, args.inventory_name) + elif args.subcommand == "associate" or args.subcommand == "disassociate": + myargs = (args.fqdn, args.group_name, args.inventory_name) + elif args.subcommand == "launchJob": + if args.remote_username != "" and args.remote_password != "": + myargs = (args.remote_username, args.remote_password, args.jsonfile, args.tags, + args.inventory, args.limit, args.si_version, args.job_type, args.disable_cooldown, args.return_id) + else: + myargs = (args.username, args.password, args.jsonfile, args.tags, + args.inventory, args.limit, args.si_version, args.job_type, args.disable_cooldown, args.return_id) + elif args.subcommand == "stopJob": + myargs = (args.job_id,) + elif args.subcommand == "associateChildren": + myargs = (args.group_parent_name, args.group_child_name, args.inventory_name) + elif args.subcommand == "groupVars": + myargs = (args.group_name, args.inventory_name) + elif args.subcommand == "groupMembers": + myargs = (args.group_name, args.inventory_name, args.export) + elif args.subcommand == "groupListMembers": + myargs = (args.inventory_name, args.inverse, args.group_list) + elif args.subcommand in {"exportAllJobTemplates", "importJobTemplates"}: + myargs = (args.jsonfile,) + elif args.subcommand in {"exportAll"}: + myargs = (args.jsonfile, args.object_type) + elif args.subcommand in {"importAll"}: + myargs = (args.jsonfile, args.object_type, args.delete) + elif args.subcommand in {"massDeleteHost", "massImportHost", "massAssociate", "massImportInventory", "massChangeHostStatus"}: + myargs = (args.txtfile,) + elif args.subcommand in {"getAllGroupVars", "getAllHostVars", "getLonelyHosts", "getLonelyGroups", + "getTemplatesWithSurvey", "getTemplatesNotUsingDefault", "getProjectsWithOldBranch", "displayJobTemplatesCredentials"}: + myargs = () + elif args.subcommand == "importAnsibleInventory": + myargs = (args.file, args.inventory, args.organization_name, + args.export_host_file, args.group_var_directory_path, args.host_var_directory_path) + elif args.subcommand == "exportAnsibleInventory": + myargs = (args.jsonfile, args.inventory_name, args.bash) + elif args.subcommand == "addGroup": + myargs = (args.group_name, args.inventory_name, args.force_create) + elif args.subcommand == "deleteGroup": + myargs = (args.group_name, args.inventory_name) + elif args.subcommand == "deleteInventory": + myargs = (args.inventory_name,) + elif args.subcommand == "deleteJobTemplate": + myargs = (args.job_template_name,) + elif args.subcommand == "addInventory": + myargs = (args.inventory_name, args.organization_name, args.force_create) + elif args.subcommand == "associateVariable": + myargs = (args.type, args.name, args.key, args.value, args.inventory_name) + 
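+    # Dispatch: each subcommand name maps to exactly one handler in mapp_call,
+    # and myargs was marshalled above to match that handler's positional
+    # signature; every handler receives the authenticated session first.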
+    try:
+        session = authentication(args.username, args.password)
+        mapp_call[args.subcommand](session, *myargs)
+    except FileNotFoundError as e:
+        logging.error("%s %s", type(e), e)
+        sys.exit(2)
+    except IndexError as e:
+        logging.error("%s : you should check your txtfile syntax!", e)
+        sys.exit(3)
+    except ActionFailure as e:
+        logging.error("%s", e)
+        sys.exit(4)
+    except Exception as e:  # pylint: disable=broad-except
+        if args.verbosity == 'DEBUG':
+            logging.error("%s", e, exc_info=True)
+        else:
+            logging.error("%s %s", type(e), e)
+        sys.exit(1)
+
+if __name__ == '__main__':
+    main()
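
For context, the helpers this patch adds can also be driven from Python rather than through the argparse CLI. Below is a minimal sketch, assuming the script is importable as `tower` and that its module-level settings (`api_url`, `tower_url`, the values read from secret.py) are configured as above; the credentials, hostname, group, and inventory names are placeholders:

```python
import logging
import sys

import tower  # assumes Py3Scripts/tower.py is on PYTHONPATH

logging.basicConfig(level=logging.INFO, stream=sys.stdout)

try:
    # authentication() returns the requests session that every helper expects
    session = tower.authentication('admin', 's3cr3t')            # placeholder credentials
    tower.add_host(session, 'web01.example.com', 'Production')   # fqdn, inventory
    tower.associate_to_group(session, 'web01.example.com', 'webservers', 'Production')
    tower.host_groups(session, 'web01.example.com')              # prints the host's groups
except tower.ActionFailure as exc:
    logging.error("%s", exc)
    sys.exit(4)
```

This mirrors what `main()` does for the `addHost`, `associate`, and `hostGroups` subcommands, minus the argument marshalling.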