要將訓練好的模型實現自瞄我分為以下步驟

  1. 引入模型
  2. 擷取螢幕截圖
  3. 預測物體位置並取得中心座標
  4. 取得鼠標移動 offset
  5. 微調 offset
  6. 移動鼠標並射擊

以下將以 Aimlab Gridshot 為例實作

前置準備

想要引入模型須先準備一些依賴

  • hubconf.py
引入模型的依賴文件,可於 YOLOv7 官方 GitHub 儲存庫取得(原文此處附有下載連結),也似乎可透過 torch.hub 直接在線調用

  • models、utils
    直接複製 YOLOv7 目錄中的 models、utils 兩個資料夾即可

  • 訓練好的模型

所有檔案放入同一目錄

folder

best.pt 為訓練好的模型,以下將解釋實現步驟

import 與設定參數

根據自己的配置設定參數,延遲根據電腦配置設定,可實際執行不斷調整至合適的延遲

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
import sys
import time
import torch
import win32api
import win32con
from PyQt5.QtWidgets import QApplication


# data setting
# Game/screen resolution; used to locate the screen centre (crosshair).
GAME_WIDTH = 1920
GAME_HEIGHT = 1080

# Path the screenshot is written to and read back by the model.
IMG_PATH = 'screenshot.bmp'

# Directory containing hubconf.py (plus the models/ and utils/ folders from YOLOv7).
HUBCONF_FOLDER_PATH = r'.'
# Trained YOLOv7 weights.
MODEL_PATH = 'best.pt'
# Detections with confidence at or below this are discarded.
CONFIDENCE_THRESHOLD = 0.4

# Sleep times in seconds between mouse events; tune per machine.
MOVE_DELAY1 = 0.049
MOVE_DELAY2 = 0.033
ONE_SHOT_DELAY = 0.001

引入模型

使用 torch.hub.load 引入模型,須搭配 hubconf.py

1
2
3
4
5
# init model
# Load the custom-trained weights through the local hubconf.py entrypoint,
# then move the network to the GPU.
device = torch.device('cuda')
model = torch.hub.load(HUBCONF_FOLDER_PATH, 'custom', MODEL_PATH,
source='local', force_reload=False)
model = model.to(device)

擷取螢幕截圖

在 Windows 下利用 PyQt 截圖是最快的,因此使用 PyQt 擷取螢幕

1
2
3
4
5
6
7
8
9
def screenshot(screen, img_path):
# Grab the whole desktop (window id 0) as a QImage and save it to disk.
img = screen.grabWindow(0).toImage()
img.save(img_path)

# init PyQt
# NOTE(review): the QApplication instance must stay alive for capture to work.
app = QApplication(sys.argv)
screen = QApplication.primaryScreen()

screenshot(screen, IMG_PATH)

預測物體位置並取得中心座標

將圖片送入模型,取得預測的四點座標及信心值
再根據設定的閾值將信心值較低的預測排除
最後以四點座標算出中心座標

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
def inference_by_image(model, img_path):
# Run the detector on one image. results.pandas().xyxy[0] is a DataFrame
# with one row per detection for the first (only) image.
results = model(img_path)
results_pandas = results.pandas()

# box coordinates
x_mins = results_pandas.xyxy[0]['xmin']
x_maxs = results_pandas.xyxy[0]['xmax']
y_mins = results_pandas.xyxy[0]['ymin']
y_maxs = results_pandas.xyxy[0]['ymax']

confidences = results_pandas.xyxy[0]['confidence']

return x_mins, x_maxs, y_mins, y_maxs, confidences


def get_points_by_threshold(zip_object, threshold):
# Keep only boxes whose confidence strictly exceeds the threshold.
points = []
for x_min, x_max, y_min, y_max, conf in zip_object:
if conf > threshold:
points.append([int(x_min), int(x_max), int(y_min), int(y_max)])
return points


def get_center_pos(points):
# Convert [x_min, x_max, y_min, y_max] boxes to [x_center, y_center]
# using integer arithmetic (rounds toward the max edge for odd extents).
center_pos = []
for point in points:
x_center = int(point[1] - (point[1] - point[0]) // 2)
y_center = int(point[3] - (point[3] - point[2]) // 2)
center_pos.append([x_center, y_center])
return center_pos

取得鼠標移動 offset

根據遊戲解析度算出鼠標從螢幕中心到物體中心需要移動的 offset

1
2
3
4
5
6
7
def get_move_offsets(center_pos, game_width, game_height):
# Offset from the screen centre (crosshair) to each target centre.
move_offsets = []
for center in center_pos:
x_offset = int(center[0] - game_width // 2)
y_offset = int(center[1] - game_height // 2)
move_offsets.append([x_offset, y_offset])
return move_offsets

微調 offset

依據遊戲解析度、滑鼠靈敏度微調 offset
以下是根據我的設備來調整,每個人須做的調整可能不一

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
def fine_tuning(x_offset, y_offset):
# Device-specific correction for mouse sensitivity / resolution.
# NOTE(review): constants were hand-tuned for the author's setup.
if x_offset > 0:
x_offset += 43
if y_offset > 0:
y_offset += 44

if x_offset < 0:
x_offset -= 37
if y_offset < 0:
y_offset -= 44
# Extra correction for targets far above centre (checked after the -44 shift).
if y_offset < -200:
y_offset -= 45
elif y_offset < -150:
y_offset -= 30

return x_offset, y_offset

移動鼠標並射擊

調用 win32api 實現
移動 -> 射擊 -> 回到原本位置
須在各事件間加入延遲,否則可能發生滑鼠尚未移動到位就射擊、或點擊未被遊戲接收等問題

1
2
3
4
5
6
7
def move_and_click_by_offsets(x_offset, y_offset, move_delay1, move_delay2):
# Relative move to the target, one left click, then the inverse move back.
# The sleeps are required or the game may miss the movement/click.
win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, x_offset, y_offset, 0, 0)
time.sleep(move_delay1)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)
time.sleep(move_delay2)
win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, -x_offset, -y_offset, 0, 0)

完整程式碼

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
import sys
import time
import torch
import win32api
import win32con
from PyQt5.QtWidgets import QApplication


# data setting
# Game/screen resolution; used to locate the screen centre (crosshair).
GAME_WIDTH = 1920
GAME_HEIGHT = 1080

# Path the screenshot is written to and read back by the model.
IMG_PATH = 'screenshot.bmp'

# Directory containing hubconf.py (plus the models/ and utils/ folders from YOLOv7).
HUBCONF_FOLDER_PATH = r'.'
# Trained YOLOv7 weights.
MODEL_PATH = 'best.pt'
# Detections with confidence at or below this are discarded.
CONFIDENCE_THRESHOLD = 0.4

# Sleep times in seconds between mouse events; tune per machine.
MOVE_DELAY1 = 0.049
MOVE_DELAY2 = 0.033
ONE_SHOT_DELAY = 0.001


def screenshot(screen, img_path):
    """Capture the entire desktop via PyQt and write it to *img_path*.

    *screen* is a QScreen; grabWindow(0) grabs the whole desktop.
    """
    screen.grabWindow(0).toImage().save(img_path)


def inference_by_image(model, img_path):
    """Run the detector on the image at *img_path*.

    Returns five parallel pandas Series — one entry per detection:
    (x_mins, x_maxs, y_mins, y_maxs, confidences).
    """
    # .pandas().xyxy[0] is the detections DataFrame for the first (only) image;
    # bind it once instead of re-indexing for every column.
    detections = model(img_path).pandas().xyxy[0]

    return (detections['xmin'], detections['xmax'],
            detections['ymin'], detections['ymax'],
            detections['confidence'])


def get_points_by_threshold(zip_object, threshold):
    """Filter detections by confidence.

    *zip_object* yields (x_min, x_max, y_min, y_max, confidence) tuples;
    only rows with confidence strictly above *threshold* are kept, as
    [x_min, x_max, y_min, y_max] integer boxes.
    """
    return [
        [int(x_min), int(x_max), int(y_min), int(y_max)]
        for x_min, x_max, y_min, y_max, conf in zip_object
        if conf > threshold
    ]


def get_center_pos(points):
    """Map [x_min, x_max, y_min, y_max] boxes to [x_center, y_center].

    The centre is the max edge minus half the extent (integer division),
    so odd extents round toward the max edge — same arithmetic as the
    original implementation.
    """
    centers = []
    for x_min, x_max, y_min, y_max in points:
        half_w = (x_max - x_min) // 2
        half_h = (y_max - y_min) // 2
        centers.append([int(x_max - half_w), int(y_max - half_h)])
    return centers


def get_move_offsets(center_pos, game_width, game_height):
    """Offsets from the screen centre (crosshair) to each target centre."""
    # Screen centre is loop-invariant; compute it once.
    half_w = game_width // 2
    half_h = game_height // 2
    return [[int(cx - half_w), int(cy - half_h)] for cx, cy in center_pos]


def fine_tuning(x_offset, y_offset):
    """Apply device-specific corrections to a mouse offset pair.

    Compensates for mouse sensitivity / resolution; the constants were
    hand-tuned for the author's setup. Returns the adjusted
    (x_offset, y_offset) tuple.
    """
    # Horizontal correction is asymmetric: +43 moving right, -37 moving left.
    if x_offset > 0:
        x_offset += 43
    elif x_offset < 0:
        x_offset -= 37

    if y_offset > 0:
        y_offset += 44
    elif y_offset < 0:
        y_offset -= 44
        # Targets far above centre need extra correction; these thresholds
        # are checked against the value AFTER the -44 shift.
        if y_offset < -200:
            y_offset -= 45
        elif y_offset < -150:
            y_offset -= 30

    return x_offset, y_offset


def move_and_click_by_offsets(x_offset, y_offset, move_delay1, move_delay2):
    """Move the cursor by a relative offset, fire one shot, then move back.

    Sequence: relative move -> left press + release -> inverse relative move.
    The sleeps give the game time to register each event; without them the
    move or the click may be dropped.
    """
    mouse = win32api.mouse_event
    mouse(win32con.MOUSEEVENTF_MOVE, x_offset, y_offset, 0, 0)
    time.sleep(move_delay1)
    # A single click is an immediate press/release pair.
    mouse(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
    mouse(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)
    time.sleep(move_delay2)
    # Return the cursor to where it started (inverse relative move).
    mouse(win32con.MOUSEEVENTF_MOVE, -x_offset, -y_offset, 0, 0)


def main():
    """Aim-assist loop: screenshot -> detect -> aim and shoot each target."""
    # Load the custom-trained YOLOv7 weights through the local hubconf.py.
    device = torch.device('cuda')
    model = torch.hub.load(HUBCONF_FOLDER_PATH, 'custom', MODEL_PATH,
                           source='local', force_reload=False).to(device)

    # init PyQt; the QApplication instance must stay alive for screen capture.
    app = QApplication(sys.argv)
    screen = QApplication.primaryScreen()

    # inference and move to shot
    while True:
        screenshot(screen, IMG_PATH)
        boxes = inference_by_image(model, IMG_PATH)
        points = get_points_by_threshold(zip(*boxes), CONFIDENCE_THRESHOLD)

        # Gridshot always shows exactly 3 targets; any other count means a
        # bad/partial detection, so skip this frame.
        if len(points) != 3:
            continue

        move_offsets = get_move_offsets(get_center_pos(points),
                                        GAME_WIDTH, GAME_HEIGHT)
        for x_offset, y_offset in move_offsets:
            x_offset, y_offset = fine_tuning(x_offset, y_offset)
            move_and_click_by_offsets(x_offset, y_offset,
                                      MOVE_DELAY1, MOVE_DELAY2)
            time.sleep(ONE_SHOT_DELAY)


if __name__ == '__main__':
    main()

結語

任何「預測物體位置並點擊」的任務都可依此方法實現。若環境允許使用絕對座標移動會更容易:可省去微調 offset 的步驟,將鼠標移動改為調用絕對移動即可。