mirror of https://github.com/jjaldridge2009/Project_Xs.git (synced 2024-11-20 17:32:43 +01:00)

Commit 0046df2467 (parent 7b5aa2b160): sysdvr support + manual wild seed finding + minor fixes
config.json (new file, 5 lines)
@@ -0,0 +1,5 @@
{
    "SysDVR": true,
    "image": "./trainer/cave/eye.png",
    "view": [620, 340, 20, 20]
}
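For orientation, the three keys map directly onto the capture code added in this commit; a minimal sketch of how they are consumed, mirroring the json.load / *config["view"] pattern in src/wild.py and src/position.py below (the variable names here are illustrative):

import json

config = json.load(open("config.json"))
use_sysdvr = config["SysDVR"]                # True -> capture the SysDVR-Client window; False -> cv2.VideoCapture(0)
eye_template_path = config["image"]          # path of the eye template matched by tracking_blink
roi_x, roi_y, roi_w, roi_h = config["view"]  # capture box: x, y, width, height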
src/position.py (new file, 52 lines)
@@ -0,0 +1,52 @@
import time
import cv2
import json

config = json.load(open("config.json"))

if config["SysDVR"]:
    from windowcapture import WindowCapture
    video = WindowCapture("SysDVR-Client [PID ")
else:
    video = cv2.VideoCapture(0,cv2.CAP_DSHOW)
    video.set(cv2.CAP_PROP_FRAME_WIDTH,1920)
    video.set(cv2.CAP_PROP_FRAME_HEIGHT,1080)
    video.set(cv2.CAP_PROP_BUFFERSIZE,1)
roi_y = 0
roi_x = 0
roi_h = 10
roi_w = 10
UP = 2490368
DOWN = 2621440
LEFT = 2424832
RIGHT = 2555904
print("use up/down/left/right to move box, 8/4/6/2 to expand/shrink box, q to close")
while True:
    _,frame = video.read()
    time_counter = time.perf_counter()

    cv2.rectangle(frame,(roi_x,roi_y), (roi_x+roi_w,roi_y+roi_h), 255, 2)
    cv2.imshow("view", frame)
    keypress = cv2.waitKeyEx(1)
    if keypress == ord('q'):
        video.release()
        cv2.destroyAllWindows()
        exit()
    elif keypress == UP:
        roi_y -= 10
    elif keypress == DOWN:
        roi_y += 10
    elif keypress == LEFT:
        roi_x -= 10
    elif keypress == RIGHT:
        roi_x += 10
    elif keypress == ord('8'):
        roi_h -= 10
    elif keypress == ord('2'):
        roi_h += 10
    elif keypress == ord('4'):
        roi_w -= 10
    elif keypress == ord('6'):
        roi_w += 10
    if keypress != ord('q') and keypress != -1:
        print([roi_x,roi_y,roi_w,roi_h])
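A sketch of the intended workflow for this tool; the run command and the sample numbers are illustrative, the printed list format is exactly what the script emits:

# python src/position.py        # opens the "view" window with a 10x10 box at the top-left
# ...arrow keys move the box, 8/4/6/2 resize it; every change prints the box...
# [620, 340, 20, 20]            # last value printed before pressing q
# copy that list into the "view" key of config.json so the trackers watch the same region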
src/rngtool.py (file name not shown in the mirror; inferred from the rngtool.tracking_blink / tracking_blink_manual calls in src/wild.py and src/wild_manual.py below)
@@ -16,7 +16,7 @@ def randrange(r,mi,ma):
    t = (r & 0x7fffff) / 8388607.0
    return t * mi + (1.0 - t) * ma

def tracking_blink(img, roi_x, roi_y, roi_w, roi_h, th = 0.9, size = 40)->Tuple[List[int],List[int],float]:
def tracking_blink(img, roi_x, roi_y, roi_w, roi_h, th = 0.9, size = 40, sysdvr = False)->Tuple[List[int],List[int],float]:
    """measuring the type and interval of player's blinks

    Returns:
@@ -25,6 +25,10 @@ def tracking_blink(img, roi_x, roi_y, roi_w, roi_h, th = 0.9, size = 40)->Tuple[

    eye = img

    if sysdvr:
        from windowcapture import WindowCapture
        video = WindowCapture("SysDVR-Client [PID ")
    else:
        video = cv2.VideoCapture(0,cv2.CAP_DSHOW)
        video.set(cv2.CAP_PROP_FRAME_WIDTH,1920)
        video.set(cv2.CAP_PROP_FRAME_HEIGHT,1080)
@@ -34,6 +38,7 @@ def tracking_blink(img, roi_x, roi_y, roi_w, roi_h, th = 0.9, size = 40)->Tuple[
    blinks = []
    intervals = []
    prev_time = 0
    w, h = eye.shape[::-1]

    prev_roi = None
    debug_txt = ""
@@ -50,17 +55,18 @@ def tracking_blink(img, roi_x, roi_y, roi_w, roi_h, th = 0.9, size = 40)->Tuple[

        prev_roi = roi
        res = cv2.matchTemplate(roi,eye,cv2.TM_CCOEFF_NORMED)
        _, match, _, _ = cv2.minMaxLoc(res)

        cv2.imshow("",roi)
        cv2.waitKey(1)
        _, match, _, max_loc = cv2.minMaxLoc(res)

        if 0.01<match<th:
            cv2.rectangle(frame,(roi_x,roi_y), (roi_x+roi_w,roi_y+roi_h), 255, 2)
            if state==IDLE:
                blinks.append(0)
                interval = (time_counter - prev_time)/1.018
                interval_round = round(interval)
                intervals.append(interval_round)
                print(f"Adv Since Last: {round((time_counter - prev_time)/1.018)} {(time_counter - prev_time)}")
                print("blink logged")
                print(f"Intervals {len(intervals)}/{size}")

                if len(intervals)==size:
                    offset_time = time_counter
@@ -76,16 +82,65 @@ def tracking_blink(img, roi_x, roi_y, roi_w, roi_h, th = 0.9, size = 40)->Tuple[
                blinks[-1] = 1
                debug_txt = debug_txt+"d"
                state = DOUBLE
                print("double blink logged")
            elif state==DOUBLE:
                pass
        else:
            max_loc = (max_loc[0] + roi_x,max_loc[1] + roi_y)
            bottom_right = (max_loc[0] + w, max_loc[1] + h)
            cv2.rectangle(frame,max_loc, bottom_right, 255, 2)

        cv2.imshow("view", frame)
        keypress = cv2.waitKey(1)
        if keypress == ord('q'):
            cv2.destroyAllWindows()
            exit()

        if state!=IDLE and time_counter - prev_time>0.7:
            state = IDLE
    print(debug_txt)
    # print(debug_txt)
    cv2.destroyAllWindows()
    return (blinks, intervals, offset_time)

def tracking_poke_blink(img, roi_x, roi_y, roi_w, roi_h, size = 60)->Tuple[List[int],List[int],float]:
def tracking_blink_manual(size = 40)->Tuple[List[int],List[int],float]:
    """measuring the type and interval of player's blinks

    Returns:
        blinks:List[int],intervals:list[int],offset_time:float: [description]
    """

    state = IDLE
    blinks = []
    intervals = []
    prev_time = 0

    offset_time = 0

    while len(blinks)<size or state!=IDLE:
        input()
        time_counter = time.perf_counter()
        print(f"Adv Since Last: {round((time_counter - prev_time)/1.018)} {(time_counter - prev_time)}")

        if prev_time != 0 and time_counter - prev_time<0.7:
            blinks[-1] = 1
            print("double blink logged")
        else:
            blinks.append(0)
            interval = (time_counter - prev_time)/1.018
            interval_round = round(interval)
            intervals.append(interval_round)
            print("blink logged")
            print(f"Intervals {len(intervals)}/{size}")

            if len(intervals)==size:
                offset_time = time_counter
        prev_time = time_counter


    return (blinks, intervals, offset_time)

def tracking_poke_blink(img, roi_x, roi_y, roi_w, roi_h, size = 60, sysdvr = False)->Tuple[List[int],List[int],float]:
    """measuring the type and interval of pokemon's blinks

    Returns:
@@ -94,6 +149,10 @@ def tracking_poke_blink(img, roi_x, roi_y, roi_w, roi_h, size = 60)->Tuple[List[

    eye = img

    if sysdvr:
        from windowcapture import WindowCapture
        video = WindowCapture("SysDVR-Client [PID ")
    else:
        video = cv2.VideoCapture(0,cv2.CAP_DSHOW)
        video.set(cv2.CAP_PROP_FRAME_WIDTH,1920)
        video.set(cv2.CAP_PROP_FRAME_HEIGHT,1080)
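As a worked example of the interval bookkeeping used by both tracking_blink and tracking_blink_manual above: the code divides the elapsed time between two logged blinks by 1.018 s and rounds, and that rounded value is both the "Adv Since Last" output and the entry appended to intervals. The 9.16 s gap below is illustrative; the constant and the rounding come from the code:

elapsed = 9.16                      # seconds between two logged blinks (illustrative)
advances = round(elapsed / 1.018)   # -> 9, the number printed as "Adv Since Last"
intervals_entry = advances          # the value appended to intervals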
src/wild.py (20 lines changed)
@@ -1,15 +1,17 @@
import rngtool
import calc
import cv2
import time
import json
from xorshift import Xorshift

config = json.load(open("config.json"))

def expr():
    player_eye = cv2.imread("./trainer/ruins/eye.png", cv2.IMREAD_GRAYSCALE)
    player_eye = cv2.imread(config["image"], cv2.IMREAD_GRAYSCALE)
    if player_eye is None:
        print("path is wrong")
        return
    blinks, intervals, offset_time = rngtool.tracking_blink(player_eye, 910, 485, 50, 60)
    blinks, intervals, offset_time = rngtool.tracking_blink(player_eye, *config["view"], sysdvr=config["SysDVR"])
    prng = rngtool.recov(blinks, intervals)

    waituntil = time.perf_counter()
@@ -19,18 +21,11 @@ def expr():
    state = prng.getState()
    print(hex(state[0]<<32|state[1]), hex(state[2]<<32|state[3]))

    #timecounter reset
    advances = 0
    wild_prng = Xorshift(*prng.getState())
    wild_prng.getNextRandSequence(1)

    advances = 0

    for i in range(100):
    for _ in range(1000):
        advances += 1
        r = prng.next()
        wild_r = wild_prng.next()

        waituntil += 1.018

        print(f"advances:{advances}, blinks:{hex(r&0xF)}")
@@ -99,5 +94,4 @@ def reidentify():
        time.sleep(next_time)

if __name__ == "__main__":
    #firstspecify()
    reidentify()
    expr()
src/wild_manual.py (new file, 37 lines)
@@ -0,0 +1,37 @@
import rngtool
import cv2
import time
import json

config = json.load(open("config.json"))

def expr():
    player_eye = cv2.imread(config["image"], cv2.IMREAD_GRAYSCALE)
    if player_eye is None:
        print("path is wrong")
        return
    blinks, intervals, offset_time = rngtool.tracking_blink_manual()
    prng = rngtool.recov(blinks, intervals)

    waituntil = time.perf_counter()
    diff = round(waituntil-offset_time)
    prng.getNextRandSequence(diff)

    state = prng.getState()
    print(hex(state[0]<<32|state[1]), hex(state[2]<<32|state[3]))

    advances = 0

    for _ in range(1000):
        advances += 1
        r = prng.next()
        waituntil += 1.018

        print(f"advances:{advances}, blinks:{hex(r&0xF)}")

        next_time = waituntil - time.perf_counter() or 0
        time.sleep(next_time)


if __name__ == "__main__":
    expr()
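A small sketch of the double-blink rule the manual tracker relies on (the timings are illustrative): a second Enter press within 0.7 s of the previous one upgrades the last logged blink from single (0) to double (1), exactly as in tracking_blink_manual above.

prev_time, time_counter = 10.00, 10.45   # two Enter presses 0.45 s apart (illustrative)
blinks = [0]                             # the first press logged a single blink
if prev_time != 0 and time_counter - prev_time < 0.7:
    blinks[-1] = 1                       # -> [1], reported as "double blink logged"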
src/windowcapture.py (new file, 87 lines)
@@ -0,0 +1,87 @@
import numpy as np
import win32gui, win32ui, win32con

# Class to monitor a window
class WindowCapture:
    def __init__(self, partial_window_title):
        # set up variables
        self.w = 0
        self.h = 0
        self.hwnd = None
        self.cropped_x = 0
        self.cropped_y = 0
        self.offset_x = 0
        self.offset_y = 0

        # a string contained in the window title, used to find windows whose name is not constant (the PID in the SysDVR title changes)
        self.partial_window_title = partial_window_title
        # find the handle for the window we want to capture
        hwnds = []
        win32gui.EnumWindows(self.winEnumHandler, hwnds)
        if len(hwnds) == 0:
            raise Exception('Window not found')
        self.hwnd = hwnds[0]

        # get the window size
        window_rect = win32gui.GetWindowRect(self.hwnd)
        self.w = window_rect[2] - window_rect[0]
        self.h = window_rect[3] - window_rect[1]

        # account for the window border and titlebar and cut them off
        border_pixels = 8
        titlebar_pixels = 31
        self.w = self.w - (border_pixels * 2)
        self.h = self.h - titlebar_pixels - border_pixels
        self.cropped_x = border_pixels
        self.cropped_y = titlebar_pixels

        # set the cropped coordinates offset so we can translate screenshot images into actual screen positions
        self.offset_x = window_rect[0] + self.cropped_x
        self.offset_y = window_rect[1] + self.cropped_y

    # handler for finding the target window
    def winEnumHandler(self, hwnd, ctx):
        # check if window is not minimized
        if win32gui.IsWindowVisible(hwnd):
            # check if our partial title is contained in the actual title
            if self.partial_window_title in win32gui.GetWindowText(hwnd):
                # add to list
                ctx.append(hwnd)

    # used to send a screenshot of the window to cv2
    def read(self):
        # get the window image data
        wDC = win32gui.GetWindowDC(self.hwnd)
        dcObj = win32ui.CreateDCFromHandle(wDC)
        cDC = dcObj.CreateCompatibleDC()
        dataBitMap = win32ui.CreateBitmap()
        dataBitMap.CreateCompatibleBitmap(dcObj, self.w, self.h)
        cDC.SelectObject(dataBitMap)
        cDC.BitBlt((0, 0), (self.w, self.h), dcObj, (self.cropped_x, self.cropped_y), win32con.SRCCOPY)

        # convert the raw data into a format opencv can read
        signedIntsArray = dataBitMap.GetBitmapBits(True)
        img = np.fromstring(signedIntsArray, dtype='uint8')
        img.shape = (self.h, self.w, 4)

        # free resources
        dcObj.DeleteDC()
        cDC.DeleteDC()
        win32gui.ReleaseDC(self.hwnd, wDC)
        win32gui.DeleteObject(dataBitMap.GetHandle())

        # drop the alpha channel, to avoid throwing an error
        img = img[...,:3]

        # make image C_CONTIGUOUS to avoid errors
        # https://github.com/opencv/opencv/issues/14866#issuecomment-580207109
        img = np.ascontiguousarray(img)

        return True,img

    # translate a pixel position on a screenshot image to a pixel position on the screen
    def get_screen_position(self, pos):
        return (pos[0] + self.offset_x, pos[1] + self.offset_y)

    def release(self):
        pass
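A minimal usage sketch of the new class (it assumes SysDVR-Client is running and its window is visible; read() mimics cv2.VideoCapture.read() so the trackers above can treat both sources the same way):

import cv2
from windowcapture import WindowCapture

video = WindowCapture("SysDVR-Client [PID ")   # matches the window title by substring
ok, frame = video.read()                       # (True, 3-channel image), like cv2.VideoCapture.read()
if ok:
    cv2.imshow("capture", frame)
    cv2.waitKey(0)
video.release()                                # no-op, kept for API parity with cv2.VideoCapture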
src/xorshift.py (file name not shown in the mirror; inferred from "from xorshift import Xorshift" in src/wild.py)
@@ -37,13 +37,13 @@ class Xorshift(object):

        return self.w

    def range(mi,ma):
    def range(self,mi,ma):
        return self.next() % (ma-mi) + min

    def getNextRandSequence(length):
    def getNextRandSequence(self,length):
        return [self.next() for _ in range(length)]

    def getPrevRandSequence(length):
    def getPrevRandSequence(self,length):
        return [self.prev() for _ in range(length)]

    def getState(self):
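For context, a sketch of the repaired instance methods (the four state words are illustrative; the constructor signature is assumed from the Xorshift(*prng.getState()) call in src/wild.py):

from xorshift import Xorshift

rng = Xorshift(0x12345678, 0x9abcdef0, 0x13579bdf, 0x02468ace)  # four 32-bit state words, as returned by getState()
seq = rng.getNextRandSequence(5)    # five raw outputs, advancing the state
back = rng.getPrevRandSequence(5)   # steps the generator back over those outputs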
trainer/cave/eye.png (new binary file, 427 B; not shown)