
Commit d0d79c6

Ruilong Li committed · 7.28
1 parent 3137e38 · commit d0d79c6

File tree: 4 files changed, +118 -193 lines changed


RTL/main.py

Lines changed: 56 additions & 180 deletions
@@ -251,24 +251,25 @@ def visulization(render_norm, render_tex=None):
 
     render_size = 256
 
-    render_norm = render_norm.detach() * 255.0
-    render_norm = torch.rot90(render_norm, 1, [0, 1]).permute(2, 0, 1).unsqueeze(0)
-    render_norm = F.interpolate(render_norm, size=(render_size, render_size))
-    render_norm = render_norm[0].cpu().numpy().transpose(1, 2, 0)
-    # render_norm = cv2.cvtColor(render_norm, cv2.COLOR_BGR2RGB)
+    if render_norm is not None:
+        render_norm = render_norm.detach() * 255.0
+        render_norm = torch.rot90(render_norm, 1, [0, 1]).permute(2, 0, 1).unsqueeze(0)
+        render_norm = F.interpolate(render_norm, size=(render_size, render_size))
+        render_norm = render_norm[0].cpu().numpy().transpose(1, 2, 0)
+        reference = render_norm
 
     if render_tex is not None:
         render_tex = render_tex.detach() * 255.0
         render_tex = torch.rot90(render_tex, 1, [0, 1]).permute(2, 0, 1).unsqueeze(0)
         render_tex = F.interpolate(render_tex, size=(render_size, render_size))
         render_tex = render_tex[0].cpu().numpy().transpose(1, 2, 0)
-        # render_tex = cv2.cvtColor(render_tex, cv2.COLOR_BGR2RGB)
+        reference = render_tex
 
     bg = np.logical_and(
         np.logical_and(
-            render_norm[:, :, 0] == 255,
-            render_norm[:, :, 1] == 255),
-        render_norm[:, :, 2] == 255,
+            reference[:, :, 0] == 255,
+            reference[:, :, 1] == 255),
+        reference[:, :, 2] == 255,
     ).reshape(render_size, render_size, 1)
     mask = ~bg
 
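Aside: the hunk above switches the background test from `render_norm` to whichever render was produced last (`reference`), then inverts it into a foreground mask that is later broadcast against the RGB frames. A minimal standalone sketch of that masking pattern, using toy arrays in place of the real renders:

import numpy as np

render_size = 256
reference = np.full((render_size, render_size, 3), 255.0)   # toy render; pure white = background
background = np.zeros((render_size, render_size, 3))        # toy scene background

# A pixel counts as background only if all three channels are exactly 255.
bg = np.logical_and(
    np.logical_and(reference[:, :, 0] == 255, reference[:, :, 1] == 255),
    reference[:, :, 2] == 255,
).reshape(render_size, render_size, 1)
mask = ~bg  # boolean (H, W, 1)

# The (H, W, 1) mask broadcasts across the three channels, so this composites
# per pixel without an explicit channel loop.
composite = np.uint8(mask * reference + (1 - mask) * background)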

@@ -368,7 +369,8 @@ def update_camera():
         **data_dict,
         "feat_tensor_C": netC.filter(
             data_dict["input_netC"].to(cuda_backbone_C, non_blocking=True),
-            feat_prior=data_dict["feat_tensor_G"][-1][-1]) if netC else None
+            feat_prior=data_dict["feat_tensor_G"][-1][-1]) \
+            if (netC is not None) and (DESKTOP_MODE == 'TEXTURE' or SERVER_MODE == 'TEXTURE') else None
     },
 
     # move feature to cuda_recon device
@@ -417,7 +419,7 @@ def update_camera():
         data_dict["Y"],
         data_dict["Z"],
         data_dict["calib_tensor"],
-        data_dict["norm"])
+        data_dict["norm"]) if (DESKTOP_MODE == 'NORM' or SERVER_MODE == 'NORM') else None
     },
 
     # pifu render texture
@@ -430,7 +432,7 @@ def update_camera():
         data_dict["Y"],
         data_dict["Z"],
         data_dict["calib_tensor"],
-        None) if netC else None
+        None) if data_dict["feat_tensor_C"] else None
     },
 
     # visualization
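A hedged side note on the new `if data_dict["feat_tensor_C"]` guard: truthiness works if the entry is `None` or a Python list of feature maps, but if `netC.filter` ever returns a bare multi-element tensor, Python's truth test on it raises instead of evaluating. A tiny illustration (the variable here is a stand-in, not the project's actual return type):

import torch

feat_tensor_C = torch.zeros(1, 8, 4, 4)   # stand-in feature map, not the real netC output

# bool(feat_tensor_C) raises RuntimeError: "Boolean value of Tensor with more than
# one element is ambiguous"; an identity check sidesteps that entirely.
safe_to_render_texture = feat_tensor_C is not None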
@@ -463,14 +465,13 @@ def main_loop():
     window_server = np.ones((256, 256, 3), dtype=np.uint8) * 255
     window_desktop = np.ones((512, 1024, 3), dtype=np.uint8) * 255
 
-    create_opengl_context(128, 128)
-    renderer = AlbedoRender(width=128, height=128, multi_sample_rate=1)
+    create_opengl_context(256, 256)
+    renderer = AlbedoRender(width=256, height=256, multi_sample_rate=1)
     renderer.set_attrib(0, scene.vert_data)
     renderer.set_attrib(1, scene.uv_data)
     renderer.set_texture('TargetTexture', scene.texture_image)
 
     def render(extrinsic, intrinsic):
-        renderer.set_texture('TargetTexture', scene.texture_image)
         uniform_dict = {'ModelMat': extrinsic, 'PerspMat': intrinsic}
         renderer.draw(uniform_dict)
         color = (renderer.get_color() * 255).astype(np.uint8)
@@ -496,14 +497,14 @@ def render(extrinsic, intrinsic):
             ])) # RGB
         elif DESKTOP_MODE == 'NORM':
             if render_norm is None:
-                render_norm = np.ones((512, 512, 3), dtype=np.float32) * 255
+                render_norm = np.ones((256, 256, 3), dtype=np.float32) * 255
             window_desktop = np.uint8(np.hstack([
                 input * 255,
                 cv2.resize(render_norm, (512, 512))
             ])) # RGB
         elif DESKTOP_MODE == 'TEXTURE':
            if render_tex is None:
-                render_tex = np.ones((512, 512, 3), dtype=np.float32) * 255
+                render_tex = np.ones((256, 256, 3), dtype=np.float32) * 255
             window_desktop = np.uint8(np.hstack([
                 input * 255,
                 cv2.resize(render_tex, (512, 512))
@@ -512,9 +513,41 @@ def render(extrinsic, intrinsic):
             window_desktop = None
 
         if DESKTOP_MODE is not None:
-            window_desktop = cv2.resize(window_desktop, (2400, 1200))
+            # window_desktop = cv2.resize(window_desktop, (2400, 1200))
+            cv2.namedWindow('window_desktop', cv2.WINDOW_NORMAL)
+            cv2.setWindowProperty('window_desktop', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
             cv2.imshow('window_desktop', window_desktop[:, :, ::-1])
 
+        if args.use_server:
+            if DESKTOP_MODE == 'NORM':
+                if SERVER_MODE is None:
+                    background = np.ones((256, 256, 3), dtype=np.float32) * 255
+                else:
+                    background = render(extrinsic, intrinsic)
+                if mask is None:
+                    window_server = background
+                else:
+                    window_server = np.uint8(mask * render_norm + (1 - mask) * background)
+            elif DESKTOP_MODE == 'TEXTURE':
+                if SERVER_MODE is None:
+                    background = np.ones((256, 256, 3), dtype=np.float32) * 255
+                else:
+                    background = render(extrinsic, intrinsic)
+                if mask is None:
+                    window_server = background
+                else:
+                    window_server = np.uint8(mask * render_tex + (1 - mask) * background)
+            else:
+                if render_norm is not None:
+                    window_server = np.uint8(render_norm)
+
+            # yield window_desktop, window_server
+            (flag, encodedImage) = cv2.imencode(".jpg", window_server[:, :, ::-1])
+            if not flag:
+                continue
+            yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
+                  bytearray(encodedImage) + b'\r\n')
+
         key = cv2.waitKey(1)
         if key == ord('q'):
             DESKTOP_MODE = 'SEGM'
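For reference, the fullscreen display added above follows the standard OpenCV pattern: create the window as resizable (`WINDOW_NORMAL`) and then flip its fullscreen property, which is what makes the earlier manual `cv2.resize` to 2400x1200 unnecessary. An isolated sketch with a placeholder frame:

import cv2
import numpy as np

frame = np.zeros((512, 1024, 3), dtype=np.uint8)  # placeholder for window_desktop

# WINDOW_NORMAL makes the window resizable, which allows the
# WND_PROP_FULLSCREEN property to stretch it to the full screen.
cv2.namedWindow('window_desktop', cv2.WINDOW_NORMAL)
cv2.setWindowProperty('window_desktop', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.imshow('window_desktop', frame[:, :, ::-1])
cv2.waitKey(1)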
@@ -527,10 +560,10 @@ def render(extrinsic, intrinsic):
 
         elif key == ord('a'):
             SERVER_MODE = 'SEGM'
-        elif key == ord('s'):
-            SERVER_MODE = 'NORM'
-        elif key == ord('d'):
-            SERVER_MODE = 'TEXTURE'
+        # elif key == ord('s'):
+        #     SERVER_MODE = 'NORM'
+        # elif key == ord('d'):
+        #     SERVER_MODE = 'TEXTURE'
         elif key == ord('f'):
             SERVER_MODE = None
 
@@ -547,30 +580,7 @@ def render(extrinsic, intrinsic):
         elif key == ord('n'):
             VIEW_MODE = 'LOAD'
 
-        if args.use_server:
-            if SERVER_MODE == 'NORM':
-                background = render(extrinsic, intrinsic)
-                if mask is None:
-                    window_server = background
-                else:
-                    window_server = np.uint8(mask * render_norm + (1 - mask) * background)
-            elif SERVER_MODE == 'TEXTURE':
-                background = render(extrinsic, intrinsic)
-                if mask is None:
-                    window_server = background
-                else:
-                    window_server = np.uint8(mask * render_tex + (1 - mask) * background)
-            else:
-                if render_norm is not None:
-                    window_server = np.uint8(render_norm)
-
-            # yield window_desktop, window_server
-            (flag, encodedImage) = cv2.imencode(".jpg", window_server[:, :, ::-1])
-            if not flag:
-                continue
-            yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
-                  bytearray(encodedImage) + b'\r\n')
-
+
 
 if __name__ == '__main__':
     if args.use_server:
@@ -579,15 +589,6 @@ def render(extrinsic, intrinsic):
     ########################################
     app = Flask(__name__)
 
-    def img_base64(img_path):
-        with open(img_path,"rb") as f:
-            data = f.read()
-        print("data:", getsizeof(data))
-        assert data[-2:] == b'\xff\xd9'
-        base64_str = b64encode(data).decode('utf-8')
-        print("base64:", getsizeof(base64_str))
-        return base64_str
-
     @app.route("/")
     def index():
         return render_template("test_flask.html")
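Since `main_loop()` now yields the multipart JPEG chunks itself (see the `-512,9 +513,41` hunk above) and this diff only shows the `index()` route, the `video_feed()` route named in the next hunk's header presumably wraps that generator in a streaming response. A minimal sketch of the usual Flask wiring, with the route path assumed rather than taken from the source:

from flask import Flask, Response

app = Flask(__name__)

@app.route("/video_feed")
def video_feed():
    # main_loop() (defined in RTL/main.py) already yields fully framed chunks:
    #   b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + <jpeg bytes> + b'\r\n'
    # so the response only needs to declare the matching multipart boundary.
    return Response(main_loop(),
                    mimetype="multipart/x-mixed-replace; boundary=frame")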
@@ -604,128 +605,3 @@ def video_feed():
     print('start main_loop.')
     for _ in main_loop():
         pass
-
-# @torch.no_grad()
-# def main_loop():
-#     for data_dict in tqdm.tqdm(loader):
-#         # for visualization on the ubuntu main screen
-#         input4c = data_dict["segm"].cpu().numpy()[0].transpose(1, 2, 0) # [512, 512, 4]
-#         input = (input4c[:, :, 0:3] * 0.5) + 0.5
-#         segmentation = (input4c[:, :, 0:3] * input4c[:, :, 3:4] * 0.5) + 0.5
-
-#         render_norm = data_dict["render_norm"] # [256, 256, 3] RGB
-#         render_tex = data_dict["render_tex"] # [256, 256, 3] RGB
-#         mask = data_dict["mask"]
-#         extrinsic = data_dict["extrinsic"]
-#         intrinsic = data_dict["intrinsic"]
-
-#         if DESKTOP_MODE == 'SEGM':
-#             window_desktop = np.uint8(np.hstack([
-#                 input * 255,
-#                 segmentation * 255
-#             ])) # RGB
-#         elif DESKTOP_MODE == 'NORM':
-#             if render_norm is None:
-#                 render_norm = np.zeros((512, 512, 3), dtype=np.float32)
-#             window_desktop = np.uint8(np.hstack([
-#                 input * 255,
-#                 cv2.resize(render_norm, (512, 512))
-#             ])) # RGB
-#         elif DESKTOP_MODE == 'TEXTURE':
-#             if render_tex is None:
-#                 render_tex = np.zeros((512, 512, 3), dtype=np.float32)
-#             window_desktop = np.uint8(np.hstack([
-#                 input * 255,
-#                 cv2.resize(render_tex, (512, 512))
-#             ])) # RGB
-#         else:
-#             window_desktop = None
-
-#         # if SERVER_MODE == 'NORM':
-#         #     background = scene.render(extrinsic, intrinsic)
-#         #     if mask is None:
-#         #         window_server = background
-#         #     else:
-#         #         window_server = np.uint8(mask * render_norm + (1 - mask) * background)
-#         # elif SERVER_MODE == 'TEXTURE':
-#         #     background = scene.render(extrinsic, intrinsic)
-#         #     if mask is None:
-#         #         window_server = background
-#         #     else:
-#         #         window_server = np.uint8(mask * render_tex + (1 - mask) * background)
-#         # else:
-#         #     window_server = None
-
-#         yield window_desktop
-
-
-# # access server:
-# # http://localhost:9999/scripts/unit_tests/test_server.html
-# if __name__ == '__main__':
-#     import asyncio
-#     import websockets
-#     import threading
-#     import time
-#     import random
-#     import glob
-#     from base64 import b64encode
-#     from sys import getsizeof
-#     from io import BytesIO
-#     from PIL import Image
-
-
-#     def img_base64(img_path):
-#         with open(img_path,"rb") as f:
-#             data = f.read()
-#         print("data:", getsizeof(data))
-#         assert data[-2:] == b'\xff\xd9'
-#         base64_str = b64encode(data).decode('utf-8')
-#         print("base64:", getsizeof(base64_str))
-#         return base64_str
-
-#     async def send(client, data):
-#         await client.send(data)
-
-#     async def handler(client, path):
-#         # Register.
-#         print("Websocket Client Connected.", client)
-#         clients.append(client)
-#         while True:
-#             try:
-#                 # print("ping", client)
-#                 pong_waiter = await client.ping()
-#                 await pong_waiter
-#                 # print("pong", client)
-#                 time.sleep(3)
-#             except Exception as e:
-#                 clients.remove(client)
-#                 print("Websocket Client Disconnected", client)
-#                 break
-
-#     clients = []
-#     start_server = websockets.serve(handler, "192.168.1.232", 5555)
-
-#     asyncio.get_event_loop().run_until_complete(start_server)
-#     threading.Thread(target = asyncio.get_event_loop().run_forever).start()
-
-#     print(f"Socket Server Running on 192.168.1.232:5555. Starting main loop.")
-
-#     for window_desktop in main_loop():
-#         # message_clients = clients.copy()
-#         # for client in message_clients:
-#         #     pil_img = Image.fromarray(window_server)
-#         #     buff = BytesIO()
-#         #     pil_img.save(buff, format="JPEG")
-#         #     data = b64encode(buff.getvalue()).decode("utf-8")
-
-#         #     print("Sending data to client")
-#         #     try:
-#         #         asyncio.run(send(client, data))
-#         #     except:
-#         #         # Clients might have disconnected during the messaging process,
-#         #         # just ignore that, they will have been removed already.
-#         #         pass
-#         window_desktop = cv2.resize(window_desktop, (0, 0), fx=2, fy=2)
-#         cv2.imshow('window_desktop', window_desktop[:, :, ::-1])
-#         cv2.waitKey(1)
-

RTL/run_pifu.sh

Lines changed: 1 addition & 0 deletions
@@ -1,6 +1,7 @@
 python RTL/main.py \
     --image_folder "/home/rui/local/projects/PIFu-RealTime/zenTelePort/data/recording/test" \
     --use_server \
+    --loop \
     -- \
     netG.projection orthogonal \
     netG.backbone.IMF PIFuHGFilters \

RTL/scene.py

Lines changed: 6 additions & 6 deletions
@@ -98,11 +98,11 @@ def __init__(self, size=(512, 512)):
         self.vert_data, self.uv_data, self.texture_image = _load_grass()
         self.intrinsic = _load_intrinsic()
 
-        create_opengl_context(size[0], size[1])
-        self.renderer = AlbedoRender(width=size[0], height=size[1], multi_sample_rate=1)
-        self.renderer.set_attrib(0, self.vert_data)
-        self.renderer.set_attrib(1, self.uv_data)
-        self.renderer.set_texture('TargetTexture', self.texture_image)
+        # create_opengl_context(size[0], size[1])
+        # self.renderer = AlbedoRender(width=size[0], height=size[1], multi_sample_rate=1)
+        # self.renderer.set_attrib(0, self.vert_data)
+        # self.renderer.set_attrib(1, self.uv_data)
+        # self.renderer.set_texture('TargetTexture', self.texture_image)
 
         self.extrinsic = np.array([
             [1.0, 0.0, 0.0, 0.0],
@@ -159,6 +159,6 @@ def render(self, extrinsic, intrinsic):
     for _ in tqdm.tqdm(range(10000)):
         extrinsic, intrinsic = scene.update_camera()
         background = scene.render(extrinsic, intrinsic)
-        print (extrinsic)
+        # print (extrinsic)
         cv2.imshow('scene', background)
         cv2.waitKey(15)
