Spaces:
Running
Running
Update src/streamlit_app.py
Browse files — src/streamlit_app.py (+57 −41)
src/streamlit_app.py CHANGED
@@ -14,33 +14,27 @@ st.markdown(""" | |
14 | <h3 style="text-align: center;">SynthDa Interpolation Demo Viewer</h3> |
15 | <p style="text-align: center;"> |
16 | AutoSynthDa blends two input motion videos to <strong>generate kinematically coherent, synthetic action videos</strong>.<br> |
17 | - Use the |
18 | Source: <a href="https://github.com/nvidia/synthda" target="_blank">github.com/nvidia/synthda</a> |
19 | </p> |
20 | """, unsafe_allow_html=True) |
21 | |
22 | - # |
23 | - st.markdown( |
24 | - '<p style="text-align: center;"><strong>Use the slider to control the interpolation between Input Video 1 (left) and Input Video 2 (right).</strong></p>', |
25 | - unsafe_allow_html=True |
26 | - ) |
27 | |
28 | - |
29 | - weight = st.slider("Interpolation Weight", 0.1, 0.9, 0.5, step=0.1) |
30 | |
31 | - |
32 | - |
33 | - |
34 | - |
35 | - interp_text = "Showing Input Video 2 (no interpolation)" |
36 | else: |
37 | - w2 = round(1.0 - |
38 | - |
39 | |
40 | - st.markdown(f |
41 | |
42 | - |
43 | - filename_interp = f"videos_generated_{weight:.1f}.mp4" |
44 | filename_input1 = "videos_generated_0.0.mp4" |
45 | filename_input2 = "videos_generated_1.0.mp4" |
46 | |
@@ -48,27 +42,45 @@ video_interp = os.path.join(VIDEO_FOLDER_1, filename_interp) | |
48 | video_input1 = os.path.join(VIDEO_FOLDER_1, filename_input1) |
49 | video_input2 = os.path.join(VIDEO_FOLDER_1, filename_input2) |
50 | |
51 | - exists_interp = os.path.exists(video_interp) |
52 | - exists_1 = os.path.exists(video_input1) |
53 | - exists_2 = os.path.exists(video_input2) |
54 | - |
55 | - st.markdown("### Demo Set 1: Falling Real-Real Interpolation") |
56 | col1, col2, col3 = st.columns(3) |
57 | |
58 | with col1: |
59 | - st.markdown(" |
60 | - |
| |
| |
| |
61 | |
62 | with col2: |
63 | - st.markdown(" |
64 | - |
| |
| |
| |
65 | |
66 | with col3: |
67 | - st.markdown(" |
68 | - |
| |
| |
| |
69 | |
70 | # ---------- Row 2: synthda_demo_fall_2 ---------- |
71 | - |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
72 | filename_input1_2 = "videos_generated_0.0.mp4" |
73 | filename_input2_2 = "videos_generated_1.0.mp4" |
74 | |
@@ -76,21 +88,25 @@ video_interp2 = os.path.join(VIDEO_FOLDER_2, filename_interp2) | |
76 | video_input1_2 = os.path.join(VIDEO_FOLDER_2, filename_input1_2) |
77 | video_input2_2 = os.path.join(VIDEO_FOLDER_2, filename_input2_2) |
78 | |
79 | - exists_interp2 = os.path.exists(video_interp2) |
80 | - exists_1_2 = os.path.exists(video_input1_2) |
81 | - exists_2_2 = os.path.exists(video_input2_2) |
82 | - |
83 | - st.markdown("### Demo Set 2: Fall Demo Interpolation") |
84 | col4, col5, col6 = st.columns(3) |
85 | |
86 | with col4: |
87 | - st.markdown(" |
88 | - |
| |
| |
| |
89 | |
90 | with col5: |
91 | - st.markdown(" |
92 | - |
| |
| |
| |
93 | |
94 | with col6: |
95 | - st.markdown(" |
96 | - |
| |
| |
|
| |
14 | <h3 style="text-align: center;">SynthDa Interpolation Demo Viewer</h3> |
15 | <p style="text-align: center;"> |
16 | AutoSynthDa blends two input motion videos to <strong>generate kinematically coherent, synthetic action videos</strong>.<br> |
17 | + Use the sliders below to explore how the system interpolates motion from one video to another.<br> |
18 | Source: <a href="https://github.com/nvidia/synthda" target="_blank">github.com/nvidia/synthda</a> |
19 | </p> |
20 | """, unsafe_allow_html=True) |
21 | |
22 | + # ---------- Row 1: synthda_falling_realreal ---------- |
23 | + st.markdown("## Demo Set 1: Falling Real-Real Interpolation") |
| |
| |
| |
24 | |
25 | + weight1 = st.slider("Interpolation Weight (Set 1)", 0.0, 1.0, 0.5, step=0.1) |
| |
26 | |
27 | + if weight1 == 0.0: |
28 | + interp_text1 = "Showing Input Video 1 (no interpolation)" |
29 | + elif weight1 == 1.0: |
30 | + interp_text1 = "Showing Input Video 2 (no interpolation)" |
| |
31 | else: |
32 | + w2 = round(1.0 - weight1, 1) |
33 | + interp_text1 = f"Generated motion: {weight1:.1f} from Input Video 1 + {w2:.1f} from Input Video 2" |
34 | |
35 | + st.markdown(f"**{interp_text1}**") |
36 | |
37 | + filename_interp = f"videos_generated_{weight1:.1f}.mp4" |
| |
38 | filename_input1 = "videos_generated_0.0.mp4" |
39 | filename_input2 = "videos_generated_1.0.mp4" |
40 | |
| |
42 | video_input1 = os.path.join(VIDEO_FOLDER_1, filename_input1) |
43 | video_input2 = os.path.join(VIDEO_FOLDER_1, filename_input2) |
44 | |
| |
| |
| |
| |
| |
45 | col1, col2, col3 = st.columns(3) |
46 | |
47 | with col1: |
48 | + st.markdown("**Input Video 1 (Generated)**") |
49 | + if os.path.exists(video_input1): |
50 | + st.video(video_input1) |
51 | + else: |
52 | + st.error("Video 1 not found") |
53 | |
54 | with col2: |
55 | + st.markdown("**Interpolated Video**") |
56 | + if os.path.exists(video_interp): |
57 | + st.video(video_interp) |
58 | + else: |
59 | + st.error("Interpolated video not found") |
60 | |
61 | with col3: |
62 | + st.markdown("**Input Video 2 (Real)**") |
63 | + if os.path.exists(video_input2): |
64 | + st.video(video_input2) |
65 | + else: |
66 | + st.error("Video 2 not found") |
67 | |
68 | # ---------- Row 2: synthda_demo_fall_2 ---------- |
69 | + st.markdown("## Demo Set 2: Fall Demo Interpolation") |
70 | + |
71 | + weight2 = st.slider("Interpolation Weight (Set 2)", 0.0, 1.0, 0.5, step=0.1) |
72 | + |
73 | + if weight2 == 0.0: |
74 | + interp_text2 = "Showing Input Video 1 (no interpolation)" |
75 | + elif weight2 == 1.0: |
76 | + interp_text2 = "Showing Input Video 2 (no interpolation)" |
77 | + else: |
78 | + w2 = round(1.0 - weight2, 1) |
79 | + interp_text2 = f"Generated motion: {weight2:.1f} from Input Video 1 + {w2:.1f} from Input Video 2" |
80 | + |
81 | + st.markdown(f"**{interp_text2}**") |
82 | + |
83 | + filename_interp2 = f"videos_generated_{weight2:.1f}.mp4" |
84 | filename_input1_2 = "videos_generated_0.0.mp4" |
85 | filename_input2_2 = "videos_generated_1.0.mp4" |
86 | |
| |
88 | video_input1_2 = os.path.join(VIDEO_FOLDER_2, filename_input1_2) |
89 | video_input2_2 = os.path.join(VIDEO_FOLDER_2, filename_input2_2) |
90 | |
| |
| |
| |
| |
| |
91 | col4, col5, col6 = st.columns(3) |
92 | |
93 | with col4: |
94 | + st.markdown("**Input Video 1 (Generated)**") |
95 | + if os.path.exists(video_input1_2): |
96 | + st.video(video_input1_2) |
97 | + else: |
98 | + st.error("Video 1 not found") |
99 | |
100 | with col5: |
101 | + st.markdown("**Interpolated Video**") |
102 | + if os.path.exists(video_interp2): |
103 | + st.video(video_interp2) |
104 | + else: |
105 | + st.error("Interpolated video not found") |
106 | |
107 | with col6: |
108 | + st.markdown("**Input Video 2 (Real)**") |
109 | + if os.path.exists(video_input2_2): |
110 | + st.video(video_input2_2) |
111 | + else: |
112 | + st.error("Video 2 not found") |