File 1 of 4 (modified): existing CE GPT config script, MP2-SD4-stage1-mbs4-acc2 variant.

@@ -18,6 +18,11 @@ mp_degree=2
 pp_degree=1
 sharding_degree=4
 sharding=stage1
+virtual_pp_degree=1
+use_recompute=True
+eval_freq=25
+use_pipeline_parallel=False
+sequence_parallel=False
 bs_item=32
 fp_item=bf16
 run_mode=MP2-SD4-stage1-mbs4-acc2
@@ -26,8 +31,10 @@ max_iter=50000
 
 model=gpt
 micro_bs=4
+acc=2
+seed=3589
 
 bash ./test_tipc/dygraph/hybrid_parallelism/ce_gpt/benchmark_common/prepare.sh
 # run
 bash ./test_tipc/dygraph/hybrid_parallelism/ce_gpt/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${dp_degree} ${mp_degree} ${pp_degree} ${micro_bs} ${bs_item} ${run_mode} ${device_num} \
-${max_iter} ${sharding} ${sharding_degree} 2>&1;
+${max_iter} ${sharding} ${sharding_degree} ${virtual_pp_degree} ${use_recompute} ${eval_freq} ${use_pipeline_parallel} ${sequence_parallel} ${acc} ${seed} 2>&1;
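These config scripts pass everything to run_benchmark.sh positionally, so the appended arguments must arrive in exactly the order the `${13}`-`${19}` defaults in `_set_params` (file 4 below) expect; trailing arguments may be omitted because every slot has a default. A toy sketch of that bash mechanism, with illustrative names rather than the PR's code:

```bash
#!/usr/bin/env bash
# Toy sketch of bash positional-default expansion, the mechanism
# run_benchmark.sh relies on for arguments 13-19. Names are illustrative.
demo() {
    vpp=${1:-"1"}       # falls back to 1 when the caller omits the argument
    seed=${2:-"1234"}   # falls back to 1234
    echo "vpp=${vpp} seed=${seed}"
}
demo 2 3589   # -> vpp=2 seed=3589
demo          # -> vpp=1 seed=1234
```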
File 2 of 4 (new): CE config for gpt-345m, run_mode MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8.

@@ -0,0 +1,40 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+model_item=CE_gpt-345m_seqlen1024_pretrain
+dp_degree=1
+mp_degree=2
+pp_degree=4
+bs_item=32
+fp_item=bf16
+run_mode=MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8
+device_num=N2C16
+max_iter=50000
+sharding=stage1
+sharding_degree=2
+virtual_pp_degree=2
+use_recompute=True
+eval_freq=25
+use_pipeline_parallel=True
+sequence_parallel=True
+
+model=gpt
+micro_bs=2
+acc=8
+seed=3589
+
+bash ./test_tipc/dygraph/hybrid_parallelism/ce_gpt/benchmark_common/prepare.sh
+# run
+bash ./test_tipc/dygraph/hybrid_parallelism/ce_gpt/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${dp_degree} ${mp_degree} ${pp_degree} ${micro_bs} ${bs_item} ${run_mode} ${device_num} \
+${max_iter} ${sharding} ${sharding_degree} ${virtual_pp_degree} ${use_recompute} ${eval_freq} ${use_pipeline_parallel} ${sequence_parallel} ${acc} ${seed} 2>&1;
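The run_mode string encodes the topology: MP2 (tensor parallel 2), SP2 (sequence parallel over the same 2 ways), PP4 with VPP2 (4 pipeline stages, 2 virtual chunks each), SD2-stage1 (stage-1 sharding over 2 ways), micro-batch 2, accumulation 8. On device_num=N2C16 (2 nodes x 8 cards) those degrees must tile the 16 GPUs exactly. A hypothetical sanity check, not part of the PR:

```bash
# Hypothetical sanity check: dp * mp * pp * sharding should equal the
# card count implied by device_num=N2C16 (2 nodes x 8 GPUs = 16).
dp_degree=1; mp_degree=2; pp_degree=4; sharding_degree=2
cards=16
total=$((dp_degree * mp_degree * pp_degree * sharding_degree))
if [ "${total}" -ne "${cards}" ]; then
    echo "degree product ${total} does not tile ${cards} cards" >&2
    exit 1
fi
```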
File 3 of 4 (new): benchmark config for gpt-345m, same run_mode but max_iter=100 and no fixed seed.

@@ -0,0 +1,40 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+model_item=gpt-345m_seqlen1024_pretrain
+dp_degree=1
+mp_degree=2
+pp_degree=4
+bs_item=32
+fp_item=bf16
+run_mode=MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8
+device_num=N2C16
+max_iter=100
+sharding=stage1
+sharding_degree=2
+
+virtual_pp_degree=2
+use_recompute=True
+eval_freq=25
+use_pipeline_parallel=True
+sequence_parallel=True
+
+model=gpt
+micro_bs=2
+acc=8
+
+bash ./test_tipc/dygraph/hybrid_parallelism/ce_gpt/benchmark_common/prepare.sh
+# run
+bash ./test_tipc/dygraph/hybrid_parallelism/ce_gpt/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${dp_degree} ${mp_degree} ${pp_degree} ${micro_bs} ${bs_item} ${run_mode} ${device_num} \
+${max_iter} ${sharding} ${sharding_degree} ${virtual_pp_degree} ${use_recompute} ${eval_freq} ${use_pipeline_parallel} ${sequence_parallel} ${acc} 2>&1;
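Unlike the CE variant, this script stops at ${acc} (argument 18) and passes no seed, so run_benchmark.sh falls back to its `${19:-"1234"}` default. A minimal illustration of that fallback:

```bash
# Minimal illustration: with only 18 positional arguments, $19 is unset,
# so the parameter expansion supplies the default seed.
set -- a b c d e f g h i j k l m n o p q r   # 18 dummy arguments
seed=${19:-"1234"}
echo "seed=${seed}"   # -> seed=1234
```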
File 4 of 4 (modified): the shared run_benchmark.sh invoked by the configs above.

@@ -37,11 +37,13 @@ function _set_params(){
     sharding_degree=${12:-"1"}
     num_workers=0                        # (optional)
     base_batch_size=$global_batch_size
-    virtual_pp_degree=${13:-"2"}         # (optional) virtual pipeline parallelism degree
+    vpp_degree=${13:-"1"}                # (optional) virtual pipeline parallelism degree
     use_recompute=${14:-"True"}          # (optional) whether to enable recompute
     eval_freq=${15:-"25"}                # (optional) model evaluation interval
     use_pipeline_parallel=${16:-"False"} # (optional) whether to set pipeline_parallel_config
     sequence_parallel=${17:-"False"}     # (optional) whether to enable sequence_parallel
+    acc=${18:-"2"}                       # (optional) gradient accumulation steps
+    seed=${19:-"1234"}                   # (optional) random seed
     # The commands below are generic; normally no changes are needed.
     model_name=${model_item}_bs${global_batch_size}_${fp_item}_${run_mode}   # (required) keep this format unchanged; aligned with competitor naming
     device=${CUDA_VISIBLE_DEVICES//,/ }
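Note the rename at position 13: the callers still use a variable named virtual_pp_degree, while the script now stores it as vpp_degree (with the default dropping from 2 to 1). Only the argument's position matters, not the caller's variable name, as this small sketch (not the PR's code) shows:

```bash
# Sketch: the caller-side name (virtual_pp_degree) and the callee-side name
# (vpp_degree) are independent; bash passes values by position only.
virtual_pp_degree=2
consume() { vpp_degree=${1:-"1"}; echo "vpp_degree=${vpp_degree}"; }
consume "${virtual_pp_degree}"   # -> vpp_degree=2
```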
@@ -108,10 +110,11 @@ function _train(){
         --tensor_parallel_degree ${mp_degree} \
         --pipeline_parallel_degree ${pp_degree} \
         ${pp_config_disable_partial_send_recv} \
+        --virtual_pp_degree ${vpp_degree} \
         --sequence_parallel ${sequence_parallel} \
         --split 949,50,1 \
         --max_seq_length 1024 \
-        --seed 1234 \
+        --seed ${seed} \
         --fuse_attention_qkv True \
         --use_flash_attention True \
         --bf16 ${bf16} \
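With pp_degree=4 and vpp_degree=2, the interleaved schedule splits each pipeline stage into two model chunks. A rough illustration, assuming GPT-345m's 24 transformer layers (the layer count is not stated in this PR):

```bash
# Rough illustration (assumes 24 layers for GPT-345m): virtual pipeline
# parallelism gives each of the pp_degree stages vpp_degree smaller chunks.
layers=24; pp_degree=4; vpp_degree=2
per_chunk=$((layers / (pp_degree * vpp_degree)))
echo "each stage holds ${vpp_degree} chunks of ${per_chunk} layers"   # -> 2 chunks of 3 layers
```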
@@ -125,7 +128,7 @@ function _train(){
         --dataloader_num_workers 1 \
         --eval_steps 1000 \
         --disable_tqdm True \
-        --gradient_accumulation_steps 2 \
+        --gradient_accumulation_steps ${acc} \
         --weight_decay 0.01\
         --max_steps ${max_iter}\
         --save_steps 5000\
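Making accumulation configurable keeps the global batch consistent across variants: micro-batch times accumulation times the data-parallel ways should reproduce bs_item. A back-of-envelope check for the new configs:

```bash
# Back-of-envelope check (assumption: with dp_degree=1, sharding_degree
# acts as the data-parallel dimension in these configs).
micro_bs=2; acc=8; sharding_degree=2
echo $((micro_bs * acc * sharding_degree))   # -> 32, matching bs_item=32
```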
@@ -150,7 +153,7 @@ function _train(){
             run_pretrain.py ${train_cmd}"
             workerlog_id=0
             ;;
-        DP8-mbs2-acc2|SD8-stage1-mbs2-acc2|SD8-stage2-mbs2-acc2|SD8-stage3-mbs2-acc2|MP2-SD4-stage1-mbs4-acc2|MP2-SP2-PP2-DP2-mbs8-acc2|MP8-mbs16-acc2|MP2-PP2-DP2-mbs8-acc2|MP2-PP2-SD2-Stage1-mbs8-acc2|MP2-SP2-PP2-SD2-Stage1-mbs8-acc2) echo "run run_mode: ${run_mode}"
+        DP8-mbs2-acc2|SD8-stage1-mbs2-acc2|SD8-stage2-mbs2-acc2|SD8-stage3-mbs2-acc2|MP2-SD4-stage1-mbs4-acc2|MP2-SP2-PP2-DP2-mbs8-acc2|MP8-mbs16-acc2|MP2-PP2-DP2-mbs8-acc2|MP2-PP2-SD2-Stage1-mbs8-acc2|MP2-SP2-PP2-SD2-Stage1-mbs8-acc2|MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8) echo "run run_mode: ${run_mode}"
             train_cmd="python -m paddle.distributed.launch --log_dir=./mylog --devices=0,1,2,3,4,5,6,7 ${PADDLE_RANK_OPTION}\
             run_pretrain.py ${train_cmd}"
             workerlog_id=0
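The new run_mode only launches because it was added to this `case` alternation; any string not listed falls through. A toy sketch of the routing mechanism (illustrative names, not the PR's code):

```bash
# Toy sketch: bash `case` with `|` alternation routes a run_mode string
# to a launch command; unlisted modes should fail loudly.
route() {
    case "$1" in
        MP2-SD4-stage1-mbs4-acc2|MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8)
            echo "launch with --devices=0,1,2,3,4,5,6,7" ;;
        *)
            echo "unknown run_mode: $1" >&2; return 1 ;;
    esac
}
route MP2-SP2-PP4-VPP2-SD2-stage1-mbs2-acc8
```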