By combining sklearn's KFold and torch.utils.data.Subset, this is easy to achieve: KFold yields train and validation index arrays for each fold, and Subset exposes the corresponding slices of the full training set as datasets that a DataLoader can consume.
from sklearn.model_selection import KFold
from torch.utils.data import DataLoader, Subset

kf = KFold(n_splits=params.training.k_folds, shuffle=True, random_state=42)
for i, (train_index, valid_index) in enumerate(kf.split(train_set_)):
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # + Splitting the dataset into this fold's train and validation parts
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    train_set = Subset(train_set_, train_index)
    valid_set = Subset(train_set_, valid_index)
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    valid_loader = DataLoader(valid_set, batch_size=batch_size, shuffle=True)
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # + Rest of the code using this fold's train_loader and valid_loader
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
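To make the per-fold part concrete, here is a minimal sketch of what the body of the loop could look like, assuming a classification task. The names make_model, device, n_epochs, and the Adam/CrossEntropyLoss choices are placeholders standing in for whatever model factory, hardware setup, and training configuration the project already defines, not details from the original code. The main point is that the model and optimizer are re-created at the start of every fold, so no state leaks from one fold into the next.

import torch
import torch.nn as nn

# Assumed placeholders: make_model() builds a fresh network; device and
# n_epochs come from the surrounding configuration. Adapt to the project.
model = make_model().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()

for epoch in range(n_epochs):
    # Training pass over this fold's training subset
    model.train()
    for x, y in train_loader:
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        optimizer.step()

    # Validation pass over this fold's held-out subset
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for x, y in valid_loader:
            x, y = x.to(device), y.to(device)
            preds = model(x).argmax(dim=1)
            correct += (preds == y).sum().item()
            total += y.size(0)
    print(f"fold {i} epoch {epoch}: valid acc {correct / total:.3f}")

Collecting the final per-fold validation scores (for example into a list) and averaging them afterwards gives the usual k-fold estimate of model performance.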