/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
the License for the specific language governing permissions and
limitations under the License. */

#include <gtest/gtest.h>

#include <algorithm>
#include <cctype>
#include <memory>
#include <string>

#include "paddle/phi/api/include/strings_api.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/backends/all_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/string_tensor.h"

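// Pull in the registrations of the CPU string kernels exercised below, so the
// kernel registry can resolve strings_lower/strings_upper at runtime.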
PD_DECLARE_KERNEL(strings_lower, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(strings_upper, CPU, ALL_LAYOUT);

namespace paddle {
namespace tests {

using phi::CPUPlace;
using phi::StringTensor;
using phi::StringTensorMeta;
// NOTE: `pstring` is used unqualified below; this alias assumes the type is
// phi::dtype::pstring from paddle/phi/common/pstring.h.
using phi::dtype::pstring;

TEST(API, case_convert) {
  auto cpu = CPUPlace();
  const auto alloc =
      std::make_shared<paddle::experimental::DefaultAllocator>(cpu);
  // 1. create tensor
  const phi::DDim dims({1, 2});
  StringTensorMeta meta(dims);
  auto cpu_strings_x = std::make_shared<phi::StringTensor>(
      alloc.get(), phi::StringTensorMeta(meta));
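  // Fetch the CPU device context from the global pool; it performs the
  // allocation of the tensor's pstring storage in the Alloc<pstring> call
  // below.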
  phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(phi::CPUPlace());

  pstring* cpu_strings_x_data =
      dev_ctx->template Alloc<pstring>(cpu_strings_x.get());
  std::string strs[] = {"A Short Pstring.",
                        "A Large Pstring Whose Length Is Longer Than 22."};
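  // The second literal is deliberately longer than 22 characters, which its
  // wording suggests is the inline (small-string) capacity of pstring, so both
  // the inline and heap-backed code paths are exercised.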
  for (int i = 0; i < 2; ++i) {
    cpu_strings_x_data[i] = strs[i];
  }
  // 2. get expected results
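  // Expected results are interleaved as {lower(strs[0]), upper(strs[0]),
  // lower(strs[1]), upper(strs[1])}, matching the order of cpu_results below.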
  std::string expected_results[] = {strs[0], strs[0], strs[1], strs[1]};
  std::transform(
      strs[0].begin(), strs[0].end(), expected_results[0].begin(), ::tolower);
  std::transform(
      strs[0].begin(), strs[0].end(), expected_results[1].begin(), ::toupper);
  std::transform(
      strs[1].begin(), strs[1].end(), expected_results[2].begin(), ::tolower);
  std::transform(
      strs[1].begin(), strs[1].end(), expected_results[3].begin(), ::toupper);
  // 3. test API, ascii encoding
  paddle::experimental::Tensor x(cpu_strings_x);
  auto lower_out = paddle::experimental::strings::lower(x, false);
  auto upper_out = paddle::experimental::strings::upper(x, false);

  auto lower_tensor =
      std::dynamic_pointer_cast<phi::StringTensor>(lower_out.impl());
  auto upper_tensor =
      std::dynamic_pointer_cast<phi::StringTensor>(upper_out.impl());
  ASSERT_EQ(lower_tensor->dims(), dims);
  ASSERT_EQ(upper_tensor->dims(), dims);

  auto lower_tensor_ptr = lower_tensor->data();
  auto upper_tensor_ptr = upper_tensor->data();

  const std::string cpu_results[] = {lower_tensor_ptr[0].data(),
                                     upper_tensor_ptr[0].data(),
                                     lower_tensor_ptr[1].data(),
                                     upper_tensor_ptr[1].data()};

  for (int i = 0; i < 4; ++i) {
    ASSERT_EQ(cpu_results[i], expected_results[i]);
  }
}

TEST(API, case_convert_utf8) {
  auto cpu = CPUPlace();
  const auto alloc =
      std::make_shared<paddle::experimental::DefaultAllocator>(cpu);
  // 1. create tensor
  const phi::DDim dims({1, 2});
  StringTensorMeta meta(dims);
  auto cpu_strings_x = std::make_shared<phi::StringTensor>(
      alloc.get(), phi::StringTensorMeta(meta));
  phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(phi::CPUPlace());

  pstring* cpu_strings_x_data =
      dev_ctx->template Alloc<pstring>(cpu_strings_x.get());
  std::string strs[] = {"óÓsscHloëË", "óÓsscHloëËóÓsscHloëËóÓsscHloëË"};
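  // These inputs contain multi-byte UTF-8 code points (ó, Ó, ë, Ë) that the
  // ASCII path would leave untouched; the UTF-8 path converts them as shown
  // in the expected results below.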
  for (int i = 0; i < 2; ++i) {
    cpu_strings_x_data[i] = strs[i];
  }
  // 2. get expected results
  std::string expected_results[] = {"óósschloëë",
                                    "ÓÓSSCHLOËË",
                                    "óósschloëëóósschloëëóósschloëë",
                                    "ÓÓSSCHLOËËÓÓSSCHLOËËÓÓSSCHLOËË"};
  // 3. test API, utf8 encoding
  paddle::experimental::Tensor x(cpu_strings_x);
  auto lower_out = paddle::experimental::strings::lower(x, true);
  auto upper_out = paddle::experimental::strings::upper(x, true);

  auto lower_tensor =
      std::dynamic_pointer_cast<phi::StringTensor>(lower_out.impl());
  auto upper_tensor =
      std::dynamic_pointer_cast<phi::StringTensor>(upper_out.impl());
  ASSERT_EQ(lower_tensor->dims(), dims);
  ASSERT_EQ(upper_tensor->dims(), dims);

  auto lower_tensor_ptr = lower_tensor->data();
  auto upper_tensor_ptr = upper_tensor->data();

  const char* cpu_results[] = {lower_tensor_ptr[0].data(),
                               upper_tensor_ptr[0].data(),
                               lower_tensor_ptr[1].data(),
                               upper_tensor_ptr[1].data()};

  for (int i = 0; i < 4; ++i) {
    ASSERT_EQ(std::string(cpu_results[i]), expected_results[i]);
  }
}

}  // namespace tests
}  // namespace paddle