@@ -137,7 +137,7 @@ private void checkModuleServices(ModuleReference mref) {
throw new GradleException(
String.format(
Locale.ROOT,
"Expected provides {} in module %s with provides {}.",
"Expected provides %s in module %s with provides %s.",
service,
mref.descriptor().name(),
mref.descriptor().provides()
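The exception message above is rendered with `String.format(Locale.ROOT, ...)`, which only understands `%`-style conversion specifiers: a Log4j-style `{}` is printed verbatim and the corresponding argument is silently dropped. A minimal standalone sketch of the difference (class name and sample values are invented for illustration):

```java
import java.util.Locale;

public class PlaceholderDemo {
    public static void main(String[] args) {
        // Mixed placeholders, as in the old format string: {} is not a format
        // specifier, so it prints literally and the extra arguments are ignored.
        System.out.println(String.format(Locale.ROOT,
            "Expected provides {} in module %s with provides {}.",
            "org.example.MyService", "org.example.module", "[org.example.MyServiceImpl]"));
        // -> Expected provides {} in module org.example.MyService with provides {}.

        // With %s throughout, each argument is substituted in order.
        System.out.println(String.format(Locale.ROOT,
            "Expected provides %s in module %s with provides %s.",
            "org.example.MyService", "org.example.module", "[org.example.MyServiceImpl]"));
        // -> Expected provides org.example.MyService in module org.example.module
        //    with provides [org.example.MyServiceImpl].
    }
}
```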

@@ -69,7 +69,7 @@ public void onNewClusterState(ClusterState state1) {
public void onClusterServiceClose() {
logger.debug(
() -> format(
"[{}] cluster service closed while waiting for enough shards to be started.",
"[%s] cluster service closed while waiting for enough shards to be started.",
Arrays.toString(indexNames)
)
);
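This logging call builds its message inside a supplier via the statically imported `format(...)` helper rather than handing a parameterized template to the logger, so its placeholders have to be `%s`. A rough sketch of the two styles side by side, assuming (as the diff suggests) that the helper delegates to `String.format`; the stand-in `format` method, class name, and sample index names are illustrative only:

```java
import java.util.Arrays;
import java.util.Locale;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class SupplierLoggingSketch {
    private static final Logger logger = LogManager.getLogger(SupplierLoggingSketch.class);

    // Stand-in for the statically imported format(...) helper used in the diff;
    // assumed here to delegate to String.format, hence %s rather than {}.
    private static String format(String pattern, Object... args) {
        return String.format(Locale.ROOT, pattern, args);
    }

    public static void main(String[] args) {
        String[] indexNames = { "index-1", "index-2" };

        // Parameterized Log4j call: the logger itself substitutes {} placeholders.
        logger.debug("[{}] cluster service closed while waiting for enough shards to be started.",
            Arrays.toString(indexNames));

        // Supplier form: the message string is produced by format(...), so it
        // must use %s; a {} here would survive into the log output unchanged.
        logger.debug(() -> format("[%s] cluster service closed while waiting for enough shards to be started.",
            Arrays.toString(indexNames)));
    }
}
```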

@@ -249,7 +249,7 @@ public ClusterState execute(BatchExecutionContext<CloseIndicesTask> batchExecuti
if (shardsAcknowledged == false) {
logger.debug(
() -> format(
"[{}] indices closed, but the operation timed out while "
"[%s] indices closed, but the operation timed out while "
+ "waiting for enough shards to be started.",
Arrays.toString(waitForIndices)
)
@@ -916,7 +916,7 @@ public void openIndices(final OpenIndexClusterStateUpdateRequest request, final
if (shardsAcknowledged == false) {
logger.debug(
() -> format(
"[{}] indices opened, but the operation timed out while waiting for enough shards to be started.",
"[%s] indices opened, but the operation timed out while waiting for enough shards to be started.",
Arrays.toString(indexNames)
)
);

@@ -290,7 +290,7 @@ private void deleteFailedDeployment(
Exception exception,
ActionListener<CreateTrainedModelAssignmentAction.Response> listener
) {
logger.trace(() -> format("[{}] Deleting failed deployment", modelId), exception);
logger.trace(() -> format("[%s] Deleting failed deployment", modelId), exception);
trainedModelAssignmentService.deleteModelAssignment(modelId, ActionListener.wrap(pTask -> listener.onFailure(exception), e -> {
logger.error(
() -> format(

@@ -228,7 +228,7 @@ static ClusterState removeRoutingToUnassignableNodes(ClusterState currentState)
if (routedNodeIdsToRemove.isEmpty() == false) {
logger.debug(
() -> format(
"[%s] removing routing entries to nodes {} because they have been removed or are shutting down",
"[%s] removing routing entries to nodes %s because they have been removed or are shutting down",
assignment.getModelId(),
routedNodeIdsToRemove
)

@@ -534,7 +534,7 @@ XContentBuilder maybeBuildUpdatedDocument(

final List<RoleDescriptor> keyRoles = request.getRoleDescriptors();
if (keyRoles != null) {
logger.trace(() -> format("Building API key doc with updated role descriptors [{}]", keyRoles));
logger.trace(() -> format("Building API key doc with updated role descriptors [%s]", keyRoles));
addRoleDescriptors(builder, keyRoles);
} else {
assert currentApiKeyDoc.roleDescriptorsBytes != null;
@@ -551,7 +551,7 @@ XContentBuilder maybeBuildUpdatedDocument(
) == false : "API key doc to be updated contains reserved metadata";
final Map<String, Object> metadata = request.getMetadata();
if (metadata != null) {
logger.trace(() -> format("Building API key doc with updated metadata [{}]", metadata));
logger.trace(() -> format("Building API key doc with updated metadata [%s]", metadata));
builder.field("metadata_flattened", metadata);
} else {
builder.rawField(
@@ -1507,7 +1507,7 @@ public void onResponse(ClearSecurityCacheResponse nodes) {

@Override
public void onFailure(Exception e) {
logger.error(() -> format("unable to clear API key cache [{}]", clearApiKeyCacheRequest.cacheName()), e);
logger.error(() -> format("unable to clear API key cache [%s]", clearApiKeyCacheRequest.cacheName()), e);
listener.onFailure(new ElasticsearchException("clearing the API key cache failed; please clear the caches manually", e));
}
});

@@ -513,7 +513,7 @@ public void checkPrivileges(
final Role userRole = ((RBACAuthorizationInfo) authorizationInfo).getRole();
logger.trace(
() -> format(
"Check whether role [{}] has privileges [{}]",
"Check whether role [%s] has privileges [%s]",
Strings.arrayToCommaDelimitedString(userRole.names()),
privilegesToCheck
)
@@ -586,7 +586,7 @@ public void checkPrivileges(
.map(RoleDescriptor.ApplicationResourcePrivileges::getApplication)
.collect(Collectors.toSet());
for (String applicationName : applicationNames) {
logger.debug(() -> format("Checking privileges for application [{}]", applicationName));
logger.debug(() -> format("Checking privileges for application [%s]", applicationName));
final ResourcePrivilegesMap.Builder resourcePrivilegesMapBuilder = privilegesToCheck.runDetailedCheck()
? ResourcePrivilegesMap.builder()
: null;