instance_id (string, 17-39 chars) | repo (string, 8 distinct values) | issue_id (string, 14-34 chars) | pr_id (string, 14-34 chars) | linking_methods (sequence, 1-3 items) | base_commit (string, 40 chars) | merge_commit (string, 0-40 chars, nullable) | hints_text (sequence, 0-106 items) | resolved_comments (sequence, 0-119 items) | created_at (unknown type) | labeled_as (sequence, 0-7 items) | problem_title (string, 7-174 chars) | problem_statement (string, 0-55.4k chars) | gold_files (sequence, 0-10 items) | gold_files_postpatch (sequence, 1-10 items) | test_files (sequence, 0-60 items) | gold_patch (string, 220-5.83M chars) | test_patch (string, 386-194k chars, nullable) | split_random (string, 3 distinct values) | split_time (string, 3 distinct values) | issue_start_time (timestamp[ns]) | issue_created_at (unknown type) | issue_by_user (string, 3-21 chars) | split_repo (string, 3 distinct values) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
provectus/kafka-ui/190_236 | provectus/kafka-ui | provectus/kafka-ui/190 | provectus/kafka-ui/236 | [
"timestamp(timedelta=1.0, similarity=0.853964306323165)",
"connected"
] | 575d83eafffe017d7d703df8d83a6a2d2f08d4f2 | 7c86adbd6724acf75107b31cbebc0a34691c067f | [] | [
"Is there a way to avoid custom css?",
"Can we wrap this to the hook? It might be useful for schema registry",
"It is important to reduce custom stuff. Let's use native title",
"tip: `Record<string, string>`",
"You can use Bulma helpers https://bulma.io/documentation/helpers/flexbox-helpers/#justify-content ",
"Same here",
"What is this div for?",
"Yep, got rid of it"
] | "2021-03-10T13:30:05Z" | [
"type/enhancement",
"good first issue",
"scope/frontend"
] | Topic message : json export feature | Hi,
I like the tree presentation for topic messages.
It would also be really nice to have a way to copy the message content in JSON format (so we can export it somewhere else and work on it).
For now, copying from the UI is not usable, as it does not let us export real JSON.
Version used : 0.0.9 | [
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessageItem.tsx",
"kafka-ui-react-app/src/components/common/JSONViewer/JSONViewer.tsx"
] | [
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessageItem.tsx",
"kafka-ui-react-app/src/components/common/JSONViewer/DynamicButton.tsx",
"kafka-ui-react-app/src/components/common/JSONViewer/JSONViewer.tsx",
"kafka-ui-react-app/src/components/common/JSONViewer/__tests__/DynamicButton.spec.tsx",
"kafka-ui-react-app/src/lib/hooks/__tests__/useDataSaver.spec.tsx",
"kafka-ui-react-app/src/lib/hooks/useDataSaver.tsx"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessageItem.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessageItem.tsx
index 0f7b13de23c..a541a725e3a 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessageItem.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessageItem.tsx
@@ -21,7 +21,7 @@ const MessageItem: React.FC<MessageItemProp> = ({
<td style={{ width: 150 }}>{offset}</td>
<td style={{ width: 100 }}>{partition}</td>
<td style={{ wordBreak: 'break-word' }}>
- {content && <JSONViewer data={content as { [key: string]: string }} />}
+ {content && <JSONViewer data={content as Record<string, string>} />}
</td>
</tr>
);
diff --git a/kafka-ui-react-app/src/components/common/JSONViewer/DynamicButton.tsx b/kafka-ui-react-app/src/components/common/JSONViewer/DynamicButton.tsx
new file mode 100644
index 00000000000..a72592d5e92
--- /dev/null
+++ b/kafka-ui-react-app/src/components/common/JSONViewer/DynamicButton.tsx
@@ -0,0 +1,44 @@
+import React from 'react';
+
+interface ButtonProps {
+ callback: () => void;
+ classes?: string;
+ title: string;
+ style?: { [key: string]: string | number };
+ text: {
+ default: string;
+ dynamic: string;
+ };
+}
+
+const DynamicButton: React.FC<ButtonProps> = ({
+ callback,
+ classes,
+ title,
+ style,
+ text,
+ children,
+}) => {
+ const [buttonText, setButtonText] = React.useState(text.default);
+ let timeout: number;
+ const clickHandler = () => {
+ callback();
+ setButtonText(text.dynamic);
+ timeout = window.setTimeout(() => setButtonText(text.default), 3000);
+ };
+ React.useEffect(() => () => window.clearTimeout(timeout), [callback]);
+ return (
+ <button
+ className={classes}
+ title={title}
+ type="button"
+ style={style}
+ onClick={clickHandler}
+ >
+ {children}
+ <span>{buttonText}</span>
+ </button>
+ );
+};
+
+export default DynamicButton;
diff --git a/kafka-ui-react-app/src/components/common/JSONViewer/JSONViewer.tsx b/kafka-ui-react-app/src/components/common/JSONViewer/JSONViewer.tsx
index 6627e2d072e..68d38a1909a 100644
--- a/kafka-ui-react-app/src/components/common/JSONViewer/JSONViewer.tsx
+++ b/kafka-ui-react-app/src/components/common/JSONViewer/JSONViewer.tsx
@@ -1,6 +1,8 @@
import React from 'react';
import JSONTree from 'react-json-tree';
+import useDataSaver from 'lib/hooks/useDataSaver';
import theme from './themes/google';
+import DynamicButton from './DynamicButton';
interface JSONViewerProps {
data: {
@@ -8,8 +10,45 @@ interface JSONViewerProps {
};
}
-const JSONViewer: React.FC<JSONViewerProps> = ({ data }) => (
- <JSONTree data={data} theme={theme} shouldExpandNode={() => true} hideRoot />
-);
+const JSONViewer: React.FC<JSONViewerProps> = ({ data }) => {
+ const { copyToClipboard, saveFile } = useDataSaver();
+ const copyButtonHandler = () => {
+ copyToClipboard(JSON.stringify(data));
+ };
+ const buttonClasses = 'button is-link is-outlined is-small is-centered';
+ return (
+ <div>
+ <JSONTree
+ data={data}
+ theme={theme}
+ shouldExpandNode={() => true}
+ hideRoot
+ />
+ <div className="field has-addons is-justify-content-flex-end">
+ <DynamicButton
+ callback={copyButtonHandler}
+ classes={`${buttonClasses} mr-1`}
+ title="Copy the message to the clipboard"
+ text={{ default: 'Copy', dynamic: 'Copied!' }}
+ >
+ <span className="icon">
+ <i className="far fa-clipboard" />
+ </span>
+ </DynamicButton>
+ <button
+ className={buttonClasses}
+ title="Download the message as a .json/.txt file"
+ type="button"
+ onClick={() => saveFile(JSON.stringify(data), `topic-message`)}
+ >
+ <span className="icon">
+ <i className="fas fa-file-download" />
+ </span>
+ <span>Save</span>
+ </button>
+ </div>
+ </div>
+ );
+};
export default JSONViewer;
diff --git a/kafka-ui-react-app/src/components/common/JSONViewer/__tests__/DynamicButton.spec.tsx b/kafka-ui-react-app/src/components/common/JSONViewer/__tests__/DynamicButton.spec.tsx
new file mode 100644
index 00000000000..2e6ef202e3b
--- /dev/null
+++ b/kafka-ui-react-app/src/components/common/JSONViewer/__tests__/DynamicButton.spec.tsx
@@ -0,0 +1,24 @@
+import { mount, shallow } from 'enzyme';
+import React from 'react';
+import DynamicButton from '../DynamicButton';
+
+describe('DynamicButton', () => {
+ const mockCallback = jest.fn();
+ const text = { default: 'DefaultText', dynamic: 'DynamicText' };
+ it('exectutes callback', () => {
+ const component = shallow(
+ <DynamicButton callback={mockCallback} title="title" text={text} />
+ );
+ component.simulate('click');
+ expect(mockCallback).toBeCalled();
+ });
+
+ it('changes the text', () => {
+ const component = mount(
+ <DynamicButton callback={mockCallback} title="title" text={text} />
+ );
+ expect(component.text()).toEqual(text.default);
+ component.simulate('click');
+ expect(component.text()).toEqual(text.dynamic);
+ });
+});
diff --git a/kafka-ui-react-app/src/lib/hooks/__tests__/useDataSaver.spec.tsx b/kafka-ui-react-app/src/lib/hooks/__tests__/useDataSaver.spec.tsx
new file mode 100644
index 00000000000..4897bafccb1
--- /dev/null
+++ b/kafka-ui-react-app/src/lib/hooks/__tests__/useDataSaver.spec.tsx
@@ -0,0 +1,40 @@
+import useDataSaver from '../useDataSaver';
+
+describe('useDataSaver hook', () => {
+ const content = {
+ title: 'title',
+ };
+ it('downloads the file', () => {
+ const link: HTMLAnchorElement = document.createElement('a');
+ link.click = jest.fn();
+ const mockCreate = jest
+ .spyOn(document, 'createElement')
+ .mockImplementation(() => link);
+ const { saveFile } = useDataSaver();
+ saveFile(JSON.stringify(content), 'fileName');
+
+ expect(mockCreate).toHaveBeenCalledTimes(1);
+ expect(link.download).toEqual('fileName.json');
+ expect(link.href).toEqual(
+ `data:text/json;charset=utf-8,${encodeURIComponent(
+ JSON.stringify(content)
+ )}`
+ );
+ expect(link.click).toHaveBeenCalledTimes(1);
+ });
+
+ it('copies the data to the clipboard', () => {
+ Object.assign(navigator, {
+ clipboard: {
+ writeText: jest.fn(),
+ },
+ });
+ jest.spyOn(navigator.clipboard, 'writeText');
+ const { copyToClipboard } = useDataSaver();
+ copyToClipboard(JSON.stringify(content));
+
+ expect(navigator.clipboard.writeText).toHaveBeenCalledWith(
+ JSON.stringify(content)
+ );
+ });
+});
diff --git a/kafka-ui-react-app/src/lib/hooks/useDataSaver.tsx b/kafka-ui-react-app/src/lib/hooks/useDataSaver.tsx
new file mode 100644
index 00000000000..3fc23a168ad
--- /dev/null
+++ b/kafka-ui-react-app/src/lib/hooks/useDataSaver.tsx
@@ -0,0 +1,27 @@
+const useDataSaver = () => {
+ const copyToClipboard = (content: string) => {
+ if (navigator.clipboard) navigator.clipboard.writeText(content);
+ };
+
+ const saveFile = (content: string, fileName: string) => {
+ let extension = 'json';
+ try {
+ JSON.parse(content);
+ } catch (e) {
+ extension = 'txt';
+ }
+ const dataStr = `data:text/json;charset=utf-8,${encodeURIComponent(
+ content
+ )}`;
+ const downloadAnchorNode = document.createElement('a');
+ downloadAnchorNode.setAttribute('href', dataStr);
+ downloadAnchorNode.setAttribute('download', `${fileName}.${extension}`);
+ document.body.appendChild(downloadAnchorNode);
+ downloadAnchorNode.click();
+ downloadAnchorNode.remove();
+ };
+
+ return { copyToClipboard, saveFile };
+};
+
+export default useDataSaver;
| null | train | train | 2021-03-17T17:34:57 | "2021-02-10T22:05:55Z" | giom-l | train |
provectus/kafka-ui/304_310 | provectus/kafka-ui | provectus/kafka-ui/304 | provectus/kafka-ui/310 | [
"keyword_pr_to_issue"
] | 7672f5e4cdd0441b0f2d3aacede4fcef1125a3b8 | 8f4fc12f121fe033b22abae8a828ad97f9c7e74b | [
"@martenlindblad, thank you for creating this issue. We'll try to add this feature in nearest releases. "
] | [] | "2021-03-24T13:23:24Z" | [
"type/enhancement"
] | Provide jars in releases. | Hi,
We run kafka-ui in a non-linux environment. It's quite tricky to compile the fat jar without docker/linux.
Can you please provide a compiled jar so we can deploy without building it ourselves?
Thank you. | [
".github/workflows/release.yaml",
".github/workflows/tags.yaml"
] | [
".github/workflows/release.yaml"
] | [] | diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 8b1877e4776..8903288f10f 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -5,6 +5,8 @@ on:
jobs:
release:
runs-on: ubuntu-latest
+ outputs:
+ version: ${{steps.prep.outputs.version}}
steps:
- uses: actions/checkout@v2
- run: |
@@ -41,6 +43,11 @@ jobs:
echo ::set-output name=version::${VERSION}
- name: Build with Maven
run: mvn clean package -Pprod
+ - name: Archive JAR
+ uses: actions/upload-artifact@v2
+ with:
+ name: kafka-ui-${{ steps.prep.outputs.version }}
+ path: kafka-ui-api/target/kafka-ui-api-${{ steps.prep.outputs.version }}.jar
#################
# #
# Docker images #
@@ -87,4 +94,66 @@ jobs:
build-args: |
JAR_FILE=kafka-ui-api-${{ steps.prep.outputs.version }}.jar
cache-from: type=local,src=/tmp/.buildx-cache
- cache-to: type=local,dest=/tmp/.buildx-cache
\ No newline at end of file
+ cache-to: type=local,dest=/tmp/.buildx-cache
+ charts:
+ runs-on: ubuntu-latest
+ needs: release
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ fetch-depth: 1
+ - run: |
+ git config user.name github-actions
+ git config user.email [email protected]
+ - uses: azure/setup-helm@v1
+ - name: update appVersion
+ run: |
+ export version=${{needs.release.outputs.version}}
+ sed -i "s/appVersion:.*/appVersion: ${version}/" charts/kafka-ui/Chart.yaml
+ - name:
+ run: |
+ export VERSION=${{needs.release.outputs.version}}
+ MSG=$(helm package --app-version ${VERSION} charts/kafka-ui)
+ git fetch origin
+ git stash
+ git checkout -b gh-pages origin/gh-pages
+ helm repo index .
+ git add -f ${MSG##*/} index.yaml
+ git commit -m "release ${VERSION}"
+ git push
+ gh-release:
+ runs-on: ubuntu-latest
+ needs: release
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ fetch-depth: 0
+ - run: |
+ git config user.name github-actions
+ git config user.email [email protected]
+ - id: generate
+ shell: /usr/bin/bash -x -e {0}
+ run: |
+ VERSION=${{needs.release.outputs.version}}
+ CHANGELOG=$(git --no-pager log --oneline --pretty=format:"- %s" `git tag --sort=-creatordate | grep '^v.*' | head -n2 | tail -n1`.. | uniq | grep -v '^- Merge\|^- skip')
+ CHANGELOG="${CHANGELOG//'%'/'%25'}"
+ CHANGELOG="${CHANGELOG//$'\n'/'%0A'}"
+ CHANGELOG="${CHANGELOG//$'\r'/'%0D'}"
+ echo ${CHANGELOG}
+ echo "::set-output name=changelog::${CHANGELOG}"
+ - name: Download release JAR
+ uses: actions/download-artifact@v2
+ with:
+ name: kafka-ui-${{needs.release.outputs.version}}
+ path: .
+ - id: create_release
+ uses: softprops/action-gh-release@v1
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ name: ${{needs.release.outputs.version}}
+ draft: false
+ tag_name: "v${{needs.release.outputs.version}}"
+ prerelease: false
+ files: kafka-ui-api-${{needs.release.outputs.version}}.jar
+ body: ${{steps.generate.outputs.changelog}}
\ No newline at end of file
diff --git a/.github/workflows/tags.yaml b/.github/workflows/tags.yaml
deleted file mode 100644
index 562d0f5214d..00000000000
--- a/.github/workflows/tags.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-name: after_release
-on:
- push:
- tags:
- - "v**"
-jobs:
- charts:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- with:
- fetch-depth: 1
- - run: |
- git config user.name github-actions
- git config user.email [email protected]
- - uses: azure/setup-helm@v1
- - name: update appVersion
- run: |
- export version=${GITHUB_REF##*/}
- sed -i "s/appVersion:.*/appVersion: ${version}/" charts/kafka-ui/Chart.yaml
- - name:
- run: |
- export VERSION=${GITHUB_REF##*/}
- MSG=$(helm package --app-version ${VERSION} charts/kafka-ui)
- git fetch origin
- git stash
- git checkout -b gh-pages origin/gh-pages
- helm repo index .
- git add -f ${MSG##*/} index.yaml
- git commit -m "release ${VERSION}"
- git push
- gh-release:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- with:
- fetch-depth: 0
- - run: |
- git config user.name github-actions
- git config user.email [email protected]
- - id: generate
- shell: /usr/bin/bash -x -e {0}
- run: |
- VERSION=${GITHUB_REF##*/}
- CHANGELOG=$(git --no-pager log --oneline --pretty=format:"- %s" `git tag --sort=-creatordate | grep '^v.*' | head -n2 | tail -n1`.. | uniq | grep -v '^- Merge\|^- skip')
- CHANGELOG="${CHANGELOG//'%'/'%25'}"
- CHANGELOG="${CHANGELOG//$'\n'/'%0A'}"
- CHANGELOG="${CHANGELOG//$'\r'/'%0D'}"
- echo ${CHANGELOG}
- echo "::set-output name=changelog::${CHANGELOG}"
- echo "::set-output name=version::${VERSION}"
- - id: create_release
- uses: actions/github-script@v3
- env:
- CHANGELOG: ${{steps.generate.outputs.changelog}}
- VERSION: ${{steps.generate.outputs.version}}
- with:
- github-token: ${{secrets.GITHUB_TOKEN}}
- script: |
- github.repos.createRelease({
- owner: context.repo.owner,
- repo: context.repo.repo,
- tag_name: context.ref,
- name: "Release "+process.env.VERSION,
- body: process.env.CHANGELOG,
- draft: false,
- prerelease: false
- });
| null | train | train | 2021-03-24T11:24:10 | "2021-03-24T07:49:28Z" | martenlindblad | train |
provectus/kafka-ui/309_312 | provectus/kafka-ui | provectus/kafka-ui/309 | provectus/kafka-ui/312 | [
"timestamp(timedelta=0.0, similarity=0.8929031241841806)",
"connected"
] | a256709580b34683ad6861adf5ff610ba55025ea | 8d2f929a52109281559e588f77e708e6f1bd60cc | [] | [] | "2021-03-24T14:17:27Z" | [
"scope/backend",
"scope/frontend"
] | Split models for topic creation & update endpoints | Currently the same model "TopicFormData" is used for topic creation and update. This is not convenient because some fields (name, partitions, replicationFactor) are marked as required, which is only suitable for create. | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/GlobalErrorWebExceptionHandler.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml",
"kafka-ui-react-app/src/redux/actions/thunks/topics.ts",
"kafka-ui-react-app/src/redux/interfaces/topic.ts"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/GlobalErrorWebExceptionHandler.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml",
"kafka-ui-react-app/src/redux/actions/thunks/topics.ts",
"kafka-ui-react-app/src/redux/interfaces/topic.ts"
] | [
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java",
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/ReadOnlyModeTests.java"
] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java
index bb1c123141d..be76f78c46a 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java
@@ -3,8 +3,9 @@
import com.provectus.kafka.ui.api.TopicsApi;
import com.provectus.kafka.ui.model.Topic;
import com.provectus.kafka.ui.model.TopicConfig;
+import com.provectus.kafka.ui.model.TopicCreation;
import com.provectus.kafka.ui.model.TopicDetails;
-import com.provectus.kafka.ui.model.TopicFormData;
+import com.provectus.kafka.ui.model.TopicUpdate;
import com.provectus.kafka.ui.model.TopicsResponse;
import com.provectus.kafka.ui.service.ClusterService;
import java.util.Optional;
@@ -26,8 +27,8 @@ public class TopicsController implements TopicsApi {
@Override
public Mono<ResponseEntity<Topic>> createTopic(
- String clusterName, @Valid Mono<TopicFormData> topicFormData, ServerWebExchange exchange) {
- return clusterService.createTopic(clusterName, topicFormData)
+ String clusterName, @Valid Mono<TopicCreation> topicCreation, ServerWebExchange exchange) {
+ return clusterService.createTopic(clusterName, topicCreation)
.map(s -> new ResponseEntity<>(s, HttpStatus.OK))
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
}
@@ -70,8 +71,8 @@ public Mono<ResponseEntity<TopicsResponse>> getTopics(String clusterName, @Valid
@Override
public Mono<ResponseEntity<Topic>> updateTopic(
- String clusterId, String topicName, @Valid Mono<TopicFormData> topicFormData,
+ String clusterId, String topicName, @Valid Mono<TopicUpdate> topicUpdate,
ServerWebExchange exchange) {
- return clusterService.updateTopic(clusterId, topicName, topicFormData).map(ResponseEntity::ok);
+ return clusterService.updateTopic(clusterId, topicName, topicUpdate).map(ResponseEntity::ok);
}
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/GlobalErrorWebExceptionHandler.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/GlobalErrorWebExceptionHandler.java
index 3bded1837c7..6bfdfeab7c8 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/GlobalErrorWebExceptionHandler.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/GlobalErrorWebExceptionHandler.java
@@ -71,7 +71,7 @@ private Mono<ServerResponse> renderErrorResponse(ServerRequest request) {
private Mono<ServerResponse> renderDefault(Throwable throwable, ServerRequest request) {
var response = new ErrorResponse()
.code(ErrorCode.UNEXPECTED.code())
- .message(throwable.getMessage())
+ .message(coalesce(throwable.getMessage(), "Unexpected internal error"))
.requestId(requestId(request))
.timestamp(currentTimestamp());
return ServerResponse
@@ -84,7 +84,7 @@ private Mono<ServerResponse> render(CustomBaseException baseException, ServerReq
ErrorCode errorCode = baseException.getErrorCode();
var response = new ErrorResponse()
.code(errorCode.code())
- .message(baseException.getMessage())
+ .message(coalesce(baseException.getMessage(), "Internal error"))
.requestId(requestId(request))
.timestamp(currentTimestamp());
return ServerResponse
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
index 18afa02d494..6f07f070ce0 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
@@ -15,9 +15,10 @@
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.Topic;
import com.provectus.kafka.ui.model.TopicConfig;
+import com.provectus.kafka.ui.model.TopicCreation;
import com.provectus.kafka.ui.model.TopicDetails;
-import com.provectus.kafka.ui.model.TopicFormData;
import com.provectus.kafka.ui.model.TopicMessage;
+import com.provectus.kafka.ui.model.TopicUpdate;
import com.provectus.kafka.ui.model.TopicsResponse;
import com.provectus.kafka.ui.util.ClusterUtil;
import java.util.Collection;
@@ -125,9 +126,9 @@ public Optional<List<TopicConfig>> getTopicConfigs(String name, String topicName
.collect(Collectors.toList()));
}
- public Mono<Topic> createTopic(String clusterName, Mono<TopicFormData> topicFormData) {
+ public Mono<Topic> createTopic(String clusterName, Mono<TopicCreation> topicCreation) {
return clustersStorage.getClusterByName(clusterName).map(cluster ->
- kafkaService.createTopic(cluster, topicFormData)
+ kafkaService.createTopic(cluster, topicCreation)
.doOnNext(t -> updateCluster(t, clusterName, cluster))
.map(clusterMapper::toTopic)
).orElse(Mono.empty());
@@ -200,9 +201,9 @@ public Flux<Broker> getBrokers(String clusterName) {
@SneakyThrows
public Mono<Topic> updateTopic(String clusterName, String topicName,
- Mono<TopicFormData> topicFormData) {
+ Mono<TopicUpdate> topicUpdate) {
return clustersStorage.getClusterByName(clusterName).map(cl ->
- topicFormData
+ topicUpdate
.flatMap(t -> kafkaService.updateTopic(cl, topicName, t))
.doOnNext(t -> updateCluster(t, clusterName, cl))
.map(clusterMapper::toTopic)
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
index 34832187250..4ceb965d412 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
@@ -12,7 +12,8 @@
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.Metric;
import com.provectus.kafka.ui.model.ServerStatus;
-import com.provectus.kafka.ui.model.TopicFormData;
+import com.provectus.kafka.ui.model.TopicCreation;
+import com.provectus.kafka.ui.model.TopicUpdate;
import com.provectus.kafka.ui.util.ClusterUtil;
import com.provectus.kafka.ui.util.JmxClusterUtil;
import com.provectus.kafka.ui.util.JmxMetricsName;
@@ -223,8 +224,8 @@ private Mono<String> createTopic(AdminClient adminClient, NewTopic newTopic) {
@SneakyThrows
public Mono<InternalTopic> createTopic(AdminClient adminClient,
- Mono<TopicFormData> topicFormData) {
- return topicFormData.flatMap(
+ Mono<TopicCreation> topicCreation) {
+ return topicCreation.flatMap(
topicData -> {
NewTopic newTopic = new NewTopic(topicData.getName(), topicData.getPartitions(),
topicData.getReplicationFactor().shortValue());
@@ -242,9 +243,9 @@ public Mono<InternalTopic> createTopic(AdminClient adminClient,
);
}
- public Mono<InternalTopic> createTopic(KafkaCluster cluster, Mono<TopicFormData> topicFormData) {
+ public Mono<InternalTopic> createTopic(KafkaCluster cluster, Mono<TopicCreation> topicCreation) {
return getOrCreateAdminClient(cluster)
- .flatMap(ac -> createTopic(ac.getAdminClient(), topicFormData));
+ .flatMap(ac -> createTopic(ac.getAdminClient(), topicCreation));
}
public Mono<Void> deleteTopic(KafkaCluster cluster, String topicName) {
@@ -320,16 +321,16 @@ public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster) {
@SneakyThrows
public Mono<InternalTopic> updateTopic(KafkaCluster cluster, String topicName,
- TopicFormData topicFormData) {
+ TopicUpdate topicUpdate) {
ConfigResource topicCr = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
return getOrCreateAdminClient(cluster)
.flatMap(ac -> {
if (ac.getSupportedFeatures()
.contains(ExtendedAdminClient.SupportedFeature.INCREMENTAL_ALTER_CONFIGS)) {
- return incrementalAlterConfig(topicFormData, topicCr, ac)
+ return incrementalAlterConfig(topicUpdate, topicCr, ac)
.flatMap(c -> getUpdatedTopic(ac, topicName));
} else {
- return alterConfig(topicFormData, topicCr, ac)
+ return alterConfig(topicUpdate, topicCr, ac)
.flatMap(c -> getUpdatedTopic(ac, topicName));
}
});
@@ -341,9 +342,9 @@ private Mono<InternalTopic> getUpdatedTopic(ExtendedAdminClient ac, String topic
.filter(t -> t.getName().equals(topicName)).findFirst().orElseThrow());
}
- private Mono<String> incrementalAlterConfig(TopicFormData topicFormData, ConfigResource topicCr,
+ private Mono<String> incrementalAlterConfig(TopicUpdate topicUpdate, ConfigResource topicCr,
ExtendedAdminClient ac) {
- List<AlterConfigOp> listOp = topicFormData.getConfigs().entrySet().stream()
+ List<AlterConfigOp> listOp = topicUpdate.getConfigs().entrySet().stream()
.flatMap(cfg -> Stream.of(new AlterConfigOp(new ConfigEntry(cfg.getKey(), cfg.getValue()),
AlterConfigOp.OpType.SET))).collect(Collectors.toList());
return ClusterUtil.toMono(
@@ -352,9 +353,9 @@ private Mono<String> incrementalAlterConfig(TopicFormData topicFormData, ConfigR
}
@SuppressWarnings("deprecation")
- private Mono<String> alterConfig(TopicFormData topicFormData, ConfigResource topicCr,
+ private Mono<String> alterConfig(TopicUpdate topicUpdate, ConfigResource topicCr,
ExtendedAdminClient ac) {
- List<ConfigEntry> configEntries = topicFormData.getConfigs().entrySet().stream()
+ List<ConfigEntry> configEntries = topicUpdate.getConfigs().entrySet().stream()
.flatMap(cfg -> Stream.of(new ConfigEntry(cfg.getKey(), cfg.getValue())))
.collect(Collectors.toList());
Config config = new Config(configEntries);
diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
index e50ab98fccb..c1531a62aed 100644
--- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
+++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
@@ -162,7 +162,7 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/TopicFormData'
+ $ref: '#/components/schemas/TopicCreation'
responses:
201:
description: Created
@@ -215,7 +215,7 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/TopicFormData'
+ $ref: '#/components/schemas/TopicUpdate'
responses:
200:
description: Updated
@@ -1281,7 +1281,7 @@ components:
required:
- name
- TopicFormData:
+ TopicCreation:
type: object
properties:
name:
@@ -1296,6 +1296,18 @@ components:
type: string
required:
- name
+ - partitions
+ - replicationFactor
+
+ TopicUpdate:
+ type: object
+ properties:
+ configs:
+ type: object
+ additionalProperties:
+ type: string
+ required:
+ - configs
Broker:
type: object
diff --git a/kafka-ui-react-app/src/redux/actions/thunks/topics.ts b/kafka-ui-react-app/src/redux/actions/thunks/topics.ts
index 0b3e024182c..50942d410dd 100644
--- a/kafka-ui-react-app/src/redux/actions/thunks/topics.ts
+++ b/kafka-ui-react-app/src/redux/actions/thunks/topics.ts
@@ -4,7 +4,8 @@ import {
MessagesApi,
Configuration,
Topic,
- TopicFormData,
+ TopicCreation,
+ TopicUpdate,
TopicConfig,
} from 'generated-sources';
import {
@@ -136,7 +137,7 @@ export const fetchTopicConfig = (
}
};
-const formatTopicFormData = (form: TopicFormDataRaw): TopicFormData => {
+const formatTopicCreation = (form: TopicFormDataRaw): TopicCreation => {
const {
name,
partitions,
@@ -172,6 +173,36 @@ const formatTopicFormData = (form: TopicFormDataRaw): TopicFormData => {
};
};
+const formatTopicUpdate = (form: TopicFormDataRaw): TopicUpdate => {
+ const {
+ cleanupPolicy,
+ retentionBytes,
+ retentionMs,
+ maxMessageBytes,
+ minInSyncReplicas,
+ customParams,
+ } = form;
+
+ return {
+ configs: {
+ 'cleanup.policy': cleanupPolicy,
+ 'retention.ms': retentionMs,
+ 'retention.bytes': retentionBytes,
+ 'max.message.bytes': maxMessageBytes,
+ 'min.insync.replicas': minInSyncReplicas,
+ ...Object.values(customParams || {}).reduce(
+ (result: TopicFormFormattedParams, customParam: TopicConfig) => {
+ return {
+ ...result,
+ [customParam.name]: customParam.value,
+ };
+ },
+ {}
+ ),
+ },
+ };
+};
+
export const createTopic = (
clusterName: ClusterName,
form: TopicFormDataRaw
@@ -180,7 +211,7 @@ export const createTopic = (
try {
const topic: Topic = await topicsApiClient.createTopic({
clusterName,
- topicFormData: formatTopicFormData(form),
+ topicCreation: formatTopicCreation(form),
});
const state = getState().topics;
@@ -210,7 +241,7 @@ export const updateTopic = (
const topic: Topic = await topicsApiClient.updateTopic({
clusterName,
topicName: form.name,
- topicFormData: formatTopicFormData(form),
+ topicUpdate: formatTopicUpdate(form),
});
const state = getState().topics;
diff --git a/kafka-ui-react-app/src/redux/interfaces/topic.ts b/kafka-ui-react-app/src/redux/interfaces/topic.ts
index d7c2ee99fcb..a45f1daabca 100644
--- a/kafka-ui-react-app/src/redux/interfaces/topic.ts
+++ b/kafka-ui-react-app/src/redux/interfaces/topic.ts
@@ -3,7 +3,7 @@ import {
TopicDetails,
TopicMessage,
TopicConfig,
- TopicFormData,
+ TopicCreation,
GetTopicMessagesRequest,
} from 'generated-sources';
@@ -55,7 +55,7 @@ export interface TopicsState {
messages: TopicMessage[];
}
-export type TopicFormFormattedParams = TopicFormData['configs'];
+export type TopicFormFormattedParams = TopicCreation['configs'];
export interface TopicFormDataRaw {
name: string;
| diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java
index ee4ebb67a0b..0e46ed9e25b 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java
@@ -1,6 +1,6 @@
package com.provectus.kafka.ui;
-import com.provectus.kafka.ui.model.TopicFormData;
+import com.provectus.kafka.ui.model.TopicCreation;
import com.provectus.kafka.ui.model.TopicMessage;
import com.provectus.kafka.ui.producer.KafkaTestProducer;
import java.util.Map;
@@ -27,7 +27,7 @@ public void shouldDeleteRecords() {
var topicName = UUID.randomUUID().toString();
webTestClient.post()
.uri("/api/clusters/{clusterName}/topics", LOCAL)
- .bodyValue(new TopicFormData()
+ .bodyValue(new TopicCreation()
.name(topicName)
.partitions(1)
.replicationFactor(1)
diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/ReadOnlyModeTests.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/ReadOnlyModeTests.java
index 3e2ee1c5f8b..3ef65ce75fb 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/ReadOnlyModeTests.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/ReadOnlyModeTests.java
@@ -1,6 +1,7 @@
package com.provectus.kafka.ui;
-import com.provectus.kafka.ui.model.TopicFormData;
+import com.provectus.kafka.ui.model.TopicCreation;
+import com.provectus.kafka.ui.model.TopicUpdate;
import java.util.Map;
import java.util.UUID;
import lombok.extern.log4j.Log4j2;
@@ -24,7 +25,7 @@ public void shouldCreateTopicForNonReadonlyCluster() {
var topicName = UUID.randomUUID().toString();
webTestClient.post()
.uri("/api/clusters/{clusterName}/topics", LOCAL)
- .bodyValue(new TopicFormData()
+ .bodyValue(new TopicCreation()
.name(topicName)
.partitions(1)
.replicationFactor(1)
@@ -40,7 +41,7 @@ public void shouldNotCreateTopicForReadonlyCluster() {
var topicName = UUID.randomUUID().toString();
webTestClient.post()
.uri("/api/clusters/{clusterName}/topics", SECOND_LOCAL)
- .bodyValue(new TopicFormData()
+ .bodyValue(new TopicCreation()
.name(topicName)
.partitions(1)
.replicationFactor(1)
@@ -56,7 +57,7 @@ public void shouldUpdateTopicForNonReadonlyCluster() {
var topicName = UUID.randomUUID().toString();
webTestClient.post()
.uri("/api/clusters/{clusterName}/topics", LOCAL)
- .bodyValue(new TopicFormData()
+ .bodyValue(new TopicCreation()
.name(topicName)
.partitions(1)
.replicationFactor(1)
@@ -67,10 +68,7 @@ public void shouldUpdateTopicForNonReadonlyCluster() {
.isOk();
webTestClient.patch()
.uri("/api/clusters/{clusterName}/topics/{topicName}", LOCAL, topicName)
- .bodyValue(new TopicFormData()
- .name(topicName)
- .partitions(2)
- .replicationFactor(1)
+ .bodyValue(new TopicUpdate()
.configs(Map.of())
)
.exchange()
@@ -83,10 +81,7 @@ public void shouldNotUpdateTopicForReadonlyCluster() {
var topicName = UUID.randomUUID().toString();
webTestClient.patch()
.uri("/api/clusters/{clusterName}/topics/{topicName}", SECOND_LOCAL, topicName)
- .bodyValue(new TopicFormData()
- .name(topicName)
- .partitions(1)
- .replicationFactor(1)
+ .bodyValue(new TopicUpdate()
.configs(Map.of())
)
.exchange()
| val | train | 2021-03-24T14:51:40 | "2021-03-24T13:11:09Z" | iliax | train |
provectus/kafka-ui/299_332 | provectus/kafka-ui | provectus/kafka-ui/299 | provectus/kafka-ui/332 | [
"connected"
] | d759912c05c633e12d1c0ef04fc03869a11ff047 | aedf3c6536375461e50d11e013f5f6118d123e00 | [
"@antipova0803, thank you for creating issue. We'll try to find a root cause and fix it soon. Look's like we are triyng to connect jmx port after topic settings update without success. ",
"Issue is not connected with Null Pointer Exception on BE. There is not request to BE after Submit button click (you can see it if you open UI console).\r\n@workshur can you please check what is going on UI ? "
] | [] | "2021-03-30T14:49:15Z" | [
"scope/frontend"
] | Null Pointer Exception after saving changes during edit settings of the Topic | **Pre-conditions:**
Kafka has a Topic
**Steps:**
1. Navigate to the Topic
2. Click “Edit settings”
3. Click “Submit”
**Expected:**
The settings changes are saved successfully
**Actual:**
The app freezes. Null pointer exception in the console.
```
java.lang.NullPointerException: null
at org.apache.commons.pool2.impl.GenericKeyedObjectPool.returnObject(GenericKeyedObjectPool.java:470) ~[commons-pool2-2.2.jar!/:2.2]
at com.provectus.kafka.ui.util.JmxClusterUtil.getJmxMetrics(JmxClusterUtil.java:47) ~[classes!/:?]
at com.provectus.kafka.ui.service.KafkaService.lambda$getJmxMetric$53(KafkaService.java:413) ~[classes!/:?]
at java.util.Optional.map(Optional.java:258) ~[?:?]
at com.provectus.kafka.ui.service.KafkaService.getJmxMetric(KafkaService.java:413) ~[classes!/:?]
at com.provectus.kafka.ui.service.KafkaService.lambda$fillBrokerMetrics$55(KafkaService.java:424) ~[classes!/:?]
...
ERROR com.provectus.kafka.ui.util.JmxClusterUtil - Cannot invalidate object in pool, service:jmx:rmi:///jndi/rmi://kafka1:9998/jmxrmi
``` | [
"kafka-ui-react-app/src/components/Topics/Topic/Edit/Edit.tsx",
"kafka-ui-react-app/src/redux/actions/thunks/topics.ts"
] | [
"kafka-ui-react-app/src/components/Topics/Topic/Edit/Edit.tsx",
"kafka-ui-react-app/src/redux/actions/thunks/topics.ts"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Edit/Edit.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Edit/Edit.tsx
index b52735e9588..5daf7c408ba 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Edit/Edit.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Edit/Edit.tsx
@@ -21,7 +21,11 @@ interface Props {
isFetched: boolean;
isTopicUpdated: boolean;
fetchTopicConfig: (clusterName: ClusterName, topicName: TopicName) => void;
- updateTopic: (clusterName: ClusterName, form: TopicFormDataRaw) => void;
+ updateTopic: (
+ clusterName: ClusterName,
+ topicName: TopicName,
+ form: TopicFormDataRaw
+ ) => void;
}
const DEFAULTS = {
@@ -104,7 +108,7 @@ const Edit: React.FC<Props> = ({
});
const onSubmit = async (data: TopicFormDataRaw) => {
- updateTopic(clusterName, data);
+ updateTopic(clusterName, topicName, data);
setIsSubmitting(true); // Keep this action after updateTopic to prevent redirect before update.
};
diff --git a/kafka-ui-react-app/src/redux/actions/thunks/topics.ts b/kafka-ui-react-app/src/redux/actions/thunks/topics.ts
index 50942d410dd..6565aa23c78 100644
--- a/kafka-ui-react-app/src/redux/actions/thunks/topics.ts
+++ b/kafka-ui-react-app/src/redux/actions/thunks/topics.ts
@@ -234,13 +234,14 @@ export const createTopic = (
export const updateTopic = (
clusterName: ClusterName,
+ topicName: TopicName,
form: TopicFormDataRaw
): PromiseThunkResult => async (dispatch, getState) => {
dispatch(actions.updateTopicAction.request());
try {
const topic: Topic = await topicsApiClient.updateTopic({
clusterName,
- topicName: form.name,
+ topicName,
topicUpdate: formatTopicUpdate(form),
});
| null | train | train | 2021-03-29T13:31:17 | "2021-03-23T16:32:31Z" | antipova0803 | train |
provectus/kafka-ui/315_335 | provectus/kafka-ui | provectus/kafka-ui/315 | provectus/kafka-ui/335 | [
"connected"
] | 7c0cc3bf141e67ed6645488c89f223a5fe3147a7 | 7bc01811c39aaf824fa1c991271fea2063b3ecbf | [] | [] | "2021-04-01T14:16:02Z" | [
"type/enhancement",
"scope/frontend"
] | Display application version in UI | It would be cool to show which version (git sha or release tag) of the application is launched on UI, this will allow mentioning this info in newly created issues for faster resolving | [
"kafka-ui-api/pom.xml",
"kafka-ui-react-app/src/components/App.tsx",
"kafka-ui-react-app/src/components/__test__/App.spec.tsx",
"kafka-ui-react-app/src/components/__test__/__snapshots__/App.spec.tsx.snap",
"kafka-ui-react-app/src/lib/constants.ts"
] | [
"kafka-ui-api/pom.xml",
"kafka-ui-react-app/src/components/App.tsx",
"kafka-ui-react-app/src/components/Version/Version.tsx",
"kafka-ui-react-app/src/components/Version/__tests__/Version.spec.tsx",
"kafka-ui-react-app/src/components/Version/__tests__/__snapshots__/Version.spec.tsx.snap",
"kafka-ui-react-app/src/components/__tests__/App.spec.tsx",
"kafka-ui-react-app/src/components/__tests__/__snapshots__/App.spec.tsx.snap",
"kafka-ui-react-app/src/lib/constants.ts"
] | [] | diff --git a/kafka-ui-api/pom.xml b/kafka-ui-api/pom.xml
index e4f57e5bd36..a16cb803452 100644
--- a/kafka-ui-api/pom.xml
+++ b/kafka-ui-api/pom.xml
@@ -253,6 +253,29 @@
<id>prod</id>
<build>
<plugins>
+ <plugin>
+ <groupId>pl.project13.maven</groupId>
+ <artifactId>git-commit-id-plugin</artifactId>
+ <version>4.0.0</version>
+ <executions>
+ <execution>
+ <id>get-the-git-infos</id>
+ <goals>
+ <goal>revision</goal>
+ </goals>
+ <phase>initialize</phase>
+ </execution>
+ </executions>
+ <configuration>
+ <generateGitPropertiesFile>true</generateGitPropertiesFile>
+ <generateGitPropertiesFilename>${project.build.outputDirectory}/git.properties</generateGitPropertiesFilename>
+ <includeOnlyProperties>
+ <includeOnlyProperty>^git.build.(time|version)$</includeOnlyProperty>
+ <includeOnlyProperty>^git.commit.id.(abbrev|full)$</includeOnlyProperty>
+ </includeOnlyProperties>
+ <commitIdGenerationMode>full</commitIdGenerationMode>
+ </configuration>
+ </plugin>
<plugin>
<artifactId>maven-resources-plugin</artifactId>
<executions>
@@ -279,6 +302,10 @@
<version>${frontend-maven-plugin.version}</version>
<configuration>
<workingDirectory>../kafka-ui-react-app</workingDirectory>
+ <environmentVariables>
+ <REACT_APP_TAG>v${project.version}</REACT_APP_TAG>
+ <REACT_APP_COMMIT>${git.commit.id.abbrev}</REACT_APP_COMMIT>
+ </environmentVariables>
</configuration>
<executions>
<execution>
diff --git a/kafka-ui-react-app/src/components/App.tsx b/kafka-ui-react-app/src/components/App.tsx
index 9daf3dfb8a7..29f81d5828b 100644
--- a/kafka-ui-react-app/src/components/App.tsx
+++ b/kafka-ui-react-app/src/components/App.tsx
@@ -1,15 +1,17 @@
import './App.scss';
import React from 'react';
import { Switch, Route } from 'react-router-dom';
+import { GIT_TAG, GIT_COMMIT } from 'lib/constants';
import { Alerts } from 'redux/interfaces';
import NavContainer from './Nav/NavContainer';
import PageLoader from './common/PageLoader/PageLoader';
import Dashboard from './Dashboard/Dashboard';
import Cluster from './Cluster/Cluster';
+import Version from './Version/Version';
import Alert from './Alert/Alert';
export interface AppProps {
- isClusterListFetched: boolean;
+ isClusterListFetched?: boolean;
alerts: Alerts;
fetchClustersList: () => void;
}
@@ -35,6 +37,11 @@ const App: React.FC<AppProps> = ({
Kafka UI
</a>
</div>
+ <div className="navbar-end">
+ <div className="navbar-item mr-2">
+ <Version tag={GIT_TAG} commit={GIT_COMMIT} />
+ </div>
+ </div>
</nav>
<main className="Layout__container">
diff --git a/kafka-ui-react-app/src/components/Version/Version.tsx b/kafka-ui-react-app/src/components/Version/Version.tsx
new file mode 100644
index 00000000000..954679ba279
--- /dev/null
+++ b/kafka-ui-react-app/src/components/Version/Version.tsx
@@ -0,0 +1,35 @@
+import React from 'react';
+import { GIT_REPO_LINK } from 'lib/constants';
+
+export interface VesionProps {
+ tag?: string;
+ commit?: string;
+}
+
+const Version: React.FC<VesionProps> = ({ tag, commit }) => {
+ if (!tag) {
+ return null;
+ }
+
+ return (
+ <div className="is-size-7 has-text-grey">
+ <span className="has-text-grey-light mr-1">Version:</span>
+ <span className="mr-1">{tag}</span>
+ {commit && (
+ <>
+ <span>(</span>
+ <a
+ title="Current commit"
+ target="__blank"
+ href={`${GIT_REPO_LINK}/commit/${commit}`}
+ >
+ {commit}
+ </a>
+ <span>)</span>
+ </>
+ )}
+ </div>
+ );
+};
+
+export default Version;
diff --git a/kafka-ui-react-app/src/components/Version/__tests__/Version.spec.tsx b/kafka-ui-react-app/src/components/Version/__tests__/Version.spec.tsx
new file mode 100644
index 00000000000..84e4c011f5d
--- /dev/null
+++ b/kafka-ui-react-app/src/components/Version/__tests__/Version.spec.tsx
@@ -0,0 +1,29 @@
+import React from 'react';
+import { mount } from 'enzyme';
+import Version from '../Version';
+
+const tag = 'v1.0.1-SHAPSHOT';
+const commit = '123sdf34';
+
+describe('Version', () => {
+ it('shows nothing if tag is not defined', () => {
+ const component = mount(<Version />);
+ expect(component.html()).toEqual(null);
+ });
+
+ it('shows current tag when only tag is defined', () => {
+ const component = mount(<Version tag={tag} />);
+ expect(component.text()).toContain(tag);
+ });
+
+ it('shows current tag and commit', () => {
+ const component = mount(<Version tag={tag} commit={commit} />);
+ expect(component.text()).toContain(tag);
+ expect(component.text()).toContain(commit);
+ });
+
+ it('matches snapshot', () => {
+ const component = mount(<Version tag={tag} commit={commit} />);
+ expect(component).toMatchSnapshot();
+ });
+});
diff --git a/kafka-ui-react-app/src/components/Version/__tests__/__snapshots__/Version.spec.tsx.snap b/kafka-ui-react-app/src/components/Version/__tests__/__snapshots__/Version.spec.tsx.snap
new file mode 100644
index 00000000000..6b3eb009dd0
--- /dev/null
+++ b/kafka-ui-react-app/src/components/Version/__tests__/__snapshots__/Version.spec.tsx.snap
@@ -0,0 +1,36 @@
+// Jest Snapshot v1, https://goo.gl/fbAQLP
+
+exports[`Version matches snapshot 1`] = `
+<Version
+ commit="123sdf34"
+ tag="v1.0.1-SHAPSHOT"
+>
+ <div
+ className="is-size-7 has-text-grey"
+ >
+ <span
+ className="has-text-grey-light mr-1"
+ >
+ Version:
+ </span>
+ <span
+ className="mr-1"
+ >
+ v1.0.1-SHAPSHOT
+ </span>
+ <span>
+ (
+ </span>
+ <a
+ href="https://github.com/provectus/kafka-ui/commit/123sdf34"
+ target="__blank"
+ title="Current commit"
+ >
+ 123sdf34
+ </a>
+ <span>
+ )
+ </span>
+ </div>
+</Version>
+`;
diff --git a/kafka-ui-react-app/src/components/__test__/App.spec.tsx b/kafka-ui-react-app/src/components/__tests__/App.spec.tsx
similarity index 64%
rename from kafka-ui-react-app/src/components/__test__/App.spec.tsx
rename to kafka-ui-react-app/src/components/__tests__/App.spec.tsx
index 7118f1c9990..f37dc02f111 100644
--- a/kafka-ui-react-app/src/components/__test__/App.spec.tsx
+++ b/kafka-ui-react-app/src/components/__tests__/App.spec.tsx
@@ -1,7 +1,7 @@
import React from 'react';
import { mount, shallow } from 'enzyme';
import { Provider } from 'react-redux';
-import { StaticRouter } from 'react-router';
+import { StaticRouter } from 'react-router-dom';
import configureStore from 'redux/store/configureStore';
import App, { AppProps } from '../App';
@@ -18,12 +18,7 @@ describe('App', () => {
/>
);
- it('matches snapshot with initial props', () => {
- const wrapper = shallow(setupComponent());
- expect(wrapper).toMatchSnapshot();
- });
-
- it('correctly mounts App component', () => {
+ it('handles fetchClustersList', () => {
const wrapper = mount(
<Provider store={store}>
<StaticRouter>{setupComponent()}</StaticRouter>
@@ -33,12 +28,13 @@ describe('App', () => {
expect(fetchClustersList).toHaveBeenCalledTimes(1);
});
- it('correctly renders PageLoader', () => {
- const wrapper = shallow(setupComponent({ isClusterListFetched: false }));
- expect(wrapper.exists('PageLoader')).toBeTruthy();
-
- wrapper.setProps({ isClusterListFetched: true });
- expect(wrapper.exists('PageLoader')).toBeFalsy();
+ it('shows PageLoader until cluster list is fetched', () => {
+ const component = shallow(setupComponent({ isClusterListFetched: false }));
+ expect(component.exists('.Layout__container PageLoader')).toBeTruthy();
+ expect(component.exists('.Layout__container Switch')).toBeFalsy();
+ component.setProps({ isClusterListFetched: true });
+ expect(component.exists('.Layout__container PageLoader')).toBeFalsy();
+ expect(component.exists('.Layout__container Switch')).toBeTruthy();
});
it('correctly renders alerts', () => {
@@ -57,4 +53,9 @@ describe('App', () => {
expect(wrapper.exists('Alert')).toBeTruthy();
expect(wrapper.find('Alert').length).toEqual(1);
});
+
+ it('matches snapshot', () => {
+ const component = shallow(setupComponent());
+ expect(component).toMatchSnapshot();
+ });
});
diff --git a/kafka-ui-react-app/src/components/__test__/__snapshots__/App.spec.tsx.snap b/kafka-ui-react-app/src/components/__tests__/__snapshots__/App.spec.tsx.snap
similarity index 82%
rename from kafka-ui-react-app/src/components/__test__/__snapshots__/App.spec.tsx.snap
rename to kafka-ui-react-app/src/components/__tests__/__snapshots__/App.spec.tsx.snap
index 48041898329..c6531a057f5 100644
--- a/kafka-ui-react-app/src/components/__test__/__snapshots__/App.spec.tsx.snap
+++ b/kafka-ui-react-app/src/components/__tests__/__snapshots__/App.spec.tsx.snap
@@ -1,6 +1,6 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
-exports[`App matches snapshot with initial props 1`] = `
+exports[`App matches snapshot 1`] = `
<div
className="Layout"
>
@@ -19,6 +19,15 @@ exports[`App matches snapshot with initial props 1`] = `
Kafka UI
</a>
</div>
+ <div
+ className="navbar-end"
+ >
+ <div
+ className="navbar-item mr-2"
+ >
+ <Version />
+ </div>
+ </div>
</nav>
<main
className="Layout__container"
diff --git a/kafka-ui-react-app/src/lib/constants.ts b/kafka-ui-react-app/src/lib/constants.ts
index 76de060f45f..f49da1ba0da 100644
--- a/kafka-ui-react-app/src/lib/constants.ts
+++ b/kafka-ui-react-app/src/lib/constants.ts
@@ -43,3 +43,7 @@ export const MILLISECONDS_IN_SECOND = 1_000;
export const BYTES_IN_GB = 1_073_741_824;
export const PER_PAGE = 25;
+
+export const GIT_REPO_LINK = 'https://github.com/provectus/kafka-ui';
+export const GIT_TAG = process.env.REACT_APP_TAG;
+export const GIT_COMMIT = process.env.REACT_APP_COMMIT;
| null | val | train | 2021-04-05T08:42:58 | "2021-03-25T09:36:19Z" | RustamGimadiev | train |
provectus/kafka-ui/121_360 | provectus/kafka-ui | provectus/kafka-ui/121 | provectus/kafka-ui/360 | [
"connected"
] | dbadff8f2e2e9ce11af0f658be5d96e6c05caa9b | 7bfae45162c623fda80968ef3d346160b5797321 | [
"Add consumer groups list details per topic level"
] | [] | "2021-04-07T12:32:56Z" | [
"type/enhancement",
"scope/frontend"
] | Topic Details: Display consumers | Add ability to view list of consumers for topic. | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
index cd5336cf2b5..ba52586ee09 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
@@ -3,6 +3,7 @@
import com.provectus.kafka.ui.api.ConsumerGroupsApi;
import com.provectus.kafka.ui.model.ConsumerGroup;
import com.provectus.kafka.ui.model.ConsumerGroupDetails;
+import com.provectus.kafka.ui.model.TopicConsumerGroups;
import com.provectus.kafka.ui.service.ClusterService;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
@@ -34,4 +35,11 @@ public Mono<ResponseEntity<Flux<ConsumerGroup>>> getConsumerGroups(String cluste
.map(ResponseEntity::ok)
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
}
+
+ @Override
+ public Mono<ResponseEntity<TopicConsumerGroups>> getTopicConsumerGroups(
+ String clusterName, String topicName, ServerWebExchange exchange) {
+ return clusterService.getTopicConsumerGroupDetail(clusterName, topicName)
+ .map(ResponseEntity::ok);
+ }
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
index 6f07f070ce0..70743d5f6f6 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
@@ -15,33 +15,28 @@
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.Topic;
import com.provectus.kafka.ui.model.TopicConfig;
+import com.provectus.kafka.ui.model.TopicConsumerGroups;
import com.provectus.kafka.ui.model.TopicCreation;
import com.provectus.kafka.ui.model.TopicDetails;
import com.provectus.kafka.ui.model.TopicMessage;
import com.provectus.kafka.ui.model.TopicUpdate;
import com.provectus.kafka.ui.model.TopicsResponse;
import com.provectus.kafka.ui.util.ClusterUtil;
-import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
-import java.util.Properties;
-import java.util.UUID;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
+import reactor.util.function.Tuples;
@Service
@RequiredArgsConstructor
@@ -142,42 +137,29 @@ public Mono<ConsumerGroupDetails> getConsumerGroupDetail(String clusterName,
return kafkaService.getOrCreateAdminClient(cluster).map(ac ->
ac.getAdminClient().describeConsumerGroups(Collections.singletonList(consumerGroupId)).all()
).flatMap(groups ->
- groupMetadata(cluster, consumerGroupId)
+ kafkaService.groupMetadata(cluster, consumerGroupId)
.flatMap(offsets -> {
Map<TopicPartition, Long> endOffsets =
- topicPartitionsEndOffsets(cluster, offsets.keySet());
- return ClusterUtil.toMono(groups).map(s -> s.get(consumerGroupId).members().stream()
- .flatMap(c -> Stream.of(ClusterUtil
- .convertToConsumerTopicPartitionDetails(c, offsets, endOffsets)))
- .collect(Collectors.toList()).stream()
- .flatMap(t -> t.stream().flatMap(Stream::of)).collect(Collectors.toList()));
- })
- )
- .map(c -> new ConsumerGroupDetails().consumers(c).consumerGroupId(consumerGroupId));
-
- }
-
- public Mono<Map<TopicPartition, OffsetAndMetadata>> groupMetadata(KafkaCluster cluster,
- String consumerGroupId) {
- return
- kafkaService.getOrCreateAdminClient(cluster)
- .map(ac -> ac.getAdminClient().listConsumerGroupOffsets(consumerGroupId)
- .partitionsToOffsetAndMetadata())
- .flatMap(ClusterUtil::toMono);
- }
-
- public Map<TopicPartition, Long> topicPartitionsEndOffsets(
- KafkaCluster cluster, Collection<TopicPartition> topicPartitions) {
- Properties properties = new Properties();
- properties.putAll(cluster.getProperties());
- properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
- properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
- properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
- properties.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
-
- try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
- return consumer.endOffsets(topicPartitions);
- }
+ kafkaService.topicPartitionsEndOffsets(cluster, offsets.keySet());
+ return ClusterUtil.toMono(groups).map(s ->
+ Tuples.of(
+ s.get(consumerGroupId),
+ s.get(consumerGroupId).members().stream()
+ .flatMap(c ->
+ Stream.of(
+ ClusterUtil.convertToConsumerTopicPartitionDetails(
+ c, offsets, endOffsets, consumerGroupId
+ )
+ )
+ )
+ .collect(Collectors.toList()).stream()
+ .flatMap(t ->
+ t.stream().flatMap(Stream::of)
+ ).collect(Collectors.toList())
+ )
+ );
+ }).map(c -> ClusterUtil.convertToConsumerGroupDetails(c.getT1(), c.getT2()))
+ );
}
public Mono<List<ConsumerGroup>> getConsumerGroups(String clusterName) {
@@ -186,6 +168,13 @@ public Mono<List<ConsumerGroup>> getConsumerGroups(String clusterName) {
.flatMap(kafkaService::getConsumerGroups);
}
+ public Mono<TopicConsumerGroups> getTopicConsumerGroupDetail(
+ String clusterName, String topicName) {
+ return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
+ .switchIfEmpty(Mono.error(ClusterNotFoundException::new))
+ .flatMap(c -> kafkaService.getTopicConsumerGroups(c, topicName));
+ }
+
public Flux<Broker> getBrokers(String clusterName) {
return kafkaService
.getOrCreateAdminClient(clustersStorage.getClusterByName(clusterName).orElseThrow())
@@ -251,4 +240,6 @@ public Mono<Void> deleteTopicMessages(String clusterName, String topicName,
return consumingService.offsetsForDeletion(cluster, topicName, partitions)
.flatMap(offsets -> kafkaService.deleteTopicMessages(cluster, offsets));
}
+
+
}
\ No newline at end of file
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
index 4ceb965d412..259351ef029 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
@@ -12,6 +12,7 @@
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.Metric;
import com.provectus.kafka.ui.model.ServerStatus;
+import com.provectus.kafka.ui.model.TopicConsumerGroups;
import com.provectus.kafka.ui.model.TopicCreation;
import com.provectus.kafka.ui.model.TopicUpdate;
import com.provectus.kafka.ui.util.ClusterUtil;
@@ -38,12 +39,14 @@
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
+import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.clients.admin.ConsumerGroupListing;
import org.apache.kafka.clients.admin.ListTopicsOptions;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.config.ConfigResource;
@@ -296,15 +299,71 @@ private Mono<Map<String, List<InternalTopicConfig>>> loadTopicsConfig(
);
}
+ public Mono<Collection<ConsumerGroupDescription>> getConsumerGroupsInternal(
+ KafkaCluster cluster) {
+ return getOrCreateAdminClient(cluster).flatMap(ac ->
+ ClusterUtil.toMono(ac.getAdminClient().listConsumerGroups().all())
+ .flatMap(s ->
+ ClusterUtil.toMono(
+ ac.getAdminClient().describeConsumerGroups(
+ s.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toList())
+ ).all()
+ ).map(Map::values)
+ )
+ );
+ }
+
public Mono<List<ConsumerGroup>> getConsumerGroups(KafkaCluster cluster) {
- return getOrCreateAdminClient(cluster)
- .flatMap(ac -> ClusterUtil.toMono(ac.getAdminClient().listConsumerGroups().all())
- .flatMap(s -> ClusterUtil.toMono(ac.getAdminClient()
- .describeConsumerGroups(
- s.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toList()))
- .all()))
- .map(s -> s.values().stream()
- .map(ClusterUtil::convertToConsumerGroup).collect(Collectors.toList())));
+ return getConsumerGroupsInternal(cluster)
+ .map(c -> c.stream().map(ClusterUtil::convertToConsumerGroup).collect(Collectors.toList()));
+ }
+
+ public Mono<TopicConsumerGroups> getTopicConsumerGroups(KafkaCluster cluster, String topic) {
+ final Map<TopicPartition, Long> endOffsets = topicEndOffsets(cluster, topic);
+
+ return getConsumerGroupsInternal(cluster)
+ .flatMapIterable(c ->
+ c.stream()
+ .map(d -> ClusterUtil.filterConsumerGroupTopic(d, topic))
+ .filter(Optional::isPresent)
+ .map(Optional::get)
+ .map(d ->
+ groupMetadata(cluster, d.groupId())
+ .flatMapIterable(meta ->
+ d.members().stream().flatMap(m ->
+ ClusterUtil.convertToConsumerTopicPartitionDetails(
+ m, meta, endOffsets, d.groupId()
+ ).stream()
+ ).collect(Collectors.toList())
+ )
+ ).collect(Collectors.toList())
+ ).flatMap(f -> f).collectList().map(l -> new TopicConsumerGroups().consumers(l));
+ }
+
+ public Mono<Map<TopicPartition, OffsetAndMetadata>> groupMetadata(KafkaCluster cluster,
+ String consumerGroupId) {
+ return getOrCreateAdminClient(cluster).map(ac ->
+ ac.getAdminClient()
+ .listConsumerGroupOffsets(consumerGroupId)
+ .partitionsToOffsetAndMetadata()
+ ).flatMap(ClusterUtil::toMono);
+ }
+
+ public Map<TopicPartition, Long> topicEndOffsets(
+ KafkaCluster cluster, String topic) {
+ try (KafkaConsumer<Bytes, Bytes> consumer = createConsumer(cluster)) {
+ final List<TopicPartition> topicPartitions = consumer.partitionsFor(topic).stream()
+ .map(i -> new TopicPartition(i.topic(), i.partition()))
+ .collect(Collectors.toList());
+ return consumer.endOffsets(topicPartitions);
+ }
+ }
+
+ public Map<TopicPartition, Long> topicPartitionsEndOffsets(
+ KafkaCluster cluster, Collection<TopicPartition> topicPartitions) {
+ try (KafkaConsumer<Bytes, Bytes> consumer = createConsumer(cluster)) {
+ return consumer.endOffsets(topicPartitions);
+ }
}
public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster) {
@@ -571,4 +630,6 @@ public Mono<Void> deleteTopicMessages(KafkaCluster cluster, Map<TopicPartition,
return getOrCreateAdminClient(cluster).map(ExtendedAdminClient::getAdminClient)
.map(ac -> ac.deleteRecords(records)).then();
}
+
+
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java
index 3bad2dcea4d..7abd4b64dde 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java
@@ -5,6 +5,7 @@
import com.provectus.kafka.ui.deserialization.RecordDeserializer;
import com.provectus.kafka.ui.model.ConsumerGroup;
+import com.provectus.kafka.ui.model.ConsumerGroupDetails;
import com.provectus.kafka.ui.model.ConsumerTopicPartitionDetail;
import com.provectus.kafka.ui.model.ExtendedAdminClient;
import com.provectus.kafka.ui.model.InternalPartition;
@@ -30,6 +31,7 @@
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
+import org.apache.kafka.clients.admin.MemberAssignment;
import org.apache.kafka.clients.admin.MemberDescription;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.consumer.ConsumerRecord;
@@ -77,20 +79,40 @@ public static ConsumerGroup convertToConsumerGroup(ConsumerGroupDescription c) {
.flatMap(m -> m.assignment().topicPartitions().stream().flatMap(t -> Stream.of(t.topic())))
.collect(Collectors.toSet()).size();
consumerGroup.setNumTopics(numTopics);
+ consumerGroup.setSimple(c.isSimpleConsumerGroup());
+ Optional.ofNullable(c.state())
+ .ifPresent(s -> consumerGroup.setState(s.name()));
+ Optional.ofNullable(c.coordinator())
+ .ifPresent(coord -> consumerGroup.setCoordintor(coord.host()));
+ consumerGroup.setPartitionAssignor(c.partitionAssignor());
return consumerGroup;
}
+ public static ConsumerGroupDetails convertToConsumerGroupDetails(
+ ConsumerGroupDescription desc, List<ConsumerTopicPartitionDetail> consumers
+ ) {
+ return new ConsumerGroupDetails()
+ .consumers(consumers)
+ .consumerGroupId(desc.groupId())
+ .simple(desc.isSimpleConsumerGroup())
+ .coordintor(Optional.ofNullable(desc.coordinator()).map(Node::host).orElse(""))
+ .state(Optional.ofNullable(desc.state()).map(Enum::name).orElse(""))
+ .partitionAssignor(desc.partitionAssignor());
+ }
+
public static List<ConsumerTopicPartitionDetail> convertToConsumerTopicPartitionDetails(
MemberDescription consumer,
Map<TopicPartition, OffsetAndMetadata> groupOffsets,
- Map<TopicPartition, Long> endOffsets
+ Map<TopicPartition, Long> endOffsets,
+ String groupId
) {
return consumer.assignment().topicPartitions().stream()
.map(tp -> {
- Long currentOffset = Optional.ofNullable(
- groupOffsets.get(tp)).map(o -> o.offset()).orElse(0L);
- Long endOffset = Optional.ofNullable(endOffsets.get(tp)).orElse(0L);
+ long currentOffset = Optional.ofNullable(groupOffsets.get(tp))
+ .map(OffsetAndMetadata::offset).orElse(0L);
+ long endOffset = Optional.ofNullable(endOffsets.get(tp)).orElse(0L);
ConsumerTopicPartitionDetail cd = new ConsumerTopicPartitionDetail();
+ cd.setGroupId(groupId);
cd.setConsumerId(consumer.consumerId());
cd.setHost(consumer.host());
cd.setTopic(tp.topic());
@@ -250,4 +272,42 @@ public static <T, R> Map<T, R> toSingleMap(Stream<Map<T, R>> streamOfMaps) {
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))).orElseThrow();
}
+ public static Optional<ConsumerGroupDescription> filterConsumerGroupTopic(
+ ConsumerGroupDescription description, String topic) {
+ final List<MemberDescription> members = description.members().stream()
+ .map(m -> filterConsumerMemberTopic(m, topic))
+ .filter(m -> !m.assignment().topicPartitions().isEmpty())
+ .collect(Collectors.toList());
+
+ if (!members.isEmpty()) {
+ return Optional.of(
+ new ConsumerGroupDescription(
+ description.groupId(),
+ description.isSimpleConsumerGroup(),
+ members,
+ description.partitionAssignor(),
+ description.state(),
+ description.coordinator()
+ )
+ );
+ } else {
+ return Optional.empty();
+ }
+ }
+
+ public static MemberDescription filterConsumerMemberTopic(
+ MemberDescription description, String topic) {
+ final Set<TopicPartition> topicPartitions = description.assignment().topicPartitions()
+ .stream().filter(tp -> tp.topic().equals(topic))
+ .collect(Collectors.toSet());
+ MemberAssignment assignment = new MemberAssignment(topicPartitions);
+ return new MemberDescription(
+ description.consumerId(),
+ description.groupInstanceId(),
+ description.clientId(),
+ description.host(),
+ assignment
+ );
+ }
+
}
diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
index c1531a62aed..86bd3cd0ece 100644
--- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
+++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
@@ -346,6 +346,31 @@ paths:
404:
description: Not found
+ /api/clusters/{clusterName}/topics/{topicName}/consumergroups:
+ get:
+ tags:
+ - Consumer Groups
+ summary: get Consumer Groups By Topics
+ operationId: getTopicConsumerGroups
+ parameters:
+ - name: clusterName
+ in: path
+ required: true
+ schema:
+ type: string
+ - name: topicName
+ in: path
+ required: true
+ schema:
+ type: string
+ responses:
+ 200:
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/TopicConsumerGroups'
+
/api/clusters/{clusterName}/consumer-groups/{id}:
get:
tags:
@@ -1330,6 +1355,14 @@ components:
type: integer
numTopics:
type: integer
+ simple:
+ type: boolean
+ partitionAssignor:
+ type: string
+ state:
+ type: string
+ coordintor:
+ type: string
required:
- clusterId
- consumerGroupId
@@ -1397,6 +1430,8 @@ components:
ConsumerTopicPartitionDetail:
type: object
properties:
+ groupId:
+ type: string
consumerId:
type: string
topic:
@@ -1416,12 +1451,28 @@ components:
format: int64
required:
- consumerId
-
+
+ TopicConsumerGroups:
+ type: object
+ properties:
+ consumers:
+ type: array
+ items:
+ $ref: '#/components/schemas/ConsumerTopicPartitionDetail'
+
ConsumerGroupDetails:
type: object
properties:
consumerGroupId:
type: string
+ simple:
+ type: boolean
+ partitionAssignor:
+ type: string
+ state:
+ type: string
+ coordintor:
+ type: string
consumers:
type: array
items:
| null | train | train | 2021-04-07T12:21:47 | "2020-11-24T11:11:13Z" | soffest | train |
provectus/kafka-ui/356_361 | provectus/kafka-ui | provectus/kafka-ui/356 | provectus/kafka-ui/361 | [
"keyword_pr_to_issue",
"timestamp(timedelta=0.0, similarity=0.9390184601428092)"
] | dbadff8f2e2e9ce11af0f658be5d96e6c05caa9b | eef45fc6ab307dc5d91f9234e063cc9f21936e5e | [] | [
"You already have this status enum, please just share it",
"Please use camel case for enums",
"Method looks overcomplicated, could we split it?",
"Could we pass number of failed tasks instead? I think it will be more useful ",
"Could you please move it into mapper?",
"Fixed, thanks",
"Fixed, thanks",
"Fixed, thanks",
"Fixed, thanks",
"Fixed, thanks"
] | "2021-04-07T13:23:11Z" | [
"scope/backend"
] | Add endpoint to fetch all cluster connectors | /clusters/:clusterName/connectors
Connector:
- connect: string
- name: string
- class/plugin
- type: source | sync
- topics: [topicName]
- status: failed | running?
- tasks_count: int
- hasFailedTasks: bool | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/KafkaConnectMapper.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/KafkaConnectMapper.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml"
] | [
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConnectServiceTests.java"
] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java
index 06136192a4f..c6f4b61b7e0 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java
@@ -6,6 +6,7 @@
import com.provectus.kafka.ui.model.ConnectorAction;
import com.provectus.kafka.ui.model.ConnectorPlugin;
import com.provectus.kafka.ui.model.ConnectorPluginConfigValidationResponse;
+import com.provectus.kafka.ui.model.FullConnectorInfo;
import com.provectus.kafka.ui.model.NewConnector;
import com.provectus.kafka.ui.model.Task;
import com.provectus.kafka.ui.service.KafkaConnectService;
@@ -62,6 +63,14 @@ public Mono<ResponseEntity<Void>> deleteConnector(String clusterName, String con
.map(ResponseEntity::ok);
}
+ @Override
+ public Mono<ResponseEntity<Flux<FullConnectorInfo>>> getAllConnectors(
+ String clusterName,
+ ServerWebExchange exchange
+ ) {
+ return Mono.just(ResponseEntity.ok(kafkaConnectService.getAllConnectors(clusterName)));
+ }
+
@Override
public Mono<ResponseEntity<Map<String, Object>>> getConnectorConfig(String clusterName,
String connectName,
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/KafkaConnectMapper.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/KafkaConnectMapper.java
index 8c20b3866de..5d18463d288 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/KafkaConnectMapper.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/KafkaConnectMapper.java
@@ -7,8 +7,15 @@
import com.provectus.kafka.ui.model.ConnectorPlugin;
import com.provectus.kafka.ui.model.ConnectorPluginConfigValidationResponse;
import com.provectus.kafka.ui.model.ConnectorStatus;
+import com.provectus.kafka.ui.model.ConnectorTaskStatus;
+import com.provectus.kafka.ui.model.FullConnectorInfo;
import com.provectus.kafka.ui.model.Task;
import com.provectus.kafka.ui.model.TaskStatus;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+import org.apache.commons.lang3.tuple.Triple;
import org.mapstruct.Mapper;
@Mapper(componentModel = "spring")
@@ -28,4 +35,33 @@ public interface KafkaConnectMapper {
ConnectorPluginConfigValidationResponse fromClient(
com.provectus.kafka.ui.connect.model.ConnectorPluginConfigValidationResponse
connectorPluginConfigValidationResponse);
+
+ default FullConnectorInfo fullConnectorInfoFromTuple(Triple<Connector, Map<String, Object>,
+ List<Task>> triple) {
+ Function<Map<String, Object>, List<String>> getTopicsFromConfig = config -> {
+ var topic = config.get("topic");
+ if (topic != null) {
+ return List.of((String) topic);
+ }
+ return Arrays.asList(((String) config.get("topics")).split(","));
+ };
+
+ return new FullConnectorInfo()
+ .connect(triple.getLeft().getConnect())
+ .name(triple.getLeft().getName())
+ .connectorClass((String) triple.getMiddle().get("connector.class"))
+ .type(triple.getLeft().getType())
+ .topics(getTopicsFromConfig.apply(triple.getMiddle()))
+ .status(
+ triple.getLeft().getStatus().getState()
+ )
+ .tasksCount(triple.getRight().size())
+ .failedTasksCount((int) triple.getRight().stream()
+ .map(Task::getStatus)
+ .map(TaskStatus::getState)
+ .filter(ConnectorTaskStatus.FAILED::equals)
+ .count());
+ }
+
+ ;
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java
index 85e163f1747..cb381edb468 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java
@@ -1,5 +1,7 @@
package com.provectus.kafka.ui.service;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.client.KafkaConnectClients;
import com.provectus.kafka.ui.exception.ClusterNotFoundException;
import com.provectus.kafka.ui.exception.ConnectNotFoundException;
@@ -10,16 +12,21 @@
import com.provectus.kafka.ui.model.ConnectorAction;
import com.provectus.kafka.ui.model.ConnectorPlugin;
import com.provectus.kafka.ui.model.ConnectorPluginConfigValidationResponse;
+import com.provectus.kafka.ui.model.FullConnectorInfo;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.KafkaConnectCluster;
import com.provectus.kafka.ui.model.NewConnector;
import com.provectus.kafka.ui.model.Task;
import java.util.Collection;
+import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
+import lombok.SneakyThrows;
import lombok.extern.log4j.Log4j2;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.commons.lang3.tuple.Triple;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
@@ -31,6 +38,7 @@ public class KafkaConnectService {
private final ClustersStorage clustersStorage;
private final ClusterMapper clusterMapper;
private final KafkaConnectMapper kafkaConnectMapper;
+ private final ObjectMapper objectMapper;
public Mono<Flux<Connect>> getConnects(String clusterName) {
return Mono.just(
@@ -43,6 +51,38 @@ public Mono<Flux<Connect>> getConnects(String clusterName) {
);
}
+ public Flux<FullConnectorInfo> getAllConnectors(String clusterName) {
+ return getConnects(clusterName)
+ .flatMapMany(Function.identity())
+ .flatMap(connect -> getConnectorNames(clusterName, connect))
+ .flatMap(pair -> getConnector(clusterName, pair.getLeft(), pair.getRight()))
+ .flatMap(connector ->
+ getConnectorConfig(clusterName, connector.getConnect(), connector.getName())
+ .map(config -> Pair.of(connector, config))
+ )
+ .flatMap(pair ->
+ getConnectorTasks(clusterName, pair.getLeft().getConnect(), pair.getLeft().getName())
+ .collectList()
+ .map(tasks -> Triple.of(pair.getLeft(), pair.getRight(), tasks))
+ )
+ .map(kafkaConnectMapper::fullConnectorInfoFromTuple);
+ }
+
+ private Flux<Pair<String, String>> getConnectorNames(String clusterName, Connect connect) {
+ return getConnectors(clusterName, connect.getName())
+ .collectList().map(e -> e.get(0))
+ // for some reason `getConnectors` method returns the response as a single string
+ .map(this::parseToList)
+ .flatMapMany(Flux::fromIterable)
+ .map(connector -> Pair.of(connect.getName(), connector));
+ }
+
+ @SneakyThrows
+ private List<String> parseToList(String json) {
+ return objectMapper.readValue(json, new TypeReference<>() {
+ });
+ }
+
public Flux<String> getConnectors(String clusterName, String connectName) {
return getConnectAddress(clusterName, connectName)
.flatMapMany(connect ->
@@ -76,6 +116,7 @@ public Mono<Connector> getConnector(String clusterName, String connectName,
var status = connectorStatus.getConnector();
connector.status(kafkaConnectMapper.fromClient(status));
return (Connector) new Connector()
+ .connect(connectName)
.status(kafkaConnectMapper.fromClient(status))
.type(connector.getType())
.tasks(connector.getTasks())
diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
index c1531a62aed..1f102da2065 100644
--- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
+++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
@@ -720,11 +720,33 @@ paths:
items:
$ref: '#/components/schemas/Connect'
+ /api/clusters/{clusterName}/connectors:
+ get:
+ tags:
+ - Kafka Connect
+ summary: get all kafka connectors
+ operationId: getAllConnectors
+ parameters:
+ - name: clusterName
+ in: path
+ required: true
+ schema:
+ type: string
+ responses:
+ 200:
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ $ref: '#/components/schemas/FullConnectorInfo'
+
/api/clusters/{clusterName}/connects/{connectName}/connectors:
get:
tags:
- Kafka Connect
- summary: get all connectors from Kafka Connect service
+ summary: get connectors for provided kafka connect instance
operationId: getConnectors
parameters:
- name: clusterName
@@ -1565,13 +1587,17 @@ components:
items:
$ref: '#/components/schemas/TaskId'
type:
- type: string
- enum:
- - source
- - sink
+ $ref: '#/components/schemas/ConnectorType'
status:
$ref: '#/components/schemas/ConnectorStatus'
+ connect:
+ type: string
+ ConnectorType:
+ type: string
+ enum:
+ - SOURCE
+ - SINK
TaskStatus:
type: object
@@ -1579,12 +1605,7 @@ components:
id:
type: integer
state:
- type: string
- enum:
- - RUNNING
- - FAILED
- - PAUSED
- - UNASSIGNED
+ $ref: '#/components/schemas/ConnectorTaskStatus'
worker_id:
type: string
trace:
@@ -1594,15 +1615,18 @@ components:
type: object
properties:
state:
- type: string
- enum:
- - RUNNING
- - FAILED
- - PAUSED
- - UNASSIGNED
+ $ref: '#/components/schemas/ConnectorTaskStatus'
worker_id:
type: string
+ ConnectorTaskStatus:
+ type: string
+ enum:
+ - RUNNING
+ - FAILED
+ - PAUSED
+ - UNASSIGNED
+
ConnectorAction:
type: string
enum:
@@ -1709,3 +1733,25 @@ components:
type: array
items:
$ref: '#/components/schemas/ConnectorPluginConfig'
+
+ FullConnectorInfo:
+ type: object
+ properties:
+ connect:
+ type: string
+ name:
+ type: string
+ connector_class:
+ type: string
+ type:
+ $ref: '#/components/schemas/ConnectorType'
+ topics:
+ type: array
+ items:
+ type: string
+ status:
+ $ref: '#/components/schemas/ConnectorTaskStatus'
+ tasks_count:
+ type: integer
+ failed_tasks_count:
+ type: integer
| diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConnectServiceTests.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConnectServiceTests.java
index 1dd47f6c07f..444ca10c7c1 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConnectServiceTests.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConnectServiceTests.java
@@ -9,6 +9,8 @@
import com.provectus.kafka.ui.model.ConnectorPluginConfigValidationResponse;
import com.provectus.kafka.ui.model.ConnectorPluginConfigValue;
import com.provectus.kafka.ui.model.ConnectorStatus;
+import com.provectus.kafka.ui.model.ConnectorTaskStatus;
+import com.provectus.kafka.ui.model.ConnectorType;
import com.provectus.kafka.ui.model.NewConnector;
import com.provectus.kafka.ui.model.TaskId;
import java.util.List;
@@ -100,13 +102,14 @@ public void shouldReturnNotFoundForNonExistingConnectName() {
@Test
public void shouldRetrieveConnector() {
Connector expected = (Connector) new Connector()
+ .connect(connectName)
.status(new ConnectorStatus()
- .state(ConnectorStatus.StateEnum.RUNNING)
+ .state(ConnectorTaskStatus.RUNNING)
.workerId("kafka-connect:8083"))
.tasks(List.of(new TaskId()
.connector(connectorName)
.task(0)))
- .type(Connector.TypeEnum.SINK)
+ .type(ConnectorType.SINK)
.name(connectorName)
.config(config);
webTestClient.get()
| test | train | 2021-04-07T12:21:47 | "2021-04-07T10:18:02Z" | workshur | train |
provectus/kafka-ui/408_430 | provectus/kafka-ui | provectus/kafka-ui/408 | provectus/kafka-ui/430 | [
"connected"
] | 5ae73948a3c37eef1c51528b52a8e3c0ccd5f298 | a427a55c30d1849a00b6f91453979bc1a2eaf01f | [] | [] | "2021-05-07T13:56:07Z" | [
"type/enhancement",
"scope/backend"
] | _schemas topic show as external | Conventionally, all internal topics start with an underscore ("_"); that is the only difference between external and internal topics. If this UI wants to provide different functionality based on a topic being internal or external, then it should follow this convention.
However, right now only `__consumer_offsets` shows up as an internal topic.
If it were to follow this convention, then `_schemas` should also be considered an internal topic. | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java
index 7abd4b64dde..889079eaefa 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java
@@ -139,7 +139,9 @@ public static InternalTopicConfig mapToInternalTopicConfig(ConfigEntry configEnt
public static InternalTopic mapToInternalTopic(TopicDescription topicDescription) {
var topic = InternalTopic.builder();
- topic.internal(topicDescription.isInternal());
+ topic.internal(
+ topicDescription.isInternal() || topicDescription.name().startsWith("_")
+ );
topic.name(topicDescription.name());
List<InternalPartition> partitions = topicDescription.partitions().stream().map(
| null | train | train | 2021-05-07T15:39:24 | "2021-04-30T02:45:58Z" | akamensky | train |
provectus/kafka-ui/377_477 | provectus/kafka-ui | provectus/kafka-ui/377 | provectus/kafka-ui/477 | [
"timestamp(timedelta=0.0, similarity=0.9392999660925465)",
"connected"
] | f3ca164411ee51f367fb43ecc78fb6aa6665cde7 | 1377e02fcb27fa96b03900afbc0f8d15792aaa5c | [] | [
"\r\n```suggestion\r\n messageKey={key}\r\n```",
"since the key column is sometimes empty, I suggest moving it to the second position. After timestamp. cc: @germanosin ",
"I'm not sure we need to set width for key column",
"messageKey",
"done",
"done",
"done",
"done"
] | "2021-05-19T15:11:31Z" | [
"scope/frontend"
] | The key of the message is not visible / accessible | It would be nice to see the key of a message on the messages list. | [
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessageItem.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessagesTable.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/MessageItem.spec.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/__snapshots__/MessageItem.spec.tsx.snap",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/__snapshots__/MessagesTable.spec.tsx.snap",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/fixtures.ts"
] | [
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessageItem.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessagesTable.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/MessageItem.spec.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/__snapshots__/MessageItem.spec.tsx.snap",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/__snapshots__/MessagesTable.spec.tsx.snap",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/fixtures.ts"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessageItem.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessageItem.tsx
index ba92a70f847..29b63fe2346 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessageItem.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessageItem.tsx
@@ -12,6 +12,7 @@ export interface MessageItemProp {
offset: TopicMessage['offset'];
timestamp: TopicMessage['timestamp'];
content?: TopicMessage['content'];
+ messageKey?: TopicMessage['key'];
}
const MessageItem: React.FC<MessageItemProp> = ({
@@ -19,6 +20,7 @@ const MessageItem: React.FC<MessageItemProp> = ({
offset,
timestamp,
content,
+ messageKey,
}) => {
const { copyToClipboard, saveFile } = useDataSaver(
'topic-message',
@@ -27,6 +29,7 @@ const MessageItem: React.FC<MessageItemProp> = ({
return (
<tr>
<td style={{ width: 200 }}>{format(timestamp, 'yyyy-MM-dd HH:mm:ss')}</td>
+ <td>{messageKey}</td>
<td style={{ width: 150 }}>{offset}</td>
<td style={{ width: 100 }}>{partition}</td>
<td style={{ wordBreak: 'break-word' }}>
diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessagesTable.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessagesTable.tsx
index 7152d7eab33..3a487a1ca13 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessagesTable.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/MessagesTable.tsx
@@ -20,6 +20,7 @@ const MessagesTable: React.FC<MessagesTableProp> = ({ messages, onNext }) => {
<thead>
<tr>
<th>Timestamp</th>
+ <th>Key</th>
<th>Offset</th>
<th>Partition</th>
<th>Content</th>
@@ -28,13 +29,14 @@ const MessagesTable: React.FC<MessagesTableProp> = ({ messages, onNext }) => {
</thead>
<tbody>
{messages.map(
- ({ partition, offset, timestamp, content }: TopicMessage) => (
+ ({ partition, offset, timestamp, content, key }: TopicMessage) => (
<MessageItem
key={`message-${timestamp.getTime()}-${offset}`}
partition={partition}
offset={offset}
timestamp={timestamp}
content={content}
+ messageKey={key}
/>
)
)}
diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/MessageItem.spec.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/MessageItem.spec.tsx
index 25648dab07d..b723f7517a5 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/MessageItem.spec.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/MessageItem.spec.tsx
@@ -14,7 +14,7 @@ describe('MessageItem', () => {
const wrapper = shallow(<MessageItem {...messages[0]} />);
expect(wrapper.find('tr').length).toEqual(1);
- expect(wrapper.find('td').length).toEqual(5);
+ expect(wrapper.find('td').length).toEqual(6);
expect(wrapper.find('MessageContent').length).toEqual(1);
});
diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/__snapshots__/MessageItem.spec.tsx.snap b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/__snapshots__/MessageItem.spec.tsx.snap
index 2e81720530a..c1da94e9f80 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/__snapshots__/MessageItem.spec.tsx.snap
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/__snapshots__/MessageItem.spec.tsx.snap
@@ -11,6 +11,7 @@ exports[`MessageItem when content is defined matches snapshot 1`] = `
>
mocked date
</td>
+ <td />
<td
style={
Object {
@@ -84,6 +85,7 @@ exports[`MessageItem when content is undefined matches snapshot 1`] = `
>
mocked date
</td>
+ <td />
<td
style={
Object {
diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/__snapshots__/MessagesTable.spec.tsx.snap b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/__snapshots__/MessagesTable.spec.tsx.snap
index bbf1d6a9e86..6763c4528bb 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/__snapshots__/MessagesTable.spec.tsx.snap
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/__snapshots__/MessagesTable.spec.tsx.snap
@@ -10,6 +10,9 @@ exports[`MessagesTable when topic contains messages matches snapshot 1`] = `
<th>
Timestamp
</th>
+ <th>
+ Key
+ </th>
<th>
Offset
</th>
@@ -33,12 +36,14 @@ exports[`MessagesTable when topic contains messages matches snapshot 1`] = `
}
}
key="message-802310400000-2"
+ messageKey="1"
offset={2}
partition={1}
timestamp={1995-06-05T00:00:00.000Z}
/>
<MessageItem
key="message-1596585600000-20"
+ messageKey="1"
offset={20}
partition={2}
timestamp={2020-08-05T00:00:00.000Z}
diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/fixtures.ts b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/fixtures.ts
index 7a197be5a4a..cb24a054b69 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/fixtures.ts
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/__test__/fixtures.ts
@@ -9,12 +9,14 @@ export const messages: TopicMessage[] = [
foo: 'bar',
key: 'val',
},
+ key: '1',
},
{
partition: 2,
offset: 20,
timestamp: new Date(Date.UTC(2020, 7, 5)),
content: undefined,
+ key: '1',
},
];
| null | train | train | 2021-05-19T11:59:25 | "2021-04-14T09:50:12Z" | pawelkoston | train |
provectus/kafka-ui/481_494 | provectus/kafka-ui | provectus/kafka-ui/481 | provectus/kafka-ui/494 | [
"connected",
"timestamp(timedelta=0.0, similarity=0.9575203044687753)"
] | 1377e02fcb27fa96b03900afbc0f8d15792aaa5c | acdcf2eab687de9289c7080bac04d9cde9cab5f8 | [
"I just notice that it also appears on \"All topics\" screen, as well as \"Delete Topic\". \r\nLet me know if leaving that in this issue is ok or if I may create another one.\r\n\r\n"
] | [
"pls use context instead",
"useContext",
"done",
"done"
] | "2021-05-24T17:56:30Z" | [
"type/bug",
"scope/frontend"
] | Clear messages should not be present when in readonly mode | **Describe the bug**
I'm currently using the master branch image (Version:v0.0.11-SNAPSHOT(1377e02)) and I configured the readonly mode.
However, when I browse a topic, on the "Overview" tab I can see that I can clear messages from a partition.
I'm not sure that should be allowed
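As a sketch of what I would expect instead (the component and context names below are illustrative, not the project's actual code), the destructive action could simply not be rendered whenever a read-only flag is set for the cluster:

```tsx
// Sketch only: illustrative names, assuming a React context that exposes a read-only flag.
import React from 'react';

const ClusterContext = React.createContext({ isReadOnly: false });

const ClearMessagesAction: React.FC<{ onClear: () => void }> = ({ onClear }) => {
  const { isReadOnly } = React.useContext(ClusterContext);
  // In read-only mode the destructive action is not rendered at all.
  if (isReadOnly) {
    return null;
  }
  return <button onClick={onClear}>Clear Messages</button>;
};

export default ClearMessagesAction;
```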
**Set up**
`docker run -p 8080:8080 -e KAFKA_CLUSTERS_0_NAME=whatever -e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=servers:9092 -e KAFKA_CLUSTERS_0_READONLY=true -d provectuslabs/kafka-ui:master`
**Expected behavior**
When in readonly mode, I shouldn't be able to modify anything.
**Screenshots**

| [
"kafka-ui-react-app/src/components/Topics/List/ListItem.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx"
] | [
"kafka-ui-react-app/src/components/Topics/List/ListItem.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/List/ListItem.tsx b/kafka-ui-react-app/src/components/Topics/List/ListItem.tsx
index ca92c22f5a6..07074be71fa 100644
--- a/kafka-ui-react-app/src/components/Topics/List/ListItem.tsx
+++ b/kafka-ui-react-app/src/components/Topics/List/ListItem.tsx
@@ -9,6 +9,7 @@ import {
import DropdownItem from 'components/common/Dropdown/DropdownItem';
import Dropdown from 'components/common/Dropdown/Dropdown';
import ConfirmationModal from 'components/common/ConfirmationModal/ConfirmationModal';
+import ClusterContext from 'components/contexts/ClusterContext';
export interface ListItemProps {
topic: TopicWithDetailedInfo;
@@ -23,6 +24,8 @@ const ListItem: React.FC<ListItemProps> = ({
clusterName,
clearTopicMessages,
}) => {
+ const { isReadOnly } = React.useContext(ClusterContext);
+
const [isDeleteTopicConfirmationVisible, setDeleteTopicConfirmationVisible] =
React.useState(false);
@@ -65,7 +68,7 @@ const ListItem: React.FC<ListItemProps> = ({
</div>
</td>
<td className="topic-action-block">
- {!internal ? (
+ {!internal && !isReadOnly ? (
<>
<div className="has-text-right">
<Dropdown
diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx
index 52811ac353e..d9deb345dce 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx
@@ -5,6 +5,7 @@ import Dropdown from 'components/common/Dropdown/Dropdown';
import DropdownItem from 'components/common/Dropdown/DropdownItem';
import MetricsWrapper from 'components/common/Dashboard/MetricsWrapper';
import Indicator from 'components/common/Dashboard/Indicator';
+import ClusterContext from 'components/contexts/ClusterContext';
import BytesFormatted from 'components/common/BytesFormatted/BytesFormatted';
interface Props extends Topic, TopicDetails {
@@ -30,76 +31,82 @@ const Overview: React.FC<Props> = ({
clusterName,
topicName,
clearTopicMessages,
-}) => (
- <>
- <MetricsWrapper>
- <Indicator label="Partitions">{partitionCount}</Indicator>
- <Indicator label="Replication Factor">{replicationFactor}</Indicator>
- <Indicator label="URP" title="Under replicated partitions">
- {underReplicatedPartitions}
- </Indicator>
- <Indicator label="In sync replicas">
- {inSyncReplicas}
- <span className="subtitle has-text-weight-light">
- {' '}
- of
- {replicas}
- </span>
- </Indicator>
- <Indicator label="Type">
- <span className={`tag ${internal ? 'is-light' : 'is-primary'}`}>
- {internal ? 'Internal' : 'External'}
- </span>
- </Indicator>
- <Indicator label="Segment Size" title="">
- <BytesFormatted value={segmentSize} />
- </Indicator>
- <Indicator label="Segment count">{segmentCount}</Indicator>
- </MetricsWrapper>
- <div className="box">
- <table className="table is-striped is-fullwidth">
- <thead>
- <tr>
- <th>Partition ID</th>
- <th>Broker leader</th>
- <th>Min offset</th>
- <th>Max offset</th>
- <th> </th>
- </tr>
- </thead>
- <tbody>
- {partitions?.map(({ partition, leader, offsetMin, offsetMax }) => (
- <tr key={`partition-list-item-key-${partition}`}>
- <td>{partition}</td>
- <td>{leader}</td>
- <td>{offsetMin}</td>
- <td>{offsetMax}</td>
- <td className="has-text-right">
- {!internal ? (
- <Dropdown
- label={
- <span className="icon">
- <i className="fas fa-cog" />
- </span>
- }
- right
- >
- <DropdownItem
- onClick={() =>
- clearTopicMessages(clusterName, topicName, [partition])
+}) => {
+ const { isReadOnly } = React.useContext(ClusterContext);
+
+ return (
+ <>
+ <MetricsWrapper>
+ <Indicator label="Partitions">{partitionCount}</Indicator>
+ <Indicator label="Replication Factor">{replicationFactor}</Indicator>
+ <Indicator label="URP" title="Under replicated partitions">
+ {underReplicatedPartitions}
+ </Indicator>
+ <Indicator label="In sync replicas">
+ {inSyncReplicas}
+ <span className="subtitle has-text-weight-light">
+ {' '}
+ of
+ {replicas}
+ </span>
+ </Indicator>
+ <Indicator label="Type">
+ <span className={`tag ${internal ? 'is-light' : 'is-primary'}`}>
+ {internal ? 'Internal' : 'External'}
+ </span>
+ </Indicator>
+ <Indicator label="Segment Size" title="">
+ <BytesFormatted value={segmentSize} />
+ </Indicator>
+ <Indicator label="Segment count">{segmentCount}</Indicator>
+ </MetricsWrapper>
+ <div className="box">
+ <table className="table is-striped is-fullwidth">
+ <thead>
+ <tr>
+ <th>Partition ID</th>
+ <th>Broker leader</th>
+ <th>Min offset</th>
+ <th>Max offset</th>
+ <th> </th>
+ </tr>
+ </thead>
+ <tbody>
+ {partitions?.map(({ partition, leader, offsetMin, offsetMax }) => (
+ <tr key={`partition-list-item-key-${partition}`}>
+ <td>{partition}</td>
+ <td>{leader}</td>
+ <td>{offsetMin}</td>
+ <td>{offsetMax}</td>
+ <td className="has-text-right">
+ {!internal && !isReadOnly ? (
+ <Dropdown
+ label={
+ <span className="icon">
+ <i className="fas fa-cog" />
+ </span>
}
+ right
>
- <span className="has-text-danger">Clear Messages</span>
- </DropdownItem>
- </Dropdown>
- ) : null}
- </td>
- </tr>
- ))}
- </tbody>
- </table>
- </div>
- </>
-);
+ <DropdownItem
+ onClick={() =>
+ clearTopicMessages(clusterName, topicName, [
+ partition,
+ ])
+ }
+ >
+ <span className="has-text-danger">Clear Messages</span>
+ </DropdownItem>
+ </Dropdown>
+ ) : null}
+ </td>
+ </tr>
+ ))}
+ </tbody>
+ </table>
+ </div>
+ </>
+ );
+};
export default Overview;
| null | train | train | 2021-05-20T11:56:02 | "2021-05-21T07:47:08Z" | giom-l | train |
provectus/kafka-ui/503_504 | provectus/kafka-ui | provectus/kafka-ui/503 | provectus/kafka-ui/504 | [
"timestamp(timedelta=40.0, similarity=0.8853908123904989)",
"connected"
] | 30ceb98491702d982baaf9e88bd354ce4dc184ee | 721cf0e750d1f9d3108fa1e503cc72e466ec5df9 | [] | [] | "2021-05-30T16:19:46Z" | [
"type/bug",
"type/enhancement"
] | Kafka Connect: 500 error/NPE and not valid list of connector's topics when getting all connector info | **Describe the bug**
(A clear and concise description of what the bug is.)
* When requesting `GET /api/clusters/{clusterName}/connectors`, a 500 error code is returned due to a NullPointerException.
* After reviewing the source code, it appears that we derive the connector's topics incorrectly.
Currently we read the list of topics from the connector's configuration, which is not correct. We need to fetch the list of topics actually used by the connector [from this API](https://docs.confluent.io/platform/current/connect/references/restapi.html#get--connectors-(string-name)-topics).
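For illustration, a minimal sketch of reading that endpoint (the Connect URL and connector name are made-up examples, not taken from any real setup) could look like this; the response has the shape `{"<connector>": {"topics": [...]}}`:

```ts
// Sketch only: illustrative URL and connector name.
async function getConnectorTopics(connectUrl: string, connector: string): Promise<string[]> {
  const res = await fetch(`${connectUrl}/connectors/${connector}/topics`);
  if (!res.ok) {
    throw new Error(`Kafka Connect returned ${res.status}`);
  }
  // Expected response shape: { "<connector>": { "topics": ["topic-a", "topic-b"] } }
  const body = await res.json();
  return body[connector].topics;
}
```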
**Set up**
(How do you run the app?)
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Run any source connector, e.g. [Debezium MySQL](https://debezium.io/documentation/reference/connectors/mysql.html)
2. Go to Kafka Connect page in the app
**Expected behavior**
(A clear and concise description of what you expected to happen)
A list of topics should be those topics that are used by a connector.
**Screenshots**
(If applicable, add screenshots to help explain your problem)
**Additional context**
(Add any other context about the problem here) | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/KafkaConnectMapper.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/KafkaConnectMapper.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/connect/InternalConnectInfo.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/KafkaConnectMapper.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/KafkaConnectMapper.java
index b1bcd44d8c3..968b27286f7 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/KafkaConnectMapper.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/KafkaConnectMapper.java
@@ -11,11 +11,8 @@
import com.provectus.kafka.ui.model.FullConnectorInfo;
import com.provectus.kafka.ui.model.Task;
import com.provectus.kafka.ui.model.TaskStatus;
-import java.util.Arrays;
+import com.provectus.kafka.ui.model.connect.InternalConnectInfo;
import java.util.List;
-import java.util.Map;
-import java.util.function.Function;
-import org.apache.commons.lang3.tuple.Triple;
import org.mapstruct.Mapper;
@Mapper(componentModel = "spring")
@@ -36,32 +33,22 @@ ConnectorPluginConfigValidationResponse fromClient(
com.provectus.kafka.ui.connect.model.ConnectorPluginConfigValidationResponse
connectorPluginConfigValidationResponse);
- default FullConnectorInfo fullConnectorInfoFromTuple(Triple<Connector, Map<String, Object>,
- List<Task>> triple) {
- Function<Map<String, Object>, List<String>> getTopicsFromConfig = config -> {
- var topic = config.get("topic");
- if (topic != null) {
- return List.of((String) topic);
- }
- return Arrays.asList(((String) config.get("topics")).split(","));
- };
-
+ default FullConnectorInfo fullConnectorInfoFromTuple(InternalConnectInfo connectInfo) {
+ Connector connector = connectInfo.getConnector();
+ List<Task> tasks = connectInfo.getTasks();
+ int failedTasksCount = (int) tasks.stream()
+ .map(Task::getStatus)
+ .map(TaskStatus::getState)
+ .filter(ConnectorTaskStatus.FAILED::equals)
+ .count();
return new FullConnectorInfo()
- .connect(triple.getLeft().getConnect())
- .name(triple.getLeft().getName())
- .connectorClass((String) triple.getMiddle().get("connector.class"))
- .type(triple.getLeft().getType())
- .topics(getTopicsFromConfig.apply(triple.getMiddle()))
- .status(
- triple.getLeft().getStatus()
- )
- .tasksCount(triple.getRight().size())
- .failedTasksCount((int) triple.getRight().stream()
- .map(Task::getStatus)
- .map(TaskStatus::getState)
- .filter(ConnectorTaskStatus.FAILED::equals)
- .count());
+ .connect(connector.getConnect())
+ .name(connector.getName())
+ .connectorClass((String) connectInfo.getConfig().get("connector.class"))
+ .type(connector.getType())
+ .topics(connectInfo.getTopics())
+ .status(connector.getStatus())
+ .tasksCount(tasks.size())
+ .failedTasksCount(failedTasksCount);
}
-
- ;
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/connect/InternalConnectInfo.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/connect/InternalConnectInfo.java
new file mode 100644
index 00000000000..4c177c628d6
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/connect/InternalConnectInfo.java
@@ -0,0 +1,17 @@
+package com.provectus.kafka.ui.model.connect;
+
+import com.provectus.kafka.ui.model.Connector;
+import com.provectus.kafka.ui.model.Task;
+import java.util.List;
+import java.util.Map;
+import lombok.Builder;
+import lombok.Data;
+
+@Data
+@Builder(toBuilder = true)
+public class InternalConnectInfo {
+ private final Connector connector;
+ private final Map<String, Object> config;
+ private final List<Task> tasks;
+ private final List<String> topics;
+}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java
index cb381edb468..bea3dbb98b0 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java
@@ -3,6 +3,7 @@
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.client.KafkaConnectClients;
+import com.provectus.kafka.ui.connect.model.ConnectorTopics;
import com.provectus.kafka.ui.exception.ClusterNotFoundException;
import com.provectus.kafka.ui.exception.ConnectNotFoundException;
import com.provectus.kafka.ui.mapper.ClusterMapper;
@@ -17,6 +18,7 @@
import com.provectus.kafka.ui.model.KafkaConnectCluster;
import com.provectus.kafka.ui.model.NewConnector;
import com.provectus.kafka.ui.model.Task;
+import com.provectus.kafka.ui.model.connect.InternalConnectInfo;
import java.util.Collection;
import java.util.List;
import java.util.Map;
@@ -26,7 +28,6 @@
import lombok.SneakyThrows;
import lombok.extern.log4j.Log4j2;
import org.apache.commons.lang3.tuple.Pair;
-import org.apache.commons.lang3.tuple.Triple;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
@@ -58,16 +59,47 @@ public Flux<FullConnectorInfo> getAllConnectors(String clusterName) {
.flatMap(pair -> getConnector(clusterName, pair.getLeft(), pair.getRight()))
.flatMap(connector ->
getConnectorConfig(clusterName, connector.getConnect(), connector.getName())
- .map(config -> Pair.of(connector, config))
- )
- .flatMap(pair ->
- getConnectorTasks(clusterName, pair.getLeft().getConnect(), pair.getLeft().getName())
- .collectList()
- .map(tasks -> Triple.of(pair.getLeft(), pair.getRight(), tasks))
+ .map(config -> InternalConnectInfo.builder()
+ .connector(connector)
+ .config(config)
+ .build()
+ )
)
+ .flatMap(connectInfo -> {
+ Connector connector = connectInfo.getConnector();
+ return getConnectorTasks(clusterName, connector.getConnect(), connector.getName())
+ .collectList()
+ .map(tasks -> InternalConnectInfo.builder()
+ .connector(connector)
+ .config(connectInfo.getConfig())
+ .tasks(tasks)
+ .build()
+ );
+ })
+ .flatMap(connectInfo -> {
+ Connector connector = connectInfo.getConnector();
+ return getConnectorTopics(clusterName, connector.getConnect(), connector.getName())
+ .map(ct -> InternalConnectInfo.builder()
+ .connector(connector)
+ .config(connectInfo.getConfig())
+ .tasks(connectInfo.getTasks())
+ .topics(ct.getTopics())
+ .build()
+ );
+ })
.map(kafkaConnectMapper::fullConnectorInfoFromTuple);
}
+ private Mono<ConnectorTopics> getConnectorTopics(String clusterName, String connectClusterName,
+ String connectorName) {
+ return getConnectAddress(clusterName, connectClusterName)
+ .flatMap(connectUrl -> KafkaConnectClients
+ .withBaseUrl(connectUrl)
+ .getConnectorTopics(connectorName)
+ .map(result -> result.get(connectorName))
+ );
+ }
+
private Flux<Pair<String, String>> getConnectorNames(String clusterName, Connect connect) {
return getConnectors(clusterName, connect.getName())
.collectList().map(e -> e.get(0))
diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml
index 237da9abb20..e6e1b6ced6f 100644
--- a/kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml
+++ b/kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml
@@ -231,6 +231,28 @@ paths:
items:
$ref: '#/components/schemas/ConnectorTask'
+ /connectors/{connectorName}/topics:
+ get:
+ tags:
+ - KafkaConnectClient
+ summary: The set of topic names the connector has been using since its creation or since the last time its set of active topics was reset
+ operationId: getConnectorTopics
+ parameters:
+ - name: connectorName
+ in: path
+ required: true
+ schema:
+ type: string
+ responses:
+ 200:
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: object
+ additionalProperties:
+ $ref: '#/components/schemas/ConnectorTopics'
+
/connectors/{connectorName}/tasks/{taskId}/status:
get:
tags:
@@ -499,3 +521,11 @@ components:
items:
$ref: '#/components/schemas/ConnectorPluginConfig'
+ ConnectorTopics:
+ type: object
+ properties:
+ topics:
+ type: array
+ items:
+ type: string
+
| null | train | train | 2021-05-25T11:53:17 | "2021-05-30T16:12:17Z" | IldarAlmakaev | train |
provectus/kafka-ui/518_523 | provectus/kafka-ui | provectus/kafka-ui/518 | provectus/kafka-ui/523 | [
"keyword_pr_to_issue",
"connected"
] | fcc703ddd60cc5c2cef8a5e338f832244a6d0e9c | a1ee6a30c05ac6fcf3bfa3e126d438a55376b0f0 | [] | [] | "2021-06-03T16:25:12Z" | [
"type/bug"
] | Default pagination for backend and frontend are different. | **Describe the bug**
Default pagination for backend is 20:
```java
private static final Integer DEFAULT_PAGE_SIZE = 20;
```
Default pagination for fronted is 25:
```ts
export const PER_PAGE = 25;
```
This results in the following behavior:
1. When you first open the topics list in the browser you get 20 topics (1…20)
2. When you click the “Next page” you get 25 topics from 25 to 50
3. If you want to see topics 21...25 you need to click “Previous” to see them
4. If you happen to have 21...25 topics the next page is empty
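One way to keep the two sides from drifting again (just a sketch; the endpoint and parameter names below are illustrative, not the actual project code) is to treat the frontend constant as the single source of truth and always send it explicitly:

```ts
// Sketch only: illustrative request helper using a single shared page-size constant.
export const PER_PAGE = 25;

export const topicsPath = (clusterName: string, page: number): string =>
  `/api/clusters/${clusterName}/topics?page=${page}&perPage=${PER_PAGE}`;
```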
**Set up**
docker master image
**Expected behavior**
Default pagination for backend and frontend should be same.
| [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java"
] | [
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ClusterServiceTest.java"
] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
index 63dd249200b..26285840461 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
@@ -43,7 +43,7 @@
@Service
@RequiredArgsConstructor
public class ClusterService {
- private static final Integer DEFAULT_PAGE_SIZE = 20;
+ private static final Integer DEFAULT_PAGE_SIZE = 25;
private final ClustersStorage clustersStorage;
private final ClusterMapper clusterMapper;
| diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ClusterServiceTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ClusterServiceTest.java
index 76e3e79944d..4f27026d2da 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ClusterServiceTest.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ClusterServiceTest.java
@@ -33,7 +33,7 @@ class ClusterServiceTest {
private ClustersStorage clustersStorage;
@Test
- public void shouldListFirst20Topics() {
+ public void shouldListFirst25Topics() {
var topicName = UUID.randomUUID().toString();
when(clustersStorage.getClusterByName(topicName))
@@ -51,8 +51,8 @@ public void shouldListFirst20Topics() {
var topics = clusterService.getTopics(topicName,
Optional.empty(), Optional.empty(), Optional.empty(),
Optional.empty(), Optional.empty());
- assertThat(topics.getPageCount()).isEqualTo(5);
- assertThat(topics.getTopics()).hasSize(20);
+ assertThat(topics.getPageCount()).isEqualTo(4);
+ assertThat(topics.getTopics()).hasSize(25);
assertThat(topics.getTopics()).map(Topic::getName).isSorted();
}
@@ -97,8 +97,8 @@ public void shouldCorrectlyHandleNonPositivePageNumberAndPageSize() {
var topics = clusterService.getTopics(topicName, Optional.of(0), Optional.of(-1),
Optional.empty(), Optional.empty(), Optional.empty());
- assertThat(topics.getPageCount()).isEqualTo(5);
- assertThat(topics.getTopics()).hasSize(20);
+ assertThat(topics.getPageCount()).isEqualTo(4);
+ assertThat(topics.getTopics()).hasSize(25);
assertThat(topics.getTopics()).map(Topic::getName).isSorted();
}
@@ -122,8 +122,8 @@ public void shouldListBotInternalAndNonInternalTopics() {
var topics = clusterService.getTopics(topicName,
Optional.empty(), Optional.empty(), Optional.of(true),
Optional.empty(), Optional.empty());
- assertThat(topics.getPageCount()).isEqualTo(5);
- assertThat(topics.getTopics()).hasSize(20);
+ assertThat(topics.getPageCount()).isEqualTo(4);
+ assertThat(topics.getTopics()).hasSize(25);
assertThat(topics.getTopics()).map(Topic::getName).isSorted();
}
@@ -148,8 +148,8 @@ public void shouldListOnlyNonInternalTopics() {
var topics = clusterService.getTopics(topicName,
Optional.empty(), Optional.empty(), Optional.of(true),
Optional.empty(), Optional.empty());
- assertThat(topics.getPageCount()).isEqualTo(5);
- assertThat(topics.getTopics()).hasSize(20);
+ assertThat(topics.getPageCount()).isEqualTo(4);
+ assertThat(topics.getTopics()).hasSize(25);
assertThat(topics.getTopics()).map(Topic::getName).isSorted();
}
@@ -198,8 +198,8 @@ public void shouldListTopicsOrderedByPartitionsCount() {
var topics = clusterService.getTopics(topicName,
Optional.empty(), Optional.empty(), Optional.empty(),
Optional.empty(), Optional.of(TopicColumnsToSort.TOTAL_PARTITIONS));
- assertThat(topics.getPageCount()).isEqualTo(5);
- assertThat(topics.getTopics()).hasSize(20);
+ assertThat(topics.getPageCount()).isEqualTo(4);
+ assertThat(topics.getTopics()).hasSize(25);
assertThat(topics.getTopics()).map(Topic::getPartitionCount).isSorted();
}
}
| test | train | 2021-06-03T14:46:13 | "2021-06-03T07:59:28Z" | serdula | train |
provectus/kafka-ui/543_545 | provectus/kafka-ui | provectus/kafka-ui/543 | provectus/kafka-ui/545 | [
"keyword_pr_to_issue",
"timestamp(timedelta=0.0, similarity=0.8679259280346119)"
] | 804e3b96fc317c7e1ec52759aa1f1beed388b7a2 | db090a3985f2b6ccc033fda355bc5d9527630bb4 | [] | [
"Let's split this line into multiple",
"Should we name it OAuth0 example? Because for others there is no need to pass certs",
"done, looks better",
"I added a command line example without a self-signed certificate and SSL configuration, so we can leave it as-is"
] | "2021-06-10T13:09:18Z" | [
"type/documentation",
"type/enhancement"
] | Instructions how to configure SSO (OIDC/SAML) | ### Is your proposal related to a problem?
<!--
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
-->
Please provide documentation on how to configure UI for Apache Kafka with any SSO provider (Google/GitHub/etc)
### Describe the solution you'd like
<!--
Provide a clear and concise description of what you want to happen.
-->
Any document in repository with this instruction is appreciate
### Describe alternatives you've considered
<!--
Let us know about other solutions you've tried or researched.
-->
No alternatives =)
### Additional context
<!--
Is there anything else you can add about the proposal?
You might want to link to related issues here, if you haven't already.
-->
No context =) | [
"README.md"
] | [
"README.md",
"guides/SSO.md"
] | [] | diff --git a/README.md b/README.md
index 57670f4106c..2853b43ef68 100644
--- a/README.md
+++ b/README.md
@@ -108,7 +108,7 @@ To read more please follow to [chart documentation](charts/kafka-ui/README.md)
# Guides
-To be done
+- [SSO configuration](guides/SSO.md)
## Connecting to a Secure Broker
diff --git a/guides/SSO.md b/guides/SSO.md
new file mode 100644
index 00000000000..2cb2e7fc603
--- /dev/null
+++ b/guides/SSO.md
@@ -0,0 +1,48 @@
+# How to configure SSO
+SSO additionally requires configuring TLS for the application. In this example we will use a self-signed certificate; if you use a CA-signed certificate, please skip step 1.
+#### Step 1
+In this step we will generate a self-signed PKCS12 key pair.
+``` bash
+mkdir cert
+keytool -genkeypair -alias ui-for-apache-kafka -keyalg RSA -keysize 2048 \
+ -storetype PKCS12 -keystore cert/ui-for-apache-kafka.p12 -validity 3650
+```
+#### Step 2
+Create a new application in any SSO provider; in this guide we will continue with [Auth0](https://auth0.com).
+
+<img src="https://github.com/provectus/kafka-ui/raw/images/images/sso-new-app.png" width="70%"/>
+
+After that you need to provide the callback URLs; in our case we will use `https://127.0.0.1:8080/login/oauth2/code/auth0`
+
+<img src="https://github.com/provectus/kafka-ui/raw/images/images/sso-configuration.png" width="70%"/>
+
+These are the main parameters required for enabling SSO
+
+<img src="https://github.com/provectus/kafka-ui/raw/images/images/sso-parameters.png" width="70%"/>
+
+#### Step 3
+To launch UI for Apache Kafka with TLS and SSO enabled, run the following:
+``` bash
+docker run -p 8080:8080 -v `pwd`/cert:/opt/cert -e AUTH_ENABLED=true \
+ -e SECURITY_BASIC_ENABLED=true \
+ -e SERVER_SSL_KEY_STORE_TYPE=PKCS12 \
+ -e SERVER_SSL_KEY_STORE=/opt/cert/ui-for-apache-kafka.p12 \
+ -e SERVER_SSL_KEY_STORE_PASSWORD=123456 \
+ -e SERVER_SSL_KEY_ALIAS=ui-for-apache-kafka \
+ -e SERVER_SSL_ENABLED=true \
+ -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=uhvaPKIHU4ZF8Ne4B6PGvF0hWW6OcUSB \
+ -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTSECRET=YXfRjmodifiedTujnkVr7zuW9ECCAK4TcnCio-i \
+ -e SPRING_SECURITY_OAUTH2_CLIENT_PROVIDER_AUTH0_ISSUER_URI=https://dev-a63ggcut.auth0.com/ \
+ -e TRUST_STORE=/opt/cert/ui-for-apache-kafka.p12 \
+ -e TRUST_STORE_PASSWORD=123456 \
+provectuslabs/kafka-ui:0.1.0
+```
+In the case of a trusted CA-signed SSL certificate and SSL termination somewhere outside of the application, we can pass only the SSO-related environment variables:
+``` bash
+docker run -p 8080:8080 -v `pwd`/cert:/opt/cert -e AUTH_ENABLED=true \
+ -e SECURITY_BASIC_ENABLED=true \
+ -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=uhvaPKIHU4ZF8Ne4B6PGvF0hWW6OcUSB \
+ -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTSECRET=YXfRjmodifiedTujnkVr7zuW9ECCAK4TcnCio-i \
+ -e SPRING_SECURITY_OAUTH2_CLIENT_PROVIDER_AUTH0_ISSUER_URI=https://dev-a63ggcut.auth0.com/ \
+provectuslabs/kafka-ui:0.1.0
+```
| null | test | train | 2021-06-10T11:05:43 | "2021-06-09T13:14:45Z" | RustamGimadiev | train |
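The guide added by the patch above builds a self-signed PKCS12 keystore with `keytool` and mounts it into the container. As a small supplementary sketch (not something the guide itself includes), the generated keystore can be inspected before use; the file name and store password below are the ones used in the guide, the rest is plain `keytool` usage.

```bash
# Inspect the keystore generated in step 1 of the SSO guide above
# (keystore path and password are taken from that guide)
keytool -list -v -storetype PKCS12 \
  -keystore cert/ui-for-apache-kafka.p12 \
  -storepass 123456
```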
provectus/kafka-ui/122_562 | provectus/kafka-ui | provectus/kafka-ui/122 | provectus/kafka-ui/562 | [
"connected"
] | 2cb9b30090008a83e6d4ee5769b75c1bacb9402c | d3bf65cfb6c7929d874c5cf70e05582b2fc48b25 | [
"Current state of this task:\r\nNeed to figure out a way to read a set of topic messages from newer ones to older ones on BE, and then update existing FE request to properly work with new API.",
"Waiting fronted..."
] | [] | "2021-06-17T19:36:34Z" | [
"type/enhancement",
"scope/backend",
"scope/frontend"
] | Topic messages: Newest first | Change sorting of topic messages - sort by offset in descending order (latest messages first). | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeekBackward.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeekForward.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml"
] | [
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java",
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/OffsetsSeekTest.java"
] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
index 5dcb94a9892..a401b8268ef 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
@@ -2,6 +2,7 @@
import com.provectus.kafka.ui.api.MessagesApi;
import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.SeekDirection;
import com.provectus.kafka.ui.model.SeekType;
import com.provectus.kafka.ui.model.TopicMessage;
import com.provectus.kafka.ui.service.ClusterService;
@@ -40,13 +41,15 @@ public Mono<ResponseEntity<Void>> deleteTopicMessages(
@Override
public Mono<ResponseEntity<Flux<TopicMessage>>> getTopicMessages(
String clusterName, String topicName, @Valid SeekType seekType, @Valid List<String> seekTo,
- @Valid Integer limit, @Valid String q, ServerWebExchange exchange) {
- return parseConsumerPosition(seekType, seekTo)
+ @Valid Integer limit, @Valid String q, @Valid SeekDirection seekDirection,
+ ServerWebExchange exchange) {
+ return parseConsumerPosition(seekType, seekTo, seekDirection)
.map(consumerPosition -> ResponseEntity
.ok(clusterService.getMessages(clusterName, topicName, consumerPosition, q, limit)));
}
- private Mono<ConsumerPosition> parseConsumerPosition(SeekType seekType, List<String> seekTo) {
+ private Mono<ConsumerPosition> parseConsumerPosition(
+ SeekType seekType, List<String> seekTo, SeekDirection seekDirection) {
return Mono.justOrEmpty(seekTo)
.defaultIfEmpty(Collections.emptyList())
.flatMapIterable(Function.identity())
@@ -61,7 +64,7 @@ private Mono<ConsumerPosition> parseConsumerPosition(SeekType seekType, List<Str
})
.collectMap(Pair::getKey, Pair::getValue)
.map(positions -> new ConsumerPosition(seekType != null ? seekType : SeekType.BEGINNING,
- positions));
+ positions, seekDirection));
}
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java
index 0f947be7fc8..a867002b344 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java
@@ -8,5 +8,5 @@ public class ConsumerPosition {
private SeekType seekType;
private Map<Integer, Long> seekTo;
-
+ private SeekDirection seekDirection;
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java
index 310292bc6ee..fceb80aa9ef 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java
@@ -6,12 +6,14 @@
import com.provectus.kafka.ui.deserialization.RecordDeserializer;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.SeekType;
+import com.provectus.kafka.ui.model.SeekDirection;
import com.provectus.kafka.ui.model.TopicMessage;
import com.provectus.kafka.ui.util.ClusterUtil;
+import com.provectus.kafka.ui.util.OffsetsSeek;
+import com.provectus.kafka.ui.util.OffsetsSeekBackward;
+import com.provectus.kafka.ui.util.OffsetsSeekForward;
import java.time.Duration;
import java.util.Collection;
-import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
@@ -53,7 +55,10 @@ public Flux<TopicMessage> loadMessages(KafkaCluster cluster, String topic,
.orElse(DEFAULT_RECORD_LIMIT);
RecordEmitter emitter = new RecordEmitter(
() -> kafkaService.createConsumer(cluster),
- new OffsetsSeek(topic, consumerPosition));
+ consumerPosition.getSeekDirection().equals(SeekDirection.FORWARD)
+ ? new OffsetsSeekForward(topic, consumerPosition)
+ : new OffsetsSeekBackward(topic, consumerPosition, recordsLimit)
+ );
RecordDeserializer recordDeserializer =
deserializationService.getRecordDeserializerForCluster(cluster);
return Flux.create(emitter)
@@ -79,7 +84,7 @@ public Mono<Map<TopicPartition, Long>> offsetsForDeletion(KafkaCluster cluster,
* returns end offsets for partitions where start offset != end offsets.
* This is useful when we need to verify that partition is not empty.
*/
- private static Map<TopicPartition, Long> significantOffsets(Consumer<?, ?> consumer,
+ public static Map<TopicPartition, Long> significantOffsets(Consumer<?, ?> consumer,
String topicName,
Collection<Integer>
partitionsToInclude) {
@@ -159,98 +164,4 @@ public void accept(FluxSink<ConsumerRecord<Bytes, Bytes>> sink) {
}
}
- @RequiredArgsConstructor
- static class OffsetsSeek {
-
- private final String topic;
- private final ConsumerPosition consumerPosition;
-
- public WaitingOffsets assignAndSeek(Consumer<Bytes, Bytes> consumer) {
- SeekType seekType = consumerPosition.getSeekType();
- log.info("Positioning consumer for topic {} with {}", topic, consumerPosition);
- switch (seekType) {
- case OFFSET:
- assignAndSeekForOffset(consumer);
- break;
- case TIMESTAMP:
- assignAndSeekForTimestamp(consumer);
- break;
- case BEGINNING:
- assignAndSeekFromBeginning(consumer);
- break;
- default:
- throw new IllegalArgumentException("Unknown seekType: " + seekType);
- }
- log.info("Assignment: {}", consumer.assignment());
- return new WaitingOffsets(topic, consumer);
- }
-
- private List<TopicPartition> getRequestedPartitions(Consumer<Bytes, Bytes> consumer) {
- Map<Integer, Long> partitionPositions = consumerPosition.getSeekTo();
- return consumer.partitionsFor(topic).stream()
- .filter(
- p -> partitionPositions.isEmpty() || partitionPositions.containsKey(p.partition()))
- .map(p -> new TopicPartition(p.topic(), p.partition()))
- .collect(Collectors.toList());
- }
-
- private void assignAndSeekForOffset(Consumer<Bytes, Bytes> consumer) {
- List<TopicPartition> partitions = getRequestedPartitions(consumer);
- consumer.assign(partitions);
- consumerPosition.getSeekTo().forEach((partition, offset) -> {
- TopicPartition topicPartition = new TopicPartition(topic, partition);
- consumer.seek(topicPartition, offset);
- });
- }
-
- private void assignAndSeekForTimestamp(Consumer<Bytes, Bytes> consumer) {
- Map<TopicPartition, Long> timestampsToSearch =
- consumerPosition.getSeekTo().entrySet().stream()
- .collect(Collectors.toMap(
- partitionPosition -> new TopicPartition(topic, partitionPosition.getKey()),
- Map.Entry::getValue
- ));
- Map<TopicPartition, Long> offsetsForTimestamps = consumer.offsetsForTimes(timestampsToSearch)
- .entrySet().stream()
- .filter(e -> e.getValue() != null)
- .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
-
- if (offsetsForTimestamps.isEmpty()) {
- throw new IllegalArgumentException("No offsets were found for requested timestamps");
- }
-
- consumer.assign(offsetsForTimestamps.keySet());
- offsetsForTimestamps.forEach(consumer::seek);
- }
-
- private void assignAndSeekFromBeginning(Consumer<Bytes, Bytes> consumer) {
- List<TopicPartition> partitions = getRequestedPartitions(consumer);
- consumer.assign(partitions);
- consumer.seekToBeginning(partitions);
- }
-
- static class WaitingOffsets {
- final Map<Integer, Long> offsets = new HashMap<>(); // partition number -> offset
-
- WaitingOffsets(String topic, Consumer<?, ?> consumer) {
- var partitions = consumer.assignment().stream()
- .map(TopicPartition::partition)
- .collect(Collectors.toList());
- significantOffsets(consumer, topic, partitions)
- .forEach((tp, offset) -> offsets.put(tp.partition(), offset - 1));
- }
-
- void markPolled(ConsumerRecord<?, ?> rec) {
- Long waiting = offsets.get(rec.partition());
- if (waiting != null && waiting <= rec.offset()) {
- offsets.remove(rec.partition());
- }
- }
-
- boolean endReached() {
- return offsets.isEmpty();
- }
- }
-
- }
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java
new file mode 100644
index 00000000000..9d082c67bc8
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java
@@ -0,0 +1,84 @@
+package com.provectus.kafka.ui.util;
+
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.SeekType;
+import com.provectus.kafka.ui.service.ConsumingService;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import lombok.extern.log4j.Log4j2;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.utils.Bytes;
+
+@Log4j2
+public abstract class OffsetsSeek {
+ protected final String topic;
+ protected final ConsumerPosition consumerPosition;
+
+ public OffsetsSeek(String topic, ConsumerPosition consumerPosition) {
+ this.topic = topic;
+ this.consumerPosition = consumerPosition;
+ }
+
+ public WaitingOffsets assignAndSeek(Consumer<Bytes, Bytes> consumer) {
+ SeekType seekType = consumerPosition.getSeekType();
+ log.info("Positioning consumer for topic {} with {}", topic, consumerPosition);
+ switch (seekType) {
+ case OFFSET:
+ assignAndSeekForOffset(consumer);
+ break;
+ case TIMESTAMP:
+ assignAndSeekForTimestamp(consumer);
+ break;
+ case BEGINNING:
+ assignAndSeekFromBeginning(consumer);
+ break;
+ default:
+ throw new IllegalArgumentException("Unknown seekType: " + seekType);
+ }
+ log.info("Assignment: {}", consumer.assignment());
+ return new WaitingOffsets(topic, consumer);
+ }
+
+ protected List<TopicPartition> getRequestedPartitions(Consumer<Bytes, Bytes> consumer) {
+ Map<Integer, Long> partitionPositions = consumerPosition.getSeekTo();
+ return consumer.partitionsFor(topic).stream()
+ .filter(
+ p -> partitionPositions.isEmpty() || partitionPositions.containsKey(p.partition()))
+ .map(p -> new TopicPartition(p.topic(), p.partition()))
+ .collect(Collectors.toList());
+ }
+
+
+ protected abstract void assignAndSeekFromBeginning(Consumer<Bytes, Bytes> consumer);
+
+ protected abstract void assignAndSeekForTimestamp(Consumer<Bytes, Bytes> consumer);
+
+ protected abstract void assignAndSeekForOffset(Consumer<Bytes, Bytes> consumer);
+
+ public static class WaitingOffsets {
+ final Map<Integer, Long> offsets = new HashMap<>(); // partition number -> offset
+
+ public WaitingOffsets(String topic, Consumer<?, ?> consumer) {
+ var partitions = consumer.assignment().stream()
+ .map(TopicPartition::partition)
+ .collect(Collectors.toList());
+ ConsumingService.significantOffsets(consumer, topic, partitions)
+ .forEach((tp, offset) -> offsets.put(tp.partition(), offset - 1));
+ }
+
+ public void markPolled(ConsumerRecord<?, ?> rec) {
+ Long waiting = offsets.get(rec.partition());
+ if (waiting != null && waiting <= rec.offset()) {
+ offsets.remove(rec.partition());
+ }
+ }
+
+ public boolean endReached() {
+ return offsets.isEmpty();
+ }
+ }
+}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeekBackward.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeekBackward.java
new file mode 100644
index 00000000000..7500ab0bbb3
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeekBackward.java
@@ -0,0 +1,121 @@
+package com.provectus.kafka.ui.util;
+
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+import lombok.extern.log4j.Log4j2;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.utils.Bytes;
+import reactor.util.function.Tuple2;
+import reactor.util.function.Tuples;
+
+@Log4j2
+public class OffsetsSeekBackward extends OffsetsSeek {
+
+ private final int maxMessages;
+
+ public OffsetsSeekBackward(String topic,
+ ConsumerPosition consumerPosition, int maxMessages) {
+ super(topic, consumerPosition);
+ this.maxMessages = maxMessages;
+ }
+
+
+ protected void assignAndSeekForOffset(Consumer<Bytes, Bytes> consumer) {
+ List<TopicPartition> partitions = getRequestedPartitions(consumer);
+ consumer.assign(partitions);
+ final Map<TopicPartition, Long> offsets =
+ findOffsetsInt(consumer, consumerPosition.getSeekTo());
+ offsets.forEach(consumer::seek);
+ }
+
+ protected void assignAndSeekFromBeginning(Consumer<Bytes, Bytes> consumer) {
+ List<TopicPartition> partitions = getRequestedPartitions(consumer);
+ consumer.assign(partitions);
+ final Map<TopicPartition, Long> offsets = findOffsets(consumer, Map.of());
+ offsets.forEach(consumer::seek);
+ }
+
+ protected void assignAndSeekForTimestamp(Consumer<Bytes, Bytes> consumer) {
+ Map<TopicPartition, Long> timestampsToSearch =
+ consumerPosition.getSeekTo().entrySet().stream()
+ .collect(Collectors.toMap(
+ partitionPosition -> new TopicPartition(topic, partitionPosition.getKey()),
+ e -> e.getValue() + 1
+ ));
+ Map<TopicPartition, Long> offsetsForTimestamps = consumer.offsetsForTimes(timestampsToSearch)
+ .entrySet().stream()
+ .filter(e -> e.getValue() != null)
+ .map(v -> Tuples.of(v.getKey(), v.getValue().offset() - 1))
+ .collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));
+
+ if (offsetsForTimestamps.isEmpty()) {
+ throw new IllegalArgumentException("No offsets were found for requested timestamps");
+ }
+
+ consumer.assign(offsetsForTimestamps.keySet());
+ final Map<TopicPartition, Long> offsets = findOffsets(consumer, offsetsForTimestamps);
+ offsets.forEach(consumer::seek);
+ }
+
+ protected Map<TopicPartition, Long> findOffsetsInt(
+ Consumer<Bytes, Bytes> consumer, Map<Integer, Long> seekTo) {
+
+ final Map<TopicPartition, Long> seekMap = seekTo.entrySet()
+ .stream().map(p ->
+ Tuples.of(
+ new TopicPartition(topic, p.getKey()),
+ p.getValue()
+ )
+ ).collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));
+
+ return findOffsets(consumer, seekMap);
+ }
+
+ protected Map<TopicPartition, Long> findOffsets(
+ Consumer<Bytes, Bytes> consumer, Map<TopicPartition, Long> seekTo) {
+
+ List<TopicPartition> partitions = getRequestedPartitions(consumer);
+ final Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
+ final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
+
+ final Map<TopicPartition, Long> seekMap = new HashMap<>(seekTo);
+ int awaitingMessages = maxMessages;
+
+ Set<TopicPartition> waiting = new HashSet<>(partitions);
+
+ while (awaitingMessages > 0 && !waiting.isEmpty()) {
+ final int msgsPerPartition = (int) Math.ceil((double) awaitingMessages / partitions.size());
+ for (TopicPartition partition : partitions) {
+ final Long offset = Optional.ofNullable(seekMap.get(partition))
+ .orElseGet(() -> endOffsets.get(partition));
+ final Long beginning = beginningOffsets.get(partition);
+
+ if (offset - beginning > msgsPerPartition) {
+ seekMap.put(partition, offset - msgsPerPartition);
+ awaitingMessages -= msgsPerPartition;
+ } else {
+ final long num = offset - beginning;
+ if (num > 0) {
+ seekMap.put(partition, offset - num);
+ awaitingMessages -= num;
+ } else {
+ waiting.remove(partition);
+ }
+ }
+
+ if (awaitingMessages <= 0) {
+ break;
+ }
+ }
+ }
+
+ return seekMap;
+ }
+}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeekForward.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeekForward.java
new file mode 100644
index 00000000000..0a31cc7753a
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeekForward.java
@@ -0,0 +1,59 @@
+package com.provectus.kafka.ui.util;
+
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.SeekType;
+import com.provectus.kafka.ui.service.ConsumingService;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.log4j.Log4j2;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.utils.Bytes;
+
+@Log4j2
+public class OffsetsSeekForward extends OffsetsSeek {
+
+ public OffsetsSeekForward(String topic, ConsumerPosition consumerPosition) {
+ super(topic, consumerPosition);
+ }
+
+ protected void assignAndSeekForOffset(Consumer<Bytes, Bytes> consumer) {
+ List<TopicPartition> partitions = getRequestedPartitions(consumer);
+ consumer.assign(partitions);
+ consumerPosition.getSeekTo().forEach((partition, offset) -> {
+ TopicPartition topicPartition = new TopicPartition(topic, partition);
+ consumer.seek(topicPartition, offset);
+ });
+ }
+
+ protected void assignAndSeekForTimestamp(Consumer<Bytes, Bytes> consumer) {
+ Map<TopicPartition, Long> timestampsToSearch =
+ consumerPosition.getSeekTo().entrySet().stream()
+ .collect(Collectors.toMap(
+ partitionPosition -> new TopicPartition(topic, partitionPosition.getKey()),
+ Map.Entry::getValue
+ ));
+ Map<TopicPartition, Long> offsetsForTimestamps = consumer.offsetsForTimes(timestampsToSearch)
+ .entrySet().stream()
+ .filter(e -> e.getValue() != null)
+ .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
+
+ if (offsetsForTimestamps.isEmpty()) {
+ throw new IllegalArgumentException("No offsets were found for requested timestamps");
+ }
+
+ consumer.assign(offsetsForTimestamps.keySet());
+ offsetsForTimestamps.forEach(consumer::seek);
+ }
+
+ protected void assignAndSeekFromBeginning(Consumer<Bytes, Bytes> consumer) {
+ List<TopicPartition> partitions = getRequestedPartitions(consumer);
+ consumer.assign(partitions);
+ consumer.seekToBeginning(partitions);
+ }
+
+}
diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
index e04566b7ed9..f77b6b1881e 100644
--- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
+++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
@@ -323,6 +323,10 @@ paths:
in: query
schema:
type: string
+ - name: seekDirection
+ in: query
+ schema:
+ $ref: "#/components/schemas/SeekDirection"
responses:
200:
description: OK
@@ -1448,6 +1452,13 @@ components:
- OFFSET
- TIMESTAMP
+ SeekDirection:
+ type: string
+ enum:
+ - FORWARD
+ - BACKWARD
+ default: FORWARD
+
Partition:
type: object
properties:
| diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java
index 11af012277a..358b818cccc 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java
@@ -1,13 +1,14 @@
package com.provectus.kafka.ui.service;
-import static com.provectus.kafka.ui.service.ConsumingService.OffsetsSeek;
import static com.provectus.kafka.ui.service.ConsumingService.RecordEmitter;
import static org.assertj.core.api.Assertions.assertThat;
import com.provectus.kafka.ui.AbstractBaseTest;
import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.SeekDirection;
import com.provectus.kafka.ui.model.SeekType;
import com.provectus.kafka.ui.producer.KafkaTestProducer;
+import com.provectus.kafka.ui.util.OffsetsSeekForward;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -65,7 +66,10 @@ static void cleanup() {
void pollNothingOnEmptyTopic() {
var emitter = new RecordEmitter(
this::createConsumer,
- new OffsetsSeek(EMPTY_TOPIC, new ConsumerPosition(SeekType.BEGINNING, Map.of())));
+ new OffsetsSeekForward(EMPTY_TOPIC,
+ new ConsumerPosition(SeekType.BEGINNING, Map.of(), SeekDirection.FORWARD)
+ )
+ );
Long polledValues = Flux.create(emitter)
.limitRequest(100)
@@ -79,7 +83,10 @@ void pollNothingOnEmptyTopic() {
void pollFullTopicFromBeginning() {
var emitter = new RecordEmitter(
this::createConsumer,
- new OffsetsSeek(TOPIC, new ConsumerPosition(SeekType.BEGINNING, Map.of())));
+ new OffsetsSeekForward(TOPIC,
+ new ConsumerPosition(SeekType.BEGINNING, Map.of(), SeekDirection.FORWARD)
+ )
+ );
var polledValues = Flux.create(emitter)
.map(this::deserialize)
@@ -101,7 +108,10 @@ void pollWithOffsets() {
var emitter = new RecordEmitter(
this::createConsumer,
- new OffsetsSeek(TOPIC, new ConsumerPosition(SeekType.OFFSET, targetOffsets)));
+ new OffsetsSeekForward(TOPIC,
+ new ConsumerPosition(SeekType.OFFSET, targetOffsets, SeekDirection.FORWARD)
+ )
+ );
var polledValues = Flux.create(emitter)
.map(this::deserialize)
@@ -127,7 +137,10 @@ void pollWithTimestamps() {
var emitter = new RecordEmitter(
this::createConsumer,
- new OffsetsSeek(TOPIC, new ConsumerPosition(SeekType.TIMESTAMP, targetTimestamps)));
+ new OffsetsSeekForward(TOPIC,
+ new ConsumerPosition(SeekType.TIMESTAMP, targetTimestamps, SeekDirection.FORWARD)
+ )
+ );
var polledValues = Flux.create(emitter)
.map(this::deserialize)
diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsSeekTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/OffsetsSeekTest.java
similarity index 58%
rename from kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsSeekTest.java
rename to kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/OffsetsSeekTest.java
index 550b2ce5587..302c2a6b586 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsSeekTest.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/OffsetsSeekTest.java
@@ -1,8 +1,9 @@
-package com.provectus.kafka.ui.service;
+package com.provectus.kafka.ui.util;
import static org.assertj.core.api.Assertions.assertThat;
import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.SeekDirection;
import com.provectus.kafka.ui.model.SeekType;
import java.util.List;
import java.util.Map;
@@ -51,10 +52,16 @@ void initConsumer() {
}
@Test
- void seekToBeginningAllPartitions() {
- var seek = new ConsumingService.OffsetsSeek(
+ void forwardSeekToBeginningAllPartitions() {
+ var seek = new OffsetsSeekForward(
topic,
- new ConsumerPosition(SeekType.BEGINNING, Map.of(0, 0L, 1, 0L)));
+ new ConsumerPosition(
+ SeekType.BEGINNING,
+ Map.of(0, 0L, 1, 0L),
+ SeekDirection.FORWARD
+ )
+ );
+
seek.assignAndSeek(consumer);
assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1);
assertThat(consumer.position(tp0)).isZero();
@@ -62,10 +69,28 @@ void seekToBeginningAllPartitions() {
}
@Test
- void seekToBeginningWithPartitionsList() {
- var seek = new ConsumingService.OffsetsSeek(
+ void backwardSeekToBeginningAllPartitions() {
+ var seek = new OffsetsSeekBackward(
+ topic,
+ new ConsumerPosition(
+ SeekType.BEGINNING,
+ Map.of(2, 0L, 3, 0L),
+ SeekDirection.BACKWARD
+ ),
+ 10
+ );
+
+ seek.assignAndSeek(consumer);
+ assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp2, tp3);
+ assertThat(consumer.position(tp2)).isEqualTo(15L);
+ assertThat(consumer.position(tp3)).isEqualTo(25L);
+ }
+
+ @Test
+ void forwardSeekToBeginningWithPartitionsList() {
+ var seek = new OffsetsSeekForward(
topic,
- new ConsumerPosition(SeekType.BEGINNING, Map.of()));
+ new ConsumerPosition(SeekType.BEGINNING, Map.of(), SeekDirection.FORWARD));
seek.assignAndSeek(consumer);
assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1, tp2, tp3);
assertThat(consumer.position(tp0)).isZero();
@@ -75,10 +100,31 @@ void seekToBeginningWithPartitionsList() {
}
@Test
- void seekToOffset() {
- var seek = new ConsumingService.OffsetsSeek(
+ void backwardSeekToBeginningWithPartitionsList() {
+ var seek = new OffsetsSeekBackward(
topic,
- new ConsumerPosition(SeekType.OFFSET, Map.of(0, 0L, 1, 1L, 2, 2L)));
+ new ConsumerPosition(SeekType.BEGINNING, Map.of(), SeekDirection.BACKWARD),
+ 10
+ );
+ seek.assignAndSeek(consumer);
+ assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1, tp2, tp3);
+ assertThat(consumer.position(tp0)).isZero();
+ assertThat(consumer.position(tp1)).isEqualTo(10L);
+ assertThat(consumer.position(tp2)).isEqualTo(15L);
+ assertThat(consumer.position(tp3)).isEqualTo(25L);
+ }
+
+
+ @Test
+ void forwardSeekToOffset() {
+ var seek = new OffsetsSeekForward(
+ topic,
+ new ConsumerPosition(
+ SeekType.OFFSET,
+ Map.of(0, 0L, 1, 1L, 2, 2L),
+ SeekDirection.FORWARD
+ )
+ );
seek.assignAndSeek(consumer);
assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1, tp2);
assertThat(consumer.position(tp0)).isZero();
@@ -86,15 +132,34 @@ void seekToOffset() {
assertThat(consumer.position(tp2)).isEqualTo(2L);
}
+ @Test
+ void backwardSeekToOffset() {
+ var seek = new OffsetsSeekBackward(
+ topic,
+ new ConsumerPosition(
+ SeekType.OFFSET,
+ Map.of(0, 0L, 1, 1L, 2, 2L),
+ SeekDirection.FORWARD
+ ),
+ 2
+ );
+ seek.assignAndSeek(consumer);
+ assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1, tp2);
+ assertThat(consumer.position(tp0)).isZero();
+ assertThat(consumer.position(tp1)).isEqualTo(1L);
+ assertThat(consumer.position(tp2)).isEqualTo(0L);
+ }
+
+
@Nested
class WaitingOffsetsTest {
- ConsumingService.OffsetsSeek.WaitingOffsets offsets;
+ OffsetsSeekForward.WaitingOffsets offsets;
@BeforeEach
void assignAndCreateOffsets() {
consumer.assign(List.of(tp0, tp1, tp2, tp3));
- offsets = new ConsumingService.OffsetsSeek.WaitingOffsets(topic, consumer);
+ offsets = new OffsetsSeek.WaitingOffsets(topic, consumer);
}
@Test
| test | train | 2021-06-16T18:14:16 | "2020-11-24T11:15:50Z" | soffest | train |
provectus/kafka-ui/122_577 | provectus/kafka-ui | provectus/kafka-ui/122 | provectus/kafka-ui/577 | [
"connected"
] | dbebb440ac3ca6d0e1aad54f7af90eae350a0579 | de5a8652a12ec28920cbabcf920e704b4ed01d8e | [
"Current state of this task:\r\nNeed to figure out a way to read a set of topic messages from newer ones to older ones on BE, and then update existing FE request to properly work with new API.",
"Waiting fronted..."
] | [] | "2021-06-21T19:41:03Z" | [
"type/enhancement",
"scope/backend",
"scope/frontend"
] | Topic messages: Newest first | Change sorting of topic messages - sort by offset in descending order (latest messages first). | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java
index fceb80aa9ef..a6351aae019 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java
@@ -14,12 +14,14 @@
import com.provectus.kafka.ui.util.OffsetsSeekForward;
import java.time.Duration;
import java.util.Collection;
+import java.util.Comparator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Supplier;
import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
import org.apache.commons.lang3.StringUtils;
@@ -136,6 +138,15 @@ static class RecordEmitter
private static final Duration POLL_TIMEOUT_MS = Duration.ofMillis(1000L);
+ private static final Comparator<ConsumerRecord<?, ?>> PARTITION_COMPARING =
+ Comparator.comparing(
+ ConsumerRecord::partition,
+ Comparator.nullsFirst(Comparator.naturalOrder())
+ );
+ private static final Comparator<ConsumerRecord<?, ?>> REVERED_COMPARING =
+ PARTITION_COMPARING.thenComparing(ConsumerRecord::offset).reversed();
+
+
private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
private final OffsetsSeek offsetsSeek;
@@ -146,7 +157,16 @@ public void accept(FluxSink<ConsumerRecord<Bytes, Bytes>> sink) {
while (!sink.isCancelled() && !waitingOffsets.endReached()) {
ConsumerRecords<Bytes, Bytes> records = consumer.poll(POLL_TIMEOUT_MS);
log.info("{} records polled", records.count());
- for (ConsumerRecord<Bytes, Bytes> record : records) {
+
+ final Iterable<ConsumerRecord<Bytes, Bytes>> iterable;
+ if (offsetsSeek.getConsumerPosition().getSeekDirection().equals(SeekDirection.FORWARD)) {
+ iterable = records;
+ } else {
+ iterable = StreamSupport.stream(records.spliterator(), false)
+ .sorted(REVERED_COMPARING).collect(Collectors.toList());
+ }
+
+ for (ConsumerRecord<Bytes, Bytes> record : iterable) {
if (!sink.isCancelled() && !waitingOffsets.endReached()) {
sink.next(record);
waitingOffsets.markPolled(record);
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java
index 9d082c67bc8..6383a83121d 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java
@@ -23,6 +23,10 @@ public OffsetsSeek(String topic, ConsumerPosition consumerPosition) {
this.consumerPosition = consumerPosition;
}
+ public ConsumerPosition getConsumerPosition() {
+ return consumerPosition;
+ }
+
public WaitingOffsets assignAndSeek(Consumer<Bytes, Bytes> consumer) {
SeekType seekType = consumerPosition.getSeekType();
log.info("Positioning consumer for topic {} with {}", topic, consumerPosition);
| null | train | train | 2021-06-21T15:17:40 | "2020-11-24T11:15:50Z" | soffest | train |
provectus/kafka-ui/573_584 | provectus/kafka-ui | provectus/kafka-ui/573 | provectus/kafka-ui/584 | [
"connected"
] | 5dd3944faabe98b089958186200b290d2d6c7766 | 50bf0f17804bc13329bddbd24f47be5014ed098f | [
"Hi, @EnzOuille thanks for sharing this issue. Could you please provide your kafka config for example in a docker compose file. This will help us to reproduce this issue. ",
"Hi thanks for the quick answer, below is my config of kafka in docker :\r\n\r\n kafka:\r\n image: wurstmeister/kafka:latest\r\n hostname: kafka\r\n container_name: kafka\r\n networks:\r\n - kafka_installation_default\r\n depends_on:\r\n - zookeeper\r\n ports:\r\n - '9092:9092'\r\n environment:\r\n KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'\r\n KAFKA_LISTENERS: SASL_PLAINTEXT://:9092\r\n KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://192.168.0.34:9092\r\n KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'\r\n ALLOW_PLAINTEXT_LISTENER: 'yes'\r\n KAFKA_OPTS: \"-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf\"\r\n KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer\r\n KAFKA_INTER_BROKER_LISTENER_NAME: SASL_PLAINTEXT\r\n KAFKA_SASL_ENABLED_MECHANISMS: PLAIN\r\n KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN\r\n KAFKA_SECURITY_PROTOCOL: SASL_PLAINTEXT\r\n KAFKA_SUPER_USERS: User:admin,User:enzo\r\n KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true'\r\n volumes:\r\n - /var/lib/docker/volumes/kafka_vol/_data:/etc/kafka\r\n\r\nAnd this is the jaas file config \r\n\r\nKafkaServer {\r\n org.apache.kafka.common.security.plain.PlainLoginModule required\r\n username=\"admin\"\r\n password=\"admin-secret\"\r\n user_admin=\"admin-secret\"\r\n user_enzo=\"cisternino\";\r\n};\r\n\r\nKafkaClient {\r\n org.apache.kafka.common.security.plain.PlainLoginModule required\r\n user_admin=\"admin-secret\";\r\n};\r\n\r\nClient {};",
"@EnzOuille thanks for quick sharing, we'll try it locally and return to you soon. ",
"[Documentation ](https://github.com/provectus/kafka-ui#connecting-to-a-secure-broker) says \"To be continued\"\r\nWhat is the correct way to set username and password for SASL using kafka-ui env vars?",
"@germanosin ok thanks for that ! ",
"@EnzOuille, looks like you missed trailing *;*.\r\nI created example docker compose here: https://github.com/provectus/kafka-ui/blob/master/docker/kafka-ui-sasl.yaml#L19",
"My bad, thanks for that, it's all working now !!! Thanks for your time.",
"@EnzOuille you are always welcome)"
] | [] | "2021-06-25T10:22:14Z" | [
"type/bug",
"scope/backend"
] | Can't connect to Kafka with SASL | Hi, I have Kafka installed with Docker and running with SASL_PLAINTEXT; when I try to open the Kafka UI web page, I only get a 500 error. This is my config:
kafka-ui:
image: provectuslabs/kafka-ui
container_name: kafka-ui
networks:
- kafka_installation_default
ports:
- "8080:8080"
environment:
KAFKA_CLUSTERS_0_NAME: 'local'
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: 'kafka:9092'
KAFKA_CLUSTERS_0_ZOOKEEPER: 'zookeeper:2181'
KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SASL_PLAINTEXT
KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM: PLAIN
KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="enzo" password="cisternino"' | [] | [
"docker/jaas/client.properties",
"docker/jaas/kafka_server.conf",
"docker/kafka-ui-sasl.yaml"
] | [] | diff --git a/docker/jaas/client.properties b/docker/jaas/client.properties
new file mode 100644
index 00000000000..db1feea1f81
--- /dev/null
+++ b/docker/jaas/client.properties
@@ -0,0 +1,3 @@
+sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";
+security.protocol=SASL_PLAINTEXT
+sasl.mechanism=PLAIN
\ No newline at end of file
diff --git a/docker/jaas/kafka_server.conf b/docker/jaas/kafka_server.conf
new file mode 100644
index 00000000000..ef41c992e21
--- /dev/null
+++ b/docker/jaas/kafka_server.conf
@@ -0,0 +1,14 @@
+KafkaServer {
+ org.apache.kafka.common.security.plain.PlainLoginModule required
+ username="admin"
+ password="admin-secret"
+ user_admin="admin-secret"
+ user_enzo="cisternino";
+};
+
+KafkaClient {
+ org.apache.kafka.common.security.plain.PlainLoginModule required
+ user_admin="admin-secret";
+};
+
+Client {};
\ No newline at end of file
diff --git a/docker/kafka-ui-sasl.yaml b/docker/kafka-ui-sasl.yaml
new file mode 100644
index 00000000000..87b1094be7f
--- /dev/null
+++ b/docker/kafka-ui-sasl.yaml
@@ -0,0 +1,51 @@
+---
+version: '2'
+services:
+
+ kafka-ui:
+ container_name: kafka-ui
+ image: provectuslabs/kafka-ui:latest
+ ports:
+ - 8080:8080
+ depends_on:
+ - zookeeper
+ - kafka
+ environment:
+ KAFKA_CLUSTERS_0_NAME: local
+ KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
+ KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper:2181
+ KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SASL_PLAINTEXT
+ KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM: PLAIN
+ KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";'
+ zookeeper:
+ image: confluentinc/cp-zookeeper:5.2.4
+ environment:
+ ZOOKEEPER_CLIENT_PORT: 2181
+ ZOOKEEPER_TICK_TIME: 2000
+ ports:
+ - 2181:2181
+
+ kafka:
+ image: wurstmeister/kafka:latest
+ hostname: kafka
+ container_name: kafka
+ depends_on:
+ - zookeeper
+ ports:
+ - '9092:9092'
+ environment:
+ KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
+ KAFKA_LISTENERS: SASL_PLAINTEXT://kafka:9092
+ KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://kafka:9092
+ KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
+ ALLOW_PLAINTEXT_LISTENER: 'yes'
+ KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas/kafka_server.conf"
+ KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
+ KAFKA_INTER_BROKER_LISTENER_NAME: SASL_PLAINTEXT
+ KAFKA_SASL_ENABLED_MECHANISMS: PLAIN
+ KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
+ KAFKA_SECURITY_PROTOCOL: SASL_PLAINTEXT
+ KAFKA_SUPER_USERS: User:admin,User:enzo
+ KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true'
+ volumes:
+ - ./jaas:/etc/kafka/jaas
\ No newline at end of file
| null | train | train | 2021-06-23T19:47:32 | "2021-06-21T13:15:19Z" | EnzOuille | train |
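The maintainer's hint in the record above and the `docker/kafka-ui-sasl.yaml` example added by the patch both point at the same root cause: the `sasl.jaas.config` value must end with a semicolon. Purely as an illustration — the credentials, broker address and image are the ones quoted in the issue, not a recommendation — the reporter's configuration with the corrected value could be run as:

```bash
# Note the trailing ";" inside the quotes on the JAAS config line
docker run -p 8080:8080 \
  -e KAFKA_CLUSTERS_0_NAME=local \
  -e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092 \
  -e KAFKA_CLUSTERS_0_ZOOKEEPER=zookeeper:2181 \
  -e KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_PLAINTEXT \
  -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=PLAIN \
  -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG='org.apache.kafka.common.security.plain.PlainLoginModule required username="enzo" password="cisternino";' \
  provectuslabs/kafka-ui
```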
provectus/kafka-ui/121_585 | provectus/kafka-ui | provectus/kafka-ui/121 | provectus/kafka-ui/585 | [
"timestamp(timedelta=0.0, similarity=0.9028316332241456)",
"connected"
] | 50bf0f17804bc13329bddbd24f47be5014ed098f | 57a4125c715cad3a3ace09e2c750f65cd8aa896e | [
"Add consumer groups list details per topic level"
] | [] | "2021-06-25T10:54:47Z" | [
"type/enhancement",
"scope/frontend"
] | Topic Details: Display consumers | Add ability to view list of consumers for topic. | [
"kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/TopicConsumerGroups.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/TopicConsumerGroupsContainer.ts",
"kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/__test__/TopicConsumerGroups.spec.tsx",
"kafka-ui-react-app/src/redux/interfaces/topic.ts",
"kafka-ui-react-app/src/redux/reducers/topics/selectors.ts"
] | [
"kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/TopicConsumerGroups.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/TopicConsumerGroupsContainer.ts",
"kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/__test__/TopicConsumerGroups.spec.tsx",
"kafka-ui-react-app/src/redux/interfaces/topic.ts",
"kafka-ui-react-app/src/redux/reducers/topics/selectors.ts"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/TopicConsumerGroups.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/TopicConsumerGroups.tsx
index ec65254113e..47c29cbe639 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/TopicConsumerGroups.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/TopicConsumerGroups.tsx
@@ -1,12 +1,15 @@
import React from 'react';
-import { Topic, TopicDetails, ConsumerGroup } from 'generated-sources';
+import {
+ Topic,
+ TopicDetails,
+ ConsumerTopicPartitionDetail,
+} from 'generated-sources';
import { ClusterName, TopicName } from 'redux/interfaces';
-import ListItem from 'components/ConsumerGroups/List/ListItem';
interface Props extends Topic, TopicDetails {
clusterName: ClusterName;
topicName: TopicName;
- consumerGroups: Array<ConsumerGroup>;
+ consumerGroups: ConsumerTopicPartitionDetail[];
fetchTopicConsumerGroups(
clusterName: ClusterName,
topicName: TopicName
@@ -26,20 +29,29 @@ const TopicConsumerGroups: React.FC<Props> = ({
return (
<div className="box">
{consumerGroups.length > 0 ? (
- <table className="table is-striped is-fullwidth is-hoverable">
+ <table className="table is-striped is-fullwidth">
<thead>
<tr>
- <th>Consumer group ID</th>
- <th>Num of consumers</th>
- <th>Num of topics</th>
+ <th>Group ID</th>
+ <th>Consumer ID</th>
+ <th>Host</th>
+ <th>Partition</th>
+ <th>Messages behind</th>
+ <th>Current offset</th>
+ <th>End offset</th>
</tr>
</thead>
<tbody>
- {consumerGroups.map((consumerGroup) => (
- <ListItem
- key={consumerGroup.consumerGroupId}
- consumerGroup={consumerGroup}
- />
+ {consumerGroups.map((consumer) => (
+ <tr>
+ <td>{consumer.groupId}</td>
+ <td>{consumer.consumerId}</td>
+ <td>{consumer.host}</td>
+ <td>{consumer.partition}</td>
+ <td>{consumer.messagesBehind}</td>
+ <td>{consumer.currentOffset}</td>
+ <td>{consumer.endOffset}</td>
+ </tr>
))}
</tbody>
</table>
diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/TopicConsumerGroupsContainer.ts b/kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/TopicConsumerGroupsContainer.ts
index 79798528b07..e0f8d3d0be1 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/TopicConsumerGroupsContainer.ts
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/TopicConsumerGroupsContainer.ts
@@ -20,7 +20,7 @@ const mapStateToProps = (
},
}: OwnProps
) => ({
- consumerGroups: getTopicConsumerGroups(state),
+ consumerGroups: getTopicConsumerGroups(state, topicName),
topicName,
clusterName,
});
diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/__test__/TopicConsumerGroups.spec.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/__test__/TopicConsumerGroups.spec.tsx
index 7c7d1e729b6..17ad0c969e3 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/__test__/TopicConsumerGroups.spec.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/ConsumerGroups/__test__/TopicConsumerGroups.spec.tsx
@@ -8,8 +8,26 @@ describe('Details', () => {
const mockTopicName = 'local';
const mockWithConsumerGroup = [
{
- clusterId: '1',
- consumerGroupId: '1',
+ groupId: 'messages-consumer',
+ consumerId:
+ 'consumer-messages-consumer-1-122fbf98-643b-491d-8aec-c0641d2513d0',
+ topic: 'messages',
+ host: '/172.31.9.153',
+ partition: 6,
+ currentOffset: 394,
+ endOffset: 394,
+ messagesBehind: 0,
+ },
+ {
+ groupId: 'messages-consumer',
+ consumerId:
+ 'consumer-messages-consumer-1-122fbf98-643b-491d-8aec-c0641d2513d0',
+ topic: 'messages',
+ host: '/172.31.9.153',
+ partition: 7,
+ currentOffset: 384,
+ endOffset: 384,
+ messagesBehind: 0,
},
];
diff --git a/kafka-ui-react-app/src/redux/interfaces/topic.ts b/kafka-ui-react-app/src/redux/interfaces/topic.ts
index 90e7331897d..f56cff06b6c 100644
--- a/kafka-ui-react-app/src/redux/interfaces/topic.ts
+++ b/kafka-ui-react-app/src/redux/interfaces/topic.ts
@@ -7,6 +7,7 @@ import {
GetTopicMessagesRequest,
ConsumerGroup,
TopicColumnsToSort,
+ TopicConsumerGroups,
} from 'generated-sources';
export type TopicName = Topic['name'];
@@ -40,6 +41,7 @@ export interface TopicFormCustomParams {
export interface TopicWithDetailedInfo extends Topic, TopicDetails {
config?: TopicConfig[];
+ consumerGroups?: TopicConsumerGroups;
}
export interface TopicsState {
diff --git a/kafka-ui-react-app/src/redux/reducers/topics/selectors.ts b/kafka-ui-react-app/src/redux/reducers/topics/selectors.ts
index 16801fade4e..2936d1c2ac8 100644
--- a/kafka-ui-react-app/src/redux/reducers/topics/selectors.ts
+++ b/kafka-ui-react-app/src/redux/reducers/topics/selectors.ts
@@ -16,8 +16,6 @@ export const getTopicMessages = (state: RootState) =>
topicsState(state).messages;
export const getTopicListTotalPages = (state: RootState) =>
topicsState(state).totalPages;
-export const getTopicConsumerGroups = (state: RootState) =>
- topicsState(state).consumerGroups;
const getTopicListFetchingStatus = createFetchingSelector('GET_TOPICS');
const getTopicDetailsFetchingStatus =
@@ -138,3 +136,9 @@ export const getIsTopicInternal = createSelector(
getTopicByName,
({ internal }) => !!internal
);
+
+export const getTopicConsumerGroups = createSelector(
+ getTopicMap,
+ getTopicName,
+ (topics, topicName) => topics[topicName].consumerGroups?.consumers || []
+);
| null | train | train | 2021-06-25T12:22:47 | "2020-11-24T11:11:13Z" | soffest | train |
provectus/kafka-ui/581_586 | provectus/kafka-ui | provectus/kafka-ui/581 | provectus/kafka-ui/586 | [
"connected"
] | 50bf0f17804bc13329bddbd24f47be5014ed098f | 1a916ab247e0f221fd8d0f967de3b1206add10b9 | [
"Hi, @sashati. Thanks for creating this issue. I just checked and it works fine with http://localhost:8080/kafkaui/. So you should include trailing slash. Agree that this is not convenient if you use subpath. We'll fix it the next release. ",
"Hi @germanosin. Thanks for the info. Then I close this issue and hopefully see this resolved soon",
"@sashati fix is in master, you could use image with tag master to get these updates."
] | [] | "2021-06-25T12:19:52Z" | [
"type/bug",
"scope/backend"
] | Service is not working with Context Path | **Describe the bug**
When we run the service with `SERVER_SERVLET_CONTEXT_PATH` set to something like "/kafkaui", the service cannot start and all APIs return errors.
Here are the logs of the system:
```
09:01:47.771 [kafka-admin-client-thread | adminclient-1] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.name=root
09:01:47.771 [kafka-admin-client-thread | adminclient-1] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.home=/root
09:01:47.772 [kafka-admin-client-thread | adminclient-1] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.dir=/
09:01:47.772 [kafka-admin-client-thread | adminclient-1] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.memory.free=55MB
09:01:47.772 [kafka-admin-client-thread | adminclient-1] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.memory.max=1979MB
09:01:47.772 [kafka-admin-client-thread | adminclient-1] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.memory.total=123MB
09:01:47.776 [kafka-admin-client-thread | adminclient-1] INFO org.apache.zookeeper.ZooKeeper - Initiating client connection, connectString=cp-helm-charts-cp-zookeeper-headless.k
09:01:47.783 [kafka-admin-client-thread | adminclient-1] INFO org.apache.zookeeper.common.X509Util - Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client
09:01:47.789 [kafka-admin-client-thread | adminclient-1] INFO org.apache.zookeeper.ClientCnxnSocket - jute.maxbuffer value is 4194304 Bytes
09:01:47.799 [kafka-admin-client-thread | adminclient-1] INFO org.apache.zookeeper.ClientCnxn - zookeeper.request.timeout value is 0. feature enabled=
09:01:47.801 [kafka-admin-client-thread | adminclient-1] INFO org.I0Itec.zkclient.ZkClient - Waiting for keeper state SyncConnected
09:01:47.818 [kafka-admin-client-thread | adminclient-1-SendThread(cp-helm-charts-cp-zookeeper-headless.kafka-cluster:2181)] INFO org.apache.zookeeper.ClientCnxn - Opening socke
09:01:47.820 [kafka-admin-client-thread | adminclient-1-SendThread(cp-helm-charts-cp-zookeeper-headless.kafka-cluster:2181)] INFO org.apache.zookeeper.ClientCnxn - Socket connec
09:01:47.833 [kafka-admin-client-thread | adminclient-1-SendThread(cp-helm-charts-cp-zookeeper-headless.kafka-cluster:2181)] INFO org.apache.zookeeper.ClientCnxn - Session estab
09:01:47.837 [kafka-admin-client-thread | adminclient-1-EventThread] INFO org.I0Itec.zkclient.ZkClient - zookeeper state changed (SyncConnected)
09:01:47.837 [kafka-admin-client-thread | adminclient-1] DEBUG com.provectus.kafka.ui.service.ZookeeperService - Start getting Zookeeper metrics for kafkaCluster: Production
09:01:52.274 [boundedElastic-1] DEBUG org.springframework.http.codec.json.Jackson2JsonEncoder - [4a56d3f9] Encoding [class ErrorResponse {
code: 5000
message: 404 NOT_FOUND
timestamp: 1624438912139
req (truncated)...]
09:01:53.314 [boundedElastic-1] DEBUG org.springframework.http.codec.json.Jackson2JsonEncoder - [5da6b63f] Encoding [class ErrorResponse {
code: 5000
message: 404 NOT_FOUND
timestamp: 1624438913312
req (truncated)...]
09:02:01.918 [boundedElastic-1] DEBUG org.springframework.http.codec.json.Jackson2JsonEncoder - [dcda94e7] Encoding [class ErrorResponse {
code: 5000
message: 404 NOT_FOUND
timestamp: 1624438921918
req (truncated)...]
09:02:03.309 [boundedElastic-1] DEBUG org.springframework.http.codec.json.Jackson2JsonEncoder - [864b2c34] Encoding [class ErrorResponse {
code: 5000
message: 404 NOT_FOUND
timestamp: 1624438923308
req (truncated)...]
```
| [
"docker/kafka-ui-sasl.yaml",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CustomWebFilter.java"
] | [
"docker/kafka-ui-sasl.yaml",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CustomWebFilter.java"
] | [] | diff --git a/docker/kafka-ui-sasl.yaml b/docker/kafka-ui-sasl.yaml
index 87b1094be7f..1c0312f11a2 100644
--- a/docker/kafka-ui-sasl.yaml
+++ b/docker/kafka-ui-sasl.yaml
@@ -12,6 +12,7 @@ services:
- kafka
environment:
KAFKA_CLUSTERS_0_NAME: local
+# SERVER_SERVLET_CONTEXT_PATH: "/kafkaui"
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper:2181
KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SASL_PLAINTEXT
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CustomWebFilter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CustomWebFilter.java
index dfe9c4c13f1..5efee1db8de 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CustomWebFilter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CustomWebFilter.java
@@ -1,5 +1,6 @@
package com.provectus.kafka.ui.config;
+import java.util.Optional;
import org.springframework.boot.autoconfigure.web.ServerProperties;
import org.springframework.stereotype.Component;
import org.springframework.web.server.ServerWebExchange;
@@ -22,13 +23,13 @@ public Mono<Void> filter(ServerWebExchange exchange, WebFilterChain chain) {
String contextPath = serverProperties.getServlet().getContextPath() != null
? serverProperties.getServlet().getContextPath() : "";
- if (exchange.getRequest().getURI().getPath().equals(contextPath + "/")
- || exchange.getRequest().getURI().getPath().startsWith(contextPath + "/ui")) {
+ final String path = exchange.getRequest().getURI().getPath().replaceAll("/$", "");
+ if (path.equals(contextPath) || path.startsWith(contextPath + "/ui")) {
return chain.filter(
exchange.mutate().request(exchange.getRequest().mutate().path("/index.html").build())
.build()
);
- } else if (exchange.getRequest().getURI().getPath().startsWith(contextPath)) {
+ } else if (path.startsWith(contextPath)) {
return chain.filter(
exchange.mutate().request(exchange.getRequest().mutate().contextPath(contextPath).build())
.build()
| null | train | train | 2021-06-25T12:22:47 | "2021-06-23T09:03:11Z" | sashati | train |
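To make the behaviour discussed in the record above concrete: before the fix only the trailing-slash URL worked when `SERVER_SERVLET_CONTEXT_PATH` was set, while after it both forms should serve the UI. The sketch below is illustrative only — the broker settings are minimal placeholders and the `master` tag simply follows the maintainer's comment in the hints.

```bash
# Run the UI under a sub-path (context path value taken from the issue)
docker run -p 8080:8080 \
  -e KAFKA_CLUSTERS_0_NAME=local \
  -e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092 \
  -e SERVER_SERVLET_CONTEXT_PATH=/kafkaui \
  provectuslabs/kafka-ui:master

# With the fix both of these should return the UI rather than an error
curl -I http://localhost:8080/kafkaui
curl -I http://localhost:8080/kafkaui/
```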
provectus/kafka-ui/122_601 | provectus/kafka-ui | provectus/kafka-ui/122 | provectus/kafka-ui/601 | [
"keyword_pr_to_issue"
] | 97ec512b0027d626d05350bc00c01e61d272b9aa | 3fa2b995c6dbbd949e0367b5366f4bc743d04ab0 | [
"Current state of this task:\r\nNeed to figure out a way to read a set of topic messages from newer ones to older ones on BE, and then update existing FE request to properly work with new API.",
"Waiting fronted..."
] | [] | "2021-06-29T15:50:46Z" | [
"type/enhancement",
"scope/backend",
"scope/frontend"
] | Topic messages: Newest first | Change sorting of topic messages - sort by offset in descending order (latest messages first). | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java",
"kafka-ui-api/src/main/resources/application-sdp.yml"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java",
"kafka-ui-api/src/main/resources/application-sdp.yml"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java
index ee407a167f3..088366c238e 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java
@@ -40,42 +40,44 @@ public void accept(FluxSink<ConsumerRecord<Bytes, Bytes>> sink) {
) {
final Map<TopicPartition, Long> partitionsOffsets =
offsetsSeek.getPartitionsOffsets(consumer);
- log.info("partition offsets: {}", partitionsOffsets);
+ log.debug("partition offsets: {}", partitionsOffsets);
var waitingOffsets =
offsetsSeek.waitingOffsets(consumer, partitionsOffsets.keySet());
- log.info("waittin offsets {} {}",
+ log.debug("waittin offsets {} {}",
waitingOffsets.getBeginOffsets(),
waitingOffsets.getEndOffsets()
);
while (!sink.isCancelled() && !waitingOffsets.beginReached()) {
for (Map.Entry<TopicPartition, Long> entry : partitionsOffsets.entrySet()) {
final Long lowest = waitingOffsets.getBeginOffsets().get(entry.getKey().partition());
- consumer.assign(Collections.singleton(entry.getKey()));
- final long offset = Math.max(lowest, entry.getValue() - msgsPerPartition);
- log.info("Polling {} from {}", entry.getKey(), offset);
- consumer.seek(entry.getKey(), offset);
- ConsumerRecords<Bytes, Bytes> records = consumer.poll(POLL_TIMEOUT_MS);
- final List<ConsumerRecord<Bytes, Bytes>> partitionRecords =
- records.records(entry.getKey()).stream()
- .filter(r -> r.offset() < partitionsOffsets.get(entry.getKey()))
- .collect(Collectors.toList());
- Collections.reverse(partitionRecords);
+ if (lowest != null) {
+ consumer.assign(Collections.singleton(entry.getKey()));
+ final long offset = Math.max(lowest, entry.getValue() - msgsPerPartition);
+ log.debug("Polling {} from {}", entry.getKey(), offset);
+ consumer.seek(entry.getKey(), offset);
+ ConsumerRecords<Bytes, Bytes> records = consumer.poll(POLL_TIMEOUT_MS);
+ final List<ConsumerRecord<Bytes, Bytes>> partitionRecords =
+ records.records(entry.getKey()).stream()
+ .filter(r -> r.offset() < partitionsOffsets.get(entry.getKey()))
+ .collect(Collectors.toList());
+ Collections.reverse(partitionRecords);
- log.info("{} records polled", records.count());
- log.info("{} records sent", partitionRecords.size());
- for (ConsumerRecord<Bytes, Bytes> msg : partitionRecords) {
- if (!sink.isCancelled() && !waitingOffsets.beginReached()) {
- sink.next(msg);
- waitingOffsets.markPolled(msg);
- } else {
- log.info("Begin reached");
- break;
+ log.debug("{} records polled", records.count());
+ log.debug("{} records sent", partitionRecords.size());
+ for (ConsumerRecord<Bytes, Bytes> msg : partitionRecords) {
+ if (!sink.isCancelled() && !waitingOffsets.beginReached()) {
+ sink.next(msg);
+ waitingOffsets.markPolled(msg);
+ } else {
+ log.info("Begin reached");
+ break;
+ }
}
+ partitionsOffsets.put(
+ entry.getKey(),
+ Math.max(offset, entry.getValue() - msgsPerPartition)
+ );
}
- partitionsOffsets.put(
- entry.getKey(),
- Math.max(offset, entry.getValue() - msgsPerPartition)
- );
}
if (waitingOffsets.beginReached()) {
log.info("begin reached after partitions");
diff --git a/kafka-ui-api/src/main/resources/application-sdp.yml b/kafka-ui-api/src/main/resources/application-sdp.yml
index cde2ab2e593..71af1078f9d 100644
--- a/kafka-ui-api/src/main/resources/application-sdp.yml
+++ b/kafka-ui-api/src/main/resources/application-sdp.yml
@@ -3,7 +3,7 @@ kafka:
- name: local
bootstrapServers: b-1.kad-msk.uxahxx.c6.kafka.eu-west-1.amazonaws.com:9092
# zookeeper: localhost:2181
-# schemaRegistry: http://localhost:8083
+ schemaRegistry: http://kad-ecs-application-lb-857515197.eu-west-1.elb.amazonaws.com:9000/api/schema-registry
# -
# name: secondLocal
# zookeeper: zookeeper1:2181
| null | train | train | 2021-06-29T08:52:18 | "2020-11-24T11:15:50Z" | soffest | train |
provectus/kafka-ui/122_607 | provectus/kafka-ui | provectus/kafka-ui/122 | provectus/kafka-ui/607 | [
"keyword_pr_to_issue"
] | 64a5985e819ec3811de3b9278f9fdf472970fd8a | ca476d337337de4721552d962d9056f69b7ab113 | [
"Current state of this task:\r\nNeed to figure out a way to read a set of topic messages from newer ones to older ones on BE, and then update existing FE request to properly work with new API.",
"Waiting fronted..."
] | [] | "2021-07-02T11:35:15Z" | [
"type/enhancement",
"scope/backend",
"scope/frontend"
] | Topic messages: Newest first | Change sorting of topic messages - sort by offset in descending order (latest messages first). | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java
index 088366c238e..8bf2bab547f 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java
@@ -64,6 +64,13 @@ public void accept(FluxSink<ConsumerRecord<Bytes, Bytes>> sink) {
log.debug("{} records polled", records.count());
log.debug("{} records sent", partitionRecords.size());
+
+ // This is workaround for case when partition begin offset is less than
+ // real minimal offset, usually appear in compcated topics
+ if (records.count() > 0 && partitionRecords.isEmpty()) {
+ waitingOffsets.markPolled(entry.getKey().partition());
+ }
+
for (ConsumerRecord<Bytes, Bytes> msg : partitionRecords) {
if (!sink.isCancelled() && !waitingOffsets.beginReached()) {
sink.next(msg);
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java
index 17a496d53bb..6cc275c4c81 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java
@@ -110,6 +110,11 @@ public List<TopicPartition> topicPartitions() {
.collect(Collectors.toList());
}
+ public void markPolled(int partition) {
+ endOffsets.remove(partition);
+ beginOffsets.remove(partition);
+ }
+
public void markPolled(ConsumerRecord<?, ?> rec) {
Long endWaiting = endOffsets.get(rec.partition());
if (endWaiting != null && endWaiting <= rec.offset()) {
| null | train | train | 2021-07-02T10:05:59 | "2020-11-24T11:15:50Z" | soffest | train |
provectus/kafka-ui/633_638 | provectus/kafka-ui | provectus/kafka-ui/633 | provectus/kafka-ui/638 | [
"timestamp(timedelta=0.0, similarity=0.8817624752761971)",
"connected"
] | 66228b00d5ea715f1b94bb8b53fc73bf525ebc74 | b7a1d1143def37b4ed9606e0b7d9f11e9d3150c6 | [] | [] | "2021-07-06T14:14:22Z" | [
"type/enhancement",
"good first issue",
"scope/frontend"
] | Display number of messages & size in topic list | ### Describe the solution you'd like
Display "Number of messages" and "Size" columns on the topic list page.
The number of messages can be calculated from the partitions: the backend provides a list of partitions with min/max offsets, so
number of messages = sum(partition max offset - partition min offset)
Size comes from the segmentSize field (it's in bytes; let's display it as KB/MB/GB...)
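As a purely illustrative worked example (the numbers below are made up, not taken from a real cluster): a topic with two partitions where partition 0 has offsetMin = 100 / offsetMax = 250 and partition 1 has offsetMin = 0 / offsetMax = 50 would show (250 - 100) + (50 - 0) = 200 messages, and a segmentSize of 1 073 741 824 bytes would be displayed as 1 GB.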
| [
"kafka-ui-react-app/src/components/Topics/List/List.tsx",
"kafka-ui-react-app/src/components/Topics/List/ListItem.tsx",
"kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap"
] | [
"kafka-ui-react-app/src/components/Topics/List/List.tsx",
"kafka-ui-react-app/src/components/Topics/List/ListItem.tsx",
"kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/List/List.tsx b/kafka-ui-react-app/src/components/Topics/List/List.tsx
index 99784bb869f..8bc231a98d9 100644
--- a/kafka-ui-react-app/src/components/Topics/List/List.tsx
+++ b/kafka-ui-react-app/src/components/Topics/List/List.tsx
@@ -135,6 +135,8 @@ const List: React.FC<Props> = ({
orderBy={orderBy}
setOrderBy={setTopicsOrderBy}
/>
+ <th>Number of messages</th>
+ <th>Size</th>
<th>Type</th>
<th> </th>
</tr>
diff --git a/kafka-ui-react-app/src/components/Topics/List/ListItem.tsx b/kafka-ui-react-app/src/components/Topics/List/ListItem.tsx
index 07074be71fa..6a086771a91 100644
--- a/kafka-ui-react-app/src/components/Topics/List/ListItem.tsx
+++ b/kafka-ui-react-app/src/components/Topics/List/ListItem.tsx
@@ -10,6 +10,7 @@ import DropdownItem from 'components/common/Dropdown/DropdownItem';
import Dropdown from 'components/common/Dropdown/Dropdown';
import ConfirmationModal from 'components/common/ConfirmationModal/ConfirmationModal';
import ClusterContext from 'components/contexts/ClusterContext';
+import BytesFormatted from 'components/common/BytesFormatted/BytesFormatted';
export interface ListItemProps {
topic: TopicWithDetailedInfo;
@@ -19,7 +20,7 @@ export interface ListItemProps {
}
const ListItem: React.FC<ListItemProps> = ({
- topic: { name, internal, partitions },
+ topic: { name, internal, partitions, segmentSize },
deleteTopic,
clusterName,
clearTopicMessages,
@@ -29,15 +30,27 @@ const ListItem: React.FC<ListItemProps> = ({
const [isDeleteTopicConfirmationVisible, setDeleteTopicConfirmationVisible] =
React.useState(false);
- const outOfSyncReplicas = React.useMemo(() => {
+ const { outOfSyncReplicas, numberOfMessages } = React.useMemo(() => {
if (partitions === undefined || partitions.length === 0) {
- return 0;
+ return {
+ outOfSyncReplicas: 0,
+ numberOfMessages: 0,
+ };
}
- return partitions.reduce((memo: number, { replicas }) => {
- const outOfSync = replicas?.filter(({ inSync }) => !inSync);
- return memo + (outOfSync?.length || 0);
- }, 0);
+ return partitions.reduce(
+ (memo, { replicas, offsetMax, offsetMin }) => {
+ const outOfSync = replicas?.filter(({ inSync }) => !inSync);
+ return {
+ outOfSyncReplicas: memo.outOfSyncReplicas + (outOfSync?.length || 0),
+ numberOfMessages: memo.numberOfMessages + (offsetMax - offsetMin),
+ };
+ },
+ {
+ outOfSyncReplicas: 0,
+ numberOfMessages: 0,
+ }
+ );
}, [partitions]);
const deleteTopicHandler = React.useCallback(() => {
@@ -62,6 +75,10 @@ const ListItem: React.FC<ListItemProps> = ({
</td>
<td>{partitions?.length}</td>
<td>{outOfSyncReplicas}</td>
+ <td>{numberOfMessages}</td>
+ <td>
+ <BytesFormatted value={segmentSize} />
+ </td>
<td>
<div className={cx('tag', internal ? 'is-light' : 'is-primary')}>
{internal ? 'Internal' : 'External'}
diff --git a/kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap b/kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap
index 27706ba2831..17a86b1f2fe 100644
--- a/kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap
+++ b/kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap
@@ -205,6 +205,12 @@ exports[`List when it does not have readonly flag matches the snapshot 1`] = `
</span>
</th>
</ListHeaderCell>
+ <th>
+ Number of messages
+ </th>
+ <th>
+ Size
+ </th>
<th>
Type
</th>
| null | train | train | 2021-07-05T13:32:01 | "2021-07-05T15:17:39Z" | germanosin | train |
provectus/kafka-ui/641_642 | provectus/kafka-ui | provectus/kafka-ui/641 | provectus/kafka-ui/642 | [
"connected",
"timestamp(timedelta=0.0, similarity=0.9088663318805024)"
] | b7a1d1143def37b4ed9606e0b7d9f11e9d3150c6 | e5f1e47c9987849db02be9e3f9b47f050f35624a | [] | [
"```suggestion\r\n const arr = c.spilt('-')[0].split('.');\r\n```",
"Is it a good place to use useMemo?",
"let's wrap all this function to try .. catch",
"We should be ready to case when customer runs our App internally without internet. "
] | "2021-07-07T11:49:29Z" | [
"type/enhancement"
] | Display warning for users who use outdated version of app | ### Is your proposal related to a problem?
<!--
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
-->
Some users use an outdated version of the app
### Describe the solution you'd like
<!--
Provide a clear and concise description of what you want to happen.
-->
Show the user info about the latest release | [
"kafka-ui-react-app/src/components/Version/Version.tsx",
"kafka-ui-react-app/src/lib/constants.ts"
] | [
"kafka-ui-react-app/src/components/Version/Version.tsx",
"kafka-ui-react-app/src/components/Version/__tests__/compareVersions.spec.ts",
"kafka-ui-react-app/src/components/Version/compareVersions.ts",
"kafka-ui-react-app/src/lib/constants.ts"
] | [] | diff --git a/kafka-ui-react-app/src/components/Version/Version.tsx b/kafka-ui-react-app/src/components/Version/Version.tsx
index dfdb94fd74c..d833d16b1a1 100644
--- a/kafka-ui-react-app/src/components/Version/Version.tsx
+++ b/kafka-ui-react-app/src/components/Version/Version.tsx
@@ -1,5 +1,8 @@
-import React from 'react';
+import React, { useEffect, useState } from 'react';
import { gitCommitPath } from 'lib/paths';
+import { GIT_REPO_LATEST_RELEASE_LINK } from 'lib/constants';
+
+import compareVersions from './compareVersions';
export interface VesionProps {
tag?: string;
@@ -7,14 +10,40 @@ export interface VesionProps {
}
const Version: React.FC<VesionProps> = ({ tag, commit }) => {
+ const [latestVersionInfo, setLatestVersionInfo] = useState({
+ outdated: false,
+ latestTag: '',
+ });
+ useEffect(() => {
+ if (tag) {
+ fetch(GIT_REPO_LATEST_RELEASE_LINK)
+ .then((response) => response.json())
+ .then((data) => {
+ setLatestVersionInfo({
+ outdated: compareVersions(tag, data.tag_name) === -1,
+ latestTag: data.tag_name,
+ });
+ });
+ }
+ }, [tag]);
if (!tag) {
return null;
}
+ const { outdated, latestTag } = latestVersionInfo;
+
return (
<div className="is-size-7 has-text-grey">
<span className="has-text-grey-light mr-1">Version:</span>
<span className="mr-1">{tag}</span>
+ {outdated && (
+ <span
+ className="icon has-text-warning"
+ title={`Your app version is outdated. Current latest version is ${latestTag}`}
+ >
+ <i className="fas fa-exclamation-triangle" />
+ </span>
+ )}
{commit && (
<>
<span>(</span>
diff --git a/kafka-ui-react-app/src/components/Version/__tests__/compareVersions.spec.ts b/kafka-ui-react-app/src/components/Version/__tests__/compareVersions.spec.ts
new file mode 100644
index 00000000000..807d247e77a
--- /dev/null
+++ b/kafka-ui-react-app/src/components/Version/__tests__/compareVersions.spec.ts
@@ -0,0 +1,69 @@
+// eslint-disable-next-line @typescript-eslint/ban-ts-comment
+// @ts-nocheck
+
+import compareVersions from 'components/Version/compareVersions';
+
+describe('compareVersions function', () => {
+ it('single-segment versions', () => {
+ expect(compareVersions('10', '9')).toEqual(1);
+ expect(compareVersions('10', '10')).toEqual(0);
+ expect(compareVersions('9', '10')).toEqual(-1);
+ });
+
+ it('two-segment versions', () => {
+ expect(compareVersions('10.8', '10.4')).toEqual(1);
+ expect(compareVersions('10.1', '10.1')).toEqual(0);
+ expect(compareVersions('10.1', '10.2')).toEqual(-1);
+ });
+
+ it('three-segment versions', () => {
+ expect(compareVersions('10.1.8', '10.0.4')).toEqual(1);
+ expect(compareVersions('10.0.1', '10.0.1')).toEqual(0);
+ expect(compareVersions('10.1.1', '10.2.2')).toEqual(-1);
+ });
+
+ it('four-segment versions', () => {
+ expect(compareVersions('1.0.0.0', '1')).toEqual(0);
+ expect(compareVersions('1.0.0.0', '1.0')).toEqual(0);
+ expect(compareVersions('1.0.0.0', '1.0.0')).toEqual(0);
+ expect(compareVersions('1.0.0.0', '1.0.0.0')).toEqual(0);
+ expect(compareVersions('1.2.3.4', '1.2.3.4')).toEqual(0);
+ expect(compareVersions('1.2.3.4', '1.2.3.04')).toEqual(0);
+ expect(compareVersions('v1.2.3.4', '01.2.3.4')).toEqual(0);
+
+ expect(compareVersions('1.2.3.4', '1.2.3.5')).toEqual(-1);
+ expect(compareVersions('1.2.3.5', '1.2.3.4')).toEqual(1);
+ expect(compareVersions('1.0.0.0-alpha', '1.0.0-alpha')).toEqual(0);
+ expect(compareVersions('1.0.0.0-alpha', '1.0.0.0-beta')).toEqual(0);
+ });
+
+ it('different number of digits in same group', () => {
+ expect(compareVersions('11.0.10', '11.0.2')).toEqual(1);
+ expect(compareVersions('11.0.2', '11.0.10')).toEqual(-1);
+ });
+
+ it('different number of digits in different groups', () => {
+ expect(compareVersions('11.1.10', '11.0')).toEqual(1);
+ });
+
+ it('different number of digits', () => {
+ expect(compareVersions('1.1.1', '1')).toEqual(1);
+ expect(compareVersions('1.0.0', '1')).toEqual(0);
+ expect(compareVersions('1.0', '1.4.1')).toEqual(-1);
+ });
+
+ it('ignore non-numeric characters', () => {
+ expect(compareVersions('1.0.0-alpha.1', '1.0.0-alpha')).toEqual(0);
+ expect(compareVersions('1.0.0-rc', '1.0.0')).toEqual(0);
+ expect(compareVersions('1.0.0-alpha', '1')).toEqual(0);
+ expect(compareVersions('v1.0.0', '1.0.0')).toEqual(0);
+ });
+
+ it('returns valid result (negative test cases)', () => {
+ expect(compareVersions(123, 'v0.0.0')).toEqual(0);
+ expect(compareVersions(undefined, 'v0.0.0')).toEqual(0);
+ expect(compareVersions('v0.0.0', 123)).toEqual(0);
+ expect(compareVersions('v0.0.0', undefined)).toEqual(0);
+ expect(compareVersions(undefined, undefined)).toEqual(0);
+ });
+});
diff --git a/kafka-ui-react-app/src/components/Version/compareVersions.ts b/kafka-ui-react-app/src/components/Version/compareVersions.ts
new file mode 100644
index 00000000000..72a8db926f7
--- /dev/null
+++ b/kafka-ui-react-app/src/components/Version/compareVersions.ts
@@ -0,0 +1,25 @@
+const split = (v: string): string[] => {
+ const c = v.replace(/^v/, '').replace(/\+.*$/, '');
+ return c.split('-')[0].split('.');
+};
+
+const compareVersions = (v1: string, v2: string): number => {
+ try {
+ const s1 = split(v1);
+ const s2 = split(v2);
+
+ for (let i = 0; i < Math.max(s1.length, s2.length); i += 1) {
+ const n1 = parseInt(s1[i] || '0', 10);
+ const n2 = parseInt(s2[i] || '0', 10);
+
+ if (n1 > n2) return 1;
+ if (n2 > n1) return -1;
+ }
+
+ return 0;
+ } catch (_) {
+ return 0;
+ }
+};
+
+export default compareVersions;
diff --git a/kafka-ui-react-app/src/lib/constants.ts b/kafka-ui-react-app/src/lib/constants.ts
index 7ee0e6de035..f93abfa0fdd 100644
--- a/kafka-ui-react-app/src/lib/constants.ts
+++ b/kafka-ui-react-app/src/lib/constants.ts
@@ -51,5 +51,7 @@ export const BYTES_IN_GB = 1_073_741_824;
export const PER_PAGE = 25;
export const GIT_REPO_LINK = 'https://github.com/provectus/kafka-ui';
+export const GIT_REPO_LATEST_RELEASE_LINK =
+ 'https://api.github.com/repos/provectus/kafka-ui/releases/latest';
export const GIT_TAG = process.env.REACT_APP_TAG;
export const GIT_COMMIT = process.env.REACT_APP_COMMIT;
| null | test | train | 2021-07-07T11:06:41 | "2021-07-07T09:05:16Z" | workshur | train |
provectus/kafka-ui/632_646 | provectus/kafka-ui | provectus/kafka-ui/632 | provectus/kafka-ui/646 | [
"connected"
] | 0ec40dd7e46c9a3a0645070e861bc24e63c7eef8 | df1d3bbfc7409c147f4a66b4c1a34134804fbab7 | [] | [] | "2021-07-07T15:25:37Z" | [
"type/enhancement",
"scope/backend"
] | Add an ability to sort topic list by replication factor | ### Is your proposal related to a problem?
No
### Describe the solution you'd like
Add the ability to sort the topic list by replication factor.
### Additional context
The UI will be updated in the context of https://github.com/provectus/kafka-ui/issues/608 | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
index c68c5900ca3..11217e0a919 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
@@ -142,6 +142,8 @@ private Comparator<InternalTopic> getComparatorForTopic(Optional<TopicColumnsToS
return Comparator.comparing(InternalTopic::getPartitionCount);
case OUT_OF_SYNC_REPLICAS:
return Comparator.comparing(t -> t.getReplicas() - t.getInSyncReplicas());
+ case REPLICATION_FACTOR:
+ return Comparator.comparing(InternalTopic::getReplicationFactor);
case NAME:
default:
return defaultComparator;
diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
index 9c75c110da4..25d4fb89604 100644
--- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
+++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
@@ -1457,6 +1457,7 @@ components:
- NAME
- OUT_OF_SYNC_REPLICAS
- TOTAL_PARTITIONS
+ - REPLICATION_FACTOR
Topic:
type: object
| null | train | train | 2021-07-07T16:59:52 | "2021-07-05T11:55:36Z" | workshur | train |
provectus/kafka-ui/643_651 | provectus/kafka-ui | provectus/kafka-ui/643 | provectus/kafka-ui/651 | [
"timestamp(timedelta=0.0, similarity=0.863213758712552)",
"connected"
] | 13463fe95f8c6c375b09d47e7da41a87b8a47134 | 73d4e4894199ddb8ad85e475a164ee3150689918 | [] | [] | "2021-07-09T17:59:50Z" | [
"type/bug",
"scope/backend"
] | JsonSchema is not working properly with schema registry | **Describe the bug**
JSON messages serialized via Schema Registry carry a 5-byte prefix containing a magic byte and the registered schema ID. This can break the message reader.
Let's add tests to cover this case.
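For context, below is a minimal sketch (not part of the linked patch; the class and method names are made up for illustration) of how that prefix can be inspected with a plain `ByteBuffer`, assuming the standard Confluent wire format of one magic byte followed by a 4-byte big-endian schema id:

```java
import java.nio.ByteBuffer;

// Illustrative only: split a schema-registry-framed record into its prefix and payload.
public final class WireFormatPrefix {

  public static void describe(byte[] serialized) {
    ByteBuffer buf = ByteBuffer.wrap(serialized);
    byte magicByte = buf.get();   // expected to be 0 for schema-registry-aware serializers
    int schemaId = buf.getInt();  // 4-byte id of the schema stored in Schema Registry
    byte[] payload = new byte[buf.remaining()];
    buf.get(payload);             // the remaining bytes are the message body (the JSON document here)
    System.out.printf("magic=%d, schemaId=%d, payload bytes=%d%n", magicByte, schemaId, payload.length);
  }
}
```

A reader that feeds the raw bytes straight into a JSON parser will trip over this prefix, which is why the serde has to strip it (or delegate to the schema-registry deserializer) before treating the rest as JSON.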
| [
"kafka-ui-api/pom.xml",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/AvroMessageReader.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageFormat.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageReader.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/ProtobufMessageReader.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java"
] | [
"kafka-ui-api/pom.xml",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/AvroMessageReader.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/JsonSchemaMessageFormatter.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/JsonSchemaMessageReader.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageFormat.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageReader.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/ProtobufMessageReader.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/annotations/KafkaClientInternalsDependant.java"
] | [
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java"
] | diff --git a/kafka-ui-api/pom.xml b/kafka-ui-api/pom.xml
index c658e6a23cb..41ca96f7446 100644
--- a/kafka-ui-api/pom.xml
+++ b/kafka-ui-api/pom.xml
@@ -86,6 +86,11 @@
<artifactId>kafka-avro-serializer</artifactId>
<version>${confluent.version}</version>
</dependency>
+ <dependency>
+ <groupId>io.confluent</groupId>
+ <artifactId>kafka-json-schema-serializer</artifactId>
+ <version>${confluent.version}</version>
+ </dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-protobuf-serializer</artifactId>
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/AvroMessageReader.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/AvroMessageReader.java
index d89792159f5..fcf5173a27f 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/AvroMessageReader.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/AvroMessageReader.java
@@ -7,6 +7,7 @@
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
import io.confluent.kafka.serializers.KafkaAvroSerializer;
+import io.confluent.kafka.serializers.KafkaAvroSerializerConfig;
import java.io.IOException;
import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;
@@ -23,8 +24,14 @@ public AvroMessageReader(String topic, boolean isKey,
@Override
protected Serializer<Object> createSerializer(SchemaRegistryClient client) {
var serializer = new KafkaAvroSerializer(client);
- // need to call configure to set isKey property
- serializer.configure(Map.of("schema.registry.url", "wontbeused"), isKey);
+ serializer.configure(
+ Map.of(
+ "schema.registry.url", "wontbeused",
+ KafkaAvroSerializerConfig.AUTO_REGISTER_SCHEMAS, false,
+ KafkaAvroSerializerConfig.USE_LATEST_VERSION, true
+ ),
+ isKey
+ );
return serializer;
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/JsonSchemaMessageFormatter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/JsonSchemaMessageFormatter.java
new file mode 100644
index 00000000000..3435851b4ee
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/JsonSchemaMessageFormatter.java
@@ -0,0 +1,20 @@
+package com.provectus.kafka.ui.serde.schemaregistry;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
+import io.confluent.kafka.serializers.json.KafkaJsonSchemaDeserializer;
+
+public class JsonSchemaMessageFormatter implements MessageFormatter {
+
+ private final KafkaJsonSchemaDeserializer<JsonNode> jsonSchemaDeserializer;
+
+ public JsonSchemaMessageFormatter(SchemaRegistryClient client) {
+ this.jsonSchemaDeserializer = new KafkaJsonSchemaDeserializer<>(client);
+ }
+
+ @Override
+ public String format(String topic, byte[] value) {
+ JsonNode json = jsonSchemaDeserializer.deserialize(topic, value);
+ return json.toString();
+ }
+}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/JsonSchemaMessageReader.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/JsonSchemaMessageReader.java
new file mode 100644
index 00000000000..1cecdf7fd5e
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/JsonSchemaMessageReader.java
@@ -0,0 +1,81 @@
+package com.provectus.kafka.ui.serde.schemaregistry;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.provectus.kafka.ui.exception.ValidationException;
+import com.provectus.kafka.ui.util.annotations.KafkaClientInternalsDependant;
+import io.confluent.kafka.schemaregistry.ParsedSchema;
+import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
+import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
+import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
+import io.confluent.kafka.schemaregistry.json.JsonSchema;
+import io.confluent.kafka.serializers.json.KafkaJsonSchemaSerializer;
+import io.confluent.kafka.serializers.json.KafkaJsonSchemaSerializerConfig;
+import java.io.IOException;
+import java.util.Map;
+import org.apache.kafka.common.serialization.Serializer;
+
+public class JsonSchemaMessageReader extends MessageReader<JsonNode> {
+
+ private static final ObjectMapper MAPPER = new ObjectMapper();
+
+ public JsonSchemaMessageReader(String topic,
+ boolean isKey,
+ SchemaRegistryClient client,
+ SchemaMetadata schema) throws IOException, RestClientException {
+ super(topic, isKey, client, schema);
+ }
+
+ @Override
+ protected Serializer<JsonNode> createSerializer(SchemaRegistryClient client) {
+ var serializer = new KafkaJsonSchemaSerializerWithoutSchemaInfer(client);
+ serializer.configure(
+ Map.of(
+ "schema.registry.url", "wontbeused",
+ KafkaJsonSchemaSerializerConfig.AUTO_REGISTER_SCHEMAS, false,
+ KafkaJsonSchemaSerializerConfig.USE_LATEST_VERSION, true
+ ),
+ isKey
+ );
+ return serializer;
+ }
+
+ @Override
+ protected JsonNode read(String value, ParsedSchema schema) {
+ try {
+ JsonNode json = MAPPER.readTree(value);
+ ((JsonSchema) schema).validate(json);
+ return json;
+ } catch (JsonProcessingException e) {
+ throw new ValidationException(String.format("'%s' is not valid json", value));
+ } catch (org.everit.json.schema.ValidationException e) {
+ throw new ValidationException(
+ String.format("'%s' does not fit schema: %s", value, e.getAllMessages()));
+ }
+ }
+
+ @KafkaClientInternalsDependant
+ private class KafkaJsonSchemaSerializerWithoutSchemaInfer
+ extends KafkaJsonSchemaSerializer<JsonNode> {
+
+ KafkaJsonSchemaSerializerWithoutSchemaInfer(SchemaRegistryClient client) {
+ super(client);
+ }
+
+ /**
+ * Need to override original method because it tries to infer schema from input
+ * by checking 'schema' json field or @Schema annotation on input class, which is not
+ * possible in our case. So, we just skip all infer logic and pass schema directly.
+ */
+ @Override
+ public byte[] serialize(String topic, JsonNode record) {
+ return super.serializeImpl(
+ super.getSubjectName(topic, isKey, record, schema),
+ record,
+ (JsonSchema) schema
+ );
+ }
+ }
+
+}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageFormat.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageFormat.java
index a14bbcb650d..b1e875b7609 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageFormat.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageFormat.java
@@ -3,6 +3,5 @@
public enum MessageFormat {
AVRO,
JSON,
- STRING,
PROTOBUF
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageReader.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageReader.java
index 334e824200f..c6cb1e4606d 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageReader.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageReader.java
@@ -11,7 +11,7 @@ public abstract class MessageReader<T> {
protected final Serializer<T> serializer;
protected final String topic;
protected final boolean isKey;
- private final ParsedSchema schema;
+ protected final ParsedSchema schema;
protected MessageReader(String topic, boolean isKey, SchemaRegistryClient client,
SchemaMetadata schema) throws IOException, RestClientException {
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/ProtobufMessageReader.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/ProtobufMessageReader.java
index 6a59bce8007..ce3150467c7 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/ProtobufMessageReader.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/ProtobufMessageReader.java
@@ -9,6 +9,7 @@
import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import io.confluent.kafka.serializers.protobuf.KafkaProtobufSerializer;
+import io.confluent.kafka.serializers.protobuf.KafkaProtobufSerializerConfig;
import java.io.IOException;
import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;
@@ -24,8 +25,14 @@ public ProtobufMessageReader(String topic, boolean isKey,
@Override
protected Serializer<Message> createSerializer(SchemaRegistryClient client) {
var serializer = new KafkaProtobufSerializer<>(client);
- // need to call configure to set isKey property
- serializer.configure(Map.of("schema.registry.url", "wontbeused"), isKey);
+ serializer.configure(
+ Map.of(
+ "schema.registry.url", "wontbeused",
+ KafkaProtobufSerializerConfig.AUTO_REGISTER_SCHEMAS, false,
+ KafkaProtobufSerializerConfig.USE_LATEST_VERSION, true
+ ),
+ isKey
+ );
return serializer;
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java
index 68185ddc994..08aef7455ff 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java
@@ -16,6 +16,7 @@
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
+import io.confluent.kafka.schemaregistry.json.JsonSchemaProvider;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaProvider;
import java.net.URI;
@@ -52,6 +53,9 @@ public class SchemaRegistryAwareRecordSerDe implements RecordSerDe {
@Nullable
private final ProtobufMessageFormatter protobufFormatter;
+ @Nullable
+ private final JsonSchemaMessageFormatter jsonSchemaMessageFormatter;
+
private final StringMessageFormatter stringFormatter = new StringMessageFormatter();
private final ProtobufSchemaConverter protoSchemaConverter = new ProtobufSchemaConverter();
private final AvroJsonSchemaConverter avroSchemaConverter = new AvroJsonSchemaConverter();
@@ -60,7 +64,7 @@ public class SchemaRegistryAwareRecordSerDe implements RecordSerDe {
private static SchemaRegistryClient createSchemaRegistryClient(KafkaCluster cluster) {
Objects.requireNonNull(cluster.getSchemaRegistry());
List<SchemaProvider> schemaProviders =
- List.of(new AvroSchemaProvider(), new ProtobufSchemaProvider());
+ List.of(new AvroSchemaProvider(), new ProtobufSchemaProvider(), new JsonSchemaProvider());
//TODO add auth
return new CachedSchemaRegistryClient(
Collections.singletonList(cluster.getSchemaRegistry()),
@@ -78,9 +82,11 @@ public SchemaRegistryAwareRecordSerDe(KafkaCluster cluster) {
if (schemaRegistryClient != null) {
this.avroFormatter = new AvroMessageFormatter(schemaRegistryClient);
this.protobufFormatter = new ProtobufMessageFormatter(schemaRegistryClient);
+ this.jsonSchemaMessageFormatter = new JsonSchemaMessageFormatter(schemaRegistryClient);
} else {
this.avroFormatter = null;
this.protobufFormatter = null;
+ this.jsonSchemaMessageFormatter = null;
}
}
@@ -128,6 +134,8 @@ private byte[] serialize(
reader = new ProtobufMessageReader(topic, isKey, schemaRegistryClient, schema);
} else if (schema.getSchemaType().equals(MessageFormat.AVRO.name())) {
reader = new AvroMessageReader(topic, isKey, schemaRegistryClient, schema);
+ } else if (schema.getSchemaType().equals(MessageFormat.JSON.name())) {
+ reader = new JsonSchemaMessageReader(topic, isKey, schemaRegistryClient, schema);
} else {
throw new IllegalStateException("Unsupported schema type: " + schema.getSchemaType());
}
@@ -218,6 +226,10 @@ private MessageFormatter detectFormat(ConsumerRecord<Bytes, Bytes> msg, boolean
if (tryFormatter(avroFormatter, msg, isKey).isPresent()) {
return avroFormatter;
}
+ } else if (type.get().equals(MessageFormat.JSON.name())) {
+ if (tryFormatter(jsonSchemaMessageFormatter, msg, isKey).isPresent()) {
+ return jsonSchemaMessageFormatter;
+ }
} else {
throw new IllegalStateException("Unsupported schema type: " + type.get());
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/annotations/KafkaClientInternalsDependant.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/annotations/KafkaClientInternalsDependant.java
new file mode 100644
index 00000000000..1003ff0d7fd
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/annotations/KafkaClientInternalsDependant.java
@@ -0,0 +1,8 @@
+package com.provectus.kafka.ui.util.annotations;
+
+/**
+ * All code places that depend on kafka-client's internals or implementation-specific logic
+ * should be marked with this annotation to make further update process easier.
+ */
+public @interface KafkaClientInternalsDependant {
+}
| diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java
index 526e01f6ff1..f3b1988f5ca 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java
@@ -12,6 +12,7 @@
import com.provectus.kafka.ui.model.TopicMessage;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.avro.AvroSchema;
+import io.confluent.kafka.schemaregistry.json.JsonSchema;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import java.time.Duration;
import java.util.Map;
@@ -21,8 +22,6 @@
import lombok.SneakyThrows;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.TopicPartition;
-import org.junit.Assert;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
@@ -81,6 +80,33 @@ public class SendAndReadTests extends AbstractBaseTest {
private static final String PROTOBUF_SCHEMA_JSON_RECORD
= "{ \"f1\" : \"test str\", \"f2\" : 123 }";
+
+ private static final JsonSchema JSON_SCHEMA = new JsonSchema(
+ "{ "
+ + " \"$schema\": \"http://json-schema.org/draft-07/schema#\", "
+ + " \"$id\": \"http://example.com/myURI.schema.json\", "
+ + " \"title\": \"TestRecord\","
+ + " \"type\": \"object\","
+ + " \"additionalProperties\": false,"
+ + " \"properties\": {"
+ + " \"f1\": {"
+ + " \"type\": \"integer\""
+ + " },"
+ + " \"f2\": {"
+ + " \"type\": \"string\""
+ + " },"
+ // it is important special case since there is code in KafkaJsonSchemaSerializer
+ // that checks fields with this name (it should be worked around)
+ + " \"schema\": {"
+ + " \"type\": \"string\""
+ + " }"
+ + " }"
+ + "}"
+ );
+
+ private static final String JSON_SCHEMA_RECORD
+ = "{ \"f1\": 12, \"f2\": \"testJsonSchema1\", \"schema\": \"some txt\" }";
+
@Autowired
private ClusterService clusterService;
@@ -236,15 +262,14 @@ void keyWithAvroSchemaValueWithAvroSchemaKeyIsNull() {
@Test
void valueWithAvroSchemaShouldThrowExceptionArgIsNotValidJsonObject() {
- assertThatThrownBy(() -> {
- new SendAndReadSpec()
- .withValueSchema(AVRO_SCHEMA_2)
- .withMsgToSend(
- new CreateTopicMessage()
- .content("not a json object")
- )
- .doAssert(polled -> Assertions.fail());
- }).hasMessageContaining("Failed to serialize record");
+ new SendAndReadSpec()
+ .withValueSchema(AVRO_SCHEMA_2)
+ .withMsgToSend(
+ new CreateTopicMessage()
+ // f2 has type object instead of string
+ .content("{ \"f1\": 111, \"f2\": {} }")
+ )
+ .assertSendThrowsException();
}
@Test
@@ -281,15 +306,56 @@ void keyWithAvroSchemaValueWithProtoSchema() {
@Test
void valueWithProtoSchemaShouldThrowExceptionArgIsNotValidJsonObject() {
- assertThatThrownBy(() -> {
- new SendAndReadSpec()
- .withValueSchema(PROTOBUF_SCHEMA)
- .withMsgToSend(
- new CreateTopicMessage()
- .content("not a json object")
- )
- .doAssert(polled -> Assertions.fail());
- }).hasMessageContaining("Failed to serialize record");
+ new SendAndReadSpec()
+ .withValueSchema(PROTOBUF_SCHEMA)
+ .withMsgToSend(
+ new CreateTopicMessage()
+ // f2 field has type object instead of int
+ .content("{ \"f1\" : \"test str\", \"f2\" : {} }"))
+ .assertSendThrowsException();
+ }
+
+ @Test
+ void keyWithProtoSchemaValueWithJsonSchema() {
+ new SendAndReadSpec()
+ .withKeySchema(PROTOBUF_SCHEMA)
+ .withValueSchema(JSON_SCHEMA)
+ .withMsgToSend(
+ new CreateTopicMessage()
+ .key(PROTOBUF_SCHEMA_JSON_RECORD)
+ .content(JSON_SCHEMA_RECORD)
+ )
+ .doAssert(polled -> {
+ assertJsonEqual(polled.getKey(), PROTOBUF_SCHEMA_JSON_RECORD);
+ assertJsonEqual(polled.getContent(), JSON_SCHEMA_RECORD);
+ });
+ }
+
+ @Test
+ void keyWithJsonValueWithJsonSchemaKeyValueIsNull() {
+ new SendAndReadSpec()
+ .withKeySchema(JSON_SCHEMA)
+ .withValueSchema(JSON_SCHEMA)
+ .withMsgToSend(
+ new CreateTopicMessage()
+ .key(JSON_SCHEMA_RECORD)
+ )
+ .doAssert(polled -> {
+ assertJsonEqual(polled.getKey(), JSON_SCHEMA_RECORD);
+ assertThat(polled.getContent()).isNull();
+ });
+ }
+
+ @Test
+ void valueWithJsonSchemaThrowsExceptionIfArgIsNotValidJsonObject() {
+ new SendAndReadSpec()
+ .withValueSchema(JSON_SCHEMA)
+ .withMsgToSend(
+ new CreateTopicMessage()
+ // 'f2' field has has type object instead of string
+ .content("{ \"f1\": 12, \"f2\": {}, \"schema\": \"some txt\" }")
+ )
+ .assertSendThrowsException();
}
@@ -320,7 +386,7 @@ public SendAndReadSpec withValueSchema(ParsedSchema valueSchema) {
}
@SneakyThrows
- public void doAssert(Consumer<TopicMessage> msgAssert) {
+ private String createTopicAndCreateSchemas() {
Objects.requireNonNull(msgToSend);
String topic = UUID.randomUUID().toString();
createTopic(new NewTopic(topic, 1, (short) 1));
@@ -330,9 +396,23 @@ public void doAssert(Consumer<TopicMessage> msgAssert) {
if (valueSchema != null) {
schemaRegistry.schemaRegistryClient().register(topic + "-value", valueSchema);
}
-
// need to update to see new topic & schemas
clustersMetricsScheduler.updateMetrics();
+ return topic;
+ }
+
+ public void assertSendThrowsException() {
+ String topic = createTopicAndCreateSchemas();
+ try {
+ assertThatThrownBy(() -> clusterService.sendMessage(LOCAL, topic, msgToSend).block());
+ } finally {
+ deleteTopic(topic);
+ }
+ }
+
+ @SneakyThrows
+ public void doAssert(Consumer<TopicMessage> msgAssert) {
+ String topic = createTopicAndCreateSchemas();
try {
clusterService.sendMessage(LOCAL, topic, msgToSend).block();
TopicMessage polled = clusterService.getMessages(
| test | train | 2021-07-09T14:55:43 | "2021-07-07T13:27:40Z" | germanosin | train |
provectus/kafka-ui/454_692 | provectus/kafka-ui | provectus/kafka-ui/454 | provectus/kafka-ui/692 | [
"connected"
] | febc495ea7df3985568e7afc85a15e2f5d221bf5 | 32a0ece0a3ae4f7c49017c67dcfa9024d93a3baf | [
"Hey, @andormarkus, looks pretty interesting. We are using MSK rather often in our projects."
] | [
"I suggest it would be useful to either update`kafka-ui-sasl.yaml` with this example or create a new example file?",
"@Haarolean, thx for reviewing, this will not work in docker compose, this is specific config for aws iam configuration."
] | "2021-07-20T07:01:26Z" | [
"scope/backend"
] | Would you consider support for MSK & IAM integration ? | Hello
AWS announced support for a new SASL mechanism called AWS_MSK_IAM, which enables handling both authentication and authorisation with AWS IAM. Is there a chance to add support for that mechanism to kafka-ui?
[source](https://aws.amazon.com/blogs/big-data/securing-apache-kafka-is-easy-and-familiar-with-iam-access-control-for-amazon-msk/):
```
If you’re using the Java clients (such as Java Producer, Consumer, Kafka Streams, or Kafka Connect),
you’re in luck—all you need to do is to reference the Java library using Maven:
<dependency>
<groupId>software.amazon.msk</groupId>
<artifactId>aws-msk-iam-auth</artifactId>
<version>1.0.0</version>
</dependency>
```
Thanks,
Andor | [
"README.md",
"kafka-ui-api/pom.xml"
] | [
"README.md",
"guides/AWS_IAM.md",
"kafka-ui-api/pom.xml"
] | [] | diff --git a/README.md b/README.md
index 1e39a9fb3bd..6feb09572ae 100644
--- a/README.md
+++ b/README.md
@@ -109,6 +109,7 @@ To read more please follow to [chart documentation](charts/kafka-ui/README.md)
# Guides
- [SSO configuration](guides/SSO.md)
+- [AWS IAM configuration](guides/AWS_IAM.md)
## Connecting to a Secure Broker
diff --git a/guides/AWS_IAM.md b/guides/AWS_IAM.md
new file mode 100644
index 00000000000..80bfab205bc
--- /dev/null
+++ b/guides/AWS_IAM.md
@@ -0,0 +1,41 @@
+# How to configure AWS IAM Authentication
+
+UI for Apache Kafka comes with built-in [aws-msk-iam-auth](https://github.com/aws/aws-msk-iam-auth) library.
+
+You could pass sasl configs in properties section for each cluster.
+
+More details could be found here: [aws-msk-iam-auth](https://github.com/aws/aws-msk-iam-auth)
+
+## Examples:
+
+Please replace
+* <KAFKA_URL> with broker list
+* <PROFILE_NAME> with your aws profile
+
+
+### Running From Docker Image
+
+```sh
+docker run -p 8080:8080 \
+ -e KAFKA_CLUSTERS_0_NAME=local \
+ -e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=<KAFKA_URL> \
+ -e KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_SSL \
+ -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=AWS_MSK_IAM \
+ -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_CLIENT_CALLBACK_HANDLER_CLASS=software.amazon.msk.auth.iam.IAMClientCallbackHandler \
+ -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG=software.amazon.msk.auth.iam.IAMLoginModule required awsProfileName="<PROFILE_NAME>"; \
+ -d provectuslabs/kafka-ui:latest
+```
+
+### Configuring by application.yaml
+
+```yaml
+kafka:
+ clusters:
+ - name: local
+ bootstrapServers: <KAFKA_URL>
+ properties:
+ security.protocol: SASL_SSL
+ sasl.mechanism: AWS_MSK_IAM
+ sasl.client.callback.handler.class: software.amazon.msk.auth.iam.IAMClientCallbackHandler
+ sasl.jaas.config: software.amazon.msk.auth.iam.IAMLoginModule required awsProfileName="<PROFILE_NAME>";
+```
\ No newline at end of file
diff --git a/kafka-ui-api/pom.xml b/kafka-ui-api/pom.xml
index 41ca96f7446..ea7a888173d 100644
--- a/kafka-ui-api/pom.xml
+++ b/kafka-ui-api/pom.xml
@@ -97,6 +97,12 @@
<version>${confluent.version}</version>
</dependency>
+ <dependency>
+ <groupId>software.amazon.msk</groupId>
+ <artifactId>aws-msk-iam-auth</artifactId>
+ <version>1.1.0</version>
+ </dependency>
+
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
| null | train | train | 2021-07-19T15:20:05 | "2021-05-13T20:40:39Z" | andormarkus | train |
provectus/kafka-ui/331_693 | provectus/kafka-ui | provectus/kafka-ui/331 | provectus/kafka-ui/693 | [
"connected"
] | 443ed8bc8ca5e7b54f8c29396f8e3efed63041cb | b296108e5bb8ca7c9d57bb92c7a0bbb10d512dc2 | [
"1. Unfortunately this is the http code from the schema registry itself. It's 500 instead of 409 if you send an invalid request, like this:\r\n`{\"subject\":\"test\",\"schema\":\"test\",\"schemaType\":\"AVRO\"}`\r\ninstead of this:\r\n`{\"subject\":\"test\",\"schemaType\":\"AVRO\",\"schema\":\"{\\\"type\\\": \\\"string\\\"}\"}`\r\nIt produces 409 if the request body is correct.\r\n\r\n2. Seems like it's impossible since schema registry does not forward a list of such fields, but we'll forward an error message from schema registry instead.\r\n```\r\n{\r\n \"code\": 4006,\r\n \"message\": \"Either the input schema or one its references is invalid\", // <-- this has changed\r\n \"timestamp\": 1626784717378,\r\n \"requestId\": \"73e2c259\",\r\n \"fieldsErrors\": null\r\n}\r\n```\r\n\r\n3. Currently this request returns code 4001:\r\n```\r\n{\r\n \"code\": 4001,\r\n \"message\": \"Topic 'testXXX' already exists.\",\r\n}\r\n```\r\nI'm not quite sure if this is still the correct error code. There's a code 4005 (DUPLICATED_ENTITY) as well which might suit this purpose better. What do you folks think? @IldarAlmakaev @workshur "
] | [] | "2021-07-20T12:54:19Z" | [
"type/enhancement",
"scope/backend"
] | Backend returns incorrect http statuses | Some Create/Update requests return unpredictable responses:
1. When I try to create schema dup
```
curl 'http://localhost:3000/api/clusters/secondLocal/schemas' \
-H 'Connection: keep-alive' \
-H 'sec-ch-ua: "Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"' \
-H 'DNT: 1' \
-H 'sec-ch-ua-mobile: ?0' \
-H 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36' \
-H 'Content-Type: application/json' \
-H 'Accept: */*' \
-H 'Origin: http://localhost:3000' \
-H 'Sec-Fetch-Site: same-origin' \
-H 'Sec-Fetch-Mode: cors' \
-H 'Sec-Fetch-Dest: empty' \
-H 'Referer: http://localhost:3000/ui/clusters/secondLocal/schemas/new' \
-H 'Accept-Language: en-US,en;q=0.9,ru;q=0.8' \
--data-raw '{"subject":"my.new","schema":"my.new","schemaType":"AVRO"}' \
--compressed
```
it returns HTTP/1.1 500 Internal Server Error with:
```
{"code":5000,"message":"500 Internal Server Error from POST http://schemaregistry0:8085/subjects/my.new","timestamp":1617110665799,"requestId":"adb03f44","fieldsErrors":null}
```
instead of 409
2. Schema Update should return a list of invalid fields in a 422 response
3. Topic create dup request returns the correct message with an incorrect code
```
{"code":5000,"message":"Topic '123' already exists.","timestamp":1617110943045,"requestId":"71474ae8","fieldsErrors":null}
```
Please update all existing API endpoints to return correct responses in the format of ErrorResponse | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/schemaregistry/ErrorResponse.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/schemaregistry/ErrorResponse.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/schemaregistry/ErrorResponse.java
new file mode 100644
index 00000000000..a7c86b01f19
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/schemaregistry/ErrorResponse.java
@@ -0,0 +1,14 @@
+package com.provectus.kafka.ui.model.schemaregistry;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import lombok.Data;
+
+@Data
+public class ErrorResponse {
+
+ @JsonProperty("error_code")
+ private int errorCode;
+
+ private String message;
+
+}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java
index 3f05ddb186f..41374120b81 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java
@@ -14,6 +14,7 @@
import com.provectus.kafka.ui.model.NewSchemaSubject;
import com.provectus.kafka.ui.model.SchemaSubject;
import com.provectus.kafka.ui.model.SchemaType;
+import com.provectus.kafka.ui.model.schemaregistry.ErrorResponse;
import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityCheck;
import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityLevel;
import com.provectus.kafka.ui.model.schemaregistry.InternalNewSchema;
@@ -195,7 +196,8 @@ private Mono<SubjectIdResponse> submitNewSchema(String subject,
.body(BodyInserters.fromPublisher(newSchemaSubject, InternalNewSchema.class))
.retrieve()
.onStatus(UNPROCESSABLE_ENTITY::equals,
- r -> Mono.error(new UnprocessableEntityException("Invalid params")))
+ r -> r.bodyToMono(ErrorResponse.class)
+ .flatMap(x -> Mono.error(new UnprocessableEntityException(x.getMessage()))))
.bodyToMono(SubjectIdResponse.class);
}
@@ -210,7 +212,8 @@ private Mono<SchemaSubject> checkSchemaOnDuplicate(String subject,
.retrieve()
.onStatus(NOT_FOUND::equals, res -> Mono.empty())
.onStatus(UNPROCESSABLE_ENTITY::equals,
- r -> Mono.error(new UnprocessableEntityException("Invalid params")))
+ r -> r.bodyToMono(ErrorResponse.class)
+ .flatMap(x -> Mono.error(new UnprocessableEntityException(x.getMessage()))))
.bodyToMono(SchemaSubject.class)
.filter(s -> Objects.isNull(s.getId()))
.switchIfEmpty(Mono.error(new DuplicateEntityException("Such schema already exists")));
| null | train | train | 2021-07-20T17:00:46 | "2021-03-30T13:31:23Z" | workshur | train |
provectus/kafka-ui/359_695 | provectus/kafka-ui | provectus/kafka-ui/359 | provectus/kafka-ui/695 | [
"timestamp(timedelta=0.0, similarity=0.9565308323696249)",
"connected"
] | ae8aec900dce62c13057684ab8f6d65584690ad0 | 443ed8bc8ca5e7b54f8c29396f8e3efed63041cb | [] | [] | "2021-07-20T14:35:37Z" | [
"type/enhancement",
"scope/frontend"
] | Update favicon | List of original logos - http://svn.apache.org/repos/asf/kafka/site/logos/ | [
".github/workflows/pr-checks.yaml"
] | [
".github/workflows/pr-checks.yaml"
] | [] | diff --git a/.github/workflows/pr-checks.yaml b/.github/workflows/pr-checks.yaml
index 88944ca6f97..48b47c5196f 100644
--- a/.github/workflows/pr-checks.yaml
+++ b/.github/workflows/pr-checks.yaml
@@ -10,6 +10,6 @@ jobs:
- uses: kentaro-m/[email protected]
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
- - uses: dekinderfiets/[email protected]
+ - uses: dekinderfiets/[email protected]
with:
repo-token: '${{ secrets.GITHUB_TOKEN }}'
| null | train | train | 2021-07-20T15:54:08 | "2021-04-07T11:34:41Z" | mstolbov | train |
provectus/kafka-ui/698_709 | provectus/kafka-ui | provectus/kafka-ui/698 | provectus/kafka-ui/709 | [
"timestamp(timedelta=0.0, similarity=0.9140049080554569)",
"connected"
] | 326786e4ff5a9ff47dd4902dd981ac9d2d6bcad9 | 3cec4a1d6f5b33f845315104c57b4b19ddf78c2a | [] | [] | "2021-07-23T12:39:01Z" | [
"scope/backend",
"scope/QA"
] | Add docker-compose with preconfigured source and sink connectors for testing | ### Is your proposal related to a problem?
<!--
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
-->
(Write your answer here.)
### Describe the solution you'd like
<!--
Provide a clear and concise description of what you want to happen.
-->
(Describe your proposed solution here.)
### Describe alternatives you've considered
<!--
Let us know about other solutions you've tried or researched.
-->
(Write your answer here.)
### Additional context
<!--
Is there anything else you can add about the proposal?
You might want to link to related issues here, if you haven't already.
-->
(Write your answer here.) | [] | [
"docker/connectors/sink-activities.json",
"docker/connectors/source-activities.json",
"docker/connectors/start.sh",
"docker/kafka-ui-connectors.yaml",
"docker/postgres/Dockerfile",
"docker/postgres/data.sql"
] | [] | diff --git a/docker/connectors/sink-activities.json b/docker/connectors/sink-activities.json
new file mode 100644
index 00000000000..fe1e6012148
--- /dev/null
+++ b/docker/connectors/sink-activities.json
@@ -0,0 +1,19 @@
+{
+ "name": "sink_postgres_activities",
+ "config": {
+ "connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector",
+ "connection.url": "jdbc:postgresql://postgres-db:5432/test",
+ "connection.user": "dev_user",
+ "connection.password": "12345",
+ "topics": "source-activities",
+ "table.name.format": "sink_activities",
+ "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+ "key.converter.schema.registry.url": "http://schemaregistry0:8085",
+ "value.converter": "io.confluent.connect.avro.AvroConverter",
+ "value.converter.schema.registry.url": "http://schemaregistry0:8085",
+ "auto.create": "true",
+ "pk.mode": "record_value",
+ "pk.fields": "id",
+ "insert.mode": "upsert"
+ }
+}
\ No newline at end of file
diff --git a/docker/connectors/source-activities.json b/docker/connectors/source-activities.json
new file mode 100644
index 00000000000..dc55dfea922
--- /dev/null
+++ b/docker/connectors/source-activities.json
@@ -0,0 +1,20 @@
+{
+ "name": "source_postgres_activities",
+ "config": {
+ "connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector",
+ "connection.url": "jdbc:postgresql://postgres-db:5432/test",
+ "connection.user": "dev_user",
+ "connection.password": "12345",
+ "topic.prefix": "source-",
+ "poll.interval.ms": 3600000,
+ "table.whitelist": "public.activities",
+ "mode": "bulk",
+ "transforms": "extractkey",
+ "transforms.extractkey.type": "org.apache.kafka.connect.transforms.ExtractField$Key",
+ "transforms.extractkey.field": "id",
+ "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+ "key.converter.schema.registry.url": "http://schemaregistry0:8085",
+ "value.converter": "io.confluent.connect.avro.AvroConverter",
+ "value.converter.schema.registry.url": "http://schemaregistry0:8085"
+ }
+}
\ No newline at end of file
diff --git a/docker/connectors/start.sh b/docker/connectors/start.sh
new file mode 100755
index 00000000000..7adc5e4e127
--- /dev/null
+++ b/docker/connectors/start.sh
@@ -0,0 +1,9 @@
+#! /bin/bash
+while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' kafka-connect0:8083)" != "200" ]]
+ do sleep 5
+done
+
+echo "\n --------------Creating connectors..."
+for filename in /connectors/*.json; do
+ curl -X POST -H "Content-Type: application/json" -d @$filename http://kafka-connect0:8083/connectors
+done
diff --git a/docker/kafka-ui-connectors.yaml b/docker/kafka-ui-connectors.yaml
new file mode 100644
index 00000000000..e99a01c6da4
--- /dev/null
+++ b/docker/kafka-ui-connectors.yaml
@@ -0,0 +1,173 @@
+---
+version: '2'
+services:
+
+ kafka-ui:
+ container_name: kafka-ui
+ image: provectuslabs/kafka-ui:master
+ ports:
+ - 8080:8080
+ depends_on:
+ - zookeeper0
+ - zookeeper1
+ - kafka0
+ - kafka1
+ - schemaregistry0
+ - kafka-connect0
+ environment:
+ KAFKA_CLUSTERS_0_NAME: local
+ KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
+ KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
+ KAFKA_CLUSTERS_0_JMXPORT: 9997
+ KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
+ KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
+ KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
+ KAFKA_CLUSTERS_1_NAME: secondLocal
+ KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
+ KAFKA_CLUSTERS_1_ZOOKEEPER: zookeeper1:2181
+ KAFKA_CLUSTERS_1_JMXPORT: 9998
+ KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
+ KAFKA_CLUSTERS_1_KAFKACONNECT_0_NAME: first
+ KAFKA_CLUSTERS_1_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
+
+ zookeeper0:
+ image: confluentinc/cp-zookeeper:5.2.4
+ environment:
+ ZOOKEEPER_CLIENT_PORT: 2181
+ ZOOKEEPER_TICK_TIME: 2000
+ ports:
+ - 2181:2181
+
+ kafka0:
+ image: confluentinc/cp-kafka:5.2.4
+ depends_on:
+ - zookeeper0
+ ports:
+ - 9092:9092
+ - 9997:9997
+ environment:
+ KAFKA_BROKER_ID: 1
+ KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
+ KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
+ KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+ JMX_PORT: 9997
+ KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
+
+ zookeeper1:
+ image: confluentinc/cp-zookeeper:5.2.4
+ environment:
+ ZOOKEEPER_CLIENT_PORT: 2181
+ ZOOKEEPER_TICK_TIME: 2000
+
+ kafka1:
+ image: confluentinc/cp-kafka:5.2.4
+ depends_on:
+ - zookeeper1
+ ports:
+ - 9093:9093
+ - 9998:9998
+ environment:
+ KAFKA_BROKER_ID: 1
+ KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
+ KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
+ KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+ JMX_PORT: 9998
+ KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka1 -Dcom.sun.management.jmxremote.rmi.port=9998
+
+ schemaregistry0:
+ image: confluentinc/cp-schema-registry:5.2.4
+ ports:
+ - 8085:8085
+ depends_on:
+ - zookeeper0
+ - kafka0
+ environment:
+ SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
+ SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
+ SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
+ SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
+ SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
+
+ SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
+ SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
+ SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
+
+ schemaregistry1:
+ image: confluentinc/cp-schema-registry:5.5.0
+ ports:
+ - 18085:8085
+ depends_on:
+ - zookeeper1
+ - kafka1
+ environment:
+ SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
+ SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
+ SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
+ SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
+ SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085
+
+ SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
+ SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
+ SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
+
+ kafka-connect0:
+ image: confluentinc/cp-kafka-connect:6.0.1
+ ports:
+ - 8083:8083
+ depends_on:
+ - kafka0
+ - schemaregistry0
+ environment:
+ CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
+ CONNECT_GROUP_ID: compose-connect-group
+ CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
+ CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
+ CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
+ CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
+ CONNECT_STATUS_STORAGE_TOPIC: _connect_status
+ CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
+ CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
+ CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
+ CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
+ CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
+ CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
+ CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
+ CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
+ CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
+
+ kafka-init-topics:
+ image: confluentinc/cp-kafka:5.2.4
+ volumes:
+ - ./message.json:/data/message.json
+ depends_on:
+ - kafka1
+ command: "bash -c 'echo Waiting for Kafka to be ready... && \
+ cub kafka-ready -b kafka1:29092 1 30 && \
+ kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
+ kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
+ kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
+ kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
+
+ postgres-db:
+ build:
+ context: ./postgres
+ args:
+ image: postgres:9.6.22
+ ports:
+ - 5432:5432
+ environment:
+ POSTGRES_USER: 'dev_user'
+ POSTGRES_PASSWORD: '12345'
+
+ create-connectors:
+ image: tutum/curl
+ depends_on:
+ - postgres-db
+ - kafka-connect0
+ volumes:
+ - ./connectors:/connectors
+ command: bash -c '/connectors/start.sh'
\ No newline at end of file
diff --git a/docker/postgres/Dockerfile b/docker/postgres/Dockerfile
new file mode 100644
index 00000000000..12db53e1eb6
--- /dev/null
+++ b/docker/postgres/Dockerfile
@@ -0,0 +1,9 @@
+ARG image
+
+FROM ${image}
+
+MAINTAINER Provectus Team
+
+ADD data.sql /docker-entrypoint-initdb.d
+
+EXPOSE 5432
\ No newline at end of file
diff --git a/docker/postgres/data.sql b/docker/postgres/data.sql
new file mode 100644
index 00000000000..0e1ffad5baa
--- /dev/null
+++ b/docker/postgres/data.sql
@@ -0,0 +1,24 @@
+CREATE DATABASE test WITH OWNER = dev_user;
+\connect test
+
+CREATE TABLE activities
+(
+ id INTEGER PRIMARY KEY,
+ msg varchar(24),
+ action varchar(128),
+ browser varchar(24),
+ device json,
+ createdAt timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP
+);
+
+insert into activities(id, action, msg, browser, device)
+values (1, 'LOGIN', 'Success', 'Chrome', '{
+ "name": "Chrome",
+ "major": "67",
+ "version": "67.0.3396.99"
+}'),
+ (2, 'LOGIN', 'Failed', 'Apple WebKit', '{
+ "name": "WebKit",
+ "major": "605",
+ "version": "605.1.15"
+ }');
\ No newline at end of file
| null | val | train | 2021-07-21T14:55:30 | "2021-07-21T10:03:30Z" | IldarAlmakaev | train |
provectus/kafka-ui/634_718 | provectus/kafka-ui | provectus/kafka-ui/634 | provectus/kafka-ui/718 | [
"connected"
] | 8da350ea38531c2259b45c94e3645bec1d008a62 | 38e1d12452df6e72411a9c3a48ea909980e34b7e | [
"Waiting frontend",
"Looks like @GneyHabub added cleanup policy to topic description, but missed topic list part. FYI: @workshur "
] | [] | "2021-07-25T12:00:50Z" | [
"type/enhancement",
"good first issue",
"scope/backend",
"scope/frontend"
] | Provide cleanup policy in topic list | ### Describe the solution you'd like
Backend should expose the cleanup policy in the topic list request:
delete (with retention), compact, or compact with delete (with retention).
Frontend should display this info on the topic list page.
| [
"kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx"
] | [
"kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx
index 34f45dbea92..93c7f55baac 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx
@@ -30,6 +30,7 @@ const Overview: React.FC<Props> = ({
segmentCount,
clusterName,
topicName,
+ cleanUpPolicy,
clearTopicMessages,
}) => {
const { isReadOnly } = React.useContext(ClusterContext);
@@ -59,6 +60,9 @@ const Overview: React.FC<Props> = ({
<BytesFormatted value={segmentSize} />
</Indicator>
<Indicator label="Segment count">{segmentCount}</Indicator>
+ <Indicator label="Clean Up Policy">
+ <span className="tag is-info">{cleanUpPolicy || 'Unknown'}</span>
+ </Indicator>
</MetricsWrapper>
<div className="box">
<table className="table is-striped is-fullwidth">
| null | val | train | 2021-07-24T17:00:29 | "2021-07-05T15:20:06Z" | germanosin | train |
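For context on the record above: the merged patch is frontend-only, and the backend side of exposing the cleanup policy amounts to reading the `cleanup.policy` topic config. A minimal, hypothetical sketch (class and method names are illustrative, not taken from the kafka-ui codebase) of fetching that value with the plain Kafka `AdminClient`:

```java
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.config.TopicConfig;

public class CleanupPolicyExample {
  // Returns e.g. "delete", "compact" or "compact,delete" for the given topic.
  static String cleanupPolicy(AdminClient adminClient, String topic) throws Exception {
    ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, topic);
    Map<ConfigResource, Config> configs =
        adminClient.describeConfigs(List.of(resource)).all().get();
    ConfigEntry entry = configs.get(resource).get(TopicConfig.CLEANUP_POLICY_CONFIG);
    return entry == null ? "unknown" : entry.value();
  }
}
```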
provectus/kafka-ui/707_720 | provectus/kafka-ui | provectus/kafka-ui/707 | provectus/kafka-ui/720 | [
"timestamp(timedelta=62.0, similarity=0.9211861787581184)",
"connected"
] | 8da350ea38531c2259b45c94e3645bec1d008a62 | 29a3158df68662f9ed7795edbc775446831a977d | [
"@germanosin what is the purpose of the \"name\" and \"error\" response fields?",
"@tdavletov Name is the logdir name and you can. get error from LogDirInfo object, it could be null",
"@germanosin Are we sure we need both endpoints? I think we can use /api/clusters/{clusterName}/brokers/logdirs\r\n and group dirs by broker on UI side",
"I think we could have both endpoints, and let's check how we can do this on UI. I'm thinking to have per broker page in future",
"I would suggest to have one page with filtration by broker",
"> @tdavletov Name is the logdir name and you can. get error from LogDirInfo object, it could be null\r\n\r\n@germanosin I see, thanks. I read that kafka's \"documentation says, that partitions will be distributed among the directories round-robin style\". So for multiple directories we will see same topic in different parts of response json. I'm going to implement it that way but I don't know how it will look on UI so if you have any concerns about it, let's discuss.",
"> I would suggest to have one page with filtration by broker\r\n\r\n@workshur, makes sense ",
"Updated spec (one endpoint):\r\n- /api/clusters/{clusterName}/brokers/logdirs\r\n\r\nOR\r\n- /api/clusters/{clusterName}/brokers/logdirs?broker=0&broker=1\r\n\r\nResponse:\r\n```\r\n[\r\n {\r\n \"name\": \"\",\r\n \"error\": \"\",\r\n \"topics\": {\r\n \"topicName\": {\r\n \"partitions\": [\r\n {\r\n \"broker\": 0,\r\n \"paritititon\": 0,\r\n \"size\": 11212121,\r\n \"offsetLag\": 0\r\n }\r\n ]\r\n } \r\n }\r\n }\r\n]\r\n```"
] | [
"this one is dirty, you should wrap future into mono, please check util classes",
"@germanosin I don't see a better way. Mono has methods for CompletableFuture but not for Future (KafkaFuture in this case). If you have a better solution please advise. One thing I can do is move this code to util class and make this particular place cleaner.",
"Sure, please use this method https://github.com/provectus/kafka-ui/blob/master/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java#L57",
"Thank you. Fixed."
] | "2021-07-26T14:37:17Z" | [
"type/enhancement",
"scope/backend"
] | Expose broker log dirs | Add endpoints:
* /api/clusters/{clusterName}/brokers/{id}/logdirs
expected outcome:
```json
[
{
"name": "",
"error": "",
"topics": {
"topicName": {
"partitions": [
{
"paritititon": 0,
"size": 11212121,
"offsetLag": 0
}
]
}
}
}
]
```
* /api/clusters/{clusterName}/brokers/logdirs
```json
[
{
"name": "",
"error": "",
"topics": {
"topicName": {
"partitions": [
{
"broker": 0,
"paritititon": 0,
"size": 11212121,
"offsetLag": 0
}
]
}
}
}
]
```
Use AdminClient describeLogDirs to get this info | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/DescribeLogDirsMapper.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml"
] | [
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/LogDirsTest.java"
] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java
index 18b5291d75d..0678e414c3d 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java
@@ -3,7 +3,9 @@
import com.provectus.kafka.ui.api.BrokersApi;
import com.provectus.kafka.ui.model.Broker;
import com.provectus.kafka.ui.model.BrokerMetrics;
+import com.provectus.kafka.ui.model.BrokersLogdirs;
import com.provectus.kafka.ui.service.ClusterService;
+import java.util.List;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
import org.springframework.http.ResponseEntity;
@@ -31,4 +33,12 @@ public Mono<ResponseEntity<Flux<Broker>>> getBrokers(String clusterName,
ServerWebExchange exchange) {
return Mono.just(ResponseEntity.ok(clusterService.getBrokers(clusterName)));
}
+
+ @Override
+ public Mono<ResponseEntity<Flux<BrokersLogdirs>>> getAllBrokersLogdirs(String clusterName,
+ List<Integer> brokers,
+ ServerWebExchange exchange
+ ) {
+ return Mono.just(ResponseEntity.ok(clusterService.getAllBrokersLogdirs(clusterName, brokers)));
+ }
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/DescribeLogDirsMapper.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/DescribeLogDirsMapper.java
new file mode 100644
index 00000000000..2718fdab7c1
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/DescribeLogDirsMapper.java
@@ -0,0 +1,65 @@
+package com.provectus.kafka.ui.mapper;
+
+import com.provectus.kafka.ui.model.BrokerTopicLogdirs;
+import com.provectus.kafka.ui.model.BrokerTopicPartitionLogdir;
+import com.provectus.kafka.ui.model.BrokersLogdirs;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.requests.DescribeLogDirsResponse;
+import org.springframework.stereotype.Component;
+
+@Component
+public class DescribeLogDirsMapper {
+
+ public List<BrokersLogdirs> toBrokerLogDirsList(
+ Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>> logDirsInfo) {
+
+ return logDirsInfo.entrySet().stream().map(
+ mapEntry -> mapEntry.getValue().entrySet().stream()
+ .map(e -> toBrokerLogDirs(mapEntry.getKey(), e.getKey(), e.getValue()))
+ .collect(Collectors.toList())
+ ).flatMap(Collection::stream).collect(Collectors.toList());
+ }
+
+ private BrokersLogdirs toBrokerLogDirs(Integer broker, String dirName,
+ DescribeLogDirsResponse.LogDirInfo logDirInfo) {
+ BrokersLogdirs result = new BrokersLogdirs();
+ result.setName(dirName);
+ if (logDirInfo.error != null) {
+ result.setError(logDirInfo.error.message());
+ }
+ var topics = logDirInfo.replicaInfos.entrySet().stream()
+ .collect(Collectors.groupingBy(e -> e.getKey().topic())).entrySet().stream()
+ .map(e -> toTopicLogDirs(broker, e.getKey(), e.getValue()))
+ .collect(Collectors.toList());
+ result.setTopics(topics);
+ return result;
+ }
+
+ private BrokerTopicLogdirs toTopicLogDirs(Integer broker, String name,
+ List<Map.Entry<TopicPartition,
+ DescribeLogDirsResponse.ReplicaInfo>> partitions) {
+ BrokerTopicLogdirs topic = new BrokerTopicLogdirs();
+ topic.setName(name);
+ topic.setPartitions(
+ partitions.stream().map(
+ e -> topicPartitionLogDir(
+ broker, e.getKey().partition(), e.getValue())).collect(Collectors.toList())
+ );
+ return topic;
+ }
+
+ private BrokerTopicPartitionLogdir topicPartitionLogDir(Integer broker, Integer partition,
+ DescribeLogDirsResponse.ReplicaInfo
+ replicaInfo) {
+ BrokerTopicPartitionLogdir logDir = new BrokerTopicPartitionLogdir();
+ logDir.setBroker(broker);
+ logDir.setPartition(partition);
+ logDir.setSize(replicaInfo.size);
+ logDir.setOffsetLag(replicaInfo.offsetLag);
+ return logDir;
+ }
+}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
index 57faf2d5e45..84b13767b59 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
@@ -6,8 +6,10 @@
import com.provectus.kafka.ui.exception.TopicNotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.mapper.ClusterMapper;
+import com.provectus.kafka.ui.mapper.DescribeLogDirsMapper;
import com.provectus.kafka.ui.model.Broker;
import com.provectus.kafka.ui.model.BrokerMetrics;
+import com.provectus.kafka.ui.model.BrokersLogdirs;
import com.provectus.kafka.ui.model.Cluster;
import com.provectus.kafka.ui.model.ClusterMetrics;
import com.provectus.kafka.ui.model.ClusterStats;
@@ -62,6 +64,7 @@ public class ClusterService {
private final KafkaService kafkaService;
private final ConsumingService consumingService;
private final DeserializationService deserializationService;
+ private final DescribeLogDirsMapper describeLogDirsMapper;
public List<Cluster> getClusters() {
return clustersStorage.getKafkaClusters()
@@ -361,4 +364,11 @@ public Mono<ReplicationFactorChangeResponse> changeReplicationFactor(
.orElse(Mono.error(new ClusterNotFoundException(
String.format("No cluster for name '%s'", clusterName))));
}
+
+ public Flux<BrokersLogdirs> getAllBrokersLogdirs(String clusterName, List<Integer> brokers) {
+ return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
+ .flatMap(c -> kafkaService.getClusterLogDirs(c, brokers))
+ .map(describeLogDirsMapper::toBrokerLogDirsList)
+ .flatMapMany(Flux::fromIterable);
+ }
}
\ No newline at end of file
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
index e914a16a1ef..ed7cd4e246b 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
@@ -41,6 +41,7 @@
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import lombok.RequiredArgsConstructor;
@@ -53,6 +54,7 @@
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.ConsumerGroupListing;
+import org.apache.kafka.clients.admin.DescribeLogDirsResult;
import org.apache.kafka.clients.admin.ListTopicsOptions;
import org.apache.kafka.clients.admin.NewPartitionReassignment;
import org.apache.kafka.clients.admin.NewPartitions;
@@ -68,6 +70,8 @@
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.config.ConfigResource;
+import org.apache.kafka.common.errors.TimeoutException;
+import org.apache.kafka.common.requests.DescribeLogDirsResponse;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.utils.Bytes;
@@ -786,6 +790,23 @@ public Mono<InternalTopic> changeReplicationFactor(
});
}
+ public Mono<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> getClusterLogDirs(
+ KafkaCluster cluster, List<Integer> reqBrokers) {
+ return getOrCreateAdminClient(cluster)
+ .map(admin -> {
+ List<Integer> brokers = new ArrayList<>(cluster.getBrokers());
+ if (reqBrokers != null && !reqBrokers.isEmpty()) {
+ brokers.retainAll(reqBrokers);
+ }
+ return admin.getAdminClient().describeLogDirs(brokers);
+ })
+ .flatMap(result -> ClusterUtil.toMono(result.all()))
+ .onErrorResume(TimeoutException.class, (TimeoutException e) -> {
+ log.error("Error during fetching log dirs", e);
+ return Mono.just(new HashMap<>());
+ });
+ }
+
private Map<TopicPartition, Optional<NewPartitionReassignment>> getPartitionsReassignments(
KafkaCluster cluster,
String topicName,
diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
index 6643282ffc6..9eb892fa5a7 100644
--- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
+++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
@@ -142,6 +142,37 @@ paths:
schema:
$ref: '#/components/schemas/BrokerMetrics'
+ /api/clusters/{clusterName}/brokers/logdirs:
+ get:
+ tags:
+ - Brokers
+ summary: getAllBrokersLogdirs
+ operationId: getAllBrokersLogdirs
+ parameters:
+ - name: clusterName
+ in: path
+ required: true
+ schema:
+ type: string
+ - name: broker
+ in: query
+ description: array of broker ids
+ required: false
+ schema:
+ type: array
+ items:
+ type: integer
+ responses:
+ 200:
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ $ref: '#/components/schemas/BrokersLogdirs'
+
+
/api/clusters/{clusterName}/topics:
get:
tags:
@@ -1446,6 +1477,30 @@ components:
items:
$ref: '#/components/schemas/Metric'
+ BrokerLogdirs:
+ type: object
+ properties:
+ name:
+ type: string
+ error:
+ type: string
+ topics:
+ type: array
+ items:
+ $ref: '#/components/schemas/TopicLogdirs'
+
+ BrokersLogdirs:
+ type: object
+ properties:
+ name:
+ type: string
+ error:
+ type: string
+ topics:
+ type: array
+ items:
+ $ref: '#/components/schemas/BrokerTopicLogdirs'
+
TopicsResponse:
type: object
properties:
@@ -1786,6 +1841,46 @@ components:
additionalProperties:
type: number
+ TopicLogdirs:
+ type: object
+ properties:
+ name:
+ type: string
+ partitions:
+ type: array
+ items:
+ $ref: '#/components/schemas/TopicPartitionLogdir'
+
+ BrokerTopicLogdirs:
+ type: object
+ properties:
+ name:
+ type: string
+ partitions:
+ type: array
+ items:
+ $ref: '#/components/schemas/BrokerTopicPartitionLogdir'
+
+ TopicPartitionLogdir:
+ type: object
+ properties:
+ partition:
+ type: integer
+ size:
+ type: integer
+ format: int64
+ offsetLag:
+ type: integer
+ format: int64
+
+ BrokerTopicPartitionLogdir:
+ allOf:
+ - $ref: '#/components/schemas/TopicPartitionLogdir'
+ - type: object
+ properties:
+ broker:
+ type: integer
+
SchemaSubject:
type: object
properties:
| diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/LogDirsTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/LogDirsTest.java
new file mode 100644
index 00000000000..79bcce8d822
--- /dev/null
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/LogDirsTest.java
@@ -0,0 +1,85 @@
+package com.provectus.kafka.ui.service;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.provectus.kafka.ui.AbstractBaseTest;
+import com.provectus.kafka.ui.model.BrokerTopicLogdirs;
+import com.provectus.kafka.ui.model.BrokersLogdirs;
+import java.util.List;
+import org.junit.jupiter.api.Test;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.test.autoconfigure.web.reactive.AutoConfigureWebTestClient;
+import org.springframework.core.ParameterizedTypeReference;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.web.reactive.server.WebTestClient;
+
+@ContextConfiguration(initializers = {AbstractBaseTest.Initializer.class})
+@AutoConfigureWebTestClient(timeout = "60000")
+public class LogDirsTest extends AbstractBaseTest {
+
+ @Autowired
+ private WebTestClient webTestClient;
+
+ @Test
+ public void testAllBrokers() {
+ List<BrokersLogdirs> dirs = webTestClient.get()
+ .uri("/api/clusters/{clusterName}/brokers/logdirs", LOCAL)
+ .exchange()
+ .expectStatus().isOk()
+ .expectBody(new ParameterizedTypeReference<List<BrokersLogdirs>>() {})
+ .returnResult()
+ .getResponseBody();
+
+ assertThat(dirs).hasSize(1);
+ BrokersLogdirs dir = dirs.get(0);
+ assertThat(dir.getName()).isEqualTo("/var/lib/kafka/data");
+ assertThat(dir.getTopics().stream().anyMatch(t -> t.getName().equals("__consumer_offsets")))
+ .isTrue();
+
+ BrokerTopicLogdirs topic = dir.getTopics().stream()
+ .filter(t -> t.getName().equals("__consumer_offsets"))
+ .findAny().get();
+
+ assertThat(topic.getPartitions()).hasSize(1);
+ assertThat(topic.getPartitions().get(0).getBroker()).isEqualTo(1);
+ assertThat(topic.getPartitions().get(0).getSize()).isPositive();
+ }
+
+ @Test
+ public void testOneBrokers() {
+ List<BrokersLogdirs> dirs = webTestClient.get()
+ .uri("/api/clusters/{clusterName}/brokers/logdirs?broker=1", LOCAL)
+ .exchange()
+ .expectStatus().isOk()
+ .expectBody(new ParameterizedTypeReference<List<BrokersLogdirs>>() {})
+ .returnResult()
+ .getResponseBody();
+
+ assertThat(dirs).hasSize(1);
+ BrokersLogdirs dir = dirs.get(0);
+ assertThat(dir.getName()).isEqualTo("/var/lib/kafka/data");
+ assertThat(dir.getTopics().stream().anyMatch(t -> t.getName().equals("__consumer_offsets")))
+ .isTrue();
+
+ BrokerTopicLogdirs topic = dir.getTopics().stream()
+ .filter(t -> t.getName().equals("__consumer_offsets"))
+ .findAny().get();
+
+ assertThat(topic.getPartitions()).hasSize(1);
+ assertThat(topic.getPartitions().get(0).getBroker()).isEqualTo(1);
+ assertThat(topic.getPartitions().get(0).getSize()).isPositive();
+ }
+
+ @Test
+ public void testWrongBrokers() {
+ List<BrokersLogdirs> dirs = webTestClient.get()
+ .uri("/api/clusters/{clusterName}/brokers/logdirs?broker=2", LOCAL)
+ .exchange()
+ .expectStatus().isOk()
+ .expectBody(new ParameterizedTypeReference<List<BrokersLogdirs>>() {})
+ .returnResult()
+ .getResponseBody();
+
+ assertThat(dirs).isEmpty();
+ }
+}
| train | train | 2021-07-24T17:00:29 | "2021-07-23T11:25:22Z" | germanosin | train |
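For context on the record above: the review comments ask for the `KafkaFuture` returned by `describeLogDirs` to be wrapped into a Reactor `Mono` through the project's existing `ClusterUtil.toMono` helper. A minimal sketch of that pattern, assuming Kafka clients 2.4.x (where `DescribeLogDirsResult.all()` still exposes `DescribeLogDirsResponse.LogDirInfo`); the class name and helper below are illustrative only and mirror, rather than reproduce, the repository code:

```java
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.requests.DescribeLogDirsResponse;
import reactor.core.publisher.Mono;

public class LogDirsExample {
  // Bridges a KafkaFuture into a Mono, mirroring what ClusterUtil.toMono does in the project.
  static <T> Mono<T> toMono(KafkaFuture<T> future) {
    return Mono.create(sink -> future.whenComplete((result, error) -> {
      if (error != null) {
        sink.error(error);
      } else {
        sink.success(result);
      }
    }));
  }

  // Fetches per-broker log dir info; the result map is then mapped to the API DTOs.
  static Mono<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> logDirs(
      AdminClient adminClient, List<Integer> brokers) {
    return toMono(adminClient.describeLogDirs(brokers).all());
  }
}
```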
provectus/kafka-ui/683_722 | provectus/kafka-ui | provectus/kafka-ui/683 | provectus/kafka-ui/722 | [
"connected",
"timestamp(timedelta=0.0, similarity=0.8896190383136755)"
] | 4c13555461cb5225ed976cf5d5e864d0f041840a | eed35de014489ef62698d828a74095e01ab1fc3f | [
"@anderssynstad thanks for creating this issue. we'll take it into next milestone."
] | [
"This might not work for all cases it's better to make Zookeeper TLS config on cluster level",
"@germanosin thank you. Done.",
"Could we use same params structure like: \r\n```\r\nKAFKA_CLUSTERS_0_ZOOKEEPER_PROPERTIES_CLIENTCNXNSOCKET\r\nKAFKA_CLUSTERS_0_ZOOKEEPER_PROPERTIES_SECURE\r\n...\r\n```",
"Done. And it works anyway, I dunno how it picks it up.\r\n\r\n```14:41:35.603 [kafka-admin-client-thread | adminclient-1] DEBUG com.provectus.kafka.ui.service.ZookeeperService - Start getting Zookeeper metrics for kafkaCluster: local```"
] | "2021-07-27T10:50:49Z" | [
"type/enhancement",
"scope/backend"
] | Connect to TLS-secured ZooKeeper cluster | It would be useful if one could connect to a ZooKeeper cluster using TLS.
For Kafka, one uses a config like this:
```
zookeeper.ssl.client.enable=true
zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty
zookeeper.ssl.truststore.location=/path/truststore.jks
zookeeper.ssl.truststore.password=password
``` | [
"README.md",
"kafka-ui-api/pom.xml",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ZookeeperService.java",
"pom.xml"
] | [
"README.md",
"docker/kafka-ui-zookeeper-ssl.yml",
"kafka-ui-api/pom.xml",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ZooKeeperException.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ZookeeperService.java",
"pom.xml"
] | [] | diff --git a/README.md b/README.md
index 11f4568b27d..db4df22f28b 100644
--- a/README.md
+++ b/README.md
@@ -120,6 +120,8 @@ To be continued
# Configuration
+We have a plenty of docker-compose files as examples. Please check them out in ``docker`` directory.
+
## Configuration File
Example of how to configure clusters in the [application-local.yml](https://github.com/provectus/kafka-ui/blob/master/kafka-ui-api/src/main/resources/application-local.yml) configuration file:
diff --git a/docker/kafka-ui-zookeeper-ssl.yml b/docker/kafka-ui-zookeeper-ssl.yml
new file mode 100644
index 00000000000..0ae5381b642
--- /dev/null
+++ b/docker/kafka-ui-zookeeper-ssl.yml
@@ -0,0 +1,145 @@
+---
+version: '2'
+services:
+
+ kafka-ui:
+ container_name: kafka-ui
+ image: provectuslabs/kafka-ui:latest
+ ports:
+ - 8080:8080
+ - 5005:5005
+ volumes:
+ - /tmp/kafka/secrets/kafka.kafka1.keystore.jks:/etc/kafka/secrets/kafka.zookeeper.keystore.jks
+ - /tmp/kafka/secrets/kafka.zookeeper.truststore.jks:/etc/kafka/secrets/kafka.zookeeper.truststore.jks
+ depends_on:
+ - zookeeper0
+ - kafka0
+ - schemaregistry0
+ - kafka-connect0
+ environment:
+ KAFKA_CLUSTERS_0_NAME: local
+ KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
+ KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2182
+ KAFKA_CLUSTERS_0_JMXPORT: 9997
+ KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
+ KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
+ KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
+ KAFKA_CLUSTERS_0_ZOOKEEPER_CLIENTCNXNSOCKET: org.apache.zookeeper.ClientCnxnSocketNetty
+ KAFKA_CLUSTERS_0_ZOOKEEPER_CLIENT_SECURE: 'true'
+ KAFKA_CLUSTERS_0_ZOOKEEPER_SSL_KEYSTORE_LOCATION: /etc/kafka/secrets/kafka.zookeeper.keystore.jks
+ KAFKA_CLUSTERS_0_ZOOKEEPER_SSL_KEYSTORE_PASSWORD: 12345678
+ KAFKA_CLUSTERS_0_ZOOKEEPER_SSL_TRUSTSTORE_LOCATION: /etc/kafka/secrets/kafka.zookeeper.truststore.jks
+ KAFKA_CLUSTERS_0_ZOOKEEPER_SSL_TRUSTSTORE_PASSWORD: 12345678
+
+ zookeeper0:
+ image: confluentinc/cp-zookeeper:5.2.4
+ volumes:
+ - /tmp/kafka/secrets/kafka.kafka1.keystore.jks:/etc/kafka/secrets/kafka.zookeeper.keystore.jks
+ - /tmp/kafka/secrets/kafka.zookeeper.truststore.jks:/etc/kafka/secrets/kafka.zookeeper.truststore.jks
+ environment:
+ ZOOKEEPER_CLIENT_PORT: 2182
+ ZOOKEEPER_TICK_TIME: 2000
+
+ ZOOKEEPER_SECURE_CLIENT_PORT: 2182
+ ZOOKEEPER_SERVER_CNXN_FACTORY: org.apache.zookeeper.server.NettyServerCnxnFactory
+ ZOOKEEPER_SSL_KEYSTORE_LOCATION: /etc/kafka/secrets/kafka.zookeeper.keystore.jks
+ ZOOKEEPER_SSL_KEYSTORE_PASSWORD: 12345678
+ ZOOKEEPER_SSL_KEYSTORE_TYPE: PKCS12
+ ZOOKEEPER_SSL_TRUSTSTORE_LOCATION: /etc/kafka/secrets/kafka.zookeeper.truststore.jks
+ ZOOKEEPER_SSL_TRUSTSTORE_PASSWORD: 12345678
+ ZOOKEEPER_SSL_TRUSTSTORE_TYPE: JKS
+ # TLS 1.2 is the tested-default - TLS 1.3 has not been tested for production
+ # You can evaluate TLS 1.3 for ZooKeeper by uncommenting the following two properties
+ # and setting KAFKA_ZOOKEEPER_SSL_PROTOCOL on brokers
+ ZOOKEEPER_SSL_ENABLED_PROTOCOLS: TLSv1.3,TLSv1.2
+ ZOOKEEPER_SSL_QUORUM_ENABLED_PROTOCOLS: TLSv1.3,TLSv1.2
+ ZOOKEEPER_SSL_CIPHER_SUITES: TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ ZOOKEEPER_SSL_CLIENT_AUTH: need
+ ZOOKEEPER_AUTH_PROVIDER_X509: org.apache.zookeeper.server.auth.X509AuthenticationProvider
+ ZOOKEEPER_AUTH_PROVIDER_SASL: org.apache.zookeeper.server.auth.SASLAuthenticationProvider
+ ports:
+ - 2182:2182
+
+ kafka0:
+ image: confluentinc/cp-kafka:5.2.4
+ depends_on:
+ - zookeeper0
+ ports:
+ - 9092:9092
+ - 9997:9997
+ volumes:
+ - /tmp/kafka/secrets/kafka.kafka1.keystore.jks:/etc/kafka/secrets/kafka.kafka1.keystore.jks
+ - /tmp/kafka/secrets/kafka.server.truststore.jks:/etc/kafka/secrets/kafka.kafka1.truststore.jks
+ environment:
+ KAFKA_BROKER_ID: 1
+ KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2182
+ KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
+ KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+ JMX_PORT: 9997
+ KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
+ KAFKA_ZOOKEEPER_SSL_CLIENT_ENABLE: 'true'
+ KAFKA_ZOOKEEPER_SSL_CIPHER_SUITES: TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ KAFKA_ZOOKEEPER_CLIENT_CNXN_SOCKET: org.apache.zookeeper.ClientCnxnSocketNetty
+ KAFKA_ZOOKEEPER_SSL_KEYSTORE_LOCATION: /etc/kafka/secrets/kafka.kafka1.keystore.jks
+ KAFKA_ZOOKEEPER_SSL_KEYSTORE_PASSWORD: 12345678
+ KAFKA_ZOOKEEPER_SSL_KEYSTORE_TYPE: PKCS12
+ KAFKA_ZOOKEEPER_SSL_TRUSTSTORE_LOCATION: /etc/kafka/secrets/kafka.kafka1.truststore.jks
+ KAFKA_ZOOKEEPER_SSL_TRUSTSTORE_PASSWORD: 12345678
+ KAFKA_ZOOKEEPER_SSL_TRUSTSTORE_TYPE: JKS
+
+ schemaregistry0:
+ image: confluentinc/cp-schema-registry:5.2.4
+ ports:
+ - 8085:8085
+ depends_on:
+ - zookeeper0
+ - kafka0
+ environment:
+ SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
+ SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2182
+ SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
+ SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
+ SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
+
+ SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
+ SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
+ SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
+
+ kafka-connect0:
+ image: confluentinc/cp-kafka-connect:5.2.4
+ ports:
+ - 8083:8083
+ depends_on:
+ - kafka0
+ - schemaregistry0
+ environment:
+ CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
+ CONNECT_GROUP_ID: compose-connect-group
+ CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
+ CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
+ CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
+ CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
+ CONNECT_STATUS_STORAGE_TOPIC: _connect_status
+ CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
+ CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
+ CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
+ CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
+ CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
+ CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
+ CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
+ CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
+ CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
+
+ kafka-init-topics:
+ image: confluentinc/cp-kafka:5.2.4
+ volumes:
+ - ./message.json:/data/message.json
+ depends_on:
+ - kafka0
+ command: "bash -c 'echo Waiting for Kafka to be ready... && \
+ cub kafka-ready -b kafka0:29092 1 30 && \
+ kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2182 && \
+ kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2182 && \
+ kafka-console-producer --broker-list kafka0:29092 -topic second.users < /data/message.json'"
diff --git a/kafka-ui-api/pom.xml b/kafka-ui-api/pom.xml
index ea7a888173d..11c07f77d53 100644
--- a/kafka-ui-api/pom.xml
+++ b/kafka-ui-api/pom.xml
@@ -62,9 +62,9 @@
<version>${kafka.version}</version>
</dependency>
<dependency>
- <groupId>com.101tec</groupId>
- <artifactId>zkclient</artifactId>
- <version>${zkclient.version}</version>
+ <groupId>org.apache.zookeeper</groupId>
+ <artifactId>zookeeper</artifactId>
+ <version>${zookeper.version}</version>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ZooKeeperException.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ZooKeeperException.java
new file mode 100644
index 00000000000..761e8f1c1c0
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ZooKeeperException.java
@@ -0,0 +1,9 @@
+package com.provectus.kafka.ui.exception;
+
+public class ZooKeeperException extends RuntimeException {
+
+ public ZooKeeperException(Throwable cause) {
+ super(cause);
+ }
+
+}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
index ed7cd4e246b..81c4df474ea 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
@@ -149,8 +149,9 @@ private KafkaCluster buildFromData(KafkaCluster currentCluster,
ServerStatus zookeeperStatus = ServerStatus.OFFLINE;
Throwable zookeeperException = null;
try {
- zookeeperStatus = zookeeperService.isZookeeperOnline(currentCluster) ? ServerStatus.ONLINE :
- ServerStatus.OFFLINE;
+ zookeeperStatus = zookeeperService.isZookeeperOnline(currentCluster)
+ ? ServerStatus.ONLINE
+ : ServerStatus.OFFLINE;
} catch (Throwable e) {
zookeeperException = e;
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ZookeeperService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ZookeeperService.java
index 02123ac9576..31bd317e785 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ZookeeperService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ZookeeperService.java
@@ -1,11 +1,15 @@
package com.provectus.kafka.ui.service;
+import com.provectus.kafka.ui.exception.ZooKeeperException;
import com.provectus.kafka.ui.model.KafkaCluster;
+import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
-import org.I0Itec.zkclient.ZkClient;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZooKeeper;
+import org.jetbrains.annotations.Nullable;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;
@@ -14,7 +18,7 @@
@Log4j2
public class ZookeeperService {
- private final Map<String, ZkClient> cachedZkClient = new ConcurrentHashMap<>();
+ private final Map<String, ZooKeeper> cachedZkClient = new ConcurrentHashMap<>();
public boolean isZookeeperOnline(KafkaCluster kafkaCluster) {
var isConnected = false;
@@ -28,20 +32,41 @@ public boolean isZookeeperOnline(KafkaCluster kafkaCluster) {
return isConnected;
}
- private boolean isZkClientConnected(ZkClient zkClient) {
- zkClient.getChildren("/brokers/ids");
+ private boolean isZkClientConnected(ZooKeeper zkClient) {
+ try {
+ zkClient.getChildren("/brokers/ids", null);
+ } catch (KeeperException e) {
+ log.error("A zookeeper exception has occurred", e);
+ return false;
+ } catch (InterruptedException e) {
+ log.error("Interrupted: ", e);
+ Thread.currentThread().interrupt();
+ }
return true;
}
- private ZkClient getOrCreateZkClient(KafkaCluster cluster) {
+ @Nullable
+ private ZooKeeper getOrCreateZkClient(KafkaCluster cluster) {
+ final var clusterName = cluster.getName();
+ final var client = cachedZkClient.get(clusterName);
+ if (client != null && client.getState() != ZooKeeper.States.CONNECTED) {
+ cachedZkClient.remove(clusterName);
+ }
try {
- return cachedZkClient.computeIfAbsent(
- cluster.getName(),
- (n) -> new ZkClient(cluster.getZookeeper(), 1000)
- );
+ return cachedZkClient.computeIfAbsent(clusterName, n -> createClient(cluster));
} catch (Exception e) {
- log.error("Error while creating zookeeper client for cluster {}", cluster.getName());
+ log.error("Error while creating zookeeper client for cluster {}", clusterName);
return null;
}
}
+
+ private ZooKeeper createClient(KafkaCluster cluster) {
+ try {
+ return new ZooKeeper(cluster.getZookeeper(), 60 * 1000, watchedEvent -> {});
+ } catch (IOException e) {
+ log.error("Error while creating a zookeeper client for cluster [{}]",
+ cluster.getName());
+ throw new ZooKeeperException(e);
+ }
+ }
}
diff --git a/pom.xml b/pom.xml
index 0c74e664aae..d1ba567fa8f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1,68 +1,90 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
<packaging>pom</packaging>
<modules>
<module>kafka-ui-contract</module>
- <module>kafka-ui-api</module>
- <module>kafka-ui-e2e-checks</module>
- </modules>
+ <module>kafka-ui-api</module>
+ <module>kafka-ui-e2e-checks</module>
+ </modules>
- <properties>
- <maven.compiler.source>13</maven.compiler.source>
- <maven.compiler.target>13</maven.compiler.target>
- <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <properties>
+ <maven.compiler.source>13</maven.compiler.source>
+ <maven.compiler.target>13</maven.compiler.target>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
- <spring-boot.version>2.2.4.RELEASE</spring-boot.version>
- <jackson-databind-nullable.version>0.2.1</jackson-databind-nullable.version>
- <org.mapstruct.version>1.3.1.Final</org.mapstruct.version>
- <org.projectlombok.version>1.18.10</org.projectlombok.version>
+ <spring-boot.version>2.2.4.RELEASE</spring-boot.version>
+ <jackson-databind-nullable.version>0.2.1</jackson-databind-nullable.version>
+ <org.mapstruct.version>1.3.1.Final</org.mapstruct.version>
+ <org.projectlombok.version>1.18.10</org.projectlombok.version>
<org.projectlombok.e2e-checks.version>1.18.20</org.projectlombok.e2e-checks.version>
- <git.revision>latest</git.revision>
- <zkclient.version>0.11</zkclient.version>
- <kafka-clients.version>2.4.1</kafka-clients.version>
- <node.version>v14.17.1</node.version>
- <dockerfile-maven-plugin.version>1.4.10</dockerfile-maven-plugin.version>
- <frontend-maven-plugin.version>1.8.0</frontend-maven-plugin.version>
- <maven-compiler-plugin.version>3.5.1</maven-compiler-plugin.version>
- <maven-clean-plugin.version>3.1.0</maven-clean-plugin.version>
- <maven-resources-plugin.version>3.1.0</maven-resources-plugin.version>
- <maven-surefire-plugin.version>2.22.0</maven-surefire-plugin.version>
- <openapi-generator-maven-plugin.version>4.3.0</openapi-generator-maven-plugin.version>
- <swagger-annotations.version>1.6.0</swagger-annotations.version>
- <springdoc-openapi-webflux-ui.version>1.2.32</springdoc-openapi-webflux-ui.version>
- <kafka.version>2.4.1</kafka.version>
- <avro.version>1.9.2</avro.version>
- <confluent.version>5.5.1</confluent.version>
- <apache.commons.version>2.2</apache.commons.version>
- <test.containers.version>1.15.1</test.containers.version>
- <junit-jupiter-engine.version>5.4.0</junit-jupiter-engine.version>
- <mockito.version>2.21.0</mockito.version>
- <assertj.version>3.19.0</assertj.version>
+ <git.revision>latest</git.revision>
+ <zookeper.version>3.5.7</zookeper.version>
+ <kafka-clients.version>2.4.1</kafka-clients.version>
+ <node.version>v14.17.1</node.version>
+ <dockerfile-maven-plugin.version>1.4.10</dockerfile-maven-plugin.version>
+ <frontend-maven-plugin.version>1.8.0</frontend-maven-plugin.version>
+ <maven-compiler-plugin.version>3.5.1</maven-compiler-plugin.version>
+ <maven-clean-plugin.version>3.1.0</maven-clean-plugin.version>
+ <maven-resources-plugin.version>3.1.0</maven-resources-plugin.version>
+ <maven-surefire-plugin.version>2.22.0</maven-surefire-plugin.version>
+ <openapi-generator-maven-plugin.version>4.3.0</openapi-generator-maven-plugin.version>
+ <swagger-annotations.version>1.6.0</swagger-annotations.version>
+ <springdoc-openapi-webflux-ui.version>1.2.32</springdoc-openapi-webflux-ui.version>
+ <kafka.version>2.4.1</kafka.version>
+ <avro.version>1.9.2</avro.version>
+ <confluent.version>5.5.1</confluent.version>
+ <apache.commons.version>2.2</apache.commons.version>
+ <test.containers.version>1.15.1</test.containers.version>
+ <junit-jupiter-engine.version>5.4.0</junit-jupiter-engine.version>
+ <mockito.version>2.21.0</mockito.version>
+ <assertj.version>3.19.0</assertj.version>
- <frontend-generated-sources-directory>..//kafka-ui-react-app/src/generated-sources</frontend-generated-sources-directory>
- <sonar.organization>provectus</sonar.organization>
- <sonar.host.url>https://sonarcloud.io</sonar.host.url>
- </properties>
+ <frontend-generated-sources-directory>..//kafka-ui-react-app/src/generated-sources
+ </frontend-generated-sources-directory>
+ <sonar.organization>provectus</sonar.organization>
+ <sonar.host.url>https://sonarcloud.io</sonar.host.url>
+ </properties>
- <repositories>
- <repository>
- <id>confluent</id>
- <url>https://packages.confluent.io/maven/</url>
- </repository>
- </repositories>
+ <repositories>
+ <repository>
+ <id>confluent</id>
+ <url>https://packages.confluent.io/maven/</url>
+ </repository>
+ <repository>
+ <id>central</id>
+ <name>Central Repository</name>
+ <url>https://repo.maven.apache.org/maven2</url>
+ <layout>default</layout>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
- <pluginRepositories>
- <pluginRepository>
- <id>confluent</id>
- <url>https://packages.confluent.io/maven/</url>
- </pluginRepository>
- </pluginRepositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>confluent</id>
+ <url>https://packages.confluent.io/maven/</url>
+ </pluginRepository>
+ <pluginRepository>
+ <id>central</id>
+ <name>Central Repository</name>
+ <url>https://repo.maven.apache.org/maven2</url>
+ <layout>default</layout>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ <releases>
+ <updatePolicy>never</updatePolicy>
+ </releases>
+ </pluginRepository>
+ </pluginRepositories>
- <groupId>com.provectus</groupId>
- <artifactId>kafka-ui</artifactId>
- <version>0.1.1-SNAPSHOT</version>
- <name>kafka-ui</name>
- <description>Kafka metrics for UI panel</description>
+ <groupId>com.provectus</groupId>
+ <artifactId>kafka-ui</artifactId>
+ <version>0.1.1-SNAPSHOT</version>
+ <name>kafka-ui</name>
+ <description>Kafka metrics for UI panel</description>
</project>
| null | train | train | 2021-07-27T18:09:15 | "2021-07-16T09:13:08Z" | anderssynstad | train |
provectus/kafka-ui/704_731 | provectus/kafka-ui | provectus/kafka-ui/704 | provectus/kafka-ui/731 | [
"connected"
] | 03ed67db897de24a7a827a5dd135021f268e886b | 8f625367b355aaa7c29cdbca4d2d7b2de6931fb5 | [
"Now I use following naming: issue#704_Some_inforamive_words\r\nI suppose that it is convenient, informative and beautiful.\r\nHow do you think?",
"I suggest it would be nice to use grouping tokens at least.\r\nFor example:\r\n`issues/123`\r\n`feature/feature_name`\r\n`bugfix/fix_thing`",
"Take a look at PR #731. "
] | [] | "2021-07-28T15:39:49Z" | [
"type/documentation",
"type/enhancement"
] | Contribution: Define naming convention for Git branches | ### Is your proposal related to a problem?
<!--
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
-->
(Write your answer here.)
### Describe the solution you'd like
<!--
Provide a clear and concise description of what you want to happen.
-->
(Describe your proposed solution here.)
### Describe alternatives you've considered
<!--
Let us know about other solutions you've tried or researched.
-->
(Write your answer here.)
### Additional context
<!--
Is there anything else you can add about the proposal?
You might want to link to related issues here, if you haven't already.
-->
(Write your answer here.) | [
"CONTRIBUTING.md"
] | [
"CONTRIBUTING.md"
] | [] | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 18437c90de0..0df5a0bb107 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,16 +1,44 @@
# Contributing
When contributing to this repository, please first discuss the change you wish to make via issue,
-email, or any other method with the owners of this repository before making a change.
+email, or any other method with the maintainers of the repository before making a change.
-Please note we have a code of conduct, please follow it in all your interactions with the project.
+Please note we have a code of conduct (`CODE-OF-CONDUCT.md`), please follow it in all your interactions with the project.
## Pull Request Process
-1. Ensure any install or build dependencies are removed before the end of the layer when doing a
+### General rules
+
+1. Ensure any install or build dependencies have been removed before the end of the layer when composing a
build.
-2. Update the README.md with details of changes to the interface, this includes new environment
+2. Update the `README.md` with details of changes to the interface, this includes new environment
variables, exposed ports, useful file locations and container parameters.
-3. Start Pull Request name with issue number (ex. #123)
-4. You may merge the Pull Request in once you have the sign-off of two other developers, or if you
- do not have permission to do that, you may request the second reviewer to merge it for you.
+3. Start a pull request name with issue number (ex. #123).
+4. You may merge the pull request once you have the approval of two other developers. In case you
+ don't have permissions to do that, you may request the second reviewer to merge it for you.
+
+### Branch naming
+
+In order to keep branch names understandable and similar please use the corresponding branch naming conventions.
+
+Generally speaking, it's a good idea to add a group/type prefix for a branch, e.g.,
+if you're working on a specific branch you could name your branch `issues/xxx`.
+
+Here's a list of good examples:<br/>
+`issues/123`<br/>
+`feature/feature_name`<br/>
+`bugfix/fix_thing`<br/>
+
+### Code style
+
+There's a file called `checkstyle.xml` in project root under `etc` directory.<br/>
+You can import it into IntelliJ IDEA via checkstyle plugin.
+
+### Naming conventions
+
+REST paths should be **lowercase** and consist of just **plural** nouns.<br/>
+Also, multiple words in a single path segment should be divided by a hyphen symbol (`-`).<br/>
+
+Query variable names should be formatted in `camelCase`.
+
+Model names should consist of just **plural** nouns and should be formatted in `camelCase` as well.
\ No newline at end of file
| null | test | train | 2021-08-18T12:24:17 | "2021-07-21T14:58:31Z" | IldarAlmakaev | train |
provectus/kafka-ui/634_732 | provectus/kafka-ui | provectus/kafka-ui/634 | provectus/kafka-ui/732 | [
"timestamp(timedelta=1.0, similarity=0.9535978590429749)",
"connected"
] | d29f42e0a0b7358095e058125e7f97990527ff8e | 268c87131232fcf3451922fc13df3057e15cc0f7 | [
"Waiting frontend",
"Looks like @GneyHabub added cleanup policy to topic description, but missed topic list part. FYI: @workshur "
] | [] | "2021-07-29T07:04:04Z" | [
"type/enhancement",
"good first issue",
"scope/backend",
"scope/frontend"
] | Provide cleanup policy in topic list | ### Describe the solution you'd like
Backend should expose the cleanup policy in the topic list request:
delete (with retention), compact, or compact with delete (with retention).
Frontend should display this info on the topic list page.
| [
"kafka-ui-react-app/src/components/Topics/List/List.tsx",
"kafka-ui-react-app/src/components/Topics/List/ListItem.tsx",
"kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx"
] | [
"kafka-ui-react-app/src/components/Topics/List/List.tsx",
"kafka-ui-react-app/src/components/Topics/List/ListItem.tsx",
"kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/List/List.tsx b/kafka-ui-react-app/src/components/Topics/List/List.tsx
index 34b7bc8a955..be1a6b148dd 100644
--- a/kafka-ui-react-app/src/components/Topics/List/List.tsx
+++ b/kafka-ui-react-app/src/components/Topics/List/List.tsx
@@ -139,6 +139,7 @@ const List: React.FC<Props> = ({
<th>Number of messages</th>
<th>Size</th>
<th>Type</th>
+ <th>Clean Up Policy</th>
<th> </th>
</tr>
</thead>
diff --git a/kafka-ui-react-app/src/components/Topics/List/ListItem.tsx b/kafka-ui-react-app/src/components/Topics/List/ListItem.tsx
index ea2e99953fd..2a533027aa6 100644
--- a/kafka-ui-react-app/src/components/Topics/List/ListItem.tsx
+++ b/kafka-ui-react-app/src/components/Topics/List/ListItem.tsx
@@ -20,7 +20,14 @@ export interface ListItemProps {
}
const ListItem: React.FC<ListItemProps> = ({
- topic: { name, internal, partitions, segmentSize, replicationFactor },
+ topic: {
+ name,
+ internal,
+ partitions,
+ segmentSize,
+ replicationFactor,
+ cleanUpPolicy,
+ },
deleteTopic,
clusterName,
clearTopicMessages,
@@ -85,6 +92,9 @@ const ListItem: React.FC<ListItemProps> = ({
{internal ? 'Internal' : 'External'}
</div>
</td>
+ <td>
+ <span className="tag is-info">{cleanUpPolicy || 'Unknown'}</span>
+ </td>
<td className="topic-action-block">
{!internal && !isReadOnly ? (
<>
diff --git a/kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap b/kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap
index b3012f81387..6f5456de37c 100644
--- a/kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap
+++ b/kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap
@@ -217,6 +217,9 @@ exports[`List when it does not have readonly flag matches the snapshot 1`] = `
<th>
Type
</th>
+ <th>
+ Clean Up Policy
+ </th>
<th>
</th>
diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx
index 34f45dbea92..93c7f55baac 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx
@@ -30,6 +30,7 @@ const Overview: React.FC<Props> = ({
segmentCount,
clusterName,
topicName,
+ cleanUpPolicy,
clearTopicMessages,
}) => {
const { isReadOnly } = React.useContext(ClusterContext);
@@ -59,6 +60,9 @@ const Overview: React.FC<Props> = ({
<BytesFormatted value={segmentSize} />
</Indicator>
<Indicator label="Segment count">{segmentCount}</Indicator>
+ <Indicator label="Clean Up Policy">
+ <span className="tag is-info">{cleanUpPolicy || 'Unknown'}</span>
+ </Indicator>
</MetricsWrapper>
<div className="box">
<table className="table is-striped is-fullwidth">
| null | train | train | 2021-07-29T07:44:37 | "2021-07-05T15:20:06Z" | germanosin | train |
provectus/kafka-ui/702_751 | provectus/kafka-ui | provectus/kafka-ui/702 | provectus/kafka-ui/751 | [
"timestamp(timedelta=0.0, similarity=0.9255164648756724)",
"connected"
] | ec70e28bffc501ed54b6a5543185ef9b30dc6ea3 | fa4ef337a7523d366de12d4e815dc2d6ec05f3a3 | [] | [] | "2021-07-30T09:27:25Z" | [
"type/enhancement",
"scope/backend"
] | Support incremental config update for brokers | ### Describe the solution you'd like
Implement the endpoint
PUT /api/clusters/{clusterName}/brokers/{id}/configs/{name}:
on call, perform an incremental alter-config operation for the given broker.
| [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/InvalidRequestApiException.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml"
] | [
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ConfigTest.java",
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/LogDirsTest.java"
] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java
index 0b34efdbad1..d7cfd579125 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java
@@ -3,6 +3,7 @@
import com.provectus.kafka.ui.api.BrokersApi;
import com.provectus.kafka.ui.model.Broker;
import com.provectus.kafka.ui.model.BrokerConfig;
+import com.provectus.kafka.ui.model.BrokerConfigItem;
import com.provectus.kafka.ui.model.BrokerLogdirUpdate;
import com.provectus.kafka.ui.model.BrokerMetrics;
import com.provectus.kafka.ui.model.BrokersLogdirs;
@@ -61,4 +62,16 @@ public Mono<ResponseEntity<Void>> updateBrokerTopicPartitionLogDir(
.flatMap(bld -> clusterService.updateBrokerLogDir(clusterName, id, bld))
.map(ResponseEntity::ok);
}
+
+ @Override
+ public Mono<ResponseEntity<Void>> updateBrokerConfigByName(String clusterName,
+ Integer id,
+ String name,
+ Mono<BrokerConfigItem> brokerConfig,
+ ServerWebExchange exchange) {
+ return brokerConfig
+ .flatMap(bci -> clusterService.updateBrokerConfigByName(
+ clusterName, id, name, bci.getValue()))
+ .map(ResponseEntity::ok);
+ }
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java
index f87fccbaaf9..87147389048 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java
@@ -22,7 +22,8 @@ public enum ErrorCode {
CONNECT_NOT_FOUND(4010, HttpStatus.NOT_FOUND),
KSQLDB_NOT_FOUND(4011, HttpStatus.NOT_FOUND),
DIR_NOT_FOUND(4012, HttpStatus.BAD_REQUEST),
- TOPIC_OR_PARTITION_NOT_FOUND(4013, HttpStatus.BAD_REQUEST);
+ TOPIC_OR_PARTITION_NOT_FOUND(4013, HttpStatus.BAD_REQUEST),
+ INVALID_REQUEST(4014, HttpStatus.BAD_REQUEST);
static {
// codes uniqueness check
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/InvalidRequestApiException.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/InvalidRequestApiException.java
new file mode 100644
index 00000000000..0ca84803b37
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/InvalidRequestApiException.java
@@ -0,0 +1,13 @@
+package com.provectus.kafka.ui.exception;
+
+public class InvalidRequestApiException extends CustomBaseException {
+
+ public InvalidRequestApiException(String message) {
+ super(message);
+ }
+
+ @Override
+ public ErrorCode getErrorCode() {
+ return ErrorCode.INVALID_REQUEST;
+ }
+}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
index b75c7a62552..37938f9c73c 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
@@ -386,4 +386,12 @@ public Mono<Void> updateBrokerLogDir(
return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
.flatMap(c -> kafkaService.updateBrokerLogDir(c, id, brokerLogDir));
}
+
+ public Mono<Void> updateBrokerConfigByName(String clusterName,
+ Integer id,
+ String name,
+ String value) {
+ return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
+ .flatMap(c -> kafkaService.updateBrokerConfigByName(c, id, name, value));
+ }
}
\ No newline at end of file
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
index 8dc5c3aed37..13ec1e87c64 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
@@ -1,6 +1,7 @@
package com.provectus.kafka.ui.service;
import com.provectus.kafka.ui.exception.IllegalEntityStateException;
+import com.provectus.kafka.ui.exception.InvalidRequestApiException;
import com.provectus.kafka.ui.exception.LogDirNotFoundApiException;
import com.provectus.kafka.ui.exception.NotFoundException;
import com.provectus.kafka.ui.exception.TopicMetadataException;
@@ -76,6 +77,7 @@
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionReplica;
import org.apache.kafka.common.config.ConfigResource;
+import org.apache.kafka.common.errors.InvalidRequestException;
import org.apache.kafka.common.errors.LogDirNotFoundException;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
@@ -966,4 +968,27 @@ private Mono<Void> updateBrokerLogDir(ExtendedAdminClient adminMono,
e -> Mono.error(new LogDirNotFoundApiException()))
.doOnError(log::error);
}
+
+ public Mono<Void> updateBrokerConfigByName(KafkaCluster cluster,
+ Integer broker,
+ String name,
+ String value) {
+ return getOrCreateAdminClient(cluster)
+ .flatMap(ac -> updateBrokerConfigByName(ac, broker, name, value));
+ }
+
+ private Mono<Void> updateBrokerConfigByName(ExtendedAdminClient admin,
+ Integer broker,
+ String name,
+ String value) {
+ ConfigResource cr = new ConfigResource(ConfigResource.Type.BROKER, String.valueOf(broker));
+ AlterConfigOp op = new AlterConfigOp(new ConfigEntry(name, value), AlterConfigOp.OpType.SET);
+
+ return Mono.just(admin)
+ .map(a -> a.getAdminClient().incrementalAlterConfigs(Map.of(cr, List.of(op))))
+ .flatMap(result -> ClusterUtil.toMono(result.all()))
+ .onErrorResume(InvalidRequestException.class,
+ e -> Mono.error(new InvalidRequestApiException(e.getMessage())))
+ .doOnError(log::error);
+ }
}
diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
index 7fc50d5358e..9d126efaf29 100644
--- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
+++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
@@ -106,6 +106,37 @@ paths:
404:
description: Not found
+ /api/clusters/{clusterName}/brokers/{id}/configs/{name}:
+ put:
+ tags:
+ - Brokers
+ summary: updateBrokerConfigByName
+ operationId: updateBrokerConfigByName
+ parameters:
+ - name: clusterName
+ in: path
+ required: true
+ schema:
+ type: string
+ - name: id
+ in: path
+ required: true
+ schema:
+ type: integer
+ - name: name
+ in: path
+ required: true
+ schema:
+ type: string
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/BrokerConfigItem'
+ responses:
+ 200:
+ description: OK
+
/api/clusters/{clusterName}/metrics:
get:
tags:
@@ -2400,6 +2431,12 @@ components:
- totalReplicationFactor
- topicName
+ BrokerConfigItem:
+ type: object
+ properties:
+ value:
+ type: string
+
BrokerConfig:
type: object
properties:
| diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ConfigTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ConfigTest.java
new file mode 100644
index 00000000000..5ae55b56ac3
--- /dev/null
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ConfigTest.java
@@ -0,0 +1,79 @@
+package com.provectus.kafka.ui.service;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.provectus.kafka.ui.AbstractBaseTest;
+import com.provectus.kafka.ui.model.BrokerConfig;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import org.junit.jupiter.api.Test;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.test.autoconfigure.web.reactive.AutoConfigureWebTestClient;
+import org.springframework.core.ParameterizedTypeReference;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.web.reactive.server.WebTestClient;
+
+@ContextConfiguration(initializers = {AbstractBaseTest.Initializer.class})
+@AutoConfigureWebTestClient(timeout = "60000")
+public class ConfigTest extends AbstractBaseTest {
+
+ @Autowired
+ private WebTestClient webTestClient;
+
+ @Test
+ public void testAlterConfig() throws Exception {
+ String name = "background.threads";
+
+ Optional<BrokerConfig> bc = getConfig(name);
+ assertThat(bc.isPresent()).isTrue();
+ assertThat(bc.get().getValue()).isEqualTo("10");
+
+ webTestClient.put()
+ .uri("/api/clusters/{clusterName}/brokers/{id}/configs/{name}", LOCAL, 1, name)
+ .bodyValue(Map.of(
+ "name", name,
+ "value", "5"
+ )
+ )
+ .exchange()
+ .expectStatus().isOk();
+
+ // Without sleep it returns old config so we need to wait a little bit
+ Thread.sleep(1000);
+
+ Optional<BrokerConfig> bcc = getConfig(name);
+ assertThat(bcc.isPresent()).isTrue();
+ assertThat(bcc.get().getValue()).isEqualTo("5");
+ }
+
+ @Test
+ public void testAlterReadonlyConfig() {
+ String name = "log.dirs";
+
+ webTestClient.put()
+ .uri("/api/clusters/{clusterName}/brokers/{id}/configs/{name}", LOCAL, 1, name)
+ .bodyValue(Map.of(
+ "name", name,
+ "value", "/var/lib/kafka2"
+ )
+ )
+ .exchange()
+ .expectStatus().isBadRequest();
+ }
+
+ private Optional<BrokerConfig> getConfig(String name) {
+ List<BrokerConfig> configs = webTestClient.get()
+ .uri("/api/clusters/{clusterName}/brokers/{id}/configs", LOCAL, 1)
+ .exchange()
+ .expectStatus().isOk()
+ .expectBody(new ParameterizedTypeReference<List<BrokerConfig>>() {
+ })
+ .returnResult()
+ .getResponseBody();
+
+ return configs.stream()
+ .filter(c -> c.getName().equals(name))
+ .findAny();
+ }
+}
diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/LogDirsTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/LogDirsTest.java
index 5412086a579..2abac1e61ee 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/LogDirsTest.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/LogDirsTest.java
@@ -5,7 +5,6 @@
import com.provectus.kafka.ui.AbstractBaseTest;
import com.provectus.kafka.ui.exception.LogDirNotFoundApiException;
import com.provectus.kafka.ui.exception.TopicOrPartitionNotFoundException;
-import com.provectus.kafka.ui.model.BrokerLogdirUpdate;
import com.provectus.kafka.ui.model.BrokerTopicLogdirs;
import com.provectus.kafka.ui.model.BrokersLogdirs;
import com.provectus.kafka.ui.model.ErrorResponse;
| train | train | 2021-07-29T20:45:06 | "2021-07-21T14:45:19Z" | germanosin | train |
provectus/kafka-ui/750_752 | provectus/kafka-ui | provectus/kafka-ui/750 | provectus/kafka-ui/752 | [
"connected"
] | fa4ef337a7523d366de12d4e815dc2d6ec05f3a3 | f7ce3471491fe23a94a77c927e2fdb63fc88600e | [
" Hi, @sreenivasvp. Thanks for creating issue. We'll try to fix it in current release.",
"Hello, @sreenivasvp! Could you please share your docker-compose file?\r\n",
"@MarselAhmetov \r\n\r\n containers:\r\n - image: provectuslabs/kafka-ui:master\r\n imagePullPolicy: Always\r\n name: kafka-ui\r\n resources:\r\n limits:\r\n cpu: 1000m\r\n memory: 2Gi\r\n requests:\r\n cpu: 1000m\r\n memory: 2Gi\r\n terminationMessagePath: /dev/termination-log\r\n terminationMessagePolicy: File\r\n ports:\r\n - containerPort: 8080\r\n name: server\r\n - containerPort: 5012\r\n name: jmx\r\n - env:\r\n - name: KAFKA_CLUSTERS_0_NAME\r\n value: ENV1\r\n - name: KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS\r\n value: headless.ENV1-kafka:9094\r\n - name: KAFKA_CLUSTERS_0_ZOOKEEPER\r\n value: headless.ENV1-kafka:2181\r\n - name: KAFKA_CLUSTERS_1_NAME\r\n value: ENV2\r\n - name: KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS\r\n value: headless.ENV2-kafka:9094\r\n - name: KAFKA_CLUSTERS_1_ZOOKEEPER\r\n value: headless.ENV2-kafka:2181\r\n - name: KAFKA_CLUSTERS_2_NAME\r\n value: ENV3\r\n - name: KAFKA_CLUSTERS_2_BOOTSTRAPSERVERS\r\n value: headless.ENV3-kafka:9094\r\n - name: KAFKA_CLUSTERS_2_ZOOKEEPER\r\n value: headless.ENV3-kafka:2181\r\n"
] | [
"as far as I see there should not be situation when getSchemaRegistry() != null but getSchemaRegistry().getUrl() == null ? If so, getUrl() != null check is redundant",
"same here"
] | "2021-07-30T10:04:46Z" | [
"type/bug",
"scope/backend"
] | Listing topic's fails : /ui/clusters/environment1/topics | **Describe the bug**
(A clear and concise description of what the bug is.)
Listing topic's fails : /ui/clusters/environment1/topics
07:40:04.776 [main] WARN org.springframework.boot.autoconfigure.jackson.JacksonAutoConfiguration$JodaDateTimeJacksonConfiguration - Auto-configuration of Jackson's Joda-Time integration is deprecated in favor of using java.time (JSR-310).
07:40:05.311 [main] WARN org.springframework.http.converter.json.Jackson2ObjectMapperBuilder - For Jackson Kotlin classes support please add "com.fasterxml.jackson.module:jackson-module-kotlin" to the classpath
07:40:05.818 [main] INFO com.provectus.kafka.ui.serde.DeserializationService - Using SchemaRegistryAwareRecordSerDe for cluster 'environment1'
07:40:05.884 [main] WARN org.springframework.boot.web.reactive.context.AnnotationConfigReactiveWebServerApplicationContext - Exception encountered during context initialization - cancelling refresh attempt: org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'deserializationService': Invocation of init method failed; nested exception is java.lang.RuntimeException: Can't init deserializer
07:40:05.979 [main] INFO org.springframework.boot.autoconfigure.logging.ConditionEvaluationReportLoggingListener -
Error starting ApplicationContext. To display the conditions report re-run your application with 'debug' enabled.
07:40:05.981 [main] ERROR org.springframework.boot.SpringApplication - Application run failed
org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'deserializationService': Invocation of init method failed; nested exception is java.lang.RuntimeException: Can't init deserializer
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:160) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.applyBeanPostProcessorsBeforeInitialization(AbstractAutowireCapableBeanFactory.java:416) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1788) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:595) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:517) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:323) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:222) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:321) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:879) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:878) ~[spring-context-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:550) ~[spring-context-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.boot.web.reactive.context.ReactiveWebServerApplicationContext.refresh(ReactiveWebServerApplicationContext.java:66) ~[spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at org.springframework.boot.SpringApplication.refresh(SpringApplication.java:747) [spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at org.springframework.boot.SpringApplication.refreshContext(SpringApplication.java:397) [spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at org.springframework.boot.SpringApplication.run(SpringApplication.java:315) [spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at org.springframework.boot.SpringApplication.run(SpringApplication.java:1226) [spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at org.springframework.boot.SpringApplication.run(SpringApplication.java:1215) [spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at com.provectus.kafka.ui.KafkaUiApplication.main(KafkaUiApplication.java:14) [classes!/:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:567) ~[?:?]
at org.springframework.boot.loader.MainMethodRunner.run(MainMethodRunner.java:48) [kafka-ui-api.jar:?]
at org.springframework.boot.loader.Launcher.launch(Launcher.java:87) [kafka-ui-api.jar:?]
at org.springframework.boot.loader.Launcher.launch(Launcher.java:51) [kafka-ui-api.jar:?]
at org.springframework.boot.loader.JarLauncher.main(JarLauncher.java:52) [kafka-ui-api.jar:?]
Caused by: java.lang.RuntimeException: Can't init deserializer
at com.provectus.kafka.ui.serde.DeserializationService.createRecordDeserializerForCluster(DeserializationService.java:44) ~[classes!/:?]
at java.util.stream.Collectors.lambda$uniqKeysMapAccumulator$1(Collectors.java:178) ~[?:?]
at java.util.stream.ReduceOps$3ReducingSink.accept(ReduceOps.java:169) ~[?:?]
at java.util.concurrent.ConcurrentHashMap$ValueSpliterator.forEachRemaining(ConcurrentHashMap.java:3605) ~[?:?]
at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:484) ~[?:?]
at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:474) ~[?:?]
at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:913) ~[?:?]
at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) ~[?:?]
at java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:578) ~[?:?]
at com.provectus.kafka.ui.serde.DeserializationService.init(DeserializationService.java:27) ~[classes!/:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:567) ~[?:?]
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleElement.invoke(InitDestroyAnnotationBeanPostProcessor.java:389) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleMetadata.invokeInitMethods(InitDestroyAnnotationBeanPostProcessor.java:333) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:157) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
... 26 more
Caused by: java.lang.NullPointerException
at java.util.Objects.requireNonNull(Objects.java:222) ~[?:?]
at com.provectus.kafka.ui.serde.schemaregistry.SchemaRegistryAwareRecordSerDe.createSchemaRegistryClient(SchemaRegistryAwareRecordSerDe.java:72) ~[classes!/:?]
at com.provectus.kafka.ui.serde.schemaregistry.SchemaRegistryAwareRecordSerDe.<init>(SchemaRegistryAwareRecordSerDe.java:101) ~[classes!/:?]
at com.provectus.kafka.ui.serde.DeserializationService.createRecordDeserializerForCluster(DeserializationService.java:41) ~[classes!/:?]
at java.util.stream.Collectors.lambda$uniqKeysMapAccumulator$1(Collectors.java:178) ~[?:?]
at java.util.stream.ReduceOps$3ReducingSink.accept(ReduceOps.java:169) ~[?:?]
at java.util.concurrent.ConcurrentHashMap$ValueSpliterator.forEachRemaining(ConcurrentHashMap.java:3605) ~[?:?]
at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:484) ~[?:?]
at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:474) ~[?:?]
at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:913) ~[?:?]
at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) ~[?:?]
at java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:578) ~[?:?]
at com.provectus.kafka.ui.serde.DeserializationService.init(DeserializationService.java:27) ~[classes!/:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:567) ~[?:?]
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleElement.invoke(InitDestroyAnnotationBeanPostProcessor.java:389) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleMetadata.invokeInitMethods(InitDestroyAnnotationBeanPostProcessor.java:333) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:157) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
... 26 more
**Set up**
(How do you run the app?)
today's image from master in docker store
docker pull provectuslabs/kafka-ui:master
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Go to the topic section for the given environment
**Expected behavior**
(A clear and concise description of what you expected to happen)
**Screenshots**
(If applicable, add screenshots to help explain your problem)
**Additional context**
(Add any other context about the problem here) | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java
index 380fe7d8534..9d2e3fc09a8 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java
@@ -35,6 +35,7 @@
import java.nio.file.Path;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.kafka.clients.admin.ConfigEntry;
@@ -91,7 +92,8 @@ default ConfigSynonym toConfigSynonym(ConfigEntry.ConfigSynonym config) {
Partition toPartition(InternalPartition topic);
default InternalSchemaRegistry setSchemaRegistry(ClustersProperties.Cluster clusterProperties) {
- if (clusterProperties == null) {
+ if (clusterProperties == null
+ || clusterProperties.getSchemaRegistry() == null) {
return null;
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java
index 03f1431b82d..747f5c61afa 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java
@@ -68,8 +68,9 @@ public class SchemaRegistryAwareRecordSerDe implements RecordSerDe {
private final ObjectMapper objectMapper = new ObjectMapper();
private static SchemaRegistryClient createSchemaRegistryClient(KafkaCluster cluster) {
- Objects.requireNonNull(cluster.getSchemaRegistry());
- Objects.requireNonNull(cluster.getSchemaRegistry().getUrl());
+ if (cluster.getSchemaRegistry() == null) {
+ throw new ValidationException("schemaRegistry is not specified");
+ }
List<SchemaProvider> schemaProviders =
List.of(new AvroSchemaProvider(), new ProtobufSchemaProvider(), new JsonSchemaProvider());
| null | train | train | 2021-07-30T11:48:42 | "2021-07-30T07:52:33Z" | sreenivasvp | train |
provectus/kafka-ui/580_763 | provectus/kafka-ui | provectus/kafka-ui/580 | provectus/kafka-ui/763 | [
"keyword_pr_to_issue"
] | 96d07a7d6f87de52144b4e21753bf4eafa42149a | 9770ad47af329ef4f444ff411d4062419c8d1f51 | [
"Hi, @wildone. SERVER_SERVLET_CONTEXT_PATH could be used to set non root path for ui & api. Could you please share what were urls in your browser?",
"Hi, sorry for the late resolution, I hope that you solved your problem, we tested the application with traefik and it is working fine, at least all routings are correct. An example of usage could be found here https://github.com/provectus/kafka-ui/pull/763 "
] | [] | "2021-08-03T08:55:16Z" | [
"type/enhancement"
] | Routing to KafkaUI using Traefik. | ### Is your proposal related to a problem?
Is it possible to make this as clear as the San Diego summer sky:
SERVER_SERVLET_CONTEXT_PATH | URI basePath
It's not clear what this is for.
I assume it's so that I can run this as a service on a path `localhost/kafkaui`?
I am using this with traefik and routing to the UI using prefix `/kafkaui` but I get an error
this is the output in the console.
`
kafkaui_1 | 12:19:16.903 [boundedElastic-2] DEBUG org.springframework.http.codec.json.Jackson2JsonEncoder - [f0610232] Encoding [class ErrorResponse {
kafkaui_1 | code: 5000
kafkaui_1 | message: 404 NOT_FOUND
kafkaui_1 | timestamp: 1624364356903
kafkaui_1 | req (truncated)...]
`
this is the output in the browser
`
{"code":5000,"message":"404 NOT_FOUND","timestamp":1624364356903,"requestId":"f0610232","fieldsErrors":null}
`
this is my docker-compose
`
version: "3.8"
services:
kafkaui:
image: provectuslabs/kafka-ui:latest
environment:
- KAFKA_CLUSTERS_0_NAME=local
- KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
- KAFKA_CLUSTERS_0_ZOOKEEPER=zookeper:2181
- SERVER_SERVLET_CONTEXT_PATH=kafkaui
labels:
# note that you want this frontened to match the last. otherwise it will match login.${HOST_DOMAIN}"
traefik.frontend.priority: 1
traefik.enable: true
traefik.http.routers.kafkaui.rule: "PathPrefix(`/kafkaui`)"
traefik.http.routers.kafkaui.entrypoints: web
traefik.http.routers.kafkaui-https.rule: "PathPrefix(`/kafkaui`)"
traefik.http.routers.kafkaui-https.tls: true
traefik.http.routers.kafkaui-https.entrypoints: websecure
traefik.http.services.kafkaui.loadbalancer.passHostHeader: true
networks:
- internal
networks:
internal:
` | [] | [
"docker/kafka-ui-traefik-proxy.yaml",
"docker/traefik/kafkaui.yaml"
] | [] | diff --git a/docker/kafka-ui-traefik-proxy.yaml b/docker/kafka-ui-traefik-proxy.yaml
new file mode 100644
index 00000000000..e95d7a049b4
--- /dev/null
+++ b/docker/kafka-ui-traefik-proxy.yaml
@@ -0,0 +1,27 @@
+---
+version: '3.8'
+services:
+ traefik:
+ restart: always
+ image: traefik:v2.4
+ container_name: traefik
+ command:
+ - --api.insecure=true
+ - --providers.file.directory=/etc/traefik
+ - --providers.file.watch=true
+ - --entrypoints.web.address=:80
+ - --log.level=debug
+ ports:
+ - 80:80
+ volumes:
+ - ./traefik:/etc/traefik
+
+ kafka-ui:
+ container_name: kafka-ui
+ image: provectuslabs/kafka-ui:latest
+ ports:
+ - 8082:8080
+ environment:
+ KAFKA_CLUSTERS_0_NAME: local
+ KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
+ SERVER_SERVLET_CONTEXT_PATH: /kafka-ui
diff --git a/docker/traefik/kafkaui.yaml b/docker/traefik/kafkaui.yaml
new file mode 100644
index 00000000000..3d0cf089d78
--- /dev/null
+++ b/docker/traefik/kafkaui.yaml
@@ -0,0 +1,11 @@
+http:
+ routers:
+ kafkaui:
+ rule: "PathPrefix(`/kafka-ui/`)"
+ entrypoints: web
+ service: kafkaui
+ services:
+ kafkaui:
+ loadBalancer:
+ servers:
+ - url: http://kafka-ui:8080
\ No newline at end of file
| null | train | train | 2021-08-02T15:54:02 | "2021-06-22T12:21:21Z" | wildone | train |
provectus/kafka-ui/772_775 | provectus/kafka-ui | provectus/kafka-ui/772 | provectus/kafka-ui/775 | [
"keyword_pr_to_issue",
"timestamp(timedelta=0.0, similarity=0.8733743819379413)"
] | 40678809661cb2659d4c93b2fbcd0cd940b4006a | 6280c921f2af8f85be2bc5a7444f900398850020 | [
"Hi, @FedeBev. Thanks for creating issue. As for me looks interesting. @RustamGimadiev what is your opinion on this? Btw any PR is more then welcome )"
] | [] | "2021-08-06T10:09:44Z" | [
"type/enhancement"
] | [helm] restart application when configmap or secret changes | ### Is your proposal related to a problem?
In my opinion would be great if the deployment was automatically restarted when the configmap or secret changes.
### Describe the solution you'd like
Something like this should be enough
```yaml
kind: Deployment
spec:
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
```
Let me know what are your thoughts, I can deal with a PR | [
"charts/kafka-ui/templates/deployment.yaml"
] | [
"charts/kafka-ui/templates/deployment.yaml"
] | [] | diff --git a/charts/kafka-ui/templates/deployment.yaml b/charts/kafka-ui/templates/deployment.yaml
index f8bfe02ae6b..21091c4ad3d 100644
--- a/charts/kafka-ui/templates/deployment.yaml
+++ b/charts/kafka-ui/templates/deployment.yaml
@@ -13,10 +13,12 @@ spec:
{{- include "kafka-ui.selectorLabels" . | nindent 6 }}
template:
metadata:
- {{- with .Values.podAnnotations }}
annotations:
- {{- toYaml . | nindent 8 }}
- {{- end }}
+ {{- with .Values.podAnnotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+ checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
labels:
{{- include "kafka-ui.selectorLabels" . | nindent 8 }}
spec:
| null | train | train | 2021-08-05T08:05:21 | "2021-08-05T15:14:59Z" | FedeBev | train |
provectus/kafka-ui/207_787 | provectus/kafka-ui | provectus/kafka-ui/207 | provectus/kafka-ui/787 | [
"keyword_pr_to_issue"
] | 91e93a727e3802d8821bda6a06efd840053f9293 | 4f14942c6da2533026fc4e14f0555967fca1abb4 | [
"Hi guys, on backend side this feature can be implemented by adding routing from kafka-ui backend to ksqldb server or by creating endpoints in the kafka-ui backend itself. @IldarAlmakaev @workshur @germanosin any thoughts?",
"I've merged to master backend side of this ticket\r\ncc @workshur"
] | [] | "2021-08-11T17:44:07Z" | [
"type/enhancement",
"scope/backend",
"scope/frontend"
] | Ability to run KSQL queries directly from UI | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/strategy/ksql/statement/ShowStrategy.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/strategy/ksql/statement/ShowStrategy.java"
] | [
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/KsqlServiceTest.java"
] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/strategy/ksql/statement/ShowStrategy.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/strategy/ksql/statement/ShowStrategy.java
index 1b80c6648bf..1a2250e850d 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/strategy/ksql/statement/ShowStrategy.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/strategy/ksql/statement/ShowStrategy.java
@@ -1,6 +1,7 @@
package com.provectus.kafka.ui.strategy.ksql.statement;
import com.fasterxml.jackson.databind.JsonNode;
+import com.provectus.kafka.ui.model.KsqlCommand;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import java.util.List;
import java.util.Optional;
@@ -36,6 +37,15 @@ protected String getTestRegExp() {
return "";
}
+ @Override
+ public BaseStrategy ksqlCommand(KsqlCommand ksqlCommand) {
+ // return new instance to avoid conflicts for parallel requests
+ ShowStrategy clone = new ShowStrategy();
+ clone.setResponseValueKey(responseValueKey);
+ clone.ksqlCommand = ksqlCommand;
+ return clone;
+ }
+
protected String getShowRegExp(String key) {
return "show " + key + ";";
}
| diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/KsqlServiceTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/KsqlServiceTest.java
index aef12d8ec83..dd8c3ab8295 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/KsqlServiceTest.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/KsqlServiceTest.java
@@ -14,6 +14,7 @@
import com.provectus.kafka.ui.model.KsqlCommand;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.strategy.ksql.statement.BaseStrategy;
+import com.provectus.kafka.ui.strategy.ksql.statement.DescribeStrategy;
import com.provectus.kafka.ui.strategy.ksql.statement.ShowStrategy;
import java.util.List;
import java.util.Optional;
@@ -30,6 +31,7 @@
class KsqlServiceTest {
private KsqlService ksqlService;
private BaseStrategy baseStrategy;
+ private BaseStrategy alternativeStrategy;
@Mock
private ClustersStorage clustersStorage;
@@ -40,10 +42,11 @@ class KsqlServiceTest {
@BeforeEach
public void setUp() {
this.baseStrategy = new ShowStrategy();
+ this.alternativeStrategy = new DescribeStrategy();
this.ksqlService = new KsqlService(
this.ksqlClient,
this.clustersStorage,
- List.of(baseStrategy)
+ List.of(baseStrategy, alternativeStrategy)
);
}
@@ -91,7 +94,7 @@ void shouldThrowUnprocessableEntityExceptionOnExecuteKsqlCommand() {
void shouldSetHostToStrategy() {
String clusterName = "test";
String host = "localhost:8088";
- KsqlCommand command = (new KsqlCommand()).ksql("show streams;");
+ KsqlCommand command = (new KsqlCommand()).ksql("describe streams;");
KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);
when(clustersStorage.getClusterByName(clusterName))
@@ -100,13 +103,13 @@ void shouldSetHostToStrategy() {
when(ksqlClient.execute(any())).thenReturn(Mono.just(new KsqlCommandResponse()));
ksqlService.executeKsqlCommand(clusterName, Mono.just(command)).block();
- assertThat(baseStrategy.getUri()).isEqualTo(host + "/ksql");
+ assertThat(alternativeStrategy.getUri()).isEqualTo(host + "/ksql");
}
@Test
void shouldCallClientAndReturnResponse() {
String clusterName = "test";
- KsqlCommand command = (new KsqlCommand()).ksql("show streams;");
+ KsqlCommand command = (new KsqlCommand()).ksql("describe streams;");
KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);
KsqlCommandResponse response = new KsqlCommandResponse().message("success");
@@ -117,7 +120,7 @@ void shouldCallClientAndReturnResponse() {
KsqlCommandResponse receivedResponse =
ksqlService.executeKsqlCommand(clusterName, Mono.just(command)).block();
- verify(ksqlClient, times(1)).execute(baseStrategy);
+ verify(ksqlClient, times(1)).execute(alternativeStrategy);
assertThat(receivedResponse).isEqualTo(response);
}
| test | train | 2021-08-12T10:40:05 | "2021-02-20T11:50:00Z" | IldarAlmakaev | train |
|
provectus/kafka-ui/801_802 | provectus/kafka-ui | provectus/kafka-ui/801 | provectus/kafka-ui/802 | [
"timestamp(timedelta=0.0, similarity=0.8426492946107546)",
"connected"
] | eaf77c49a277ea07fa47554142e1170a56ad268c | 76af95ed787fb39b5e051c5ec7d7f2b9a20c9835 | [] | [] | "2021-08-19T15:00:26Z" | [
"type/bug",
"scope/frontend"
] | Bug on Topic create form | **Describe the bug**
We need to fix the loader on the Topic page. After the first successful topic creation we no longer wait for creation, because the state is already marked as loaded, so on the second creation we try to get the entity immediately after pressing the button and receive null when going to the object page.
**Steps to Reproduce**
1. Create topic 1
2. Create topic 2
**Expected behavior**
Topic form should work without FetchStatus reducer
**AC:**
- Topic new form works without fetchStatus reducer and tracks async request status using react-hook-form
- Topic new form covered by tests.
**Notes**
Related PR: https://github.com/provectus/kafka-ui/pull/575 | [
"kafka-ui-react-app/src/components/Topics/New/New.tsx",
"kafka-ui-react-app/src/components/Topics/New/NewContainer.ts",
"kafka-ui-react-app/src/components/Topics/Topics.tsx",
"kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx",
"kafka-ui-react-app/src/redux/actions/thunks/topics.ts",
"kafka-ui-react-app/src/redux/reducers/topics/selectors.ts"
] | [
"kafka-ui-react-app/src/components/Topics/New/New.tsx",
"kafka-ui-react-app/src/components/Topics/New/__tests__/New.spec.tsx",
"kafka-ui-react-app/src/components/Topics/Topics.tsx",
"kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx",
"kafka-ui-react-app/src/redux/actions/thunks/topics.ts",
"kafka-ui-react-app/src/redux/reducers/topics/selectors.ts"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/New/New.tsx b/kafka-ui-react-app/src/components/Topics/New/New.tsx
index a6f8af9d9e2..3ef46aca488 100644
--- a/kafka-ui-react-app/src/components/Topics/New/New.tsx
+++ b/kafka-ui-react-app/src/components/Topics/New/New.tsx
@@ -1,40 +1,46 @@
import React from 'react';
-import { ClusterName, TopicName, TopicFormData } from 'redux/interfaces';
+import { ClusterName, TopicFormData, FailurePayload } from 'redux/interfaces';
import { useForm, FormProvider } from 'react-hook-form';
import Breadcrumb from 'components/common/Breadcrumb/Breadcrumb';
-import { clusterTopicsPath } from 'lib/paths';
+import { clusterTopicPath, clusterTopicsPath } from 'lib/paths';
import TopicForm from 'components/Topics/shared/Form/TopicForm';
+import {
+ formatTopicCreation,
+ topicsApiClient,
+ createTopicAction,
+} from 'redux/actions';
+import { useDispatch } from 'react-redux';
+import { getResponse } from 'lib/errorHandling';
+import { useHistory, useParams } from 'react-router';
-interface Props {
+interface RouterParams {
clusterName: ClusterName;
- isTopicCreated: boolean;
- createTopic: (clusterName: ClusterName, form: TopicFormData) => void;
- redirectToTopicPath: (clusterName: ClusterName, topicName: TopicName) => void;
- resetUploadedState: () => void;
}
-const New: React.FC<Props> = ({
- clusterName,
- isTopicCreated,
- createTopic,
- redirectToTopicPath,
-}) => {
+const New: React.FC = () => {
const methods = useForm<TopicFormData>();
- const [isSubmitting, setIsSubmitting] = React.useState<boolean>(false);
-
- React.useEffect(() => {
- if (isSubmitting && isTopicCreated) {
- const { name } = methods.getValues();
- redirectToTopicPath(clusterName, name);
- }
- }, [isSubmitting, isTopicCreated, redirectToTopicPath, clusterName, methods]);
+ const { clusterName } = useParams<RouterParams>();
+ const history = useHistory();
+ const dispatch = useDispatch();
const onSubmit = async (data: TopicFormData) => {
- // TODO: need to fix loader. After success loading the first time, we won't wait for creation any more, because state is
- // loaded, and we will try to get entity immediately after pressing the button, and we will receive null
- // going to object page on the second creation. Setting of isSubmitting after createTopic is a workaround, need to tweak loader logic
- createTopic(clusterName, data);
- setIsSubmitting(true); // Keep this action after createTopic to prevent redirect before create.
+ try {
+ await topicsApiClient.createTopic({
+ clusterName,
+ topicCreation: formatTopicCreation(data),
+ });
+
+ history.push(clusterTopicPath(clusterName, data.name));
+ } catch (error) {
+ const response = await getResponse(error);
+ const alert: FailurePayload = {
+ subject: ['schema', data.name].join('-'),
+ title: `Schema ${data.name}`,
+ response,
+ };
+
+ dispatch(createTopicAction.failure({ alert }));
+ }
};
return (
@@ -52,10 +58,9 @@ const New: React.FC<Props> = ({
</div>
<div className="box">
- {/* eslint-disable react/jsx-props-no-spreading */}
<FormProvider {...methods}>
<TopicForm
- isSubmitting={isSubmitting}
+ isSubmitting={methods.formState.isSubmitting}
onSubmit={methods.handleSubmit(onSubmit)}
/>
</FormProvider>
diff --git a/kafka-ui-react-app/src/components/Topics/New/NewContainer.ts b/kafka-ui-react-app/src/components/Topics/New/NewContainer.ts
deleted file mode 100644
index 41ea03e3419..00000000000
--- a/kafka-ui-react-app/src/components/Topics/New/NewContainer.ts
+++ /dev/null
@@ -1,48 +0,0 @@
-import { connect } from 'react-redux';
-import {
- RootState,
- ClusterName,
- TopicName,
- Action,
- TopicFormData,
-} from 'redux/interfaces';
-import { withRouter, RouteComponentProps } from 'react-router-dom';
-import { createTopic, createTopicAction } from 'redux/actions';
-import { getTopicCreated } from 'redux/reducers/topics/selectors';
-import { clusterTopicPath } from 'lib/paths';
-import { ThunkDispatch } from 'redux-thunk';
-
-import New from './New';
-
-interface RouteProps {
- clusterName: ClusterName;
-}
-
-type OwnProps = RouteComponentProps<RouteProps>;
-
-const mapStateToProps = (
- state: RootState,
- {
- match: {
- params: { clusterName },
- },
- }: OwnProps
-) => ({
- clusterName,
- isTopicCreated: getTopicCreated(state),
-});
-
-const mapDispatchToProps = (
- dispatch: ThunkDispatch<RootState, undefined, Action>,
- { history }: OwnProps
-) => ({
- createTopic: (clusterName: ClusterName, form: TopicFormData) => {
- dispatch(createTopic(clusterName, form));
- },
- redirectToTopicPath: (clusterName: ClusterName, topicName: TopicName) => {
- history.push(clusterTopicPath(clusterName, topicName));
- },
- resetUploadedState: () => dispatch(createTopicAction.failure({})),
-});
-
-export default withRouter(connect(mapStateToProps, mapDispatchToProps)(New));
diff --git a/kafka-ui-react-app/src/components/Topics/New/__tests__/New.spec.tsx b/kafka-ui-react-app/src/components/Topics/New/__tests__/New.spec.tsx
new file mode 100644
index 00000000000..d7c7d10d6cd
--- /dev/null
+++ b/kafka-ui-react-app/src/components/Topics/New/__tests__/New.spec.tsx
@@ -0,0 +1,69 @@
+import React from 'react';
+import New from 'components/Topics/New/New';
+import { Router } from 'react-router';
+import configureStore from 'redux-mock-store';
+import { RootState } from 'redux/interfaces';
+import { Provider } from 'react-redux';
+import { fireEvent, render, screen, waitFor } from '@testing-library/react';
+import { createMemoryHistory } from 'history';
+import fetchMock from 'fetch-mock-jest';
+import { clusterTopicNewPath, clusterTopicPath } from 'lib/paths';
+
+const mockStore = configureStore();
+
+describe('New', () => {
+ const clusterName = 'local';
+ const topicName = 'test-topic';
+
+ const initialState: Partial<RootState> = {};
+ const storeMock = mockStore(initialState);
+ const historyMock = createMemoryHistory();
+
+ beforeEach(() => {
+ fetchMock.restore();
+ });
+
+ const setupComponent = (history = historyMock, store = storeMock) => (
+ <Router history={history}>
+ <Provider store={store}>
+ <New />
+ </Provider>
+ </Router>
+ );
+
+ it('validates form', async () => {
+ const mockedHistory = createMemoryHistory();
+ jest.spyOn(mockedHistory, 'push');
+
+ render(setupComponent(mockedHistory));
+
+ await waitFor(async () => {
+ fireEvent.click(await screen.findByText('Send'));
+ const errorText = await screen.findByText('Topic Name is required.');
+ expect(mockedHistory.push).toBeCalledTimes(0);
+ expect(errorText).toBeTruthy();
+ });
+ });
+
+ it('submits valid form', async () => {
+ const mockedHistory = createMemoryHistory({
+ initialEntries: [clusterTopicNewPath(clusterName)],
+ });
+ jest.spyOn(mockedHistory, 'push');
+
+ render(setupComponent());
+
+ const input = await screen.findByPlaceholderText('Topic Name');
+ fireEvent.change(input, { target: { value: topicName } });
+ expect(input).toHaveValue(topicName);
+
+ waitFor(async () => {
+ fireEvent.click(await screen.findByText('Send'));
+
+ expect(mockedHistory.location.pathname).toBe(
+ clusterTopicPath(clusterName, topicName)
+ );
+ expect(mockedHistory.push).toBeCalledTimes(1);
+ });
+ });
+});
diff --git a/kafka-ui-react-app/src/components/Topics/Topics.tsx b/kafka-ui-react-app/src/components/Topics/Topics.tsx
index 6a78e22ee51..dc9a7cdda8b 100644
--- a/kafka-ui-react-app/src/components/Topics/Topics.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topics.tsx
@@ -8,7 +8,7 @@ import {
import ListContainer from './List/ListContainer';
import TopicContainer from './Topic/TopicContainer';
-import NewContainer from './New/NewContainer';
+import New from './New/New';
const Topics: React.FC = () => (
<Switch>
@@ -17,11 +17,7 @@ const Topics: React.FC = () => (
path={clusterTopicsPath(':clusterName')}
component={ListContainer}
/>
- <Route
- exact
- path={clusterTopicNewPath(':clusterName')}
- component={NewContainer}
- />
+ <Route exact path={clusterTopicNewPath(':clusterName')} component={New} />
<Route
path={clusterTopicPath(':clusterName', ':topicName')}
component={TopicContainer}
diff --git a/kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx b/kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx
index eb52d147a03..8435ea657ae 100644
--- a/kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx
+++ b/kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx
@@ -156,7 +156,7 @@ const TopicForm: React.FC<Props> = ({
<CustomParamsContainer isSubmitting={isSubmitting} config={config} />
- <input type="submit" className="button is-primary" />
+ <input type="submit" className="button is-primary" value="Send" />
</fieldset>
</form>
);
diff --git a/kafka-ui-react-app/src/redux/actions/thunks/topics.ts b/kafka-ui-react-app/src/redux/actions/thunks/topics.ts
index 73f37a500cc..b9f6206d8b4 100644
--- a/kafka-ui-react-app/src/redux/actions/thunks/topics.ts
+++ b/kafka-ui-react-app/src/redux/actions/thunks/topics.ts
@@ -162,7 +162,7 @@ const topicReducer = (
};
};
-const formatTopicCreation = (form: TopicFormData): TopicCreation => {
+export const formatTopicCreation = (form: TopicFormData): TopicCreation => {
const {
name,
partitions,
@@ -212,40 +212,6 @@ const formatTopicUpdate = (form: TopicFormDataRaw): TopicUpdate => {
};
};
-export const createTopic =
- (clusterName: ClusterName, form: TopicFormData): PromiseThunkResult =>
- async (dispatch, getState) => {
- dispatch(actions.createTopicAction.request());
- try {
- const topic: Topic = await topicsApiClient.createTopic({
- clusterName,
- topicCreation: formatTopicCreation(form),
- });
-
- const state = getState().topics;
- const newState = {
- ...state,
- byName: {
- ...state.byName,
- [topic.name]: {
- ...topic,
- },
- },
- allNames: [...state.allNames, topic.name],
- };
-
- dispatch(actions.createTopicAction.success(newState));
- } catch (error) {
- const response = await getResponse(error);
- const alert: FailurePayload = {
- subject: ['schema', form.name].join('-'),
- title: `Schema ${form.name}`,
- response,
- };
- dispatch(actions.createTopicAction.failure({ alert }));
- }
- };
-
export const updateTopic =
(
clusterName: ClusterName,
diff --git a/kafka-ui-react-app/src/redux/reducers/topics/selectors.ts b/kafka-ui-react-app/src/redux/reducers/topics/selectors.ts
index 9502d5e4bdd..e454bf0f9be 100644
--- a/kafka-ui-react-app/src/redux/reducers/topics/selectors.ts
+++ b/kafka-ui-react-app/src/redux/reducers/topics/selectors.ts
@@ -156,7 +156,7 @@ export const getTopicsOrderBy = createSelector(
export const getIsTopicInternal = createSelector(
getTopicByName,
- ({ internal }) => !!internal
+ (topic) => !!topic?.internal
);
export const getTopicConsumerGroups = createSelector(
| null | test | train | 2021-08-23T07:49:15 | "2021-08-18T10:25:09Z" | workshur | train |
provectus/kafka-ui/788_822 | provectus/kafka-ui | provectus/kafka-ui/788 | provectus/kafka-ui/822 | [
"connected"
] | d737953a8eb60c75ee87097337fb7e1a115a00f9 | 63059ffa288085946e85fa1df9ad82a94a39d103 | [
"@GladstoneChagas, thank for creating this issue. We'll try to fix it soon.",
"We added specific security config. Now it should work fine, please let us know, how it's working on your environment. "
] | [] | "2021-08-25T16:38:10Z" | [
"type/bug",
"scope/backend"
] | Oauth2 with problem delete permission | **Describe the bug**
(A clear and concise description of what the bug is.)
When enable "AUTH_ENABLED" the options delete Clear Message and Delete Topic return 403 with method DELETE.
**Set up**
(How do you run the app?)
The application is running in Kubernetes, created by Helm with the following args:
`
helm install kafka-ui kafka-ui/kafka-ui \
--set envs.config.KAFKA_CLUSTER_0_NAME= "<Cluster Name>" \
--set envs.config.KAFKA_CLUSTER_0_BOOTSTRAPSERVERS= "URL_BROKER1:port\,URL_BROKER2:port\,URL_BROKER3:port" \
--set envs.config.KAFKA_CLUSTER_0_ZOOKEEPER="URL_ZOOKEEPER1:port\,URL_ZOOKEEPER2:port\,URL_ZOOKEEPER3:port" \
--set envs.config.KAFKA_CLUSTER_0_PROPERTIES_SECURITY_PROTOCOL="SSL" \
--set envs.config.SECURITY_BASIC_ENABLED="'true'" \
--set envs.config.SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=<client id> \
--set envs.config.SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTSECRET=<secret client id> \
--set envs.config.SPRING_SECURITY_OAUTH2_CLIENT_PROVIDER_AUTH0_ISSUER_URI=<url> \
--set envs.config.AUTH_ENABLED="'true'"
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Clusters -> <NameCluster> -> Topic -> button tools -> Clear Message or Remove Topic.
**Expected behavior**
(A clear and concise description of what you expected to happen)
Delete the messages or topic.
**Screenshots**
(If applicable, add screenshots to help explain your problem)



`
**Additional context**
I tried forcing the KAFKA_CLUSTERS_0_READONLY option, but the same error occurs. When I remove AUTH_ENABLED the validation stops, but the actions work again. | [] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/OAuthSecurityConfig.java"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/OAuthSecurityConfig.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/OAuthSecurityConfig.java
new file mode 100644
index 00000000000..f28e96ed82f
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/OAuthSecurityConfig.java
@@ -0,0 +1,40 @@
+package com.provectus.kafka.ui.config;
+
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
+import org.springframework.security.config.web.server.ServerHttpSecurity;
+import org.springframework.security.web.server.SecurityWebFilterChain;
+
+@Configuration
+@EnableWebFluxSecurity
+@ConditionalOnProperty(value = "auth.enabled", havingValue = "true")
+public class OAuthSecurityConfig {
+
+ private static final String[] AUTH_WHITELIST = {
+ "/css/**",
+ "/js/**",
+ "/media/**",
+ "/resources/**",
+ "/actuator/health",
+ "/actuator/info",
+ "/login",
+ "/logout",
+ "/oauth2/**"
+ };
+
+ @Bean
+ public SecurityWebFilterChain configure(ServerHttpSecurity http) {
+ return http.authorizeExchange()
+ .pathMatchers(AUTH_WHITELIST).permitAll()
+ .anyExchange().authenticated()
+ .and()
+ .oauth2Login()
+ .and()
+ .csrf().disable()
+ .build();
+ }
+
+}
+
| null | train | train | 2021-08-25T17:42:19 | "2021-08-11T20:31:42Z" | GladstoneChagas | train |
provectus/kafka-ui/803_823 | provectus/kafka-ui | provectus/kafka-ui/803 | provectus/kafka-ui/823 | [
"connected"
] | 63059ffa288085946e85fa1df9ad82a94a39d103 | 673e74e15aa6cd2e9ed86683d1b7397c11799d22 | [
"Hi, @david-webill. Thanks for creating this issue. Could you please provide more information:\r\n1. How is your cluster is configured\r\n2. Provide logs from kafka ui",
"Hi @germanosin \r\n\r\nThanks for the reply.\r\nLogs are here https://pastebin.com/V7CPQHAG\r\n\r\nThe instance is a hosted instance with confluent.\r\nThey provide all the configuration for you and just provide details on how to connect to the cluster:\r\n`\r\nbootstrap.servers=server_url\r\nsecurity.protocol=SASL_SSL\r\nsasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='{{ CLUSTER_API_KEY }}' password='{{ CLUSTER_API_SECRET }}';\r\nsasl.mechanism=PLAIN\r\nclient.dns.lookup=use_all_dns_ips\r\nacks=all\r\nschema.registry.url=url\r\nbasic.auth.credentials.source=USER_INFO\r\n` ",
"Hi, @david-webill \r\naccording to the confluent doc: https://docs.confluent.io/platform/current/tutorials/examples/clients/docs/java-springboot.html\r\n\r\nYou have to put one additional config:\r\n```\r\n# Required for correctness in Apache Kafka clients prior to 2.6\r\nclient.dns.lookup=use_all_dns_ips\r\n```\r\n\r\nCould you try this config?\r\n```\r\ndocker run -p 8080:8080 \\ \r\n-e KAFKA_CLUSTERS_0_NAME=cluster01 \\ \r\n-e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=<BROKER+PORT> \\ \r\n-e KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_SSL \\ \r\n-e KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=PLAIN \\ \r\n-e KAFKA_CLUSTERS_0_PROPERTIES_CLIENT_DNS_LOOKUP=use_all_dns_ips \\ \r\n-e KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG='org.apache.kafka.common.security.plain.PlainLoginModule required username=<USERNAME> password=<PASSWORD>;' \\ \r\n-d provectuslabs/kafka-ui:latest\r\n```",
"@germanosin Thank you so much for the reply.\r\nI tried adding the extra setting with the following:\r\n\r\n `docker run -p 8080:8080 \\\r\n -e KAFKA_CLUSTERS_0_NAME=cluster01 \\\r\n -e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=redacted \\\r\n -e KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_SSL \\\r\n -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=PLAIN \\\r\n -e KAFKA_CLUSTERS_0_PROPERTIES_CLIENT_DNS_LOOKUP=use_all_dns_ips \\\r\n -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG='org.apache.kafka.common.security.plain.PlainLoginModule required username='redacted' password='redacted';'\\\r\n -d provectuslabs/kafka-ui:latest`\r\n\r\n\r\nIt still does not work.\r\nLogs below.\r\nhttps://pastebin.com/Niq8gfXA\r\n\r\nThank you for your help, it is much appreciated",
"@david-webill Looks like something wrong with jaas config:\r\n```\r\nCaused by: java.lang.IllegalArgumentException: JAAS config entry not terminated by semi-colon\r\n```",
"I check and double checked.\r\nWhat im passing to the JAAS is this in docker:\r\n-e KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG='org.apache.kafka.common.security.plain.PlainLoginModule required username=FOMSOVNWOMD password=RepKT/4MhxVgrY+R8MLYG;'\\\r\n\r\nIm not sure if i should be wrapping in quotes.\r\nIm sure its a stupid error, but I just cannot get it to work :)\r\nI do have some characters in the password that may be messing up the input?\r\n\r\n(NOT the actual username and password)",
"> I check and double checked.\r\n> What im passing to the JAAS is this in docker:\r\n> -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG='org.apache.kafka.common.security.plain.PlainLoginModule required username=FOMSOVNWOMD password=RepKT/4MhxVgrY+R8MLYG;'\\\r\n> \r\n> Im not sure if i should be wrapping in quotes.\r\n> Im sure its a stupid error, but I just cannot get it to work :)\r\n> I do have some characters in the password that may be messing up the input?\r\n> \r\n> (NOT the actual username and password)\r\n\r\nIt should look like this:\r\n\r\n```\r\n-e KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG='org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin-secret\";'\r\n```\r\nCould you wrap username and password into quotes?",
"I have tried that, seems I have a new error now 😄 \r\nDocker command:\r\ndocker run -p 8080:8080 \\\r\n -e KAFKA_CLUSTERS_0_NAME=cluster01 \\\r\n -e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=.aws.confluent.cloud:9092 \\\r\n -e KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_SSL \\\r\n -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=PLAIN \\\r\n -e KAFKA_CLUSTERS_0_PROPERTIES_CLIENT_DNS_LOOKUP=use_all_dns_ips \\\r\n -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG='org.apache.kafka.common.security.plain.PlainLoginModule required username=\"USERNAME\" password=\"PASSWORD\";'\\\r\n -d provectuslabs/kafka-ui:latest\r\n\r\nAt least the error has seemed to change.\r\n\r\nhttps://pastebin.com/28MSD0Rg\r\n",
"@germanosin \r\nNot sure if it has anything to do with the SASL_SSL authentication\r\nIve set up a kafka deployment with sasl_plaintext as the security protocol, and its connecting no problem",
"Hmm, looks like an NPE somewhere in code. Could you please check latest version with tag master?\r\n```\r\ndocker run -p 8080:8080\r\n-e KAFKA_CLUSTERS_0_NAME=cluster01\r\n-e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=.aws.confluent.cloud:9092\r\n-e KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_SSL\r\n-e KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=PLAIN\r\n-e KAFKA_CLUSTERS_0_PROPERTIES_CLIENT_DNS_LOOKUP=use_all_dns_ips\r\n-e KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG='org.apache.kafka.common.security.plain.PlainLoginModule required username=\"USERNAME\" password=\"PASSWORD\";'\r\n-d provectuslabs/kafka-ui:master\r\n```",
"Unfortunately still an error:\r\n```13:23:41.080 [main] INFO org.springframework.core.KotlinDetector - Kotlin reflection implementation not found at runtime, related features won't be available.\r\n\r\n\r\n . ____ _ __ _ _\r\n /\\\\ / ___'_ __ _ _(_)_ __ __ _ \\ \\ \\ \\\r\n( ( )\\___ | '_ | '_| | '_ \\/ _` | \\ \\ \\ \\\r\n \\\\/ ___)| |_)| | | | | || (_| | ) ) ) )\r\n ' |____| .__|_| |_|_| |_\\__, | / / / /\r\n\r\n =========|_|==============|___/=/_/_/_/\r\n\r\n :: Spring Boot :: (v2.2.4.RELEASE)\r\n\r\n\r\n13:23:42.095 [main] INFO com.provectus.kafka.ui.KafkaUiApplication - Starting KafkaUiApplication on 5a67d335b191 with PID 1 (/kafka-ui-api.jar started by root in /)\r\n\r\n13:23:42.096 [main] DEBUG com.provectus.kafka.ui.KafkaUiApplication - Running with Spring Boot v2.2.4.RELEASE, Spring v5.2.3.RELEASE\r\n\r\n13:23:42.097 [main] INFO com.provectus.kafka.ui.KafkaUiApplication - No active profile set, falling back to default profiles: default\r\n\r\n13:23:42.539 [background-preinit] WARN org.springframework.http.converter.json.Jackson2ObjectMapperBuilder - For Jackson Kotlin classes support please add \"com.fasterxml.jackson.module:jackson-module-kotlin\" to the classpath\r\n\r\nSLF4J: Class path contains multiple SLF4J bindings.\r\nSLF4J: Found binding in [jar:file:/kafka-ui-api.jar!/BOOT-INF/lib/slf4j-log4j12-1.7.30.jar!/org/slf4j/impl/StaticLoggerBinder.class]\r\n\r\nSLF4J: Found binding in [jar:file:/kafka-ui-api.jar!/BOOT-INF/lib/log4j-slf4j-impl-2.12.1.jar!/org/slf4j/impl/StaticLoggerBinder.class]\r\nSLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.\r\n\r\nSLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]\r\n\r\nlog4j:WARN No appenders could be found for logger (reactor.util.Loggers$LoggerFactory).\r\nlog4j:WARN Please initialize the log4j system properly.\r\n\r\nlog4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.\r\n\r\n13:23:44.281 [main] INFO org.springframework.boot.autoconfigure.security.reactive.ReactiveUserDetailsServiceAutoConfiguration - \r\n\r\nUsing generated security password: 3e4a49ed-0d2c-4bc5-994e-30610ba3c397\r\n\r\n\r\n13:23:44.355 [main] WARN org.springframework.boot.autoconfigure.jackson.JacksonAutoConfiguration$JodaDateTimeJacksonConfiguration - Auto-configuration of Jackson's Joda-Time integration is deprecated in favor of using java.time (JSR-310).\r\n\r\n13:23:44.559 [main] INFO com.provectus.kafka.ui.serde.DeserializationService - Using SchemaRegistryAwareRecordSerDe for cluster 'cluster'\r\n\r\n13:23:45.430 [main] INFO org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler - Initializing ExecutorService 'taskScheduler'\r\n\r\n13:23:45.565 [parallel-1] DEBUG com.provectus.kafka.ui.service.MetricsUpdateService - Start getting metrics for kafkaCluster: cluster\r\n\r\n13:23:45.796 [main] INFO org.springframework.boot.web.embedded.netty.NettyWebServer - Netty started on port(s): 8080\r\n\r\n13:23:45.810 [main] INFO com.provectus.kafka.ui.KafkaUiApplication - Started KafkaUiApplication in 4.614 seconds (JVM running for 6.591)\r\n\r\n13:23:52.825 [boundedElastic-4] DEBUG org.springframework.http.codec.json.Jackson2JsonEncoder - [6524d03a] Encoding [[class Cluster {\r\n name: cluster\r\n defaultCluster: null\r\n status: offline\r\n brokerCount: nul (truncated)...]\r\n\r\n13:24:09.155 [kafka-admin-client-thread | adminclient-2] WARN com.provectus.kafka.ui.exception.ErrorCode - Multiple class com.provectus.kafka.ui.exception.ErrorCode values refer to code 
4001\r\n\r\n13:24:09.179 [kafka-admin-client-thread | adminclient-2] ERROR org.springframework.boot.autoconfigure.web.reactive.error.AbstractErrorWebExceptionHandler - [de494e71] 500 Server Error for HTTP GET \"/api/clusters/cluster/brokers\"\r\njava.util.NoSuchElementException: No value present\r\n\tat java.util.Optional.orElseThrow(Unknown Source) ~[?:?]\r\n\tSuppressed: reactor.core.publisher.FluxOnAssembly$OnAssemblyException: \r\nError has been observed at the following site(s):\r\n\t|_ checkpoint ⇢ Handler com.provectus.kafka.ui.controller.BrokersController#getBrokers(String, ServerWebExchange) [DispatcherHandler]\r\n\t|_ checkpoint ⇢ com.provectus.kafka.ui.config.ReadOnlyModeFilter [DefaultWebFilterChain]\r\n\t|_ checkpoint ⇢ com.provectus.kafka.ui.config.CustomWebFilter [DefaultWebFilterChain]\r\n\t|_ checkpoint ⇢ org.springframework.security.web.server.authorization.AuthorizationWebFilter [DefaultWebFilterChain]\r\n\r\n\t|_ checkpoint ⇢ org.springframework.security.web.server.authorization.ExceptionTranslationWebFilter [DefaultWebFilterChain]\r\n\r\n\t|_ checkpoint ⇢ org.springframework.security.web.server.authentication.logout.LogoutWebFilter [DefaultWebFilterChain]\r\n\r\n\t|_ checkpoint ⇢ org.springframework.security.web.server.savedrequest.ServerRequestCacheWebFilter [DefaultWebFilterChain]\r\n\r\n\t|_ checkpoint ⇢ org.springframework.security.web.server.context.SecurityContextServerWebExchangeWebFilter [DefaultWebFilterChain]\r\n\r\n\t|_ checkpoint ⇢ org.springframework.security.web.server.context.ReactorContextWebFilter [DefaultWebFilterChain]\r\n\r\n\t|_ checkpoint ⇢ org.springframework.security.web.server.header.HttpHeaderWriterWebFilter [DefaultWebFilterChain]\r\n\r\n\t|_ checkpoint ⇢ org.springframework.security.config.web.server.ServerHttpSecurity$ServerWebExchangeReactorContextWebFilter [DefaultWebFilterChain]\r\n\r\n\t|_ checkpoint ⇢ org.springframework.security.web.server.WebFilterChainProxy [DefaultWebFilterChain]\r\n\r\n\t|_ checkpoint ⇢ HTTP GET \"/api/clusters/cluster/brokers\" [ExceptionHandlingWebHandler]\r\n\r\nStack trace:\r\n\r\n\t\tat java.util.Optional.orElseThrow(Unknown Source) ~[?:?]\r\n\r\n\t\tat com.provectus.kafka.ui.util.ClusterUtil.getClusterVersion(ClusterUtil.java:368) ~[classes!/:?]\r\n\r\n\t\tat reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onNext(FluxMapFuseable.java:107) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]\r\n\r\n\t\tat reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1637) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]\r\n\r\n\t\tat reactor.core.publisher.MonoFlatMap$FlatMapInner.onNext(MonoFlatMap.java:241) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]\r\n\r\n\t\tat reactor.core.publisher.MonoCreate$DefaultMonoSink.success(MonoCreate.java:156) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]\r\n\r\n\t\tat com.provectus.kafka.ui.util.ClusterUtil.lambda$toMono$0(ClusterUtil.java:64) ~[classes!/:?]\r\n\r\n\t\tat org.apache.kafka.common.internals.KafkaFutureImpl$WhenCompleteBiConsumer.accept(KafkaFutureImpl.java:177) [kafka-clients-2.4.1.jar!/:?]\r\n\r\n\t\tat org.apache.kafka.common.internals.KafkaFutureImpl$WhenCompleteBiConsumer.accept(KafkaFutureImpl.java:162) [kafka-clients-2.4.1.jar!/:?]\r\n\r\n\t\tat org.apache.kafka.common.internals.KafkaFutureImpl.complete(KafkaFutureImpl.java:221) [kafka-clients-2.4.1.jar!/:?]\r\n\r\n\t\tat org.apache.kafka.common.internals.KafkaFutureImpl$Applicant.accept(KafkaFutureImpl.java:65) [kafka-clients-2.4.1.jar!/:?]\r\n\r\n\t\tat 
org.apache.kafka.common.internals.KafkaFutureImpl$Applicant.accept(KafkaFutureImpl.java:49) [kafka-clients-2.4.1.jar!/:?]\r\n\r\n\t\tat org.apache.kafka.common.internals.KafkaFutureImpl.complete(KafkaFutureImpl.java:221) [kafka-clients-2.4.1.jar!/:?]\r\n\r\n\t\tat org.apache.kafka.common.KafkaFuture$AllOfAdapter.maybeComplete(KafkaFuture.java:82) [kafka-clients-2.4.1.jar!/:?]\r\n\r\n\t\tat org.apache.kafka.common.KafkaFuture$AllOfAdapter.accept(KafkaFuture.java:76) [kafka-clients-2.4.1.jar!/:?]\r\n\r\n\t\tat org.apache.kafka.common.KafkaFuture$AllOfAdapter.accept(KafkaFuture.java:57) [kafka-clients-2.4.1.jar!/:?]\r\n\r\n\t\tat org.apache.kafka.common.internals.KafkaFutureImpl.complete(KafkaFutureImpl.java:221) [kafka-clients-2.4.1.jar!/:?]\r\n\r\n\t\tat org.apache.kafka.clients.admin.KafkaAdminClient$10.handleResponse(KafkaAdminClient.java:1911) [kafka-clients-2.4.1.jar!/:?]\r\n\r\n\t\tat org.apache.kafka.clients.admin.KafkaAdminClient$AdminClientRunnable.handleResponses(KafkaAdminClient.java:1076) [kafka-clients-2.4.1.jar!/:?]\r\n\r\n\t\tat org.apache.kafka.clients.admin.KafkaAdminClient$AdminClientRunnable.run(KafkaAdminClient.java:1204) [kafka-clients-2.4.1.jar!/:?]\r\n\r\n\t\tat java.lang.Thread.run(Unknown Source) [?:?]\r\n\r\n13:24:09.227 [kafka-admin-client-thread | adminclient-2] DEBUG org.springframework.http.codec.json.Jackson2JsonEncoder - [de494e71] Encoding [class ErrorResponse {\r\n code: 5000\r\n message: No value present\r\n timestamp: 1629725049160\r\n (truncated)...]\r\n\r\n13:24:15.434 [parallel-1] DEBUG com.provectus.kafka.ui.service.MetricsUpdateService - Start getting metrics for kafkaCluster: cluster\r\n```",
"> Unfortunately still an error:\r\n> \r\n> ```\r\n> \r\n> \r\n> . ____ _ __ _ _\r\n> /\\\\ / ___'_ __ _ _(_)_ __ __ _ \\ \\ \\ \\\r\n> ( ( )\\___ | '_ | '_| | '_ \\/ _` | \\ \\ \\ \\\r\n> \\\\/ ___)| |_)| | | | | || (_| | ) ) ) )\r\n> ' |____| .__|_| |_|_| |_\\__, | / / / /\r\n> \r\n> =========|_|==============|___/=/_/_/_/\r\n> \r\n> :: Spring Boot :: (v2.2.4.RELEASE)\r\n> \r\n> \r\n> 13:23:42.095 [main] INFO com.provectus.kafka.ui.KafkaUiApplication - Starting KafkaUiApplication on 5a67d335b191 with PID 1 (/kafka-ui-api.jar started by root in /)\r\n> \r\n> 13:23:42.096 [main] DEBUG com.provectus.kafka.ui.KafkaUiApplication - Running with Spring Boot v2.2.4.RELEASE, Spring v5.2.3.RELEASE\r\n> \r\n> 13:23:42.097 [main] INFO com.provectus.kafka.ui.KafkaUiApplication - No active profile set, falling back to default profiles: default\r\n> \r\n> 13:23:42.539 [background-preinit] WARN org.springframework.http.converter.json.Jackson2ObjectMapperBuilder - For Jackson Kotlin classes support please add \"com.fasterxml.jackson.module:jackson-module-kotlin\" to the classpath\r\n> \r\n> SLF4J: Class path contains multiple SLF4J bindings.\r\n> SLF4J: Found binding in [jar:file:/kafka-ui-api.jar!/BOOT-INF/lib/slf4j-log4j12-1.7.30.jar!/org/slf4j/impl/StaticLoggerBinder.class]\r\n> \r\n> SLF4J: Found binding in [jar:file:/kafka-ui-api.jar!/BOOT-INF/lib/log4j-slf4j-impl-2.12.1.jar!/org/slf4j/impl/StaticLoggerBinder.class]\r\n> SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.\r\n> \r\n> SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]\r\n> \r\n> log4j:WARN No appenders could be found for logger (reactor.util.Loggers$LoggerFactory).\r\n> log4j:WARN Please initialize the log4j system properly.\r\n> \r\n> log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.\r\n> \r\n> 13:23:44.281 [main] INFO org.springframework.boot.autoconfigure.security.reactive.ReactiveUserDetailsServiceAutoConfiguration - \r\n> \r\n> Using generated security password: 3e4a49ed-0d2c-4bc5-994e-30610ba3c397\r\n> \r\n> \r\n> 13:23:44.355 [main] WARN org.springframework.boot.autoconfigure.jackson.JacksonAutoConfiguration$JodaDateTimeJacksonConfiguration - Auto-configuration of Jackson's Joda-Time integration is deprecated in favor of using java.time (JSR-310).\r\n> \r\n> 13:23:44.559 [main] INFO com.provectus.kafka.ui.serde.DeserializationService - Using SchemaRegistryAwareRecordSerDe for cluster 'cluster'\r\n> \r\n> 13:23:45.430 [main] INFO org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler - Initializing ExecutorService 'taskScheduler'\r\n> \r\n> 13:23:45.565 [parallel-1] DEBUG com.provectus.kafka.ui.service.MetricsUpdateService - Start getting metrics for kafkaCluster: cluster\r\n> \r\n> 13:23:45.796 [main] INFO org.springframework.boot.web.embedded.netty.NettyWebServer - Netty started on port(s): 8080\r\n> \r\n> 13:23:45.810 [main] INFO com.provectus.kafka.ui.KafkaUiApplication - Started KafkaUiApplication in 4.614 seconds (JVM running for 6.591)\r\n> \r\n> 13:23:52.825 [boundedElastic-4] DEBUG org.springframework.http.codec.json.Jackson2JsonEncoder - [6524d03a] Encoding [[class Cluster {\r\n> name: cluster\r\n> defaultCluster: null\r\n> status: offline\r\n> brokerCount: nul (truncated)...]\r\n> \r\n> 13:24:09.155 [kafka-admin-client-thread | adminclient-2] WARN com.provectus.kafka.ui.exception.ErrorCode - Multiple class com.provectus.kafka.ui.exception.ErrorCode values refer to code 4001\r\n> \r\n> 13:24:09.179 [kafka-admin-client-thread | 
adminclient-2] ERROR org.springframework.boot.autoconfigure.web.reactive.error.AbstractErrorWebExceptionHandler - [de494e71] 500 Server Error for HTTP GET \"/api/clusters/cluster/brokers\"\r\n> java.util.NoSuchElementException: No value present\r\n> \tat java.util.Optional.orElseThrow(Unknown Source) ~[?:?]\r\n> \tSuppressed: reactor.core.publisher.FluxOnAssembly$OnAssemblyException: \r\n> Error has been observed at the following site(s):\r\n> \t|_ checkpoint ⇢ Handler com.provectus.kafka.ui.controller.BrokersController#getBrokers(String, ServerWebExchange) [DispatcherHandler]\r\n> \t|_ checkpoint ⇢ com.provectus.kafka.ui.config.ReadOnlyModeFilter [DefaultWebFilterChain]\r\n> \t|_ checkpoint ⇢ com.provectus.kafka.ui.config.CustomWebFilter [DefaultWebFilterChain]\r\n> \t|_ checkpoint ⇢ org.springframework.security.web.server.authorization.AuthorizationWebFilter [DefaultWebFilterChain]\r\n> \r\n> \t|_ checkpoint ⇢ org.springframework.security.web.server.authorization.ExceptionTranslationWebFilter [DefaultWebFilterChain]\r\n> \r\n> \t|_ checkpoint ⇢ org.springframework.security.web.server.authentication.logout.LogoutWebFilter [DefaultWebFilterChain]\r\n> \r\n> \t|_ checkpoint ⇢ org.springframework.security.web.server.savedrequest.ServerRequestCacheWebFilter [DefaultWebFilterChain]\r\n> \r\n> \t|_ checkpoint ⇢ org.springframework.security.web.server.context.SecurityContextServerWebExchangeWebFilter [DefaultWebFilterChain]\r\n> \r\n> \t|_ checkpoint ⇢ org.springframework.security.web.server.context.ReactorContextWebFilter [DefaultWebFilterChain]\r\n> \r\n> \t|_ checkpoint ⇢ org.springframework.security.web.server.header.HttpHeaderWriterWebFilter [DefaultWebFilterChain]\r\n> \r\n> \t|_ checkpoint ⇢ org.springframework.security.config.web.server.ServerHttpSecurity$ServerWebExchangeReactorContextWebFilter [DefaultWebFilterChain]\r\n> \r\n> \t|_ checkpoint ⇢ org.springframework.security.web.server.WebFilterChainProxy [DefaultWebFilterChain]\r\n> \r\n> \t|_ checkpoint ⇢ HTTP GET \"/api/clusters/cluster/brokers\" [ExceptionHandlingWebHandler]\r\n> \r\n> Stack trace:\r\n> \r\n> \t\tat java.util.Optional.orElseThrow(Unknown Source) ~[?:?]\r\n> \r\n> \t\tat com.provectus.kafka.ui.util.ClusterUtil.getClusterVersion(ClusterUtil.java:368) ~[classes!/:?]\r\n> \r\n> \t\tat reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onNext(FluxMapFuseable.java:107) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]\r\n> \r\n> \t\tat reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1637) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]\r\n> \r\n> \t\tat reactor.core.publisher.MonoFlatMap$FlatMapInner.onNext(MonoFlatMap.java:241) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]\r\n> \r\n> \t\tat reactor.core.publisher.MonoCreate$DefaultMonoSink.success(MonoCreate.java:156) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]\r\n> \r\n> \t\tat com.provectus.kafka.ui.util.ClusterUtil.lambda$toMono$0(ClusterUtil.java:64) ~[classes!/:?]\r\n> \r\n> \t\tat org.apache.kafka.common.internals.KafkaFutureImpl$WhenCompleteBiConsumer.accept(KafkaFutureImpl.java:177) [kafka-clients-2.4.1.jar!/:?]\r\n> \r\n> \t\tat org.apache.kafka.common.internals.KafkaFutureImpl$WhenCompleteBiConsumer.accept(KafkaFutureImpl.java:162) [kafka-clients-2.4.1.jar!/:?]\r\n> \r\n> \t\tat org.apache.kafka.common.internals.KafkaFutureImpl.complete(KafkaFutureImpl.java:221) [kafka-clients-2.4.1.jar!/:?]\r\n> \r\n> \t\tat org.apache.kafka.common.internals.KafkaFutureImpl$Applicant.accept(KafkaFutureImpl.java:65) 
[kafka-clients-2.4.1.jar!/:?]\r\n> \r\n> \t\tat org.apache.kafka.common.internals.KafkaFutureImpl$Applicant.accept(KafkaFutureImpl.java:49) [kafka-clients-2.4.1.jar!/:?]\r\n> \r\n> \t\tat org.apache.kafka.common.internals.KafkaFutureImpl.complete(KafkaFutureImpl.java:221) [kafka-clients-2.4.1.jar!/:?]\r\n> \r\n> \t\tat org.apache.kafka.common.KafkaFuture$AllOfAdapter.maybeComplete(KafkaFuture.java:82) [kafka-clients-2.4.1.jar!/:?]\r\n> \r\n> \t\tat org.apache.kafka.common.KafkaFuture$AllOfAdapter.accept(KafkaFuture.java:76) [kafka-clients-2.4.1.jar!/:?]\r\n> \r\n> \t\tat org.apache.kafka.common.KafkaFuture$AllOfAdapter.accept(KafkaFuture.java:57) [kafka-clients-2.4.1.jar!/:?]\r\n> \r\n> \t\tat org.apache.kafka.common.internals.KafkaFutureImpl.complete(KafkaFutureImpl.java:221) [kafka-clients-2.4.1.jar!/:?]\r\n> \r\n> \t\tat org.apache.kafka.clients.admin.KafkaAdminClient$10.handleResponse(KafkaAdminClient.java:1911) [kafka-clients-2.4.1.jar!/:?]\r\n> \r\n> \t\tat org.apache.kafka.clients.admin.KafkaAdminClient$AdminClientRunnable.handleResponses(KafkaAdminClient.java:1076) [kafka-clients-2.4.1.jar!/:?]\r\n> \r\n> \t\tat org.apache.kafka.clients.admin.KafkaAdminClient$AdminClientRunnable.run(KafkaAdminClient.java:1204) [kafka-clients-2.4.1.jar!/:?]\r\n> \r\n> \t\tat java.lang.Thread.run(Unknown Source) [?:?]\r\n> \r\n> 13:24:09.227 [kafka-admin-client-thread | adminclient-2] DEBUG org.springframework.http.codec.json.Jackson2JsonEncoder - [de494e71] Encoding [class ErrorResponse {\r\n> code: 5000\r\n> message: No value present\r\n> timestamp: 1629725049160\r\n> (truncated)...]\r\n> \r\n> 13:24:15.434 [parallel-1] DEBUG com.provectus.kafka.ui.service.MetricsUpdateService - Start getting metrics for kafkaCluster: cluster\r\n> ```\r\n\r\nHmm, looks strange.\r\nI see in your config cluster name: cluster01\r\nBut from logs i see request to: /api/clusters/cluster/brokers\r\nThis looks suspicious. Is this error appears when you open UI, or you are querying from cli?\r\nCould you query something like: /api/clusters/cluster01/brokers",
"Apologies for the confusion with the cluster name, I changed it at some point, hence cluster is the correct name.\r\nIm using the ui to access.\r\n\r\nJust to be clear this is the docker command i am using:\r\ndocker run -p 8080:8080 \\\r\n -e KAFKA_CLUSTERS_0_NAME=cluster \\\r\n -e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=.aws.confluent.cloud:9092 \\\r\n -e KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_SSL \\\r\n -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=PLAIN \\\r\n -e KAFKA_CLUSTERS_0_PROPERTIES_CLIENT_DNS_LOOKUP=use_all_dns_ips \\\r\n -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG='org.apache.kafka.common.security.plain.PlainLoginModule required username=\"USERNAME\" password=\"PASSWORD\";'\\\r\n -d provectuslabs/kafka-ui:master\r\n\r\nThis seems to be the error when hitting /api/clusters/cluster/brokers:\r\n\r\n```13:39:35.782 [kafka-admin-client-thread | adminclient-6] ERROR org.springframework.boot.autoconfigure.web.reactive.error.AbstractErrorWebExceptionHandler - [338cab37] 500 Server Error for HTTP GET \"/api/clusters/cluster/brokers\"\r\n\r\njava.util.NoSuchElementException: No value present\r\n\r\nat java.util.Optional.orElseThrow(Unknown Source) ~[?:?]\r\n\r\nSuppressed: reactor.core.publisher.FluxOnAssembly$OnAssemblyException:\r\n\r\nError has been observed at the following site(s):\r\n\r\n|_ checkpoint ⇢ Handler com.provectus.kafka.ui.controller.BrokersController#getBrokers(String, ServerWebExchange) [DispatcherHandler]\r\n\r\n|_ checkpoint ⇢ com.provectus.kafka.ui.config.ReadOnlyModeFilter [DefaultWebFilterChain]\r\n\r\n|_ checkpoint ⇢ com.provectus.kafka.ui.config.CustomWebFilter [DefaultWebFilterChain]\r\n\r\n|_ checkpoint ⇢ org.springframework.security.web.server.authorization.AuthorizationWebFilter [DefaultWebFilterChain]\r\n\r\n|_ checkpoint ⇢ org.springframework.security.web.server.authorization.ExceptionTranslationWebFilter [DefaultWebFilterChain]\r\n\r\n\r\n|_ checkpoint ⇢ org.springframework.security.web.server.authentication.logout.LogoutWebFilter [DefaultWebFilterChain]\r\n\r\n|_ checkpoint ⇢ org.springframework.security.web.server.savedrequest.ServerRequestCacheWebFilter [DefaultWebFilterChain]\r\n\r\n|_ checkpoint ⇢ org.springframework.security.web.server.context.SecurityContextServerWebExchangeWebFilter [DefaultWebFilterChain]\r\n\r\n|_ checkpoint ⇢ org.springframework.security.web.server.context.ReactorContextWebFilter [DefaultWebFilterChain]\r\n\r\n|_ checkpoint ⇢ org.springframework.security.web.server.header.HttpHeaderWriterWebFilter [DefaultWebFilterChain]\r\n\r\n|_ checkpoint ⇢ org.springframework.security.config.web.server.ServerHttpSecurity$ServerWebExchangeReactorContextWebFilter [DefaultWebFilterChain]\r\n\r\n|_ checkpoint ⇢ org.springframework.security.web.server.WebFilterChainProxy [DefaultWebFilterChain]\r\n\r\n|_ checkpoint ⇢ HTTP GET \"/api/clusters/cluster/brokers\" [ExceptionHandlingWebHandler]\r\n\r\nStack trace:\r\n\r\nat java.util.Optional.orElseThrow(Unknown Source) ~[?:?]\r\n\r\nat com.provectus.kafka.ui.util.ClusterUtil.getClusterVersion(ClusterUtil.java:368) ~[classes!/:?]\r\n\r\nat reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onNext(FluxMapFuseable.java:107) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]\r\n\r\nat reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1637) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]\r\n\r\nat reactor.core.publisher.MonoFlatMap$FlatMapInner.onNext(MonoFlatMap.java:241) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]\r\n\r\nat 
reactor.core.publisher.MonoCreate$DefaultMonoSink.success(MonoCreate.java:156) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]\r\n\r\nat com.provectus.kafka.ui.util.ClusterUtil.lambda$toMono$0(ClusterUtil.java:64) ~[classes!/:?]\r\n\r\nat org.apache.kafka.common.internals.KafkaFutureImpl$WhenCompleteBiConsumer.accept(KafkaFutureImpl.java:177) [kafka-clients-2.4.1.jar!/:?]\r\n\r\nat org.apache.kafka.common.internals.KafkaFutureImpl$WhenCompleteBiConsumer.accept(KafkaFutureImpl.java:162) [kafka-clients-2.4.1.jar!/:?]\r\n\r\nat org.apache.kafka.common.internals.KafkaFutureImpl.complete(KafkaFutureImpl.java:221) [kafka-clients-2.4.1.jar!/:?]\r\n\r\nat org.apache.kafka.common.internals.KafkaFutureImpl$Applicant.accept(KafkaFutureImpl.java:65) [kafka-clients-2.4.1.jar!/:?]\r\n\r\nat org.apache.kafka.common.internals.KafkaFutureImpl$Applicant.accept(KafkaFutureImpl.java:49) [kafka-clients-2.4.1.jar!/:?]\r\n\r\nat org.apache.kafka.common.internals.KafkaFutureImpl.complete(KafkaFutureImpl.java:221) [kafka-clients-2.4.1.jar!/:?]\r\n\r\nat org.apache.kafka.common.KafkaFuture$AllOfAdapter.maybeComplete(KafkaFuture.java:82) [kafka-clients-2.4.1.jar!/:?]\r\n\r\nat org.apache.kafka.common.KafkaFuture$AllOfAdapter.accept(KafkaFuture.java:76) [kafka-clients-2.4.1.jar!/:?]\r\n\r\nat org.apache.kafka.common.KafkaFuture$AllOfAdapter.accept(KafkaFuture.java:57) [kafka-clients-2.4.1.jar!/:?]\r\n\r\nat org.apache.kafka.common.internals.KafkaFutureImpl.complete(KafkaFutureImpl.java:221) [kafka-clients-2.4.1.jar!/:?]\r\n\r\nat org.apache.kafka.clients.admin.KafkaAdminClient$10.handleResponse(KafkaAdminClient.java:1911) [kafka-clients-2.4.1.jar!/:?]\r\n\r\nat org.apache.kafka.clients.admin.KafkaAdminClient$AdminClientRunnable.handleResponses(KafkaAdminClient.java:1076) [kafka-clients-2.4.1.jar!/:?]\r\n\r\nat org.apache.kafka.clients.admin.KafkaAdminClient$AdminClientRunnable.run(KafkaAdminClient.java:1204) [kafka-clients-2.4.1.jar!/:?]\r\n\r\nat java.lang.Thread.run(Unknown Source) [?:?]\r\n\r\n13:39:35.788 [kafka-admin-client-thread | adminclient-6] DEBUG org.springframework.http.codec.json.Jackson2JsonEncoder - [338cab37] Encoding [class ErrorResponse {\r\n\r\ncode: 5000\r\n\r\nmessage: No value present\r\n\r\ntimestamp: 1629725975782\r\n\r\n(truncated)...]\r\n```",
"According to this stacktrace, you don't have config inter.broker.protocol.version in your cluster. This looks strange. Let me check it. ",
"@david-webill I just updated master branch with fix Could you please pull latest image with tag master and check it?",
"Thank you @germanosin\r\nI will try later today when I get a chance and let you know.",
"Hello\r\n\r\nI have tried with the latest master\r\n\r\nI get the following:\r\n\r\n\r\n```\r\n08:52:16.203 [parallel-1] DEBUG com.provectus.kafka.ui.service.MetricsUpdateService - Start getting metrics for kafkaCluster: cluster\r\n\r\n\r\n08:52:16.942 [kafka-admin-client-thread | adminclient-1] DEBUG org.springframework.http.codec.json.Jackson2JsonEncoder - [be8f458f] Encoding [[class Broker {\r\n\r\nid: 0\r\n\r\nhost: b0-pkc-q283m.af-south-1.aws.confluent.cloud\r\n\r\n}, class Broker {\r\n\r\n(truncated)...]\r\n\r\n```",
"> Hello\r\n> \r\n> I have tried with the latest master\r\n> \r\n> I get the following:\r\n> \r\n> ```\r\n> 08:52:16.203 [parallel-1] DEBUG com.provectus.kafka.ui.service.MetricsUpdateService - Start getting metrics for kafkaCluster: cluster\r\n> \r\n> \r\n> 08:52:16.942 [kafka-admin-client-thread | adminclient-1] DEBUG org.springframework.http.codec.json.Jackson2JsonEncoder - [be8f458f] Encoding [[class Broker {\r\n> \r\n> id: 0\r\n> \r\n> host: b0-pkc-q283m.af-south-1.aws.confluent.cloud\r\n> \r\n> }, class Broker {\r\n> \r\n> (truncated)...]\r\n> ```\r\n\r\nHmm, looks lit it's working.",
"I also thought that, but nothing is showing up in the ui, it still says 1 cluster offline",
"Looks strange. We'll try to debug it with confluent cloud",
"I checked it with confluent cloud. Problem was in our algorithm collecting logdirs information to estimate topic size. I added specific config \r\n```\r\nKAFKA_CLUSTERS_0_DISABLELOGDIRSCOLLECTION\r\n```\r\nIf you'll set it to true, it will work fine, but without topics size information. We'll recheck it in future versions."
] | [] | "2021-08-25T17:29:36Z" | [
"scope/backend"
] | Connecting SASL_SSL with sasl.mechanisms=PLAIN | Hello
Im trying to connect this to an instance of Kafka as follows:
` docker run -p 8080:8080 \
-e KAFKA_CLUSTERS_0_NAME=cluster01 \
-e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=<BROKER+PORT> \
-e KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_SSL \
-e KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=PLAIN \
-e KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG='org.apache.kafka.common.security.plain.PlainLoginModule required username=<USERNAME> password=<PASSWORD>;'\
-d provectuslabs/kafka-ui:latest`
It does not want to connect.
I have looked through multiple issues such as [this](https://github.com/provectus/kafka-ui/issues/573) one, but im not able to come right.
Your assistance would be greatly appreciated | [
"README.md",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java"
] | [
"README.md",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java"
] | [] | diff --git a/README.md b/README.md
index db4df22f28b..b24ad772f3e 100644
--- a/README.md
+++ b/README.md
@@ -173,6 +173,7 @@ For example, if you want to use an environment variable to set the `name` parame
|`KAFKA_CLUSTERS_0_SCHEMANAMETEMPLATE` |How keys are saved to schemaRegistry
|`KAFKA_CLUSTERS_0_JMXPORT` |Open jmxPosrts of a broker
|`KAFKA_CLUSTERS_0_READONLY` |Enable read only mode. Default: false
+|`KAFKA_CLUSTERS_0_DISABLELOGDIRSCOLLECTION` |Disable collecting segments information. Should be true for confluent cloud. Default: false
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME` |Given name for the Kafka Connect cluster
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS` |Address of the Kafka Connect service endpoint
|`LOGGING_LEVEL_ROOT` | Setting log level (all, debug, info, warn, error, fatal, off). Default: debug
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java
index 63925376cab..ef0fdd66f70 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java
@@ -30,6 +30,7 @@ public static class Cluster {
int jmxPort;
Properties properties;
boolean readOnly = false;
+ boolean disableLogDirsCollection = false;
}
@Data
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java
index c87d4945407..9439eb26004 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java
@@ -31,5 +31,6 @@ public class KafkaCluster {
private final String protobufMessageName;
private final Properties properties;
private final Boolean readOnly;
+ private final Boolean disableLogDirsCollection;
private final List<Feature> features;
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
index b966ee06794..e94596b4547 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
@@ -129,11 +129,21 @@ public Mono<KafkaCluster> getUpdatedCluster(KafkaCluster cluster) {
getClusterMetrics(ac.getAdminClient())
.flatMap(i -> fillJmxMetrics(i, cluster.getName(), ac.getAdminClient()))
.flatMap(clusterMetrics ->
- getTopicsData(ac.getAdminClient()).flatMap(it ->
- updateSegmentMetrics(ac.getAdminClient(), clusterMetrics, it)
+ getTopicsData(ac.getAdminClient()).flatMap(it -> {
+ if (cluster.getDisableLogDirsCollection() == null
+ || !cluster.getDisableLogDirsCollection()) {
+ return updateSegmentMetrics(
+ ac.getAdminClient(), clusterMetrics, it
+ );
+ } else {
+ return emptySegmentMetrics(clusterMetrics, it);
+ }
+ }
).map(segmentSizeDto -> buildFromData(cluster, version, segmentSizeDto))
)
)
+ ).doOnError(e ->
+ log.error("Failed to collect cluster {} info", cluster.getName(), e)
).onErrorResume(
e -> Mono.just(cluster.toBuilder()
.status(ServerStatus.OFFLINE)
@@ -484,6 +494,28 @@ private InternalPartition mergeWithStats(String topic, InternalPartition partiti
.build();
}
+ private Mono<InternalSegmentSizeDto> emptySegmentMetrics(InternalClusterMetrics clusterMetrics,
+ List<InternalTopic> internalTopics) {
+ return Mono.just(
+ InternalSegmentSizeDto.builder()
+ .clusterMetricsWithSegmentSize(
+ clusterMetrics.toBuilder()
+ .segmentSize(0)
+ .segmentCount(0)
+ .internalBrokerDiskUsage(Collections.emptyMap())
+ .build()
+ )
+ .internalTopicWithSegmentSize(
+ internalTopics.stream().collect(
+ Collectors.toMap(
+ InternalTopic::getName,
+ i -> i
+ )
+ )
+ ).build()
+ );
+ }
+
private Mono<InternalSegmentSizeDto> updateSegmentMetrics(AdminClient ac,
InternalClusterMetrics clusterMetrics,
List<InternalTopic> internalTopics) {
@@ -491,9 +523,11 @@ private Mono<InternalSegmentSizeDto> updateSegmentMetrics(AdminClient ac,
internalTopics.stream().map(InternalTopic::getName).collect(Collectors.toList());
return ClusterUtil.toMono(ac.describeTopics(names).all()).flatMap(topic ->
ClusterUtil.toMono(ac.describeCluster().nodes()).flatMap(nodes ->
+
ClusterUtil.toMono(
- ac.describeLogDirs(nodes.stream().map(Node::id).collect(Collectors.toList())).all())
- .map(log -> {
+ ac.describeLogDirs(
+ nodes.stream().map(Node::id).collect(Collectors.toList())).all()
+ ).map(log -> {
final List<Tuple3<Integer, TopicPartition, Long>> topicPartitions =
log.entrySet().stream().flatMap(b ->
b.getValue().entrySet().stream().flatMap(topicMap ->
| null | train | train | 2021-08-25T19:27:17 | "2021-08-20T12:35:27Z" | ghost | train |
provectus/kafka-ui/824_833 | provectus/kafka-ui | provectus/kafka-ui/824 | provectus/kafka-ui/833 | [
"connected"
] | 978ed3ad9b04cdeb072d6b8c738d11f713698044 | ba1022d87a4567df8dd4a749582096f2d3ab2e6c | [
"Hi, @vangahari . Thanks for creating this issue. This feature looks interesting we'll try to add it in next versions",
"Hi, @germanosin. Thanks for the reply.\r\nIt would be grateful if we can have this feature in the next versions. ",
"This is already supported by spring and env variables:\r\n\r\n```\r\nAUTH_ENABLED=true\r\nSPRING_SECURITY_USER_NAME=admin\r\nSPRING_SECURITY_USER_PASSWORD=pass\r\n```\r\n\r\nBut broken dues to the `CSRF` not configured properly: https://github.com/provectus/kafka-ui/issues/287#issuecomment-903986233\r\n\r\nI have not tried SSO, but I presume this should be broken with SSO as well as `CSRF` token is not passed to the API and enabled by default.",
"@apryiomka Yes, Got the same issue. I configured as you mentioned above, got the same issue when creating topic from the dashboard.\r\n"
] | [] | "2021-08-27T13:02:21Z" | [
"type/enhancement"
] | Authentication mechanism with simple Username and password | Hi Team, Is there any simple authentication mechanism for the kafka-ui by default. I see we have SSL and SSO option, but without those things it would be better to have simple user and password for login to dashboard in terms of Security concerns | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/OAuthSecurityConfig.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/OAuthSecurityConfig.java"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/OAuthSecurityConfig.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/OAuthSecurityConfig.java
index f28e96ed82f..3e8bcd478f2 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/OAuthSecurityConfig.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/OAuthSecurityConfig.java
@@ -1,17 +1,29 @@
package com.provectus.kafka.ui.config;
+import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
import org.springframework.security.config.web.server.ServerHttpSecurity;
import org.springframework.security.web.server.SecurityWebFilterChain;
+import org.springframework.util.ClassUtils;
@Configuration
@EnableWebFluxSecurity
@ConditionalOnProperty(value = "auth.enabled", havingValue = "true")
public class OAuthSecurityConfig {
+ public static final String REACTIVE_CLIENT_REGISTRATION_REPOSITORY_CLASSNAME =
+ "org.springframework.security.oauth2.client.registration."
+ + "ReactiveClientRegistrationRepository";
+
+ private static final boolean isOAuth2Present = ClassUtils.isPresent(
+ REACTIVE_CLIENT_REGISTRATION_REPOSITORY_CLASSNAME,
+ OAuthSecurityConfig.class.getClassLoader()
+ );
+
private static final String[] AUTH_WHITELIST = {
"/css/**",
"/js/**",
@@ -24,17 +36,43 @@ public class OAuthSecurityConfig {
"/oauth2/**"
};
+ @Autowired
+ ApplicationContext context;
+
@Bean
public SecurityWebFilterChain configure(ServerHttpSecurity http) {
- return http.authorizeExchange()
+ http.authorizeExchange()
.pathMatchers(AUTH_WHITELIST).permitAll()
- .anyExchange().authenticated()
- .and()
- .oauth2Login()
- .and()
- .csrf().disable()
- .build();
+ .anyExchange()
+ .authenticated();
+
+ if (isOAuth2Present && OAuth2ClasspathGuard.shouldConfigure(this.context)) {
+ OAuth2ClasspathGuard.configure(this.context, http);
+ } else {
+ http
+ .httpBasic().and()
+ .formLogin();
+ }
+
+ SecurityWebFilterChain result = http.csrf().disable().build();
+ return result;
}
+ private static class OAuth2ClasspathGuard {
+ static void configure(ApplicationContext context, ServerHttpSecurity http) {
+ http
+ .oauth2Login().and()
+ .oauth2Client();
+ }
+
+ static boolean shouldConfigure(ApplicationContext context) {
+ ClassLoader loader = context.getClassLoader();
+ Class<?> reactiveClientRegistrationRepositoryClass =
+ ClassUtils.resolveClassName(REACTIVE_CLIENT_REGISTRATION_REPOSITORY_CLASSNAME, loader);
+ return context.getBeanNamesForType(reactiveClientRegistrationRepositoryClass).length == 1;
+ }
+ }
+
+
}
| null | train | train | 2021-08-27T14:31:15 | "2021-08-26T05:09:22Z" | vangahari | train |
provectus/kafka-ui/832_834 | provectus/kafka-ui | provectus/kafka-ui/832 | provectus/kafka-ui/834 | [
"connected"
] | 978ed3ad9b04cdeb072d6b8c738d11f713698044 | 1b2b22f18a6d1b1a05681175f788ef87438e8cd1 | [
"Hi, @erizzo . Thanks for creating this issue! This appears to be a regression issue in the master branch after adding the topic deletion feature detection. The fix is in PR. it would be merged today."
] | [] | "2021-08-27T13:22:33Z" | [
"type/bug",
"scope/backend"
] | Startup failure with unreachable cluster in configuration | Running kafka-ui via docker-compose, using the "master" image.
If I have multiple clusters configured and 1 of them is not currently reachable (for example, it requires that I be on the company VPN in order to access), kafka-ui fails at startup.
Here is an example docker-compose config:
```yaml
kafka-ui:
image: provectuslabs/kafka-ui:master
container_name: kafka-ui
restart: "no"
ports:
- "9001:8080"
environment:
- KAFKA_CLUSTERS_0_NAME=local
- KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:29092
- KAFKA_CLUSTERS_0_ZOOKEEPER=zookeeper:2181
- KAFKA_CLUSTERS_0_SCHEMAREGISTRY=schemaregistry:8081
- KAFKA_CLUSTERS_1_NAME=QA
- KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS=qa-env.mycompany.com:40992
- KAFKA_CLUSTERS_1_ZOOKEEPER=qa-env.mycompany.com:32181
- KAFKA_CLUSTERS_1_SCHEMAREGISTRY=qa-env.mycompany.com:30081
depends_on:
- "kafka"
```
If the host `qa-env.mycompany.com` is not reachable, kafka-ui won't even start up. If I comment out or remove all of the KAFKA_CLUSTERS_1_* keys, it starts up fine.
The "latest" image does not have this problem, it will start and the "local" cluster can be browsed/managed ("qa" cluster shows as offline in kafka-ui); so this seems like a regression in the master image.
Here is the startup error log:
```
12:51:08.449 [main] INFO org.springframework.core.KotlinDetector - Kotlin reflection implementation not found at runtime, related features won't be available.
. ____ _ __ _ _
/\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \
( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
\\/ ___)| |_)| | | | | || (_| | ) ) ) )
' |____| .__|_| |_|_| |_\__, | / / / /
=========|_|==============|___/=/_/_/_/
:: Spring Boot :: (v2.2.4.RELEASE)
12:51:09.631 [main] INFO com.provectus.kafka.ui.KafkaUiApplication - Starting KafkaUiApplication on 477724a2e3b3 with PID 1 (/kafka-ui-api.jar started by root in /)
12:51:09.631 [main] DEBUG com.provectus.kafka.ui.KafkaUiApplication - Running with Spring Boot v2.2.4.RELEASE, Spring v5.2.3.RELEASE
12:51:09.632 [main] INFO com.provectus.kafka.ui.KafkaUiApplication - No active profile set, falling back to default profiles: default
12:51:10.295 [background-preinit] WARN org.springframework.http.converter.json.Jackson2ObjectMapperBuilder - For Jackson Kotlin classes support please add "com.fasterxml.jackson.module:jackson-module-kotlin" to the classpath
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/kafka-ui-api.jar!/BOOT-INF/lib/slf4j-log4j12-1.7.30.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/kafka-ui-api.jar!/BOOT-INF/lib/log4j-slf4j-impl-2.12.1.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
log4j:WARN No appenders could be found for logger (reactor.util.Loggers$LoggerFactory).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
12:51:12.270 [main] INFO org.springframework.boot.autoconfigure.security.reactive.ReactiveUserDetailsServiceAutoConfiguration -
Using generated security password: 9d0ce723-8a96-48e3-9c68-ec6cfa0e724c
12:51:12.331 [main] WARN org.springframework.boot.autoconfigure.jackson.JacksonAutoConfiguration$JodaDateTimeJacksonConfiguration - Auto-configuration of Jackson's Joda-Time integration is deprecated in favor of using java.time (JSR-310).
12:51:13.866 [main] WARN org.springframework.boot.web.reactive.context.AnnotationConfigReactiveWebServerApplicationContext - Exception encountered during context initialization - cancelling refresh attempt: org.springframework.beans.factory.UnsatisfiedDependencyException: Error creating bean with name 'deserializationService' defined in URL [jar:file:/kafka-ui-api.jar!/BOOT-INF/classes!/com/provectus/kafka/ui/serde/DeserializationService.class]: Unsatisfied dependency expressed through constructor parameter 0; nested exception is org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'clustersStorage': Invocation of init method failed; nested exception is org.apache.kafka.common.KafkaException: Failed to create new KafkaAdminClient
12:51:13.890 [main] INFO org.springframework.boot.autoconfigure.logging.ConditionEvaluationReportLoggingListener -
Error starting ApplicationContext. To display the conditions report re-run your application with 'debug' enabled.
12:51:13.895 [main] ERROR org.springframework.boot.SpringApplication - Application run failed
org.springframework.beans.factory.UnsatisfiedDependencyException: Error creating bean with name 'deserializationService' defined in URL [jar:file:/kafka-ui-api.jar!/BOOT-INF/classes!/com/provectus/kafka/ui/serde/DeserializationService.class]: Unsatisfied dependency expressed through constructor parameter 0; nested exception is org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'clustersStorage': Invocation of init method failed; nested exception is org.apache.kafka.common.KafkaException: Failed to create new KafkaAdminClient
at org.springframework.beans.factory.support.ConstructorResolver.createArgumentArray(ConstructorResolver.java:798) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.ConstructorResolver.autowireConstructor(ConstructorResolver.java:228) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.autowireConstructor(AbstractAutowireCapableBeanFactory.java:1358) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBeanInstance(AbstractAutowireCapableBeanFactory.java:1204) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:557) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:517) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:323) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:222) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:321) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:879) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:878) ~[spring-context-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:550) ~[spring-context-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.boot.web.reactive.context.ReactiveWebServerApplicationContext.refresh(ReactiveWebServerApplicationContext.java:66) ~[spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at org.springframework.boot.SpringApplication.refresh(SpringApplication.java:747) [spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at org.springframework.boot.SpringApplication.refreshContext(SpringApplication.java:397) [spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at org.springframework.boot.SpringApplication.run(SpringApplication.java:315) [spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at org.springframework.boot.SpringApplication.run(SpringApplication.java:1226) [spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at org.springframework.boot.SpringApplication.run(SpringApplication.java:1215) [spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at com.provectus.kafka.ui.KafkaUiApplication.main(KafkaUiApplication.java:14) [classes!/:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source) ~[?:?]
at java.lang.reflect.Method.invoke(Unknown Source) ~[?:?]
at org.springframework.boot.loader.MainMethodRunner.run(MainMethodRunner.java:48) [kafka-ui-api.jar:?]
at org.springframework.boot.loader.Launcher.launch(Launcher.java:87) [kafka-ui-api.jar:?]
at org.springframework.boot.loader.Launcher.launch(Launcher.java:51) [kafka-ui-api.jar:?]
at org.springframework.boot.loader.JarLauncher.main(JarLauncher.java:52) [kafka-ui-api.jar:?]
Caused by: org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'clustersStorage': Invocation of init method failed; nested exception is org.apache.kafka.common.KafkaException: Failed to create new KafkaAdminClient
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:160) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.applyBeanPostProcessorsBeforeInitialization(AbstractAutowireCapableBeanFactory.java:416) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1788) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:595) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:517) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:323) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:222) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:321) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1287) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1207) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.ConstructorResolver.resolveAutowiredArgument(ConstructorResolver.java:885) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.ConstructorResolver.createArgumentArray(ConstructorResolver.java:789) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
... 27 more
Caused by: org.apache.kafka.common.KafkaException: Failed to create new KafkaAdminClient
at org.apache.kafka.clients.admin.KafkaAdminClient.createInternal(KafkaAdminClient.java:540) ~[kafka-clients-2.8.0.jar!/:?]
at org.apache.kafka.clients.admin.Admin.create(Admin.java:134) ~[kafka-clients-2.8.0.jar!/:?]
at org.apache.kafka.clients.admin.AdminClient.create(AdminClient.java:39) ~[kafka-clients-2.8.0.jar!/:?]
at com.provectus.kafka.ui.service.AdminClientServiceImpl.lambda$createAdminClient$2(AdminClientServiceImpl.java:41) ~[classes!/:?]
at reactor.core.publisher.MonoSupplier.call(MonoSupplier.java:85) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.FluxFlatMap.trySubscribeScalarMap(FluxFlatMap.java:126) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.MonoFlatMap.subscribeOrReturn(MonoFlatMap.java:53) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.Mono.subscribe(Mono.java:4090) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onComplete(FluxSwitchIfEmpty.java:75) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.Operators.complete(Operators.java:132) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.MonoEmpty.subscribe(MonoEmpty.java:45) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.Mono.subscribe(Mono.java:4105) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.Mono.blockOptional(Mono.java:1707) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at com.provectus.kafka.ui.service.FeatureServiceImpl.topicDeletionCheck(FeatureServiceImpl.java:57) ~[classes!/:?]
at com.provectus.kafka.ui.service.FeatureServiceImpl.getAvailableFeatures(FeatureServiceImpl.java:41) ~[classes!/:?]
at com.provectus.kafka.ui.service.ClustersStorage.init(ClustersStorage.java:39) ~[classes!/:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source) ~[?:?]
at java.lang.reflect.Method.invoke(Unknown Source) ~[?:?]
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleElement.invoke(InitDestroyAnnotationBeanPostProcessor.java:389) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleMetadata.invokeInitMethods(InitDestroyAnnotationBeanPostProcessor.java:333) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:157) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.applyBeanPostProcessorsBeforeInitialization(AbstractAutowireCapableBeanFactory.java:416) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1788) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:595) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:517) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:323) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:222) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:321) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1287) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1207) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.ConstructorResolver.resolveAutowiredArgument(ConstructorResolver.java:885) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.ConstructorResolver.createArgumentArray(ConstructorResolver.java:789) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
... 27 more
Suppressed: java.lang.Exception: #block terminated with an error
at reactor.core.publisher.BlockingOptionalMonoSubscriber.blockingGet(BlockingOptionalMonoSubscriber.java:139) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.Mono.blockOptional(Mono.java:1708) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at com.provectus.kafka.ui.service.FeatureServiceImpl.topicDeletionCheck(FeatureServiceImpl.java:57) ~[classes!/:?]
at com.provectus.kafka.ui.service.FeatureServiceImpl.getAvailableFeatures(FeatureServiceImpl.java:41) ~[classes!/:?]
at com.provectus.kafka.ui.service.ClustersStorage.init(ClustersStorage.java:39) ~[classes!/:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source) ~[?:?]
at java.lang.reflect.Method.invoke(Unknown Source) ~[?:?]
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleElement.invoke(InitDestroyAnnotationBeanPostProcessor.java:389) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleMetadata.invokeInitMethods(InitDestroyAnnotationBeanPostProcessor.java:333) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:157) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.applyBeanPostProcessorsBeforeInitialization(AbstractAutowireCapableBeanFactory.java:416) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1788) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:595) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:517) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:323) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:222) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:321) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1287) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1207) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.ConstructorResolver.resolveAutowiredArgument(ConstructorResolver.java:885) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.ConstructorResolver.createArgumentArray(ConstructorResolver.java:789) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.ConstructorResolver.autowireConstructor(ConstructorResolver.java:228) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.autowireConstructor(AbstractAutowireCapableBeanFactory.java:1358) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBeanInstance(AbstractAutowireCapableBeanFactory.java:1204) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:557) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:517) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:323) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:222) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:321) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:879) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:878) ~[spring-context-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:550) ~[spring-context-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.boot.web.reactive.context.ReactiveWebServerApplicationContext.refresh(ReactiveWebServerApplicationContext.java:66) ~[spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at org.springframework.boot.SpringApplication.refresh(SpringApplication.java:747) [spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at org.springframework.boot.SpringApplication.refreshContext(SpringApplication.java:397) [spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at org.springframework.boot.SpringApplication.run(SpringApplication.java:315) [spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at org.springframework.boot.SpringApplication.run(SpringApplication.java:1226) [spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at org.springframework.boot.SpringApplication.run(SpringApplication.java:1215) [spring-boot-2.2.4.RELEASE.jar!/:2.2.4.RELEASE]
at com.provectus.kafka.ui.KafkaUiApplication.main(KafkaUiApplication.java:14) [classes!/:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source) ~[?:?]
at java.lang.reflect.Method.invoke(Unknown Source) ~[?:?]
at org.springframework.boot.loader.MainMethodRunner.run(MainMethodRunner.java:48) [kafka-ui-api.jar:?]
at org.springframework.boot.loader.Launcher.launch(Launcher.java:87) [kafka-ui-api.jar:?]
at org.springframework.boot.loader.Launcher.launch(Launcher.java:51) [kafka-ui-api.jar:?]
at org.springframework.boot.loader.JarLauncher.main(JarLauncher.java:52) [kafka-ui-api.jar:?]
Caused by: org.apache.kafka.common.config.ConfigException: No resolvable bootstrap urls given in bootstrap.servers
at org.apache.kafka.clients.ClientUtils.parseAndValidateAddresses(ClientUtils.java:89) ~[kafka-clients-2.8.0.jar!/:?]
at org.apache.kafka.clients.ClientUtils.parseAndValidateAddresses(ClientUtils.java:48) ~[kafka-clients-2.8.0.jar!/:?]
at org.apache.kafka.clients.admin.KafkaAdminClient.createInternal(KafkaAdminClient.java:494) ~[kafka-clients-2.8.0.jar!/:?]
at org.apache.kafka.clients.admin.Admin.create(Admin.java:134) ~[kafka-clients-2.8.0.jar!/:?]
at org.apache.kafka.clients.admin.AdminClient.create(AdminClient.java:39) ~[kafka-clients-2.8.0.jar!/:?]
at com.provectus.kafka.ui.service.AdminClientServiceImpl.lambda$createAdminClient$2(AdminClientServiceImpl.java:41) ~[classes!/:?]
at reactor.core.publisher.MonoSupplier.call(MonoSupplier.java:85) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.FluxFlatMap.trySubscribeScalarMap(FluxFlatMap.java:126) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.MonoFlatMap.subscribeOrReturn(MonoFlatMap.java:53) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.Mono.subscribe(Mono.java:4090) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onComplete(FluxSwitchIfEmpty.java:75) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.Operators.complete(Operators.java:132) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.MonoEmpty.subscribe(MonoEmpty.java:45) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.Mono.subscribe(Mono.java:4105) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.Mono.blockOptional(Mono.java:1707) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at com.provectus.kafka.ui.service.FeatureServiceImpl.topicDeletionCheck(FeatureServiceImpl.java:57) ~[classes!/:?]
at com.provectus.kafka.ui.service.FeatureServiceImpl.getAvailableFeatures(FeatureServiceImpl.java:41) ~[classes!/:?]
at com.provectus.kafka.ui.service.ClustersStorage.init(ClustersStorage.java:39) ~[classes!/:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source) ~[?:?]
at java.lang.reflect.Method.invoke(Unknown Source) ~[?:?]
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleElement.invoke(InitDestroyAnnotationBeanPostProcessor.java:389) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleMetadata.invokeInitMethods(InitDestroyAnnotationBeanPostProcessor.java:333) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:157) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.applyBeanPostProcessorsBeforeInitialization(AbstractAutowireCapableBeanFactory.java:416) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1788) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:595) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:517) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:323) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:222) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:321) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1287) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1207) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.ConstructorResolver.resolveAutowiredArgument(ConstructorResolver.java:885) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
at org.springframework.beans.factory.support.ConstructorResolver.createArgumentArray(ConstructorResolver.java:789) ~[spring-beans-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
... 27 more
``` | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureServiceImpl.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureServiceImpl.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java"
] | [
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsResetServiceTest.java"
] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java
index 6c58470e59b..638af227753 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java
@@ -22,7 +22,6 @@ public class ClustersStorage {
private final ClustersProperties clusterProperties;
private final ClusterMapper clusterMapper = Mappers.getMapper(ClusterMapper.class);
- private final FeatureService featureService;
@PostConstruct
public void init() {
@@ -36,7 +35,6 @@ public void init() {
clusterProperties.getName(),
cluster.toBuilder()
.topics(new HashMap<>())
- .features(featureService.getAvailableFeatures(cluster))
.build()
);
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureService.java
index 7abe4870ee2..58610259da0 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureService.java
@@ -2,7 +2,7 @@
import com.provectus.kafka.ui.model.Feature;
import com.provectus.kafka.ui.model.KafkaCluster;
-import java.util.List;
+import reactor.core.publisher.Flux;
public interface FeatureService {
/**
@@ -11,5 +11,5 @@ public interface FeatureService {
* @param cluster - cluster
* @return List of Feature
*/
- List<Feature> getAvailableFeatures(KafkaCluster cluster);
+ Flux<Feature> getAvailableFeatures(KafkaCluster cluster);
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureServiceImpl.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureServiceImpl.java
index 919b19d30c4..413c237d9ce 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureServiceImpl.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureServiceImpl.java
@@ -12,6 +12,8 @@
import lombok.extern.log4j.Log4j2;
import org.apache.kafka.common.Node;
import org.springframework.stereotype.Service;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
@Service
@RequiredArgsConstructor
@@ -21,31 +23,32 @@ public class FeatureServiceImpl implements FeatureService {
private final BrokerService brokerService;
@Override
- public List<Feature> getAvailableFeatures(KafkaCluster cluster) {
- List<Feature> features = new ArrayList<>();
+ public Flux<Feature> getAvailableFeatures(KafkaCluster cluster) {
+ List<Mono<Feature>> features = new ArrayList<>();
if (Optional.ofNullable(cluster.getKafkaConnect())
.filter(Predicate.not(List::isEmpty))
.isPresent()) {
- features.add(Feature.KAFKA_CONNECT);
+ features.add(Mono.just(Feature.KAFKA_CONNECT));
}
if (cluster.getKsqldbServer() != null) {
- features.add(Feature.KSQL_DB);
+ features.add(Mono.just(Feature.KSQL_DB));
}
if (cluster.getSchemaRegistry() != null) {
- features.add(Feature.SCHEMA_REGISTRY);
+ features.add(Mono.just(Feature.SCHEMA_REGISTRY));
}
- if (topicDeletionCheck(cluster)) {
- features.add(Feature.TOPIC_DELETION);
- }
+ features.add(
+ topicDeletionCheck(cluster)
+ .flatMap(r -> r ? Mono.just(Feature.TOPIC_DELETION) : Mono.empty())
+ );
- return features;
+ return Flux.fromIterable(features).flatMap(m -> m);
}
- private boolean topicDeletionCheck(KafkaCluster cluster) {
+ private Mono<Boolean> topicDeletionCheck(KafkaCluster cluster) {
return brokerService.getController(cluster)
.map(Node::id)
.flatMap(broker -> brokerService.getBrokerConfigMap(cluster, broker))
@@ -54,6 +57,6 @@ private boolean topicDeletionCheck(KafkaCluster cluster) {
return Boolean.parseBoolean(config.get(DELETE_TOPIC_ENABLE).getValue());
}
return false;
- }).blockOptional().orElse(false);
+ });
}
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
index e94596b4547..d888a388a01 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
@@ -1,9 +1,7 @@
package com.provectus.kafka.ui.service;
-import com.provectus.kafka.ui.exception.IllegalEntityStateException;
import com.provectus.kafka.ui.exception.InvalidRequestApiException;
import com.provectus.kafka.ui.exception.LogDirNotFoundApiException;
-import com.provectus.kafka.ui.exception.NotFoundException;
import com.provectus.kafka.ui.exception.TopicMetadataException;
import com.provectus.kafka.ui.exception.TopicOrPartitionNotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
@@ -11,7 +9,6 @@
import com.provectus.kafka.ui.model.CleanupPolicy;
import com.provectus.kafka.ui.model.CreateTopicMessage;
import com.provectus.kafka.ui.model.ExtendedAdminClient;
-import com.provectus.kafka.ui.model.InternalBrokerConfig;
import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
import com.provectus.kafka.ui.model.InternalBrokerMetrics;
import com.provectus.kafka.ui.model.InternalClusterMetrics;
@@ -104,6 +101,8 @@ public class KafkaService {
private final ClustersStorage clustersStorage;
private final DeserializationService deserializationService;
private final AdminClientService adminClientService;
+ private final FeatureService featureService;
+
public KafkaCluster getUpdatedCluster(KafkaCluster cluster, InternalTopic updatedTopic) {
final Map<String, InternalTopic> topics =
@@ -142,6 +141,9 @@ public Mono<KafkaCluster> getUpdatedCluster(KafkaCluster cluster) {
).map(segmentSizeDto -> buildFromData(cluster, version, segmentSizeDto))
)
)
+ ).flatMap(
+ nc -> featureService.getAvailableFeatures(cluster).collectList()
+ .map(f -> nc.toBuilder().features(f).build())
).doOnError(e ->
log.error("Failed to collect cluster {} info", cluster.getName(), e)
).onErrorResume(
| diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsResetServiceTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsResetServiceTest.java
index 0130d2698aa..779bda4d3ea 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsResetServiceTest.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsResetServiceTest.java
@@ -52,8 +52,10 @@ public class OffsetsResetServiceTest extends AbstractBaseTest {
@BeforeEach
void init() {
AdminClientServiceImpl adminClientService = new AdminClientServiceImpl();
+ BrokerService brokerService = new BrokerServiceImpl(adminClientService);
+ FeatureService featureService = new FeatureServiceImpl(brokerService);
adminClientService.setClientTimeout(5_000);
- kafkaService = new KafkaService(null, null, null, null, adminClientService);
+ kafkaService = new KafkaService(null, null, null, null, adminClientService, featureService);
offsetsResetService = new OffsetsResetService(kafkaService);
createTopic(new NewTopic(topic, PARTITIONS, (short) 1));
| test | train | 2021-08-27T14:31:15 | "2021-08-27T12:53:43Z" | erizzo | train |
provectus/kafka-ui/840_842 | provectus/kafka-ui | provectus/kafka-ui/840 | provectus/kafka-ui/842 | [
"connected"
] | 7643825059459457c3888df554577618e9ade173 | de245e8cc7aea5ee0182c6da9afa76114f89b622 | [] | [] | "2021-08-31T06:54:12Z" | [
"type/bug"
] | Direct link access to the topic does not work | **Describe the bug**
Currently, opening a created topic via a direct link shows the topics list instead of the topic view page
**Set up**
`./mvnw clean install -Pprod `
`docker-compose -f ./docker/kafka-ui.yaml up`
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Create the topic1
2. Open topic view by direct access `ui/clusters/secondLocal/topics/topic1`
**Expected behavior**
Opening the topic1 view page with tabs and functions to delete topic etc
**Screenshots**
https://user-images.githubusercontent.com/44370201/131344570-2fadae50-108e-48c7-bda9-491f79879fc5.mov
| [
"kafka-ui-react-app/src/components/Topics/Topic/Details/Details.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/DetailsContainer.ts",
"kafka-ui-react-app/src/redux/actions/actions.ts",
"kafka-ui-react-app/src/redux/reducers/loader/reducer.ts",
"kafka-ui-react-app/src/redux/reducers/topics/selectors.ts"
] | [
"kafka-ui-react-app/src/components/Topics/Topic/Details/Details.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/DetailsContainer.ts",
"kafka-ui-react-app/src/redux/actions/actions.ts",
"kafka-ui-react-app/src/redux/reducers/loader/reducer.ts",
"kafka-ui-react-app/src/redux/reducers/topics/selectors.ts"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/Details.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Details/Details.tsx
index 611d1327881..d898805254a 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/Details.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/Details.tsx
@@ -13,6 +13,8 @@ import {
} from 'lib/paths';
import ClusterContext from 'components/contexts/ClusterContext';
import ConfirmationModal from 'components/common/ConfirmationModal/ConfirmationModal';
+import { useDispatch } from 'react-redux';
+import { deleteTopicAction } from 'redux/actions';
import OverviewContainer from './Overview/OverviewContainer';
import TopicConsumerGroupsContainer from './ConsumerGroups/TopicConsumerGroupsContainer';
@@ -37,6 +39,7 @@ const Details: React.FC<Props> = ({
clearTopicMessages,
}) => {
const history = useHistory();
+ const dispatch = useDispatch();
const { isReadOnly, isTopicDeletionAllowed } =
React.useContext(ClusterContext);
const [isDeleteTopicConfirmationVisible, setDeleteTopicConfirmationVisible] =
@@ -44,8 +47,10 @@ const Details: React.FC<Props> = ({
const deleteTopicHandler = React.useCallback(() => {
deleteTopic(clusterName, topicName);
}, [clusterName, topicName]);
+
React.useEffect(() => {
if (isDeleted) {
+ dispatch(deleteTopicAction.cancel());
history.push(clusterTopicsPath(clusterName));
}
}, [isDeleted]);
diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/DetailsContainer.ts b/kafka-ui-react-app/src/components/Topics/Topic/Details/DetailsContainer.ts
index f4607dcc117..81c3b036c6b 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/DetailsContainer.ts
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/DetailsContainer.ts
@@ -3,8 +3,8 @@ import { ClusterName, RootState, TopicName } from 'redux/interfaces';
import { withRouter, RouteComponentProps } from 'react-router-dom';
import { deleteTopic, clearTopicMessages } from 'redux/actions';
import {
+ getIsTopicDeleted,
getIsTopicInternal,
- getTopicList,
} from 'redux/reducers/topics/selectors';
import Details from './Details';
@@ -27,7 +27,7 @@ const mapStateToProps = (
clusterName,
topicName,
isInternal: getIsTopicInternal(state, topicName),
- isDeleted: !getTopicList(state).find((topic) => topic.name === topicName),
+ isDeleted: getIsTopicDeleted(state),
});
const mapDispatchToProps = {
diff --git a/kafka-ui-react-app/src/redux/actions/actions.ts b/kafka-ui-react-app/src/redux/actions/actions.ts
index 27f9126f113..f79d6c60f27 100644
--- a/kafka-ui-react-app/src/redux/actions/actions.ts
+++ b/kafka-ui-react-app/src/redux/actions/actions.ts
@@ -97,8 +97,9 @@ export const updateTopicAction = createAsyncAction(
export const deleteTopicAction = createAsyncAction(
'DELETE_TOPIC__REQUEST',
'DELETE_TOPIC__SUCCESS',
- 'DELETE_TOPIC__FAILURE'
-)<undefined, TopicName, undefined>();
+ 'DELETE_TOPIC__FAILURE',
+ 'DELETE_TOPIC__CANCEL'
+)<undefined, TopicName, undefined, undefined>();
export const fetchConsumerGroupsAction = createAsyncAction(
'GET_CONSUMER_GROUPS__REQUEST',
diff --git a/kafka-ui-react-app/src/redux/reducers/loader/reducer.ts b/kafka-ui-react-app/src/redux/reducers/loader/reducer.ts
index 37892542fae..18197eb2f41 100644
--- a/kafka-ui-react-app/src/redux/reducers/loader/reducer.ts
+++ b/kafka-ui-react-app/src/redux/reducers/loader/reducer.ts
@@ -4,9 +4,9 @@ export const initialState: LoaderState = {};
const reducer = (state = initialState, action: Action): LoaderState => {
const { type } = action;
- const matches = /(.*)__(REQUEST|SUCCESS|FAILURE)$/.exec(type);
+ const matches = /(.*)__(REQUEST|SUCCESS|FAILURE|CANCEL)$/.exec(type);
- // not a *__REQUEST / *__SUCCESS / *__FAILURE actions, so we ignore them
+ // not a *__REQUEST / *__SUCCESS / *__FAILURE / *__CANCEL actions, so we ignore them
if (!matches) return state;
const [, requestName, requestState] = matches;
@@ -27,6 +27,11 @@ const reducer = (state = initialState, action: Action): LoaderState => {
...state,
[requestName]: 'errorFetching',
};
+ case 'CANCEL':
+ return {
+ ...state,
+ [requestName]: 'notFetched',
+ };
default:
return state;
}
diff --git a/kafka-ui-react-app/src/redux/reducers/topics/selectors.ts b/kafka-ui-react-app/src/redux/reducers/topics/selectors.ts
index fe0f5eaf5c0..ad388253827 100644
--- a/kafka-ui-react-app/src/redux/reducers/topics/selectors.ts
+++ b/kafka-ui-react-app/src/redux/reducers/topics/selectors.ts
@@ -31,6 +31,12 @@ const getPartitionsCountIncreaseStatus =
const getReplicationFactorUpdateStatus = createFetchingSelector(
'UPDATE_REPLICATION_FACTOR'
);
+const getTopicDeletingStatus = createFetchingSelector('DELETE_TOPIC');
+
+export const getIsTopicDeleted = createSelector(
+ getTopicDeletingStatus,
+ (status) => status === 'fetched'
+);
export const getAreTopicsFetching = createSelector(
getTopicListFetchingStatus,
| null | train | train | 2021-08-30T11:51:07 | "2021-08-30T13:15:07Z" | antipova926 | train |
provectus/kafka-ui/841_843 | provectus/kafka-ui | provectus/kafka-ui/841 | provectus/kafka-ui/843 | [
"connected"
] | 7643825059459457c3888df554577618e9ade173 | 161d887e64065f2cd2987f236d74ab81650ce4f3 | [
"Hi, @rlogiacco. Thanks for creating this issue. According to the stack trace, I assume you are using version 0.1. I checked this part of the code and only one possible situation could be there if your header value is null. I created PR to fix this. Could you please check the version with tag master, after we'll merge it?"
] | [] | "2021-08-31T07:05:59Z" | [
"type/bug"
] | Message display crash on message with custom header | **Describe the bug**
When opening a topic containing messages with custom headers application crashes:
```
23:05:01.764 [boundedElastic-1] ERROR org.springframework.boot.autoconfigure.web.reactive.error.AbstractErrorWebExceptionHandler - [ff384bf8] 500 Server Error for HTTP GET "/api/clusters/local/topics/dlt/messages?limit=100"
java.lang.NullPointerException: null
at java.lang.String.<init>(String.java:630) ~[?:?]
Suppressed: reactor.core.publisher.FluxOnAssembly$OnAssemblyException:
Error has been observed at the following site(s):
|_ checkpoint ⇢ Handler com.provectus.kafka.ui.controller.MessagesController#getTopicMessages(String, String, SeekType, List, Integer, String, ServerWebExchange) [DispatcherHandler]
|_ checkpoint ⇢ com.provectus.kafka.ui.config.CustomWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ com.provectus.kafka.ui.config.ReadOnlyModeFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.web.server.authorization.AuthorizationWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.web.server.authorization.ExceptionTranslationWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.web.server.authentication.logout.LogoutWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.web.server.savedrequest.ServerRequestCacheWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.web.server.context.SecurityContextServerWebExchangeWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.web.server.context.ReactorContextWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.web.server.header.HttpHeaderWriterWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.config.web.server.ServerHttpSecurity$ServerWebExchangeReactorContextWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.web.server.WebFilterChainProxy [DefaultWebFilterChain]
|_ checkpoint ⇢ HTTP GET "/api/clusters/local/topics/dlt/messages?limit=100" [ExceptionHandlingWebHandler]
Stack trace:
at java.lang.String.<init>(String.java:630) ~[?:?]
at com.provectus.kafka.ui.util.ClusterUtil.lambda$mapToTopicMessage$15(ClusterUtil.java:203) ~[classes!/:?]
at java.util.Iterator.forEachRemaining(Iterator.java:133) ~[?:?]
at com.provectus.kafka.ui.util.ClusterUtil.mapToTopicMessage(ClusterUtil.java:203) ~[classes!/:?]
at com.provectus.kafka.ui.service.ConsumingService.lambda$loadMessages$2(ConsumingService.java:61) ~[classes!/:?]
at reactor.core.publisher.FluxMap$MapConditionalSubscriber.onNext(FluxMap.java:199) [reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.FluxSubscribeOn$SubscribeOnSubscriber.onNext(FluxSubscribeOn.java:151) [reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.FluxCreate$BufferAsyncSink.drain(FluxCreate.java:793) [reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.FluxCreate$BufferAsyncSink.next(FluxCreate.java:718) [reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.FluxCreate$SerializedSink.next(FluxCreate.java:153) [reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at com.provectus.kafka.ui.service.ConsumingService$RecordEmitter.accept(ConsumingService.java:146) [classes!/:?]
at com.provectus.kafka.ui.service.ConsumingService$RecordEmitter.accept(ConsumingService.java:128) [classes!/:?]
at reactor.core.publisher.FluxCreate.subscribe(FluxCreate.java:94) [reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.FluxSubscribeOn$SubscribeOnSubscriber.run(FluxSubscribeOn.java:194) [reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.scheduler.WorkerTask.call(WorkerTask.java:84) [reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.scheduler.WorkerTask.call(WorkerTask.java:37) [reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at java.util.concurrent.FutureTask.run(FutureTask.java:264) [?:?]
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) [?:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?]
at java.lang.Thread.run(Thread.java:830) [?:?]
23:05:01.772 [boundedElastic-1] DEBUG org.springframework.http.codec.json.Jackson2JsonEncoder - [ff384bf8] Encoding [class ErrorResponse {
code: 5000
message: Unexpected internal error
timestamp: 163036470 (truncated)...]
```
**Set up**
I'm running kafka, zookeeper and kafka-ui as a docker compose
**Steps to Reproduce**
Steps to reproduce the behaviour:
1. publish a message to a topic with custom headers: `echo '1:{"id":25,"name":"pippo"}' | kafkacat -b kafka:9092 -t test.topic -H "name=value" -K: -P`
2. open the topic messages ui
**Expected behavior**
Message being displayed in the topic messages list | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java"
] | [
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java"
] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java
index ccd8dd47ad4..3c21232539f 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java
@@ -281,9 +281,15 @@ public static int convertToIntServerStatus(ServerStatus serverStatus) {
public static TopicMessage mapToTopicMessage(ConsumerRecord<Bytes, Bytes> consumerRecord,
RecordSerDe recordDeserializer) {
+
Map<String, String> headers = new HashMap<>();
consumerRecord.headers().iterator()
- .forEachRemaining(header -> headers.put(header.key(), new String(header.value())));
+ .forEachRemaining(header ->
+ headers.put(
+ header.key(),
+ header.value() != null ? new String(header.value()) : null
+ )
+ );
TopicMessage topicMessage = new TopicMessage();
| diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java
index 39f57c797e4..e7147dea1cc 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java
@@ -15,6 +15,7 @@
import com.provectus.kafka.ui.util.OffsetsSeekForward;
import java.io.Serializable;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -29,6 +30,8 @@
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.header.Header;
+import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.AfterAll;
@@ -57,13 +60,21 @@ static void generateMsgs() throws Exception {
for (int i = 0; i < MSGS_PER_PARTITION; i++) {
long ts = System.currentTimeMillis() + i;
var value = "msg_" + partition + "_" + i;
- var metadata =
- producer.send(new ProducerRecord<>(TOPIC, partition, ts, null, value)).get();
- SENT_RECORDS.add(new Record(
- value,
- new TopicPartition(metadata.topic(), metadata.partition()),
- metadata.offset(),
- ts)
+ var metadata = producer.send(
+ new ProducerRecord<>(
+ TOPIC, partition, ts, null, value, List.of(
+ new RecordHeader("name", null),
+ new RecordHeader("name2", "value".getBytes())
+ )
+ )
+ ).get();
+ SENT_RECORDS.add(
+ new Record(
+ value,
+ new TopicPartition(metadata.topic(), metadata.partition()),
+ metadata.offset(),
+ ts
+ )
);
}
}
| train | train | 2021-08-30T11:51:07 | "2021-08-30T23:23:43Z" | rlogiacco | train |
provectus/kafka-ui/846_848 | provectus/kafka-ui | provectus/kafka-ui/846 | provectus/kafka-ui/848 | [
"connected"
] | 65884467cfb936b5503371f8d8557147506849e7 | 49c188a13b8eb7592b7a3fd76c46c8e7441206c4 | [
"Thanks, @antipova926 for finding this issue.\r\nWe realized that there is a misconfiguration on kafka-ui's docker-compose side. I'll fix it asap.\r\nLet me assign myself this ticket and resolve the issue"
] | [] | "2021-09-03T09:25:36Z" | [
"type/bug"
] | Created connector in the local cluster is duplicated to the secondLocal cluster | **Describe the bug**
When I create a connector in the Local cluster, I also see this connector in the secondLocal cluster. The same happens in the other direction.
**Set up**
`./mvnw clean install -Pprod`
`docker-compose -f ./docker/kafka-ui-connectors.yaml up`
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Go to the Local cluster
2. Create a topic with a message
3. Create a connector
4. Go to the Second Local cluster
**Expected behavior**
Created connector only in the Local cluster
**Screenshots**
https://user-images.githubusercontent.com/44370201/131527173-ba87e34a-f04c-457e-8780-ed2b02287711.mov
**Additional context**
Connector's config:
```
{
"connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector",
"connection.url": "jdbc:postgresql://postgres-db:5432/test",
"connection.user": "dev_user",
"connection.password": "12345",
"topics": "TEST_TOPIC",
"table.name.format": "sink_activities_e2e_check",
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
"key.converter.schema.registry.url": "http://schemaregistry0:8085",
"value.converter": "org.apache.kafka.connect.json.JsonConverter",
"value.converter.schema.registry.url": "http://schemaregistry0:8085",
"auto.create": "true",
"pk.mode": "record_value",
"pk.fields": "id",
"insert.mode": "upsert",
"errors.log.enable": "true",
"errors.log.include.messages": "true"
}
``` | [
"docker/kafka-ui-connectors.yaml"
] | [
"docker/kafka-ui-connectors.yaml"
] | [] | diff --git a/docker/kafka-ui-connectors.yaml b/docker/kafka-ui-connectors.yaml
index 54cd1d3ddaf..57137cef6b0 100644
--- a/docker/kafka-ui-connectors.yaml
+++ b/docker/kafka-ui-connectors.yaml
@@ -27,8 +27,6 @@ services:
KAFKA_CLUSTERS_1_ZOOKEEPER: zookeeper1:2181
KAFKA_CLUSTERS_1_JMXPORT: 9998
KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
- KAFKA_CLUSTERS_1_KAFKACONNECT_0_NAME: first
- KAFKA_CLUSTERS_1_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
zookeeper0:
image: confluentinc/cp-zookeeper:5.2.4
| null | test | train | 2021-09-02T11:47:18 | "2021-08-31T15:08:26Z" | antipova926 | train |
provectus/kafka-ui/849_854 | provectus/kafka-ui | provectus/kafka-ui/849 | provectus/kafka-ui/854 | [
"timestamp(timedelta=0.0, similarity=0.898160487534571)",
"connected"
] | 49c188a13b8eb7592b7a3fd76c46c8e7441206c4 | 64f957771c7f2893343cf8db1dad871bd286b644 | [
"Hi, @zarezadeh. Thanks for creating this issue. We'll take this in the next version. ",
"It's great @germanosin, so if you are OK with the proposed solution I would prepare and send the PR very soon.",
"@zarezadeh . Great! I would be happy to review it. "
] | [] | "2021-09-06T13:20:42Z" | [
"type/enhancement"
] | Ability to provide a separate Message name for each topic when using ProtobufFile | ### Is your proposal related to a problem?
<!--
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
-->
When using a protobufFile definition for parsing messages, it is assumed that all topics use a single message type. But this assumption is not valid in a lot of common scenarios IMO. It could be helpful to use a separate message type for each topic in a cluster.
### Describe the solution you'd like
<!--
Provide a clear and concise description of what you want to happen.
-->
Instead of setting a single message name in `protobufMessageName` when configuring a cluster, use a map of `topic -> messageName` in order to have different message names for each topic. Also, we can use a predefined key in this map (e.g. `_default`) to set a message name for all other topics without explicit mapping.
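A configuration sketch of what this could look like is below. This is only a sketch of the proposed shape; the property key `protobufMessageNameByTopic` and the topic/message names are illustrative (the patch included in this record later adopts this same key in `application-local.yml`):
```
kafka:
  clusters:
    - name: localUsingProtobufFile
      bootstrapServers: localhost:9092
      protobufFile: messages.proto
      # fallback message type for topics without an explicit mapping
      protobufMessageName: GenericMessage
      # proposed per-topic mapping: topic -> message type
      protobufMessageNameByTopic:
        input-topic: InputMessage
        output-topic: OutputMessage
```
Topics not listed in the mapping would fall back to the default message name, which covers the `_default`-style behaviour described above.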
### Describe alternatives you've considered
<!--
Let us know about other solutions you've tried or researched.
-->
Using _Schema Registry_ is a better alternative when possible but in legacy deployments when messages in Kafka topics don't adhere to the format schema registry expects, this is not an option.
### Additional context
<!--
Is there anything else you can add about the proposal?
You might want to link to related issues here, if you haven't already.
-->
I've already started working to prepare a PR, but I'd like to discuss the best solution before submitting the PR. | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/DeserializationService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/ProtobufFileRecordSerDe.java",
"kafka-ui-api/src/main/resources/application-local.yml"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/DeserializationService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/ProtobufFileRecordSerDe.java",
"kafka-ui-api/src/main/resources/application-local.yml"
] | [
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/serde/ProtobufFileRecordSerDeTest.java",
"kafka-ui-api/src/test/resources/address-book.proto"
] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java
index cab60a256ea..04efdae3c37 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java
@@ -2,6 +2,7 @@
import java.util.ArrayList;
import java.util.List;
+import java.util.Map;
import java.util.Properties;
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
@@ -26,6 +27,7 @@ public static class Cluster {
String keySchemaNameTemplate = "%s-key";
String protobufFile;
String protobufMessageName;
+ Map<String, String> protobufMessageNameByTopic;
List<ConnectCluster> kafkaConnect;
int jmxPort;
boolean jmxSsl;
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java
index dce53c0b9b1..0b517d0cc76 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java
@@ -35,6 +35,7 @@ public class KafkaCluster {
private final Throwable lastZookeeperException;
private final Path protobufFile;
private final String protobufMessageName;
+ private final Map<String, String> protobufMessageNameByTopic;
private final Properties properties;
private final Boolean readOnly;
private final Boolean disableLogDirsCollection;
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/DeserializationService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/DeserializationService.java
index 816b0d9d87f..51e44915913 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/DeserializationService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/DeserializationService.java
@@ -35,7 +35,8 @@ private RecordSerDe createRecordDeserializerForCluster(KafkaCluster cluster) {
if (cluster.getProtobufFile() != null) {
log.info("Using ProtobufFileRecordSerDe for cluster '{}'", cluster.getName());
return new ProtobufFileRecordSerDe(cluster.getProtobufFile(),
- cluster.getProtobufMessageName(), objectMapper);
+ cluster.getProtobufMessageNameByTopic(), cluster.getProtobufMessageName(),
+ objectMapper);
} else {
log.info("Using SchemaRegistryAwareRecordSerDe for cluster '{}'", cluster.getName());
return new SchemaRegistryAwareRecordSerDe(cluster);
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/ProtobufFileRecordSerDe.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/ProtobufFileRecordSerDe.java
index 69b4f1d1553..d8491b11794 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/ProtobufFileRecordSerDe.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/ProtobufFileRecordSerDe.java
@@ -1,6 +1,7 @@
package com.provectus.kafka.ui.serde;
import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.protobuf.Descriptors.Descriptor;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.util.JsonFormat;
import com.provectus.kafka.ui.model.MessageSchema;
@@ -14,6 +15,8 @@
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.util.HashMap;
+import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
@@ -30,15 +33,35 @@ public class ProtobufFileRecordSerDe implements RecordSerDe {
private final ObjectMapper objectMapper;
private final Path protobufSchemaPath;
private final ProtobufSchemaConverter schemaConverter = new ProtobufSchemaConverter();
+ private final Map<String, Descriptor> messageDescriptorMap;
+ private final Descriptor defaultMessageDescriptor;
- public ProtobufFileRecordSerDe(Path protobufSchemaPath, String messageName,
- ObjectMapper objectMapper) throws IOException {
+ public ProtobufFileRecordSerDe(Path protobufSchemaPath, Map<String, String> messageNameMap,
+ String defaultMessageName, ObjectMapper objectMapper)
+ throws IOException {
this.objectMapper = objectMapper;
this.protobufSchemaPath = protobufSchemaPath;
try (final Stream<String> lines = Files.lines(protobufSchemaPath)) {
- this.protobufSchema = new ProtobufSchema(
+ var schema = new ProtobufSchema(
lines.collect(Collectors.joining("\n"))
- ).copy(messageName);
+ );
+ if (defaultMessageName != null) {
+ this.protobufSchema = schema.copy(defaultMessageName);
+ } else {
+ this.protobufSchema = schema;
+ }
+ this.messageDescriptorMap = new HashMap<>();
+ if (messageNameMap != null) {
+ for (Map.Entry<String, String> entry : messageNameMap.entrySet()) {
+ var descriptor = Objects.requireNonNull(protobufSchema.toDescriptor(entry.getValue()),
+ "The given message type is not found in protobuf definition: "
+ + entry.getValue());
+ messageDescriptorMap.put(entry.getKey(), descriptor);
+ }
+ }
+ defaultMessageDescriptor = Objects.requireNonNull(protobufSchema.toDescriptor(),
+ "The given message type is not found in protobuf definition: "
+ + defaultMessageName);
}
}
@@ -51,7 +74,7 @@ public DeserializedKeyValue deserialize(ConsumerRecord<Bytes, Bytes> msg) {
builder.keyFormat(MessageFormat.UNKNOWN);
}
if (msg.value() != null) {
- builder.value(parse(msg.value().get()));
+ builder.value(parse(msg.value().get(), getDescriptor(msg.topic())));
builder.valueFormat(MessageFormat.PROTOBUF);
}
return builder.build();
@@ -60,11 +83,15 @@ public DeserializedKeyValue deserialize(ConsumerRecord<Bytes, Bytes> msg) {
}
}
+ private Descriptor getDescriptor(String topic) {
+ return messageDescriptorMap.getOrDefault(topic, defaultMessageDescriptor);
+ }
+
@SneakyThrows
- private String parse(byte[] value) {
+ private String parse(byte[] value, Descriptor descriptor) {
DynamicMessage protoMsg = DynamicMessage.parseFrom(
- protobufSchema.toDescriptor(),
- new ByteArrayInputStream(value)
+ descriptor,
+ new ByteArrayInputStream(value)
);
byte[] jsonFromProto = ProtobufSchemaUtils.toJson(protoMsg);
return new String(jsonFromProto);
@@ -78,7 +105,7 @@ public ProducerRecord<byte[], byte[]> serialize(String topic,
if (data == null) {
return new ProducerRecord<>(topic, partition, Objects.requireNonNull(key).getBytes(), null);
}
- DynamicMessage.Builder builder = protobufSchema.newMessageBuilder();
+ DynamicMessage.Builder builder = DynamicMessage.newBuilder(getDescriptor(topic));
try {
JsonFormat.parser().merge(data, builder);
final DynamicMessage message = builder.build();
@@ -97,8 +124,8 @@ public ProducerRecord<byte[], byte[]> serialize(String topic,
public TopicMessageSchema getTopicSchema(String topic) {
final JsonSchema jsonSchema = schemaConverter.convert(
- protobufSchemaPath.toUri(),
- protobufSchema.toDescriptor()
+ protobufSchemaPath.toUri(),
+ getDescriptor(topic)
);
final MessageSchema keySchema = new MessageSchema()
.name(protobufSchema.fullName())
diff --git a/kafka-ui-api/src/main/resources/application-local.yml b/kafka-ui-api/src/main/resources/application-local.yml
index 9b1458e1eda..93a5745c388 100644
--- a/kafka-ui-api/src/main/resources/application-local.yml
+++ b/kafka-ui-api/src/main/resources/application-local.yml
@@ -19,6 +19,14 @@ kafka:
# address: http://localhost:8083
# jmxPort: 9998
# read-only: true
+ # -
+ # name: localUsingProtobufFile
+ # bootstrapServers: localhost:9092
+ # protobufFile: messages.proto
+ # protobufMessageName: GenericMessage
+ # protobufMessageNameByTopic:
+ # input-topic: InputMessage
+ # output-topic: OutputMessage
admin-client-timeout: 5000
zookeeper:
connection-timeout: 1000
| diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/serde/ProtobufFileRecordSerDeTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/serde/ProtobufFileRecordSerDeTest.java
new file mode 100644
index 00000000000..0f87efe08f6
--- /dev/null
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/serde/ProtobufFileRecordSerDeTest.java
@@ -0,0 +1,102 @@
+package com.provectus.kafka.ui.serde;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.protobuf.DynamicMessage;
+import com.google.protobuf.util.JsonFormat;
+import com.provectus.kafka.ui.serde.schemaregistry.MessageFormat;
+import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Collections;
+import java.util.Map;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.common.utils.Bytes;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+
+class ProtobufFileRecordSerDeTest {
+
+ // Sample message of type `test.Person`
+ private static byte[] personMessage;
+ // Sample message of type `test.AddressBook`
+ private static byte[] addressBookMessage;
+ private static Path protobufSchemaPath;
+
+ @BeforeAll
+ static void setUp() throws URISyntaxException, IOException {
+ protobufSchemaPath = Paths.get(ProtobufFileRecordSerDeTest.class.getClassLoader()
+ .getResource("address-book.proto").toURI());
+ ProtobufSchema protobufSchema = new ProtobufSchema(Files.readString(protobufSchemaPath));
+
+ DynamicMessage.Builder builder = protobufSchema.newMessageBuilder("test.Person");
+ JsonFormat.parser().merge(
+ "{ \"name\": \"My Name\",\"id\": 101, \"email\": \"[email protected]\" }", builder);
+ personMessage = builder.build().toByteArray();
+
+ builder = protobufSchema.newMessageBuilder("test.AddressBook");
+ JsonFormat.parser().merge(
+ "{\"version\": 1, \"people\": ["
+ + "{ \"name\": \"My Name\",\"id\": 102, \"email\": \"[email protected]\" }]}", builder);
+ addressBookMessage = builder.build().toByteArray();
+ }
+
+ @Test
+ void testDeserialize() throws IOException {
+ var messageNameMap = Map.of(
+ "topic1", "test.Person",
+ "topic2", "test.AddressBook");
+ var deserializer =
+ new ProtobufFileRecordSerDe(protobufSchemaPath, messageNameMap, null, new ObjectMapper());
+ var msg1 = deserializer
+ .deserialize(new ConsumerRecord<>("topic1", 1, 0, Bytes.wrap("key".getBytes()),
+ Bytes.wrap(personMessage)));
+ assertEquals(MessageFormat.PROTOBUF, msg1.getValueFormat());
+ assertTrue(msg1.getValue().contains("[email protected]"));
+
+ var msg2 = deserializer
+ .deserialize(new ConsumerRecord<>("topic2", 1, 1, Bytes.wrap("key".getBytes()),
+ Bytes.wrap(addressBookMessage)));
+ assertTrue(msg2.getValue().contains("[email protected]"));
+ }
+
+ @Test
+ void testNoDefaultMessageName() throws IOException {
+ // by default the first message type defined in proto definition is used
+ var deserializer =
+ new ProtobufFileRecordSerDe(protobufSchemaPath, Collections.emptyMap(), null,
+ new ObjectMapper());
+ var msg = deserializer
+ .deserialize(new ConsumerRecord<>("topic", 1, 0, Bytes.wrap("key".getBytes()),
+ Bytes.wrap(personMessage)));
+ assertTrue(msg.getValue().contains("[email protected]"));
+ }
+
+ @Test
+ void testDefaultMessageName() throws IOException {
+ var messageNameMap = Map.of("topic1", "test.Person");
+ var deserializer =
+ new ProtobufFileRecordSerDe(protobufSchemaPath, messageNameMap, "test.AddressBook",
+ new ObjectMapper());
+ var msg = deserializer
+ .deserialize(new ConsumerRecord<>("a_random_topic", 1, 0, Bytes.wrap("key".getBytes()),
+ Bytes.wrap(addressBookMessage)));
+ assertTrue(msg.getValue().contains("[email protected]"));
+ }
+
+ @Test
+ void testSerialize() throws IOException {
+ var messageNameMap = Map.of("topic1", "test.Person");
+ var serializer =
+ new ProtobufFileRecordSerDe(protobufSchemaPath, messageNameMap, "test.AddressBook",
+ new ObjectMapper());
+ var serialized = serializer.serialize("topic1", "key1", "{\"name\":\"MyName\"}", 0);
+ assertNotNull(serialized.value());
+ }
+}
diff --git a/kafka-ui-api/src/test/resources/address-book.proto b/kafka-ui-api/src/test/resources/address-book.proto
new file mode 100644
index 00000000000..72eab7aab8c
--- /dev/null
+++ b/kafka-ui-api/src/test/resources/address-book.proto
@@ -0,0 +1,39 @@
+// [START declaration]
+syntax = "proto3";
+package test;
+
+// [END declaration]
+
+// [START java_declaration]
+option java_multiple_files = true;
+option java_package = "com.example.tutorial.protos";
+option java_outer_classname = "AddressBookProtos";
+// [END java_declaration]
+
+// [START messages]
+message Person {
+ string name = 1;
+ int32 id = 2; // Unique ID number for this person.
+ string email = 3;
+
+ enum PhoneType {
+ MOBILE = 0;
+ HOME = 1;
+ WORK = 2;
+ }
+
+ message PhoneNumber {
+ string number = 1;
+ PhoneType type = 2;
+ }
+
+ repeated PhoneNumber phones = 4;
+
+}
+
+// Our address book file is just one of these.
+message AddressBook {
+ int32 version = 1;
+ repeated Person people = 2;
+}
+// [END messages]
\ No newline at end of file
| val | train | 2021-09-03T11:38:16 | "2021-09-04T13:24:05Z" | zarezadeh | train |
provectus/kafka-ui/856_857 | provectus/kafka-ui | provectus/kafka-ui/856 | provectus/kafka-ui/857 | [
"connected"
] | 49c188a13b8eb7592b7a3fd76c46c8e7441206c4 | 4660bb8759e33a2a229bcdc6343d3be1bdfd2f28 | [
"Hi, @giom-l. Thanks for creating this issue. We'll fix it in the next minor version.",
"@giom-l I fixed a possible issue with NPE. it would be with tag master in a few minutes. Could you please check it?",
"Hi @germanosin\r\nI tested the master branch, and it works :)\r\nIt took 58s to fetch all my consumer groups (I have 582 consumer on this cluster, but no error).\r\nHowever, the json size is not that big : is the fetch time mostly due to the response time from kafka ?\r\n\r\nBeside this issue (which is solved for me), I noticed a call on github api. Is it expected ?\r\n\r\nHere is a picture of both response time and call on github api.\r\n\r\n",
"Hi @giom-l. Nice to hear that it works now.\r\nWow, we didn't expect these numbers for the consumer groups page. This might be slow because we are querying offsets for each consumer group so we have additional 582 queries. They are working in parallel, but still, this might take a lot of time. Don't you mind creating another issue with consumer groups pagination?\r\n\r\nGithub API call is sending from frontend to check the latest version, and if you are not on the latest suggest you update.\r\nAgain thanks for your questions and contribution. "
] | [] | "2021-09-07T07:43:52Z" | [
"type/bug",
"scope/backend"
] | [0.2.0] consumers page is throwing error 500 | **Describe the bug**
When I try to get all consumer groups from a cluster (which I assume has a large number of consumer groups), the server always responds with an HTTP 500 error.

:warning: also note the typo in the error message : "Consumer Gro**p**ups"
When I get consumers for a single topic, it works perfectly.
It also works on a cluster that has only a few consumer groups (I have it working with a cluster that has 3).
Here is the stack trace generated by the server :
```
19:52:07.634 [kafka-admin-client-thread | adminclient-2] ERROR org.springframework.boot.autoconfigure.web.reactive.error.AbstractErrorWebExceptionHandler - [762b7b6a] 500 Server Error for HTTP GET "/api/clusters/kafka%20Studio%20Dev/consumer-groups"
java.lang.NullPointerException: null
at java.util.Objects.requireNonNull(Unknown Source) ~[?:?]
Suppressed: reactor.core.publisher.FluxOnAssembly$OnAssemblyException:
Error has been observed at the following site(s):
|_ checkpoint ⇢ Handler com.provectus.kafka.ui.controller.ConsumerGroupsController#getConsumerGroups(String, ServerWebExchange) [DispatcherHandler]
|_ checkpoint ⇢ com.provectus.kafka.ui.config.ReadOnlyModeFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ com.provectus.kafka.ui.config.CustomWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.web.server.authorization.AuthorizationWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.web.server.authorization.ExceptionTranslationWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.web.server.authentication.logout.LogoutWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.web.server.savedrequest.ServerRequestCacheWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.web.server.context.SecurityContextServerWebExchangeWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.web.server.context.ReactorContextWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.web.server.header.HttpHeaderWriterWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.config.web.server.ServerHttpSecurity$ServerWebExchangeReactorContextWebFilter [DefaultWebFilterChain]
|_ checkpoint ⇢ org.springframework.security.web.server.WebFilterChainProxy [DefaultWebFilterChain]
|_ checkpoint ⇢ HTTP GET "/api/clusters/kafka%20Studio%20Dev/consumer-groups" [ExceptionHandlingWebHandler]
Stack trace:
at java.util.Objects.requireNonNull(Unknown Source) ~[?:?]
at java.util.stream.Collectors.lambda$uniqKeysMapAccumulator$1(Unknown Source) ~[?:?]
at java.util.stream.ReduceOps$3ReducingSink.accept(Unknown Source) ~[?:?]
at java.util.stream.ReferencePipeline$2$1.accept(Unknown Source) ~[?:?]
at java.util.HashMap$EntrySpliterator.forEachRemaining(Unknown Source) ~[?:?]
at java.util.stream.AbstractPipeline.copyInto(Unknown Source) ~[?:?]
at java.util.stream.AbstractPipeline.wrapAndCopyInto(Unknown Source) ~[?:?]
at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(Unknown Source) ~[?:?]
at java.util.stream.AbstractPipeline.evaluate(Unknown Source) ~[?:?]
at java.util.stream.ReferencePipeline.collect(Unknown Source) ~[?:?]
at com.provectus.kafka.ui.util.ClusterUtil.filterConsumerGroupTopic(ClusterUtil.java:390) ~[classes!/:?]
at com.provectus.kafka.ui.service.KafkaService.lambda$getConsumerGroups$40(KafkaService.java:381) ~[classes!/:?]
at java.util.stream.ReferencePipeline$3$1.accept(Unknown Source) ~[?:?]
at java.util.ArrayList$ArrayListSpliterator.forEachRemaining(Unknown Source) ~[?:?]
at java.util.stream.AbstractPipeline.copyInto(Unknown Source) ~[?:?]
at java.util.stream.AbstractPipeline.wrapAndCopyInto(Unknown Source) ~[?:?]
at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(Unknown Source) ~[?:?]
at java.util.stream.AbstractPipeline.evaluate(Unknown Source) ~[?:?]
at java.util.stream.ReferencePipeline.collect(Unknown Source) ~[?:?]
at com.provectus.kafka.ui.service.KafkaService.lambda$getConsumerGroups$42(KafkaService.java:389) ~[classes!/:?]
at reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onNext(FluxMapFuseable.java:107) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1637) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.MonoFlatMap$FlatMapInner.onNext(MonoFlatMap.java:241) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1637) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.MonoFlatMap$FlatMapInner.onNext(MonoFlatMap.java:241) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1637) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.MonoFlatMap$FlatMapInner.onNext(MonoFlatMap.java:241) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1637) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.MonoCollectList$MonoCollectListSubscriber.onComplete(MonoCollectList.java:121) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.ParallelMergeSequential$MergeSequentialMain.drainLoop(ParallelMergeSequential.java:286) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.ParallelMergeSequential$MergeSequentialMain.drain(ParallelMergeSequential.java:234) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.ParallelMergeSequential$MergeSequentialMain.onComplete(ParallelMergeSequential.java:226) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.ParallelMergeSequential$MergeSequentialInner.onComplete(ParallelMergeSequential.java:407) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.FluxFlatMap$FlatMapMain.checkTerminated(FluxFlatMap.java:823) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.FluxFlatMap$FlatMapMain.drainLoop(FluxFlatMap.java:589) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.FluxFlatMap$FlatMapMain.innerComplete(FluxFlatMap.java:892) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.FluxFlatMap$FlatMapInner.onComplete(FluxFlatMap.java:986) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onComplete(FluxMapFuseable.java:144) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1638) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.MonoFlatMap$FlatMapInner.onNext(MonoFlatMap.java:241) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at reactor.core.publisher.MonoCreate$DefaultMonoSink.success(MonoCreate.java:156) ~[reactor-core-3.3.2.RELEASE.jar!/:3.3.2.RELEASE]
at com.provectus.kafka.ui.util.ClusterUtil.lambda$toMono$0(ClusterUtil.java:64) ~[classes!/:?]
at org.apache.kafka.common.internals.KafkaFutureImpl$WhenCompleteBiConsumer.accept(KafkaFutureImpl.java:177) [kafka-clients-2.8.0.jar!/:?]
at org.apache.kafka.common.internals.KafkaFutureImpl$WhenCompleteBiConsumer.accept(KafkaFutureImpl.java:162) [kafka-clients-2.8.0.jar!/:?]
at org.apache.kafka.common.internals.KafkaFutureImpl.complete(KafkaFutureImpl.java:221) [kafka-clients-2.8.0.jar!/:?]
at org.apache.kafka.clients.admin.KafkaAdminClient$25.handleResponse(KafkaAdminClient.java:3362) [kafka-clients-2.8.0.jar!/:?]
at org.apache.kafka.clients.admin.KafkaAdminClient$AdminClientRunnable.handleResponses(KafkaAdminClient.java:1189) [kafka-clients-2.8.0.jar!/:?]
at org.apache.kafka.clients.admin.KafkaAdminClient$AdminClientRunnable.processRequests(KafkaAdminClient.java:1341) [kafka-clients-2.8.0.jar!/:?]
at org.apache.kafka.clients.admin.KafkaAdminClient$AdminClientRunnable.run(KafkaAdminClient.java:1264) [kafka-clients-2.8.0.jar!/:?]
at java.lang.Thread.run(Unknown Source) [?:?]
19:52:07.637 [kafka-admin-client-thread | adminclient-2] DEBUG org.springframework.http.codec.json.Jackson2JsonEncoder - [762b7b6a] Encoding [class ErrorResponse {
code: 5000
message: Unexpected internal error
timestamp: 163095792 (truncated)...]
```
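The accompanying patch strips null values out of the consumer-group offsets map (`MapUtil.removeNullValues` in the diff below). A minimal, self-contained sketch of why a null map value breaks a `Collectors.toMap` pipeline like the one in the trace above (the topic name and offsets are made up, not taken from the report):
```
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;

class NullOffsetValueExample {
  public static void main(String[] args) {
    // A partition without a committed offset may come back as a null value.
    Map<String, Long> committedOffsets = new HashMap<>();
    committedOffsets.put("events-0", 42L);
    committedOffsets.put("events-1", null);

    try {
      // Collectors.toMap rejects null values with a NullPointerException.
      committedOffsets.entrySet().stream()
          .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    } catch (NullPointerException e) {
      System.out.println("collect failed: " + e);
    }

    // Dropping null values first, as the patch does, keeps the pipeline safe.
    Map<String, Long> cleaned = committedOffsets.entrySet().stream()
        .filter(e -> e.getValue() != null)
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    System.out.println(cleaned); // prints {events-0=42}
  }
}
```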
**Set up**
(How do you run the app?)
**Steps to Reproduce**
Steps to reproduce the behavior:
1.
**Expected behavior**
(A clear and concise description of what you expected to happen)
**Screenshots**
(If applicable, add screenshots to help explain your problem)
**Additional context**
(Add any other context about the problem here) | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/MapUtil.java"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
index 42a8b23882e..201bc9d1967 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
@@ -31,6 +31,7 @@
import com.provectus.kafka.ui.util.JmxClusterUtil;
import com.provectus.kafka.ui.util.JmxMetricsName;
import com.provectus.kafka.ui.util.JmxMetricsValueName;
+import com.provectus.kafka.ui.util.MapUtil;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Collection;
@@ -396,7 +397,7 @@ public Mono<Map<TopicPartition, OffsetAndMetadata>> groupMetadata(KafkaCluster c
ac.getAdminClient()
.listConsumerGroupOffsets(consumerGroupId)
.partitionsToOffsetAndMetadata()
- ).flatMap(ClusterUtil::toMono);
+ ).flatMap(ClusterUtil::toMono).map(MapUtil::removeNullValues);
}
public Map<TopicPartition, Long> topicPartitionsEndOffsets(
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/MapUtil.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/MapUtil.java
new file mode 100644
index 00000000000..d1a5c035ee6
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/MapUtil.java
@@ -0,0 +1,21 @@
+package com.provectus.kafka.ui.util;
+
+import java.util.Map;
+import java.util.stream.Collectors;
+
+public class MapUtil {
+
+ private MapUtil() {
+ }
+
+ public static <K, V> Map<K, V> removeNullValues(Map<K, V> map) {
+ return map.entrySet().stream()
+ .filter(e -> e.getValue() != null)
+ .collect(
+ Collectors.toMap(
+ Map.Entry::getKey,
+ Map.Entry::getValue
+ )
+ );
+ }
+}
| null | train | train | 2021-09-03T11:38:16 | "2021-09-06T19:56:58Z" | giom-l | train |
provectus/kafka-ui/855_859 | provectus/kafka-ui | provectus/kafka-ui/855 | provectus/kafka-ui/859 | [
"connected"
] | 4660bb8759e33a2a229bcdc6343d3be1bdfd2f28 | 68cad6c76cb658b9e28049fe8ee39bcddb10465c | [
"Hi, @giom-l. Thanks for creating this issue. We'll fix it in next minor version."
] | [] | "2021-09-07T12:06:20Z" | [
"type/bug",
"scope/frontend"
] | [0.2.0] readonly mode not respected at topics list level & consumer groups level | **Describe the bug**
When a cluster is set readonly, I expect that no modification item is ever displayed.
It was fixed some time ago on the topic page, but in v0.2.0 some things are still not correct.
On the dashboard page, we can see the readonly mode:

However, on each cluster's "Topics" page, which lists all the topics, I can see the checkboxes beside each topic.
And when I select any of them, the "Delete selected topics" and "Purge messages of selected topics" buttons appear, which is not wanted.

Also, on a consumer group page, I have access to "reset offsets" and "delete consumer group".

**Expected behavior**
When in readonly, I should not be able to modify/delete/create anything at any level.
**Additional context**
(Add any other context about the problem here) | [
"kafka-ui-react-app/src/components/ConsumerGroups/Details/Details.tsx",
"kafka-ui-react-app/src/components/Topics/List/List.tsx",
"kafka-ui-react-app/src/components/Topics/List/ListItem.tsx"
] | [
"kafka-ui-react-app/src/components/ConsumerGroups/Details/Details.tsx",
"kafka-ui-react-app/src/components/Topics/List/List.tsx",
"kafka-ui-react-app/src/components/Topics/List/ListItem.tsx"
] | [] | diff --git a/kafka-ui-react-app/src/components/ConsumerGroups/Details/Details.tsx b/kafka-ui-react-app/src/components/ConsumerGroups/Details/Details.tsx
index de16db04ada..5ebf8c9cf41 100644
--- a/kafka-ui-react-app/src/components/ConsumerGroups/Details/Details.tsx
+++ b/kafka-ui-react-app/src/components/ConsumerGroups/Details/Details.tsx
@@ -14,6 +14,7 @@ import {
import PageLoader from 'components/common/PageLoader/PageLoader';
import ConfirmationModal from 'components/common/ConfirmationModal/ConfirmationModal';
import { useHistory } from 'react-router';
+import ClusterContext from 'components/contexts/ClusterContext';
import ListItem from './ListItem';
@@ -45,6 +46,7 @@ const Details: React.FC<Props> = ({
const [isConfirmationModelVisible, setIsConfirmationModelVisible] =
React.useState<boolean>(false);
const history = useHistory();
+ const { isReadOnly } = React.useContext(ClusterContext);
const onDelete = () => {
setIsConfirmationModelVisible(false);
@@ -79,20 +81,27 @@ const Details: React.FC<Props> = ({
{isFetched ? (
<div className="box">
- <div className="level">
- <div className="level-item level-right buttons">
- <button type="button" className="button" onClick={onResetOffsets}>
- Reset offsets
- </button>
- <button
- type="button"
- className="button is-danger"
- onClick={() => setIsConfirmationModelVisible(true)}
- >
- Delete consumer group
- </button>
+ {!isReadOnly && (
+ <div className="level">
+ <div className="level-item level-right buttons">
+ <button
+ type="button"
+ className="button"
+ onClick={onResetOffsets}
+ >
+ Reset offsets
+ </button>
+ <button
+ type="button"
+ className="button is-danger"
+ onClick={() => setIsConfirmationModelVisible(true)}
+ >
+ Delete consumer group
+ </button>
+ </div>
</div>
- </div>
+ )}
+
<table className="table is-striped is-fullwidth">
<thead>
<tr>
diff --git a/kafka-ui-react-app/src/components/Topics/List/List.tsx b/kafka-ui-react-app/src/components/Topics/List/List.tsx
index a2f21437ae1..4ba1c0e8160 100644
--- a/kafka-ui-react-app/src/components/Topics/List/List.tsx
+++ b/kafka-ui-react-app/src/components/Topics/List/List.tsx
@@ -203,7 +203,7 @@ const List: React.FC<TopicsListProps> = ({
<table className="table is-fullwidth">
<thead>
<tr>
- <th> </th>
+ {!isReadOnly && <th> </th>}
<SortableColumnHeader
value={TopicColumnsToSort.NAME}
title="Topic Name"
diff --git a/kafka-ui-react-app/src/components/Topics/List/ListItem.tsx b/kafka-ui-react-app/src/components/Topics/List/ListItem.tsx
index 9414e9a716d..e93a56fb3bd 100644
--- a/kafka-ui-react-app/src/components/Topics/List/ListItem.tsx
+++ b/kafka-ui-react-app/src/components/Topics/List/ListItem.tsx
@@ -75,17 +75,19 @@ const ListItem: React.FC<ListItemProps> = ({
return (
<tr>
- <td>
- {!internal && (
- <input
- type="checkbox"
- checked={selected}
- onChange={() => {
- toggleTopicSelected(name);
- }}
- />
- )}
- </td>
+ {!isReadOnly && (
+ <td>
+ {!internal && (
+ <input
+ type="checkbox"
+ checked={selected}
+ onChange={() => {
+ toggleTopicSelected(name);
+ }}
+ />
+ )}
+ </td>
+ )}
<td className="has-text-overflow-ellipsis">
<NavLink
exact
| null | train | train | 2021-09-07T12:50:04 | "2021-09-06T19:43:47Z" | giom-l | train |
provectus/kafka-ui/868_871 | provectus/kafka-ui | provectus/kafka-ui/868 | provectus/kafka-ui/871 | [
"timestamp(timedelta=0.0, similarity=0.8518483951423081)",
"connected"
] | 64f957771c7f2893343cf8db1dad871bd286b644 | 43709cc3be8635b1e570c1452582589086d68461 | [] | [] | "2021-09-13T08:09:53Z" | [
"type/bug"
] | Consumer groups offsets is not working properly | **Describe the bug**
(A clear and concise description of what the bug is.)
**Set up**
(How do you run the app?)
**Steps to Reproduce**
Steps to reproduce the behavior:
1.
**Expected behavior**
(A clear and concise description of what you expected to happen)
**Screenshots**
(If applicable, add screenshots to help explain your problem)
**Additional context**
(Add any other context about the problem here) | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/OffsetsResetService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/OffsetsResetService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml"
] | [
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsResetServiceTest.java"
] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
index b0bac49169a..b1437a5e119 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
@@ -70,42 +70,45 @@ public Mono<ResponseEntity<Void>> resetConsumerGroupOffsets(String clusterName,
Mono<ConsumerGroupOffsetsReset>
consumerGroupOffsetsReset,
ServerWebExchange exchange) {
- return consumerGroupOffsetsReset.map(reset -> {
+ return consumerGroupOffsetsReset.flatMap(reset -> {
var cluster =
clustersStorage.getClusterByName(clusterName).orElseThrow(ClusterNotFoundException::new);
switch (reset.getResetType()) {
case EARLIEST:
- offsetsResetService
+ return offsetsResetService
.resetToEarliest(cluster, group, reset.getTopic(), reset.getPartitions());
- break;
case LATEST:
- offsetsResetService
+ return offsetsResetService
.resetToLatest(cluster, group, reset.getTopic(), reset.getPartitions());
- break;
case TIMESTAMP:
if (reset.getResetToTimestamp() == null) {
- throw new ValidationException(
- "resetToTimestamp is required when TIMESTAMP reset type used");
+ return Mono.error(
+ new ValidationException(
+ "resetToTimestamp is required when TIMESTAMP reset type used"
+ )
+ );
}
- offsetsResetService
+ return offsetsResetService
.resetToTimestamp(cluster, group, reset.getTopic(), reset.getPartitions(),
reset.getResetToTimestamp());
- break;
case OFFSET:
if (CollectionUtils.isEmpty(reset.getPartitionsOffsets())) {
- throw new ValidationException(
- "partitionsOffsets is required when OFFSET reset type used");
+ return Mono.error(
+ new ValidationException(
+ "partitionsOffsets is required when OFFSET reset type used"
+ )
+ );
}
Map<Integer, Long> offsets = reset.getPartitionsOffsets().stream()
.collect(toMap(PartitionOffset::getPartition, PartitionOffset::getOffset));
- offsetsResetService.resetToOffsets(cluster, group, reset.getTopic(), offsets);
- break;
+ return offsetsResetService.resetToOffsets(cluster, group, reset.getTopic(), offsets);
default:
- throw new ValidationException("Unknown resetType " + reset.getResetType());
+ return Mono.error(
+ new ValidationException("Unknown resetType " + reset.getResetType())
+ );
}
- return ResponseEntity.ok().build();
- });
+ }).map(o -> ResponseEntity.ok().build());
}
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/OffsetsResetService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/OffsetsResetService.java
index 73d86cecb31..6420e6d7ec3 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/OffsetsResetService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/OffsetsResetService.java
@@ -1,18 +1,19 @@
package com.provectus.kafka.ui.service;
import static com.google.common.base.Preconditions.checkArgument;
+import static com.provectus.kafka.ui.util.ClusterUtil.toMono;
import static java.util.stream.Collectors.toMap;
import static java.util.stream.Collectors.toSet;
import static org.apache.kafka.common.ConsumerGroupState.DEAD;
import static org.apache.kafka.common.ConsumerGroupState.EMPTY;
-import com.google.common.collect.Sets;
import com.provectus.kafka.ui.exception.NotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
-import com.provectus.kafka.ui.model.InternalConsumerGroup;
import com.provectus.kafka.ui.model.KafkaCluster;
import java.util.Collection;
import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import lombok.RequiredArgsConstructor;
@@ -24,6 +25,7 @@
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;
import org.springframework.stereotype.Component;
+import reactor.core.publisher.Mono;
/**
* Implementation follows https://cwiki.apache.org/confluence/display/KAFKA/KIP-122%3A+Add+Reset+Consumer+Group+Offsets+tooling
@@ -36,64 +38,88 @@
public class OffsetsResetService {
private final KafkaService kafkaService;
-
- public void resetToEarliest(KafkaCluster cluster, String group, String topic,
- Collection<Integer> partitions) {
- checkGroupCondition(cluster, group);
- try (var consumer = getConsumer(cluster, group)) {
- var targetPartitions = getTargetPartitions(consumer, topic, partitions);
- var offsets = consumer.beginningOffsets(targetPartitions);
- commitOffsets(consumer, offsets);
- }
+ private final AdminClientService adminClientService;
+
+ public Mono<Map<TopicPartition, OffsetAndMetadata>> resetToEarliest(
+ KafkaCluster cluster, String group, String topic, Collection<Integer> partitions) {
+ return checkGroupCondition(cluster, group)
+ .flatMap(g -> {
+ try (var consumer = getConsumer(cluster, group)) {
+ var targetPartitions = getTargetPartitions(consumer, topic, partitions);
+ var offsets = consumer.beginningOffsets(targetPartitions);
+ return commitOffsets(consumer, offsets);
+ }
+ });
}
- public void resetToLatest(KafkaCluster cluster, String group, String topic,
- Collection<Integer> partitions) {
- checkGroupCondition(cluster, group);
- try (var consumer = getConsumer(cluster, group)) {
- var targetPartitions = getTargetPartitions(consumer, topic, partitions);
- var offsets = consumer.endOffsets(targetPartitions);
- commitOffsets(consumer, offsets);
- }
+ public Mono<Map<TopicPartition, OffsetAndMetadata>> resetToLatest(
+ KafkaCluster cluster, String group, String topic, Collection<Integer> partitions) {
+ return checkGroupCondition(cluster, group).flatMap(
+ g -> {
+ try (var consumer = getConsumer(cluster, group)) {
+ var targetPartitions = getTargetPartitions(consumer, topic, partitions);
+ var offsets = consumer.endOffsets(targetPartitions);
+ return commitOffsets(consumer, offsets);
+ }
+ }
+ );
}
- public void resetToTimestamp(KafkaCluster cluster, String group, String topic,
- Collection<Integer> partitions, long targetTimestamp) {
- checkGroupCondition(cluster, group);
- try (var consumer = getConsumer(cluster, group)) {
- var targetPartitions = getTargetPartitions(consumer, topic, partitions);
- var offsets = offsetsByTimestamp(consumer, targetPartitions, targetTimestamp);
- commitOffsets(consumer, offsets);
- }
+ public Mono<Map<TopicPartition, OffsetAndMetadata>> resetToTimestamp(
+ KafkaCluster cluster, String group, String topic, Collection<Integer> partitions,
+ long targetTimestamp) {
+ return checkGroupCondition(cluster, group).flatMap(
+ g -> {
+ try (var consumer = getConsumer(cluster, group)) {
+ var targetPartitions = getTargetPartitions(consumer, topic, partitions);
+ var offsets = offsetsByTimestamp(consumer, targetPartitions, targetTimestamp);
+ return commitOffsets(consumer, offsets);
+ }
+ }
+ );
}
- public void resetToOffsets(KafkaCluster cluster, String group, String topic,
- Map<Integer, Long> targetOffsets) {
- checkGroupCondition(cluster, group);
- try (var consumer = getConsumer(cluster, group)) {
- var offsets = targetOffsets.entrySet().stream()
- .collect(toMap(e -> new TopicPartition(topic, e.getKey()), Map.Entry::getValue));
- offsets = editOffsetsIfNeeded(consumer, offsets);
- commitOffsets(consumer, offsets);
- }
+ public Mono<Map<TopicPartition, OffsetAndMetadata>> resetToOffsets(
+ KafkaCluster cluster, String group, String topic, Map<Integer, Long> targetOffsets) {
+ return checkGroupCondition(cluster, group).flatMap(
+ g -> {
+ try (var consumer = getConsumer(cluster, group)) {
+ var offsets = targetOffsets.entrySet().stream()
+ .collect(toMap(e -> new TopicPartition(topic, e.getKey()), Map.Entry::getValue));
+ offsets = editOffsetsIfNeeded(consumer, offsets);
+ return commitOffsets(consumer, offsets);
+ }
+ }
+ );
}
- private void checkGroupCondition(KafkaCluster cluster, String groupId) {
- InternalConsumerGroup description =
- kafkaService.getConsumerGroupsInternal(cluster)
- .blockOptional()
- .stream()
- .flatMap(Collection::stream)
- .filter(cgd -> cgd.getGroupId().equals(groupId))
- .findAny()
- .orElseThrow(() -> new NotFoundException("Consumer group not found"));
-
- if (!Set.of(DEAD, EMPTY).contains(description.getState())) {
- throw new ValidationException(
- String.format(
- "Group's offsets can be reset only if group is inactive, but group is in %s state",
- description.getState()));
- }
+ private Mono<ConsumerGroupDescription> checkGroupCondition(KafkaCluster cluster, String groupId) {
+ return adminClientService.getOrCreateAdminClient(cluster)
+ .flatMap(ac ->
+ // we need to call listConsumerGroups() to check group existence, because
+ // describeConsumerGroups() will return consumer group even if it doesn't exist
+ toMono(ac.getAdminClient().listConsumerGroups().all())
+ .filter(cgs -> cgs.stream().anyMatch(g -> g.groupId().equals(groupId)))
+ .flatMap(cgs -> toMono(
+ ac.getAdminClient().describeConsumerGroups(List.of(groupId)).all()))
+ .filter(cgs -> cgs.containsKey(groupId))
+ .map(cgs -> cgs.get(groupId))
+ .flatMap(cg -> {
+ if (!Set.of(DEAD, EMPTY).contains(cg.state())) {
+ return Mono.error(
+ new ValidationException(
+ String.format(
+ "Group's offsets can be reset only if group is inactive,"
+ + " but group is in %s state",
+ cg.state()
+ )
+ )
+ );
+ }
+ return Mono.just(cg);
+ })
+ .switchIfEmpty(Mono.error(new NotFoundException("Consumer group not found")))
+ );
}
private Map<TopicPartition, Long> offsetsByTimestamp(Consumer<?, ?> consumer,
@@ -107,7 +133,10 @@ private Map<TopicPartition, Long> offsetsByTimestamp(Consumer<?, ?> consumer,
.collect(toMap(Map.Entry::getKey, e -> e.getValue().offset()));
// for partitions where we didnt find offset by timestamp, we use end offsets
- foundOffsets.putAll(consumer.endOffsets(Sets.difference(partitions, foundOffsets.keySet())));
+ Set<TopicPartition> endOffsets = new HashSet<>(partitions);
+ endOffsets.removeAll(foundOffsets.keySet());
+ foundOffsets.putAll(consumer.endOffsets(endOffsets));
+
return foundOffsets;
}
@@ -155,11 +184,13 @@ private Map<TopicPartition, Long> editOffsetsIfNeeded(Consumer<?, ?> consumer,
return result;
}
- private void commitOffsets(Consumer<?, ?> consumer, Map<TopicPartition, Long> offsets) {
- consumer.commitSync(
- offsets.entrySet().stream()
- .collect(toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue())))
- );
+ private Mono<Map<TopicPartition, OffsetAndMetadata>> commitOffsets(
+ Consumer<?, ?> consumer, Map<TopicPartition, Long> offsets
+ ) {
+ var toCommit = offsets.entrySet().stream()
+ .collect(toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue())));
+ consumer.commitSync(toCommit);
+ return Mono.just(toCommit);
}
private Consumer<?, ?> getConsumer(KafkaCluster cluster, String groupId) {
diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
index 7f446bdba2d..cb882ef078a 100644
--- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
+++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
@@ -2256,7 +2256,6 @@ components:
format: int64
required:
- partition
- - offset
ConsumerGroupOffsetsResetType:
type: string
| diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsResetServiceTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsResetServiceTest.java
index 779bda4d3ea..290cfec9d76 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsResetServiceTest.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsResetServiceTest.java
@@ -56,7 +56,7 @@ void init() {
FeatureService featureService = new FeatureServiceImpl(brokerService);
adminClientService.setClientTimeout(5_000);
kafkaService = new KafkaService(null, null, null, null, adminClientService, featureService);
- offsetsResetService = new OffsetsResetService(kafkaService);
+ offsetsResetService = new OffsetsResetService(kafkaService, adminClientService);
createTopic(new NewTopic(topic, PARTITIONS, (short) 1));
createConsumerGroup();
@@ -78,17 +78,22 @@ private void createConsumerGroup() {
@Test
void failsIfGroupDoesNotExists() {
assertThatThrownBy(
- () -> offsetsResetService.resetToEarliest(CLUSTER, "non-existing-group", topic, null))
- .isInstanceOf(NotFoundException.class);
+ () -> offsetsResetService
+ .resetToEarliest(CLUSTER, "non-existing-group", topic, null).block()
+ ).isInstanceOf(NotFoundException.class);
assertThatThrownBy(
- () -> offsetsResetService.resetToLatest(CLUSTER, "non-existing-group", topic, null))
- .isInstanceOf(NotFoundException.class);
+ () -> offsetsResetService
+ .resetToLatest(CLUSTER, "non-existing-group", topic, null).block()
+ ).isInstanceOf(NotFoundException.class);
assertThatThrownBy(() -> offsetsResetService
- .resetToTimestamp(CLUSTER, "non-existing-group", topic, null, System.currentTimeMillis()))
- .isInstanceOf(NotFoundException.class);
+ .resetToTimestamp(CLUSTER, "non-existing-group", topic, null, System.currentTimeMillis())
+ .block()
+ ).isInstanceOf(NotFoundException.class);
assertThatThrownBy(
- () -> offsetsResetService.resetToOffsets(CLUSTER, "non-existing-group", topic, Map.of()))
- .isInstanceOf(NotFoundException.class);
+ () -> offsetsResetService
+ .resetToOffsets(CLUSTER, "non-existing-group", topic, Map.of())
+ .block()
+ ).isInstanceOf(NotFoundException.class);
}
@Test
@@ -98,16 +103,19 @@ void failsIfGroupIsActive() {
consumer.subscribe(Pattern.compile("no-such-topic-pattern"));
consumer.poll(Duration.ofMillis(100));
- assertThatThrownBy(() -> offsetsResetService.resetToEarliest(CLUSTER, groupId, topic, null))
- .isInstanceOf(ValidationException.class);
- assertThatThrownBy(() -> offsetsResetService.resetToLatest(CLUSTER, groupId, topic, null))
- .isInstanceOf(ValidationException.class);
+ assertThatThrownBy(() ->
+ offsetsResetService.resetToEarliest(CLUSTER, groupId, topic, null).block()
+ ).isInstanceOf(ValidationException.class);
+ assertThatThrownBy(
+ () -> offsetsResetService.resetToLatest(CLUSTER, groupId, topic, null).block()
+ ).isInstanceOf(ValidationException.class);
assertThatThrownBy(() -> offsetsResetService
- .resetToTimestamp(CLUSTER, groupId, topic, null, System.currentTimeMillis()))
- .isInstanceOf(ValidationException.class);
+ .resetToTimestamp(CLUSTER, groupId, topic, null, System.currentTimeMillis())
+ .block()
+ ).isInstanceOf(ValidationException.class);
assertThatThrownBy(
- () -> offsetsResetService.resetToOffsets(CLUSTER, groupId, topic, Map.of()))
- .isInstanceOf(ValidationException.class);
+ () -> offsetsResetService.resetToOffsets(CLUSTER, groupId, topic, Map.of()).block()
+ ).isInstanceOf(ValidationException.class);
}
}
@@ -116,7 +124,7 @@ void resetToOffsets() {
sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10));
var expectedOffsets = Map.of(0, 5L, 1, 5L, 2, 5L);
- offsetsResetService.resetToOffsets(CLUSTER, groupId, topic, expectedOffsets);
+ offsetsResetService.resetToOffsets(CLUSTER, groupId, topic, expectedOffsets).block();
assertOffsets(expectedOffsets);
}
@@ -126,7 +134,7 @@ void resetToOffsetsCommitsEarliestOrLatestOffsetsIfOffsetsBoundsNotValid() {
var offsetsWithInValidBounds = Map.of(0, -2L, 1, 5L, 2, 500L);
var expectedOffsets = Map.of(0, 0L, 1, 5L, 2, 10L);
- offsetsResetService.resetToOffsets(CLUSTER, groupId, topic, offsetsWithInValidBounds);
+ offsetsResetService.resetToOffsets(CLUSTER, groupId, topic, offsetsWithInValidBounds).block();
assertOffsets(expectedOffsets);
}
@@ -135,11 +143,11 @@ void resetToEarliest() {
sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10));
commit(Map.of(0, 5L, 1, 5L, 2, 5L));
- offsetsResetService.resetToEarliest(CLUSTER, groupId, topic, List.of(0, 1));
+ offsetsResetService.resetToEarliest(CLUSTER, groupId, topic, List.of(0, 1)).block();
assertOffsets(Map.of(0, 0L, 1, 0L, 2, 5L));
commit(Map.of(0, 5L, 1, 5L, 2, 5L));
- offsetsResetService.resetToEarliest(CLUSTER, groupId, topic, null);
+ offsetsResetService.resetToEarliest(CLUSTER, groupId, topic, null).block();
assertOffsets(Map.of(0, 0L, 1, 0L, 2, 0L, 3, 0L, 4, 0L));
}
@@ -148,11 +156,11 @@ void resetToLatest() {
sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10, 3, 10, 4, 10));
commit(Map.of(0, 5L, 1, 5L, 2, 5L));
- offsetsResetService.resetToLatest(CLUSTER, groupId, topic, List.of(0, 1));
+ offsetsResetService.resetToLatest(CLUSTER, groupId, topic, List.of(0, 1)).block();
assertOffsets(Map.of(0, 10L, 1, 10L, 2, 5L));
commit(Map.of(0, 5L, 1, 5L, 2, 5L));
- offsetsResetService.resetToLatest(CLUSTER, groupId, topic, null);
+ offsetsResetService.resetToLatest(CLUSTER, groupId, topic, null).block();
assertOffsets(Map.of(0, 10L, 1, 10L, 2, 10L, 3, 10L, 4, 10L));
}
@@ -169,7 +177,9 @@ void resetToTimestamp() {
new ProducerRecord<Bytes, Bytes>(topic, 2, 1100L, null, null),
new ProducerRecord<Bytes, Bytes>(topic, 2, 1200L, null, null)));
- offsetsResetService.resetToTimestamp(CLUSTER, groupId, topic, List.of(0, 1, 2, 3), 1600L);
+ offsetsResetService.resetToTimestamp(
+ CLUSTER, groupId, topic, List.of(0, 1, 2, 3), 1600L
+ ).block();
assertOffsets(Map.of(0, 2L, 1, 1L, 2, 3L, 3, 0L));
}
| train | train | 2021-09-13T08:20:40 | "2021-09-13T06:03:16Z" | germanosin | train |
provectus/kafka-ui/876_877 | provectus/kafka-ui | provectus/kafka-ui/876 | provectus/kafka-ui/877 | [
"connected"
] | 43709cc3be8635b1e570c1452582589086d68461 | 4c231980aac8e54f0f54b59c1a6e19b5504ecfe0 | [] | [] | "2021-09-15T07:06:21Z" | [
"type/bug",
"scope/backend"
] | Produce message is not working for topics with enums in schema | **Describe the bug**
Producing messages is not working for topics with enums in the schema. If you try to produce a message for a topic whose schema contains an enum, the frontend will fail:
```
Type key is missing
at Object.a.default (index.ts:112)
at m (index.ts:69)
at Object.a.build [as default] (index.ts:218)
at a.default (index.ts:15)
at SendMessage.tsx:67
at Rl (react-dom.production.min.js:262)
at a.unstable_runWithPriority (scheduler.production.min.js:18)
at Gt (react-dom.production.min.js:122)
at Ll (react-dom.production.min.js:261)
at react-dom.production.min.js:261
```
The problem is in the JSON schema generator, which skips the `type` property for enum fields.
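The frontend form builder expects a `type` keyword next to the `enum` values (hence the "Type key is missing" error above). Roughly, the converter should emit an enum property shaped like the snippet below; the field name and values mirror the card-suit enum used in the tests further down and are only an illustration:
```
{
  "order": {
    "type": "string",
    "enum": ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]
  }
}
```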
| [
"kafka-ui-api/pom.xml",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/EnumJsonType.java"
] | [
"kafka-ui-api/pom.xml",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/EnumJsonType.java"
] | [
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java",
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/ProtobufSchemaConverterTest.java"
] | diff --git a/kafka-ui-api/pom.xml b/kafka-ui-api/pom.xml
index 001c22143ff..0209bc27459 100644
--- a/kafka-ui-api/pom.xml
+++ b/kafka-ui-api/pom.xml
@@ -170,6 +170,12 @@
<version>${assertj.version}</version>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>com.github.java-json-tools</groupId>
+ <artifactId>json-schema-validator</artifactId>
+ <version>2.2.14</version>
+ <scope>test</scope>
+ </dependency>
</dependencies>
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java
index d354340f080..ac12109d5a3 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java
@@ -1,8 +1,10 @@
package com.provectus.kafka.ui.util.jsonschema;
import java.net.URI;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
+import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.UUID;
@@ -41,13 +43,7 @@ private FieldSchema convertField(Schema.Field field, Map<String, FieldSchema> de
private FieldSchema convertSchema(String name, Schema schema,
Map<String, FieldSchema> definitions, boolean ref) {
- if (!schema.isUnion() || (schema.getTypes().size() == 2 && schema.isNullable())) {
- if (schema.isUnion()) {
- final Optional<Schema> firstType =
- schema.getTypes().stream().filter(t -> !t.getType().equals(Schema.Type.NULL))
- .findFirst();
- schema = firstType.orElseThrow();
- }
+ if (!schema.isUnion()) {
JsonType type = convertType(schema);
switch (type.getType()) {
case BOOLEAN:
@@ -68,20 +64,29 @@ private FieldSchema convertSchema(String name, Schema schema,
default: throw new RuntimeException("Unknown type");
}
} else {
- return new OneOfFieldSchema(
- schema.getTypes().stream()
- .map(typeSchema ->
- convertSchema(
- name + UUID.randomUUID().toString(),
- typeSchema,
- definitions,
- true
- )
- ).collect(Collectors.toList())
- );
+ return createUnionSchema(schema, definitions);
}
}
+ private FieldSchema createUnionSchema(Schema schema, Map<String, FieldSchema> definitions) {
+ final Map<String, FieldSchema> fields = schema.getTypes().stream()
+ .filter(t -> !t.getType().equals(Schema.Type.NULL))
+ .map(f -> Tuples.of(
+ f.getType().getName().toLowerCase(Locale.ROOT),
+ convertSchema(
+ f.getType().getName().toLowerCase(Locale.ROOT),
+ f, definitions, true
+ )
+ )).collect(Collectors.toMap(
+ Tuple2::getT1,
+ Tuple2::getT2
+ ));
+
+ return new ObjectFieldSchema(
+ fields, Collections.emptyList()
+ );
+ }
+
private FieldSchema createObjectSchema(String name, Schema schema,
Map<String, FieldSchema> definitions, boolean ref) {
final Map<String, FieldSchema> fields = schema.getFields().stream()
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/EnumJsonType.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/EnumJsonType.java
index 13ac8c8b527..715f7d5f442 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/EnumJsonType.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/EnumJsonType.java
@@ -2,6 +2,7 @@
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.TextNode;
import java.util.List;
import java.util.Map;
@@ -17,7 +18,9 @@ public EnumJsonType(List<String> values) {
@Override
public Map<String, JsonNode> toJsonNode(ObjectMapper mapper) {
return Map.of(
- this.type.getName(),
+ "type",
+ new TextNode(Type.STRING.getName()),
+ Type.ENUM.getName(),
mapper.valueToTree(values)
);
}
| diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java
index 036b1c48edc..dbe37f5695b 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java
@@ -1,15 +1,23 @@
package com.provectus.kafka.ui.util.jsonschema;
+import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
+import com.github.fge.jsonschema.core.exceptions.ProcessingException;
+import com.github.fge.jsonschema.core.report.ProcessingReport;
+import com.github.fge.jsonschema.main.JsonSchemaFactory;
+import com.provectus.kafka.ui.serde.schemaregistry.AvroMessageFormatter;
+import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
+import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericData;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
public class AvroJsonSchemaConverterTest {
@Test
- public void avroConvertTest() throws URISyntaxException {
+ public void avroConvertTest() throws URISyntaxException, JsonProcessingException {
final AvroJsonSchemaConverter converter = new AvroJsonSchemaConverter();
URI basePath = new URI("http://example.com/");
@@ -72,20 +80,98 @@ public void avroConvertTest() throws URISyntaxException {
+ " }"
);
- String expected =
- "{\"$id\":\"http://example.com/Message\","
- + "\"$schema\":\"https://json-schema.org/draft/2020-12/schema\","
- + "\"type\":\"object\",\"properties\":{\"record\":{\"$ref\":"
- + "\"#/definitions/RecordInnerMessage\"}},\"required\":[\"record\"],"
- + "\"definitions\":{\"RecordInnerMessage\":{\"type\":\"object\",\"properties\":"
- + "{\"long_text\":{\"type\":\"string\"},\"array\":{\"type\":\"array\",\"items\":"
- + "{\"type\":\"string\"}},\"id\":{\"type\":\"integer\"},\"text\":{\"type\":\"string\"},"
- + "\"map\":{\"type\":\"object\",\"additionalProperties\":{\"type\":\"integer\"}},"
- + "\"order\":{\"enum\":[\"SPADES\",\"HEARTS\",\"DIAMONDS\",\"CLUBS\"]}},"
- + "\"required\":[\"id\",\"text\",\"order\",\"array\",\"map\"]}}}";
+
+ String expected = "{\"$id\":\"http://example.com/Message\","
+ + "\"$schema\":\"https://json-schema.org/draft/2020-12/schema\","
+ + "\"type\":\"object\",\"properties\":{\"record\":"
+ + "{\"$ref\":\"#/definitions/RecordInnerMessage\"}},"
+ + "\"required\":[\"record\"],\"definitions\":"
+ + "{\"RecordInnerMessage\":{\"type\":\"object\",\""
+ + "properties\":{\"long_text\":{\"type\":\"object\","
+ + "\"properties\":{\"string\":{\"type\":\"string\"}}},"
+ + "\"array\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},"
+ + "\"id\":{\"type\":\"integer\"},\"text\":{\"type\":\"string\"},"
+ + "\"map\":{\"type\":\"object\",\"additionalProperties\":"
+ + "{\"type\":\"integer\"}},\"order\":{\"type\":\"string\","
+ + "\"enum\":[\"SPADES\",\"HEARTS\",\"DIAMONDS\",\"CLUBS\"]}},"
+ + "\"required\":[\"id\",\"text\",\"order\",\"array\",\"map\"]}}}";
final JsonSchema convertRecord = converter.convert(basePath, recordSchema);
- Assertions.assertEquals(expected, convertRecord.toJson(new ObjectMapper()));
+ ObjectMapper om = new ObjectMapper();
+ Assertions.assertEquals(
+ om.readTree(expected),
+ om.readTree(
+ convertRecord.toJson(om)
+ )
+ );
+
+ }
+
+ @Test
+ public void testNullableUnions() throws URISyntaxException, IOException, ProcessingException {
+ final AvroJsonSchemaConverter converter = new AvroJsonSchemaConverter();
+ URI basePath = new URI("http://example.com/");
+ final ObjectMapper objectMapper = new ObjectMapper();
+
+ Schema recordSchema = (new Schema.Parser()).parse(
+ " {"
+ + " \"type\": \"record\","
+ + " \"name\": \"Message\","
+ + " \"namespace\": \"com.provectus.kafka\","
+ + " \"fields\": ["
+ + " {"
+ + " \"name\": \"text\","
+ + " \"type\": ["
+ + " \"null\","
+ + " \"string\""
+ + " ],"
+ + " \"default\": null"
+ + " },"
+ + " {"
+ + " \"name\": \"value\","
+ + " \"type\": ["
+ + " \"null\","
+ + " \"string\","
+ + " \"long\""
+ + " ],"
+ + " \"default\": null"
+ + " }"
+ + " ]"
+ + " }"
+ );
+
+ final GenericData.Record record = new GenericData.Record(recordSchema);
+ record.put("text", "Hello world");
+ record.put("value", 100L);
+ byte[] jsonBytes = AvroSchemaUtils.toJson(record);
+ String serialized = new String(jsonBytes);
+
+ String expected =
+ "{\"$id\":\"http://example.com/Message\","
+ + "\"$schema\":\"https://json-schema.org/draft/2020-12/schema\","
+ + "\"type\":\"object\",\"properties\":{\"text\":"
+ + "{\"type\":\"object\",\"properties\":{\"string\":"
+ + "{\"type\":\"string\"}}},\"value\":{\"type\":\"object\","
+ + "\"properties\":{\"string\":{\"type\":\"string\"},"
+ + "\"long\":{\"type\":\"integer\"}}}}}";
+
+ final JsonSchema convert = converter.convert(basePath, recordSchema);
+ Assertions.assertEquals(
+ objectMapper.readTree(expected),
+ objectMapper.readTree(
+ convert.toJson(objectMapper)
+ )
+ );
+
+
+ final ProcessingReport validate =
+ JsonSchemaFactory.byDefault().getJsonSchema(
+ objectMapper.readTree(expected)
+ ).validate(
+ objectMapper.readTree(serialized)
+ );
+
+ Assertions.assertTrue(validate.isSuccess());
}
}
\ No newline at end of file
diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/ProtobufSchemaConverterTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/ProtobufSchemaConverterTest.java
index fa4235d42c2..cc905460779 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/ProtobufSchemaConverterTest.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/ProtobufSchemaConverterTest.java
@@ -1,5 +1,6 @@
package com.provectus.kafka.ui.util.jsonschema;
+import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import java.net.URI;
@@ -11,7 +12,7 @@
public class ProtobufSchemaConverterTest {
@Test
- public void testSimpleProto() throws URISyntaxException {
+ public void testSimpleProto() throws URISyntaxException, JsonProcessingException {
String proto = "syntax = \"proto3\";\n"
+ "package com.acme;\n"
@@ -49,7 +50,8 @@ public void testSimpleProto() throws URISyntaxException {
+ "{\"type\":\"object\",\"properties\":"
+ "{\"optionalField\":{\"oneOf\":[{\"type\":\"string\"},"
+ "{\"type\":\"integer\"}]},\"other_id\":"
- + "{\"type\":\"integer\"},\"order\":{\"enum\":[\"FIRST\",\"SECOND\"]}}}}}";
+ + "{\"type\":\"integer\"},\"order\":{\"enum\":[\"FIRST\",\"SECOND\"],"
+ + "\"type\":\"string\"}}}}}";
ProtobufSchema protobufSchema = new ProtobufSchema(proto);
@@ -58,6 +60,13 @@ public void testSimpleProto() throws URISyntaxException {
final JsonSchema convert =
converter.convert(basePath, protobufSchema.toDescriptor("MyRecord"));
- Assertions.assertEquals(expected, convert.toJson(new ObjectMapper()));
+
+ ObjectMapper om = new ObjectMapper();
+ Assertions.assertEquals(
+ om.readTree(expected),
+ om.readTree(
+ convert.toJson(om)
+ )
+ );
}
}
\ No newline at end of file
| train | train | 2021-09-14T14:42:04 | "2021-09-14T15:33:03Z" | germanosin | train |
provectus/kafka-ui/876_878 | provectus/kafka-ui | provectus/kafka-ui/876 | provectus/kafka-ui/878 | [
"connected"
] | 4c231980aac8e54f0f54b59c1a6e19b5504ecfe0 | 3d537f2bf15b6a07db334d9c68a0f8abcea9f984 | [] | [] | "2021-09-15T09:35:45Z" | [
"type/bug",
"scope/backend"
] | Produce message is not working for topics with enums in schema | **Describe the bug**
Producing messages is not working for topics with enums in the schema. If you try to produce a message for a topic whose schema contains an enum, the frontend will fail:
```
Type key is missing
at Object.a.default (index.ts:112)
at m (index.ts:69)
at Object.a.build [as default] (index.ts:218)
at a.default (index.ts:15)
at SendMessage.tsx:67
at Rl (react-dom.production.min.js:262)
at a.unstable_runWithPriority (scheduler.production.min.js:18)
at Gt (react-dom.production.min.js:122)
at Ll (react-dom.production.min.js:261)
at react-dom.production.min.js:261
```
The problem is in the JSON schema generator, which skips the `type` property for enum fields.
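For context on the change in the patch below: Avro's JSON encoding wraps a non-null union value in an object keyed by the branch type name, while a null value is encoded as plain `null`, so the generated JSON Schema has to accept that wrapper object as well as `null`. A small illustration for the `text` and `value` fields used in the tests further down (the concrete values just follow the test data):
```
{
  "text": { "string": "Hello world" },
  "value": { "long": 100 }
}
```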
| [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ObjectFieldSchema.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ObjectFieldSchema.java"
] | [
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java"
] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java
index ac12109d5a3..707b11dbf68 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java
@@ -6,8 +6,6 @@
import java.util.List;
import java.util.Locale;
import java.util.Map;
-import java.util.Optional;
-import java.util.UUID;
import java.util.stream.Collectors;
import org.apache.avro.Schema;
import reactor.util.function.Tuple2;
@@ -82,9 +80,7 @@ private FieldSchema createUnionSchema(Schema schema, Map<String, FieldSchema> de
Tuple2::getT2
));
- return new ObjectFieldSchema(
- fields, Collections.emptyList()
- );
+ return new ObjectFieldSchema(fields, Collections.emptyList(), true);
}
private FieldSchema createObjectSchema(String name, Schema schema,
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ObjectFieldSchema.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ObjectFieldSchema.java
index ca3182a7232..3fe1f26fcd0 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ObjectFieldSchema.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ObjectFieldSchema.java
@@ -2,7 +2,10 @@
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.fasterxml.jackson.databind.node.TextNode;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
@@ -12,11 +15,18 @@
public class ObjectFieldSchema implements FieldSchema {
private final Map<String, FieldSchema> properties;
private final List<String> required;
+ private final boolean nullable;
public ObjectFieldSchema(Map<String, FieldSchema> properties,
List<String> required) {
+ this(properties, required, false);
+ }
+
+ public ObjectFieldSchema(Map<String, FieldSchema> properties,
+ List<String> required, boolean nullable) {
this.properties = properties;
this.required = required;
+ this.nullable = nullable;
}
public Map<String, FieldSchema> getProperties() {
@@ -36,7 +46,18 @@ public JsonNode toJsonNode(ObjectMapper mapper) {
Tuple2::getT2
));
final ObjectNode objectNode = mapper.createObjectNode();
- objectNode.setAll(new SimpleJsonType(JsonType.Type.OBJECT).toJsonNode(mapper));
+ if (this.nullable) {
+ objectNode.set(
+ "type",
+ mapper.createArrayNode()
+ .add(JsonType.Type.OBJECT.getName())
+ .add(JsonType.Type.NULL.getName())
+ );
+ } else {
+ objectNode.setAll(
+ new SimpleJsonType(JsonType.Type.OBJECT).toJsonNode(mapper)
+ );
+ }
objectNode.set("properties", mapper.valueToTree(nodes));
if (!required.isEmpty()) {
objectNode.set("required", mapper.valueToTree(required));
| diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java
index dbe37f5695b..58daa739cf7 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java
@@ -87,7 +87,7 @@ public void avroConvertTest() throws URISyntaxException, JsonProcessingException
+ "{\"$ref\":\"#/definitions/RecordInnerMessage\"}},"
+ "\"required\":[\"record\"],\"definitions\":"
+ "{\"RecordInnerMessage\":{\"type\":\"object\",\""
- + "properties\":{\"long_text\":{\"type\":\"object\","
+ + "properties\":{\"long_text\":{\"type\":[\"object\", \"null\"],"
+ "\"properties\":{\"string\":{\"type\":\"string\"}}},"
+ "\"array\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},"
+ "\"id\":{\"type\":\"integer\"},\"text\":{\"type\":\"string\"},"
@@ -151,8 +151,8 @@ public void testNullableUnions() throws URISyntaxException, IOException, Process
"{\"$id\":\"http://example.com/Message\","
+ "\"$schema\":\"https://json-schema.org/draft/2020-12/schema\","
+ "\"type\":\"object\",\"properties\":{\"text\":"
- + "{\"type\":\"object\",\"properties\":{\"string\":"
- + "{\"type\":\"string\"}}},\"value\":{\"type\":\"object\","
+ + "{\"type\":[\"object\", \"null\"],\"properties\":{\"string\":"
+ + "{\"type\":\"string\"}}},\"value\":{\"type\":[\"object\", \"null\"],"
+ "\"properties\":{\"string\":{\"type\":\"string\"},"
+ "\"long\":{\"type\":\"integer\"}}}}}";
| val | train | 2021-09-15T10:21:05 | "2021-09-14T15:33:03Z" | germanosin | train |
provectus/kafka-ui/876_880 | provectus/kafka-ui | provectus/kafka-ui/876 | provectus/kafka-ui/880 | [
"connected"
] | 3d537f2bf15b6a07db334d9c68a0f8abcea9f984 | 3c05b603139bbb0fada2bdd2737db79ddea6a65a | [] | [] | "2021-09-15T10:39:20Z" | [
"type/bug",
"scope/backend"
] | Produce message is not working for topics with enums in schema | **Describe the bug**
Producing messages is not working for topics with enums in the schema. If you try to produce a message for a topic whose schema contains an enum, the frontend will fail:
```
Type key is missing
at Object.a.default (index.ts:112)
at m (index.ts:69)
at Object.a.build [as default] (index.ts:218)
at a.default (index.ts:15)
at SendMessage.tsx:67
at Rl (react-dom.production.min.js:262)
at a.unstable_runWithPriority (scheduler.production.min.js:18)
at Gt (react-dom.production.min.js:122)
at Ll (react-dom.production.min.js:261)
at react-dom.production.min.js:261
```
The problem is in the JSON schema generator, which skips the `type` property for enum fields.
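The follow-up change in the patch below models a nullable Avro union as a `oneOf` of `null` and an object holding the non-null branches, rather than as a nullable object type. Roughly, a `["null", "string"]` field ends up with the schema shape sketched here (it matches the expected output in the updated tests further down):
```
{
  "text": {
    "oneOf": [
      { "type": "null" },
      {
        "type": "object",
        "properties": { "string": { "type": "string" } }
      }
    ]
  }
}
```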
| [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ObjectFieldSchema.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ObjectFieldSchema.java"
] | [
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java"
] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java
index 707b11dbf68..e6cff30893d 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java
@@ -67,6 +67,10 @@ private FieldSchema convertSchema(String name, Schema schema,
}
private FieldSchema createUnionSchema(Schema schema, Map<String, FieldSchema> definitions) {
+
+ final boolean nullable = schema.getTypes().stream()
+ .anyMatch(t -> t.getType().equals(Schema.Type.NULL));
+
final Map<String, FieldSchema> fields = schema.getTypes().stream()
.filter(t -> !t.getType().equals(Schema.Type.NULL))
.map(f -> Tuples.of(
@@ -80,7 +84,16 @@ private FieldSchema createUnionSchema(Schema schema, Map<String, FieldSchema> de
Tuple2::getT2
));
- return new ObjectFieldSchema(fields, Collections.emptyList(), true);
+ if (nullable) {
+ return new OneOfFieldSchema(
+ List.of(
+ new SimpleFieldSchema(new SimpleJsonType(JsonType.Type.NULL)),
+ new ObjectFieldSchema(fields, Collections.emptyList())
+ )
+ );
+ } else {
+ return new ObjectFieldSchema(fields, Collections.emptyList());
+ }
}
private FieldSchema createObjectSchema(String name, Schema schema,
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ObjectFieldSchema.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ObjectFieldSchema.java
index 3fe1f26fcd0..296c5e0715a 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ObjectFieldSchema.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ObjectFieldSchema.java
@@ -15,18 +15,11 @@
public class ObjectFieldSchema implements FieldSchema {
private final Map<String, FieldSchema> properties;
private final List<String> required;
- private final boolean nullable;
public ObjectFieldSchema(Map<String, FieldSchema> properties,
List<String> required) {
- this(properties, required, false);
- }
-
- public ObjectFieldSchema(Map<String, FieldSchema> properties,
- List<String> required, boolean nullable) {
this.properties = properties;
this.required = required;
- this.nullable = nullable;
}
public Map<String, FieldSchema> getProperties() {
@@ -46,18 +39,9 @@ public JsonNode toJsonNode(ObjectMapper mapper) {
Tuple2::getT2
));
final ObjectNode objectNode = mapper.createObjectNode();
- if (this.nullable) {
- objectNode.set(
- "type",
- mapper.createArrayNode()
- .add(JsonType.Type.OBJECT.getName())
- .add(JsonType.Type.NULL.getName())
- );
- } else {
- objectNode.setAll(
- new SimpleJsonType(JsonType.Type.OBJECT).toJsonNode(mapper)
- );
- }
+ objectNode.setAll(
+ new SimpleJsonType(JsonType.Type.OBJECT).toJsonNode(mapper)
+ );
objectNode.set("properties", mapper.valueToTree(nodes));
if (!required.isEmpty()) {
objectNode.set("required", mapper.valueToTree(required));
| diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java
index 58daa739cf7..edcc9c559b4 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java
@@ -87,13 +87,14 @@ public void avroConvertTest() throws URISyntaxException, JsonProcessingException
+ "{\"$ref\":\"#/definitions/RecordInnerMessage\"}},"
+ "\"required\":[\"record\"],\"definitions\":"
+ "{\"RecordInnerMessage\":{\"type\":\"object\",\""
- + "properties\":{\"long_text\":{\"type\":[\"object\", \"null\"],"
- + "\"properties\":{\"string\":{\"type\":\"string\"}}},"
- + "\"array\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},"
- + "\"id\":{\"type\":\"integer\"},\"text\":{\"type\":\"string\"},"
- + "\"map\":{\"type\":\"object\",\"additionalProperties\":"
- + "{\"type\":\"integer\"}},\"order\":{\"type\":\"string\","
- + "\"enum\":[\"SPADES\",\"HEARTS\",\"DIAMONDS\",\"CLUBS\"]}},"
+ + "properties\":{\"long_text\":{\"oneOf\":[{\"type\":\"null\"},"
+ + "{\"type\":\"object\",\"properties\":{\"string\":"
+ + "{\"type\":\"string\"}}}]},\"array\":{\"type\":\"array\",\"items\":"
+ + "{\"type\":\"string\"}},\"id\":{\"type\":\"integer\"},\"text\":"
+ + "{\"type\":\"string\"},\"map\":{\"type\":\"object\","
+ + "\"additionalProperties\":{\"type\":\"integer\"}},"
+ + "\"order\":{\"enum\":[\"SPADES\",\"HEARTS\",\"DIAMONDS\",\"CLUBS\"],"
+ + "\"type\":\"string\"}},"
+ "\"required\":[\"id\",\"text\",\"order\",\"array\",\"map\"]}}}";
final JsonSchema convertRecord = converter.convert(basePath, recordSchema);
@@ -151,10 +152,10 @@ public void testNullableUnions() throws URISyntaxException, IOException, Process
"{\"$id\":\"http://example.com/Message\","
+ "\"$schema\":\"https://json-schema.org/draft/2020-12/schema\","
+ "\"type\":\"object\",\"properties\":{\"text\":"
- + "{\"type\":[\"object\", \"null\"],\"properties\":{\"string\":"
- + "{\"type\":\"string\"}}},\"value\":{\"type\":[\"object\", \"null\"],"
- + "\"properties\":{\"string\":{\"type\":\"string\"},"
- + "\"long\":{\"type\":\"integer\"}}}}}";
+ + "{\"oneOf\":[{\"type\":\"null\"},{\"type\":\"object\","
+ + "\"properties\":{\"string\":{\"type\":\"string\"}}}]},\"value\":"
+ + "{\"oneOf\":[{\"type\":\"null\"},{\"type\":\"object\","
+ + "\"properties\":{\"string\":{\"type\":\"string\"},\"long\":{\"type\":\"integer\"}}}]}}}";
final JsonSchema convert = converter.convert(basePath, recordSchema);
Assertions.assertEquals(
| test | train | 2021-09-15T11:59:46 | "2021-09-14T15:33:03Z" | germanosin | train |
provectus/kafka-ui/793_884 | provectus/kafka-ui | provectus/kafka-ui/793 | provectus/kafka-ui/884 | [
"timestamp(timedelta=774.0, similarity=0.9129114134372687)",
"connected"
] | 3c05b603139bbb0fada2bdd2737db79ddea6a65a | 81a6564183c5bd76214ab6b68f30d14f73f85257 | [
"Hi, @tuananhlai. Thanks for creating this issue. Will try to add it in next version"
] | [] | "2021-09-16T09:55:49Z" | [
"type/bug",
"scope/backend",
"scope/frontend"
] | Connector doesn't show trace when failed | **Describe the bug**
Kafka connectors don't show a stack trace when they fail to run.
**Set up**
1. Create a new connector (I used Debezium)
2. Configure the connector with the wrong password.
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Click `Kafka Connector` in the side bar.
2. Click on a connector.
**Expected behavior**
The `Trace` column should show the stack trace of the failure.
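The Kafka Connect REST API already returns the failure cause: the connector status endpoint includes a `trace` field for failed connectors and tasks, which is the field the patch below adds to the API contract. A sketch of such a status response, with the connector name, worker address and exception text made up:
```
{
  "name": "my-debezium-connector",
  "connector": { "state": "RUNNING", "worker_id": "10.0.0.5:8083" },
  "tasks": [
    {
      "id": 0,
      "state": "FAILED",
      "worker_id": "10.0.0.5:8083",
      "trace": "org.apache.kafka.connect.errors.ConnectException: ..."
    }
  ],
  "type": "source"
}
```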
**Screenshots**

**Additional context**
(Add any other context about the problem here) | [
"kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml"
] | [
"kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml"
] | [] | diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml
index a5b8f18a243..14c996ee545 100644
--- a/kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml
+++ b/kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml
@@ -409,6 +409,8 @@ components:
- UNASSIGNED
worker_id:
type: string
+ trace:
+ type: string
ConnectorStatus:
type: object
@@ -427,6 +429,8 @@ components:
- UNASSIGNED
worker_id:
type: string
+ trace:
+ type: string
tasks:
type: array
items:
| null | val | train | 2021-09-15T13:02:06 | "2021-08-13T07:18:53Z" | tuananhlai | train |
provectus/kafka-ui/886_888 | provectus/kafka-ui | provectus/kafka-ui/886 | provectus/kafka-ui/888 | [
"connected"
] | 315b6ed6d5208793cf2fcfe4b928562770f853f9 | 5e74bda56ec19e95c3ed11b349360691ad6e6914 | [
"Hi, thanks for creating this issue. We'll try to fix it in the next minor version. ",
"@germanosin Looks like it is still not fixed on master (based on Docker image from 2 days ago). It now accepts the comma-separated list, but it ONLY communicates with first host in the list. That is not how SchemaRegistry designed to work.\r\n\r\nWe had scheduled downtime on first node in cluster -- the entire stack works as expected, but kafka-ui could not read schemas.",
"@akamensky Thanks for checking this, this behaviour would be updated in the next versions.",
"I agree with @akamensk the implementation does not add much value besides being able to parse and select the first registry URI in a comma separated list. \r\n\r\n@akamensky I suppose you are expecting some kind of failover behavior ?",
"Something struck me, what if the multiple registries have different authentication credentials ?",
"@jonasgeiregat I am not sure how that would work. We have them all unauthenticated. But I imagine in clustered setup (where each registry provides exactly same data and uses same topics as data storage) the chances of individual instances having different credentials is very low as that would be an issue for failover cases for any clients.",
"We’re planning future improvements on this topic anyway. This should work fine for now. ",
"We’re planning future improvements on this topic anyway. This should work fine for now. \n\n> On 17 Mar 2022, at 13:49, Alexey Kamenskiy ***@***.***> wrote:\n> \n> \n> @jonasgeiregat I am not sure how that would work. We have them all unauthenticated. But I imagine in clustered setup (where each registry provides exactly same data and uses same topics as data storage) the chances of individual instances having different credentials is very low as that would be an issue for failover cases for any clients.\n> \n> —\n> Reply to this email directly, view it on GitHub, or unsubscribe.\n> Triage notifications on the go with GitHub Mobile for iOS or Android. \n> You are receiving this because you modified the open/close state.\n"
] | [] | "2021-09-17T08:24:14Z" | [
"type/bug",
"scope/backend",
"status/accepted"
] | Schema Registry configuration does not support clustered endpoint | **Describe the bug**
All Kafka ecosystem tools support schema registry configuration in the form `http(s)://host1.name:8081,http(s)://host2.name:8081,...`
This does not work with this UI, because it will try to do something like:
```
curl http(s)://host1.name:8081,http(s)://host2.name:8081,.../subjects
```
This obviously crashes.
This is a bug since it contradicts the standard way of defining SR endpoints.
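For illustration only, here is a minimal sketch of splitting such a comma-separated endpoint value into individual base URLs before issuing requests; the class name and property value are made up for the example:
```java
// Illustrative only: split a comma-separated Schema Registry endpoint list
// (the format most Kafka tooling accepts) into individual base URLs, instead of
// sending requests to the raw concatenated string. Hostnames are examples.
import java.util.Arrays;
import java.util.List;

public class SchemaRegistryUrls {
  public static void main(String[] args) {
    String configured = "http://host1.name:8081,http://host2.name:8081,http://host3.name:8081";

    List<String> urls = Arrays.asList(configured.split(","));
    String firstUrl = urls.get(0); // a real client would also fail over to the other hosts

    System.out.println(firstUrl + "/subjects"); // http://host1.name:8081/subjects
  }
}
```
The linked fix takes the same direction: the configured string is split on commas and a single concrete URL is used per request.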
**Set up**
Docker
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Have clustered SR
2. Use standard endpoint to clustered SR in configuration
**Expected behavior**
Works
**Actual behavior**
```
kafka_kafka-ui.1.iqy3stcgghfl@redacted | 03:37:06.271 [reactor-http-epoll-8] ERROR org.springframework.boot.autoconfigure.web.reactive.error.AbstractErrorWebExceptionHandler - [66f50769] 500 Server Error for HTTP GET "/api/clusters/cluster/schemas"
kafka_kafka-ui.1.iqy3stcgghfl@redacted | org.springframework.web.reactive.function.client.WebClientResponseException$NotFound: 404 Not Found from GET http://host1.name:8081,http://host2.name:8081,http://host3.name:8081/subjects
kafka_kafka-ui.1.iqy3stcgghfl@redacted | at org.springframework.web.reactive.function.client.WebClientResponseException.create(WebClientResponseException.java:185) ~[spring-webflux-5.2.3.RELEASE.jar!/:5.2.3.RELEASE]
kafka_kafka-ui.1.iqy3stcgghfl@redacted | Suppressed: reactor.core.publisher.FluxOnAssembly$OnAssemblyException:
kafka_kafka-ui.1.iqy3stcgghfl@redacted | Error has been observed at the following site(s):
kafka_kafka-ui.1.iqy3stcgghfl@redacted | |_ checkpoint ⇢ 404 from GET http://host1.name:8081,http://host2.name:8081,http://host3.name:8081/subjects [DefaultWebClient]
kafka_kafka-ui.1.iqy3stcgghfl@redacted | |_ checkpoint ⇢ Handler com.provectus.kafka.ui.controller.SchemasController#getSchemas(String, ServerWebExchange) [DispatcherHandler]
kafka_kafka-ui.1.iqy3stcgghfl@redacted | |_ checkpoint ⇢ com.provectus.kafka.ui.config.ReadOnlyModeFilter [DefaultWebFilterChain]
kafka_kafka-ui.1.iqy3stcgghfl@redacted | |_ checkpoint ⇢ com.provectus.kafka.ui.config.CustomWebFilter [DefaultWebFilterChain]
kafka_kafka-ui.1.iqy3stcgghfl@redacted | |_ checkpoint ⇢ org.springframework.security.web.server.authorization.AuthorizationWebFilter [DefaultWebFilterChain]
kafka_kafka-ui.1.iqy3stcgghfl@redacted | |_ checkpoint ⇢ org.springframework.security.web.server.authorization.ExceptionTranslationWebFilter [DefaultWebFilterChain]
kafka_kafka-ui.1.iqy3stcgghfl@redacted | |_ checkpoint ⇢ org.springframework.security.web.server.authentication.logout.LogoutWebFilter [DefaultWebFilterChain]
kafka_kafka-ui.1.iqy3stcgghfl@redacted | |_ checkpoint ⇢ org.springframework.security.web.server.savedrequest.ServerRequestCacheWebFilter [DefaultWebFilterChain]
kafka_kafka-ui.1.iqy3stcgghfl@redacted | |_ checkpoint ⇢ org.springframework.security.web.server.context.SecurityContextServerWebExchangeWebFilter [DefaultWebFilterChain]
kafka_kafka-ui.1.iqy3stcgghfl@redacted | |_ checkpoint ⇢ org.springframework.security.web.server.context.ReactorContextWebFilter [DefaultWebFilterChain]
kafka_kafka-ui.1.iqy3stcgghfl@redacted | |_ checkpoint ⇢ org.springframework.security.web.server.header.HttpHeaderWriterWebFilter [DefaultWebFilterChain]
kafka_kafka-ui.1.iqy3stcgghfl@redacted | |_ checkpoint ⇢ org.springframework.security.config.web.server.ServerHttpSecurity$ServerWebExchangeReactorContextWebFilter [DefaultWebFilterChain]
kafka_kafka-ui.1.iqy3stcgghfl@redacted | |_ checkpoint ⇢ org.springframework.security.web.server.WebFilterChainProxy [DefaultWebFilterChain]
kafka_kafka-ui.1.iqy3stcgghfl@redacted | |_ checkpoint ⇢ HTTP GET "/api/clusters/cluster/schemas" [ExceptionHandlingWebHandler]
```
| [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalSchemaRegistry.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalSchemaRegistry.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java
index 99efa810d3d..3c53490e251 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java
@@ -33,6 +33,8 @@
import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityLevel;
import java.math.BigDecimal;
import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
@@ -101,7 +103,11 @@ default InternalSchemaRegistry setSchemaRegistry(ClustersProperties.Cluster clus
InternalSchemaRegistry.InternalSchemaRegistryBuilder internalSchemaRegistry =
InternalSchemaRegistry.builder();
- internalSchemaRegistry.url(clusterProperties.getSchemaRegistry());
+ internalSchemaRegistry.url(
+ clusterProperties.getSchemaRegistry() != null
+ ? Arrays.asList(clusterProperties.getSchemaRegistry().split(","))
+ : Collections.emptyList()
+ );
if (clusterProperties.getSchemaRegistryAuth() != null) {
internalSchemaRegistry.username(clusterProperties.getSchemaRegistryAuth().getUsername());
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalSchemaRegistry.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalSchemaRegistry.java
index 378f2706f58..0606579d78f 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalSchemaRegistry.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalSchemaRegistry.java
@@ -1,5 +1,6 @@
package com.provectus.kafka.ui.model;
+import java.util.List;
import lombok.Builder;
import lombok.Data;
@@ -8,5 +9,10 @@
public class InternalSchemaRegistry {
private final String username;
private final String password;
- private final String url;
+ private final List<String> url;
+
+ public String getFirstUrl() {
+ return url != null && !url.isEmpty() ? url.iterator().next() : null;
+ }
+
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java
index fd58804760e..9c218f28430 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java
@@ -90,7 +90,7 @@ private static SchemaRegistryClient createSchemaRegistryClient(KafkaCluster clus
"You specified password but do not specified username");
}
return new CachedSchemaRegistryClient(
- Collections.singletonList(cluster.getSchemaRegistry().getUrl()),
+ cluster.getSchemaRegistry().getUrl(),
CLIENT_IDENTITY_MAP_CAPACITY,
schemaProviders,
configs
@@ -218,7 +218,7 @@ public TopicMessageSchema getTopicSchema(String topic) {
private String convertSchema(SchemaMetadata schema) {
String jsonSchema;
- URI basePath = new URI(cluster.getSchemaRegistry().getUrl())
+ URI basePath = new URI(cluster.getSchemaRegistry().getFirstUrl())
.resolve(Integer.toString(schema.getId()));
final ParsedSchema schemaById = Objects.requireNonNull(schemaRegistryClient)
.getSchemaById(schema.getId());
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java
index 611d6eedf42..c88394b5581 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java
@@ -343,8 +343,9 @@ private WebClient.RequestBodySpec configuredWebClient(KafkaCluster cluster, Http
private WebClient.RequestBodySpec configuredWebClient(InternalSchemaRegistry schemaRegistry,
HttpMethod method, String uri,
Object... params) {
- return webClient.method(method)
- .uri(schemaRegistry.getUrl() + uri, params)
+ return webClient
+ .method(method)
+ .uri(schemaRegistry.getFirstUrl() + uri, params)
.headers(headers -> setBasicAuthIfEnabled(schemaRegistry, headers));
}
}
| null | test | train | 2021-09-16T12:53:31 | "2021-09-17T03:49:36Z" | akamensky | train |
provectus/kafka-ui/899_902 | provectus/kafka-ui | provectus/kafka-ui/899 | provectus/kafka-ui/902 | [
"connected"
] | 0a2316c51557c306dbc9f429eb631468a64830b8 | 928dbd7a77dcb4548da9d0fb13c27ee8fd5222f8 | [
"@Schiavi91. Thx for creating this issue. Could you please share your UI for Apache Kafka config, topic name, and schema names for value/key. ",
"I used docker-compose with this configuration:\r\n```\r\nkafka-ui:\r\n image: provectuslabs/kafka-ui:master\r\n hostname: kafka-ui\r\n container_name: kafka-ui\r\n depends_on:\r\n - zookeeper\r\n - broker\r\n - schema-registry\r\n ports:\r\n - \"8080:8080\"\r\n restart: always\r\n environment:\r\n KAFKA_CLUSTERS_0_NAME: \"local\"\r\n KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: \"broker:29092\"\r\n KAFKA_CLUSTERS_0_ZOOKEEPER: \"zookeeper:2181\"\r\n KAFKA_CLUSTERS_0_SCHEMAREGISTRY: \"http://schema-registry:8081\"\r\n networks:\r\n - my-net\r\n```\r\nThe topic name is \"my-topic\"\r\nThe schema name of the key is \"my-topic-key\"\r\nThe schema name of the value is \"my-topic-value\"",
"Thanks for sharing. You mentioned that there are magic key bytes in responses from sever. Could you please share your server response for this topic?",
"Sure, the server response is the following:\r\n```\r\n{\r\n \"type\": \"MESSAGE\",\r\n \"message\": {\r\n \"partition\": 0,\r\n \"offset\": 0,\r\n \"timestamp\": \"2021-09-21T13:13:17.837Z\",\r\n \"timestampType\": \"CREATE_TIME\",\r\n \"key\": \"\\u0000\\u0000\\u0000\\u0000\\u0001\\u0006asd\",\r\n \"headers\": {},\r\n \"content\": \"{\\\"number\\\":123}\",\r\n \"keyFormat\": \"UNKNOWN\",\r\n \"valueFormat\": \"AVRO\",\r\n \"keySize\": 9,\r\n \"valueSize\": 7,\r\n \"keySchemaId\": null,\r\n \"valueSchemaId\": \"2\",\r\n \"headersSize\": 0\r\n },\r\n \"phase\": null,\r\n \"consuming\": null\r\n}\r\n```\r\nWhile in the frontend I see this:\r\n",
"@Schiavi91, currently we are finding deserializer(both for key and value) on the first topic message read. After that, it is cached and will be used for all messages in the topic.\r\nIn your case for some reason we assigned default (String based) deserializer to topic's key. Maybe key schema was added after first message was written? \r\nCan you please try to restart kafka-ui and try to read topic again?",
"Hi @iliax, thanks for the reply. I first created the schemas for key and value then I generated the first message, otherwise I would not have known the id to use to serialize the key.\r\nI tried to restart kafka-ui and read topic again but the result is the same.",
"@Schiavi91 can you please share schema you created for key?",
"\"string\"\r\n\r\nThe call to the schema registry is the following:\r\n```\r\ncurl --location --request POST 'http://localhost:8081/subjects/my-topic-key/versions/' --header 'Accept: application/json' --header 'Content-Type: application/json' --data-raw '{ \"schema\": \"\\\"string\\\"\"}'\r\n```",
"@Schiavi91 we found and fixed a bug with avro deserilization with primitive type schemas. Can you please check?",
"@iliax I just checked and now it works.\r\nGiven the change made, I noticed two other things that might make sense to check:\r\n1. in the \"key\" field there are the quotation marks that could be removed (inside the red circle)\r\n2. in the message detail window there is a colon symbol before the key (inside the blue circle), is this the desired behavior? Or should it be removed?\r\n\r\n",
"@Schiavi91, we return value in json representation, this is why quotes are present. As for colon - I will talk with UI devs, to fix this. ",
"@Schiavi91 I discussed display with UI devs - this is expected for UI component we currently use. But I agree that it looks strange, so we will discuss how we can improve it in next release (0.3)",
"Ok thanks @iliax, i think the issue can be closed."
] | [] | "2021-09-23T11:43:15Z" | [
"type/bug",
"scope/backend"
] | Hide key magic bytes | ### Is your proposal related to a problem?
I see the magic bytes when the key is deserialized by AVRO and I would like to hide them.
I saw the pull request for issue #515, but I noticed that the magic bytes are still displayed in the key. Is this the desired behavior? Is it possible to hide them?
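For background, the "magic bytes" come from the Confluent wire format: the serializer prefixes every Avro payload with a magic byte (0x0) and a big-endian 4-byte schema id. A minimal sketch of that layout (the byte values are illustrative and happen to mirror the example key discussed above):
```java
// Illustrative only: the Confluent wire format puts a magic byte (0x0) and a
// big-endian 4-byte schema id in front of the Avro-encoded payload. A
// schema-registry-aware deserializer strips this prefix before decoding.
import java.nio.ByteBuffer;

public class WireFormatExample {
  public static void main(String[] args) {
    // magic byte, schema id = 1, then the Avro payload (here the string "asd")
    byte[] record = new byte[] {0x0, 0x0, 0x0, 0x0, 0x1, 0x6, 'a', 's', 'd'};

    ByteBuffer buffer = ByteBuffer.wrap(record);
    byte magicByte = buffer.get();   // always 0x0 in this format
    int schemaId = buffer.getInt();  // id of the schema in the Schema Registry

    byte[] avroPayload = new byte[buffer.remaining()];
    buffer.get(avroPayload);

    System.out.println("magic=" + magicByte + ", schemaId=" + schemaId
        + ", payloadBytes=" + avroPayload.length);
  }
}
```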
### Describe the solution you'd like
Add a setting to hide magic bytes.
### Describe alternatives you've considered
Remove magic bytes. | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/AvroMessageFormatter.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/AvroMessageFormatter.java"
] | [
"kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java"
] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/AvroMessageFormatter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/AvroMessageFormatter.java
index 817ade0fb51..e69d522ea03 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/AvroMessageFormatter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/AvroMessageFormatter.java
@@ -4,7 +4,6 @@
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.serializers.KafkaAvroDeserializer;
import lombok.SneakyThrows;
-import org.apache.avro.generic.GenericRecord;
public class AvroMessageFormatter implements MessageFormatter {
private final KafkaAvroDeserializer avroDeserializer;
@@ -16,8 +15,10 @@ public AvroMessageFormatter(SchemaRegistryClient client) {
@Override
@SneakyThrows
public String format(String topic, byte[] value) {
- GenericRecord avroRecord = (GenericRecord) avroDeserializer.deserialize(topic, value);
- byte[] jsonBytes = AvroSchemaUtils.toJson(avroRecord);
+ // deserialized will have type, that depends on schema type (record or primitive),
+ // AvroSchemaUtils.toJson(...) method will take it into account
+ Object deserialized = avroDeserializer.deserialize(topic, value);
+ byte[] jsonBytes = AvroSchemaUtils.toJson(deserialized);
return new String(jsonBytes);
}
| diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java
index 7bd16af2725..3b578a6a892 100644
--- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java
+++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java
@@ -65,6 +65,13 @@ public class SendAndReadTests extends AbstractBaseTest {
+ "}"
);
+ private static final AvroSchema AVRO_SCHEMA_PRIMITIVE_STRING =
+ new AvroSchema("{ \"type\": \"string\" }");
+
+ private static final AvroSchema AVRO_SCHEMA_PRIMITIVE_INT =
+ new AvroSchema("{ \"type\": \"int\" }");
+
+
private static final String AVRO_SCHEMA_1_JSON_RECORD
= "{ \"field1\":\"testStr\", \"field2\": 123 }";
@@ -187,6 +194,22 @@ void noSchemaValueIsNull() {
});
}
+ @Test
+ void primitiveAvroSchemas() {
+ new SendAndReadSpec()
+ .withKeySchema(AVRO_SCHEMA_PRIMITIVE_STRING)
+ .withValueSchema(AVRO_SCHEMA_PRIMITIVE_INT)
+ .withMsgToSend(
+ new CreateTopicMessage()
+ .key("\"some string\"")
+ .content("123")
+ )
+ .doAssert(polled -> {
+ assertThat(polled.getKey()).isEqualTo("\"some string\"");
+ assertThat(polled.getContent()).isEqualTo("123");
+ });
+ }
+
@Test
void nonNullableKvWithAvroSchema() {
new SendAndReadSpec()
| val | train | 2021-09-18T08:40:19 | "2021-09-20T14:44:57Z" | Schiavi91 | train |
provectus/kafka-ui/904_913 | provectus/kafka-ui | provectus/kafka-ui/904 | provectus/kafka-ui/913 | [
"connected"
] | 0bf8db5e527655f23c11d219acf736ca4b7ffc4d | fe0294798cc5922645c59b6fa6fbdd6ac401a756 | [
"@metalshanked. Thanks for creating this issue. Could you please provide more information? \r\nWhat is your config for UI for Apache Kafka? \r\nWhat is the serializing format for these messages?\r\nIf yes, could you provide schemas?",
"> @metalshanked. Thanks for creating this issue. Could you please provide more information?\r\n> What is your config for UI for Apache Kafka?\r\n> What is the serializing format for these messages?\r\n> If yes, could you provide schemas?\r\n\r\nSure, here is the info.\r\nKafka version:2.3-IV1\r\nkafka setup is SSL only with below kafka-ui configs\r\n-e KAFKA_CLUSTERS_0_NAME=dev\r\n-e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS={MY KAFKA SERVER:PORT}\r\n-e KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SSL\r\n-e KAFKA_CLUSTERS_0_PROPERTIES_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM=\r\n-e KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_LOCATION={MY JKS DIRECTORY}\r\n-e KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_PASSWORD={PASSWORD}\r\n-e KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_LOCATION={MY JKS DIRECTORY}\r\n-e KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_PASSWORD={PASSWORD}\r\n-e KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEY_PASSWORD={PASSWORD}\r\n\r\nThe kafka messages are plaintext with with event per message. like log events.\r\nI seem to able to view everything else apart from the browsing the messages.\r\nI dont have schema registry setup\r\n\r\nIs there any special config required to view/browse messages apart from the above?",
"it seems that when deployed, the request path is not correct:\r\n\r\nconsumer groups (work)\r\n\r\n```\r\nhttps://sub.domain.com/my/special/path/api/clusters/kafka-shared/topics/my-topic/consumer-groups\r\n```\r\n\r\nmessages - original request made by kafka ui (doesn't work):\r\n\r\n```\r\nhttps://sub.domain.com/api/clusters/kafka-shared/topics/my-topic/messages?attempt=0&limit=100&seekDirection=FORWARD&seekType=OFFSET&seekTo=0::6\r\n```\r\n\r\nmessages - manually rewritten in new browser tab (works):\r\n\r\n```\r\nhttps://sub.domain.com/my/special/path/api/clusters/kafka-shared/topics/my-topic/messages?attempt=0&limit=100&seekDirection=FORWARD&seekType=OFFSET&seekTo=0::6\r\n```\r\n\r\nThe `/my/special/path` part is missing. \r\n\r\nI just can't find where is the problem, whether in React or Java code.",
"@zuzana Thanks for digging into this."
] | [] | "2021-09-28T09:33:12Z" | [
"type/bug",
"scope/frontend"
] | Cannot view/browse messages | **Describe the bug**
I set up an SSL-enabled cluster and I can see most of the info except browsing/viewing messages.
I tested it with Kafdrop and I can see the messages, but no matter what offsets or settings I enter, kafka-ui shows "no messages found" for the same topic.
**Set up**
Docker provectuslabs/kafka-ui:master version : v0.2.1-SNAPSHOT(928dbd7)
**Screenshots**

| [
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/Filters/Filters.tsx"
] | [
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/Filters/Filters.tsx"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/Filters/Filters.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/Filters/Filters.tsx
index bdb38d8cd83..962b06132ba 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/Filters/Filters.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/Filters/Filters.tsx
@@ -17,6 +17,7 @@ import MultiSelect from 'react-multi-select-component';
import { Option } from 'react-multi-select-component/dist/lib/interfaces';
import BytesFormatted from 'components/common/BytesFormatted/BytesFormatted';
import { TopicName, ClusterName } from 'redux/interfaces';
+import { BASE_PARAMS } from 'lib/constants';
import {
filterOptions,
@@ -172,7 +173,7 @@ const Filters: React.FC<FiltersProps> = ({
// eslint-disable-next-line consistent-return
React.useEffect(() => {
if (location.search.length !== 0) {
- const url = `/api/clusters/${clusterName}/topics/${topicName}/messages${location.search}`;
+ const url = `${BASE_PARAMS.basePath}/api/clusters/${clusterName}/topics/${topicName}/messages${location.search}`;
const sse = new EventSource(url);
source.current = sse;
| null | train | train | 2021-09-25T13:39:14 | "2021-09-25T00:17:14Z" | metalshanked | train |
provectus/kafka-ui/861_917 | provectus/kafka-ui | provectus/kafka-ui/861 | provectus/kafka-ui/917 | [
"connected"
] | fe0294798cc5922645c59b6fa6fbdd6ac401a756 | 683ab231f436893b364a44d2fcff700f4ccbbbce | [
"Hi, @asatsi thx for creating this issue. We'll try to fix it in the next minor version. "
] | [
"0. name suggestion EmptyPathAwareRedirectStrategy\r\n1. please add a comment why we need this class\r\n2. maybe extend DefaultServerRedirectStrategy with sendRedirect method override?"
] | "2021-09-30T21:24:59Z" | [
"type/bug",
"scope/backend"
] | Simple login doesn't work with SERVER_SERVLET_CONTEXT_PATH | **Describe the bug**
I have set up SERVER_SERVLET_CONTEXT_PATH as part of the deployment in Kubernetes. The ingress rule uses this URI. Everything works well if the Spring auth (AUTH_ENABLED) is not set. Once I enable auth, the URL takes me to the default /login page instead of the $SERVER_SERVLET_CONTEXT_PATH/login page and fails to load.
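For illustration, here is a minimal sketch of a redirect strategy that prefixes the servlet context path, so a successful login lands under $SERVER_SERVLET_CONTEXT_PATH instead of the root; the class name is made up and this only mirrors the general idea of the linked fix:
```java
// Illustrative only: a redirect strategy that prefixes the servlet context path,
// so a successful login lands under the configured context path instead of "/".
// The class name is made up; this only mirrors the general idea of the fix below.
import java.net.URI;
import org.springframework.http.HttpStatus;
import org.springframework.http.server.reactive.ServerHttpResponse;
import org.springframework.security.web.server.ServerRedirectStrategy;
import org.springframework.web.server.ServerWebExchange;
import reactor.core.publisher.Mono;

class ContextPathRedirectStrategy implements ServerRedirectStrategy {
  @Override
  public Mono<Void> sendRedirect(ServerWebExchange exchange, URI location) {
    return Mono.fromRunnable(() -> {
      String contextPath = exchange.getRequest().getPath().contextPath().value();
      ServerHttpResponse response = exchange.getResponse();
      response.setStatusCode(HttpStatus.FOUND);
      response.getHeaders().setLocation(URI.create(contextPath + location.toASCIIString()));
    });
  }
}
```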
| [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/Config.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CorsGlobalConfiguration.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CustomWebFilter.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/OAuthSecurityConfig.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ReadOnlyModeFilter.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/StaticController.java"
] | [
"docker/kafka-ui-auth-context.yaml",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/Config.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CorsGlobalConfiguration.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CustomWebFilter.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/OAuthSecurityConfig.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ReadOnlyModeFilter.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AuthController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/StaticController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/EmptyRedirectStrategy.java"
] | [] | diff --git a/docker/kafka-ui-auth-context.yaml b/docker/kafka-ui-auth-context.yaml
new file mode 100644
index 00000000000..77d6fea4866
--- /dev/null
+++ b/docker/kafka-ui-auth-context.yaml
@@ -0,0 +1,59 @@
+---
+version: '2'
+services:
+
+ kafka-ui:
+ container_name: kafka-ui
+ image: provectuslabs/kafka-ui:latest
+ ports:
+ - 8080:8080
+ depends_on:
+ - zookeeper0
+ - kafka0
+ environment:
+ KAFKA_CLUSTERS_0_NAME: local
+ KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
+ KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
+ KAFKA_CLUSTERS_0_JMXPORT: 9997
+ SERVER_SERVLET_CONTEXT_PATH: /kafkaui
+ AUTH_ENABLED: "true"
+ SPRING_SECURITY_USER_NAME: admin
+ SPRING_SECURITY_USER_PASSWORD: pass
+
+ zookeeper0:
+ image: confluentinc/cp-zookeeper:5.2.4
+ environment:
+ ZOOKEEPER_CLIENT_PORT: 2181
+ ZOOKEEPER_TICK_TIME: 2000
+ ports:
+ - 2181:2181
+
+ kafka0:
+ image: confluentinc/cp-kafka:5.2.4
+ depends_on:
+ - zookeeper0
+ ports:
+ - 9092:9092
+ - 9997:9997
+ environment:
+ KAFKA_BROKER_ID: 1
+ KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
+ KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
+ KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+ JMX_PORT: 9997
+ KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
+
+ kafka-init-topics:
+ image: confluentinc/cp-kafka:5.2.4
+ volumes:
+ - ./message.json:/data/message.json
+ depends_on:
+ - kafka0
+ command: "bash -c 'echo Waiting for Kafka to be ready... && \
+ cub kafka-ready -b kafka0:29092 1 30 && \
+ kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
+ kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
+ kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
+ kafka-console-producer --broker-list kafka0:29092 -topic second.users < /data/message.json'"
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/Config.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/Config.java
index e9d897cf43b..aae652cd08a 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/Config.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/Config.java
@@ -2,20 +2,53 @@
import com.provectus.kafka.ui.model.JmxConnectionInfo;
import com.provectus.kafka.ui.util.JmxPoolFactory;
+import java.util.Collections;
+import java.util.Map;
import javax.management.remote.JMXConnector;
+import lombok.AllArgsConstructor;
import org.apache.commons.pool2.KeyedObjectPool;
import org.apache.commons.pool2.impl.GenericKeyedObjectPool;
import org.apache.commons.pool2.impl.GenericKeyedObjectPoolConfig;
+import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.autoconfigure.web.ServerProperties;
+import org.springframework.boot.autoconfigure.web.reactive.WebFluxProperties;
+import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
+import org.springframework.http.server.reactive.ContextPathCompositeHandler;
+import org.springframework.http.server.reactive.HttpHandler;
import org.springframework.jmx.export.MBeanExporter;
+import org.springframework.util.StringUtils;
import org.springframework.util.unit.DataSize;
import org.springframework.web.reactive.function.client.WebClient;
+import org.springframework.web.server.adapter.WebHttpHandlerBuilder;
@Configuration
+@AllArgsConstructor
public class Config {
+ private final ApplicationContext applicationContext;
+
+ private final ServerProperties serverProperties;
+
+ @Bean
+ public HttpHandler httpHandler(ObjectProvider<WebFluxProperties> propsProvider) {
+
+ final String basePath = serverProperties.getServlet().getContextPath();
+
+ HttpHandler httpHandler = WebHttpHandlerBuilder
+ .applicationContext(this.applicationContext).build();
+
+ if (StringUtils.hasText(basePath)) {
+ Map<String, HttpHandler> handlersMap =
+ Collections.singletonMap(basePath, httpHandler);
+ return new ContextPathCompositeHandler(handlersMap);
+ }
+ return httpHandler;
+ }
+
+
@Bean
public KeyedObjectPool<JmxConnectionInfo, JMXConnector> pool() {
var pool = new GenericKeyedObjectPool<>(new JmxPoolFactory());
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CorsGlobalConfiguration.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CorsGlobalConfiguration.java
index 6e46b607bcb..0128110ab72 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CorsGlobalConfiguration.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CorsGlobalConfiguration.java
@@ -1,45 +1,58 @@
package com.provectus.kafka.ui.config;
+import lombok.AllArgsConstructor;
+import org.springframework.boot.autoconfigure.web.ServerProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Profile;
import org.springframework.core.io.ClassPathResource;
+import org.springframework.util.StringUtils;
import org.springframework.web.reactive.config.CorsRegistry;
-import org.springframework.web.reactive.config.EnableWebFlux;
import org.springframework.web.reactive.config.WebFluxConfigurer;
import org.springframework.web.reactive.function.server.RouterFunction;
import org.springframework.web.reactive.function.server.RouterFunctions;
import org.springframework.web.reactive.function.server.ServerResponse;
@Configuration
-@EnableWebFlux
@Profile("local")
+@AllArgsConstructor
public class CorsGlobalConfiguration implements WebFluxConfigurer {
+ private final ServerProperties serverProperties;
+
@Override
public void addCorsMappings(CorsRegistry registry) {
registry.addMapping("/**")
.allowedOrigins("*")
.allowedMethods("*")
.allowedHeaders("*")
- .allowCredentials(true);
+ .allowCredentials(false);
+ }
+
+ private String withContext(String pattern) {
+ final String basePath = serverProperties.getServlet().getContextPath();
+ if (StringUtils.hasText(basePath)) {
+ return basePath + pattern;
+ } else {
+ return pattern;
+ }
}
@Bean
public RouterFunction<ServerResponse> cssFilesRouter() {
return RouterFunctions
- .resources("/static/css/**", new ClassPathResource("static/static/css/"));
+ .resources(withContext("/static/css/**"), new ClassPathResource("static/static/css/"));
}
@Bean
public RouterFunction<ServerResponse> jsFilesRouter() {
return RouterFunctions
- .resources("/static/js/**", new ClassPathResource("static/static/js/"));
+ .resources(withContext("/static/js/**"), new ClassPathResource("static/static/js/"));
}
@Bean
public RouterFunction<ServerResponse> mediaFilesRouter() {
return RouterFunctions
- .resources("/static/media/**", new ClassPathResource("static/static/media/"));
+ .resources(withContext("/static/media/**"), new ClassPathResource("static/static/media/"));
}
}
\ No newline at end of file
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CustomWebFilter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CustomWebFilter.java
index 6dce3b5e012..a74a3fa6a16 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CustomWebFilter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CustomWebFilter.java
@@ -1,6 +1,5 @@
package com.provectus.kafka.ui.config;
-import org.springframework.boot.autoconfigure.web.ServerProperties;
import org.springframework.stereotype.Component;
import org.springframework.web.server.ServerWebExchange;
import org.springframework.web.server.WebFilter;
@@ -8,32 +7,25 @@
import reactor.core.publisher.Mono;
@Component
-
public class CustomWebFilter implements WebFilter {
- private final ServerProperties serverProperties;
-
- public CustomWebFilter(ServerProperties serverProperties) {
- this.serverProperties = serverProperties;
- }
-
@Override
public Mono<Void> filter(ServerWebExchange exchange, WebFilterChain chain) {
- String contextPath = serverProperties.getServlet().getContextPath() != null
- ? serverProperties.getServlet().getContextPath() : "";
- final String path = exchange.getRequest().getURI().getPath().replaceAll("/$", "");
- if (path.equals(contextPath) || path.startsWith(contextPath + "/ui")) {
- return chain.filter(
- exchange.mutate().request(exchange.getRequest().mutate().path("/index.html").build())
- .build()
- );
- } else if (path.startsWith(contextPath)) {
+ final String basePath = exchange.getRequest().getPath().contextPath().value();
+
+ final String path = exchange.getRequest().getPath().pathWithinApplication().value();
+
+ if (path.startsWith("/ui") || path.equals("/")) {
return chain.filter(
- exchange.mutate().request(exchange.getRequest().mutate().contextPath(contextPath).build())
- .build()
+ exchange.mutate().request(
+ exchange.getRequest().mutate()
+ .path(basePath + "/index.html")
+ .contextPath(basePath)
+ .build()
+ ).build()
);
- }
+ }
return chain.filter(exchange);
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/OAuthSecurityConfig.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/OAuthSecurityConfig.java
index 3e8bcd478f2..4d1a16860fb 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/OAuthSecurityConfig.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/OAuthSecurityConfig.java
@@ -1,6 +1,7 @@
package com.provectus.kafka.ui.config;
-import org.springframework.beans.factory.annotation.Autowired;
+import com.provectus.kafka.ui.util.EmptyRedirectStrategy;
+import lombok.AllArgsConstructor;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Bean;
@@ -8,18 +9,20 @@
import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
import org.springframework.security.config.web.server.ServerHttpSecurity;
import org.springframework.security.web.server.SecurityWebFilterChain;
+import org.springframework.security.web.server.authentication.RedirectServerAuthenticationSuccessHandler;
import org.springframework.util.ClassUtils;
@Configuration
@EnableWebFluxSecurity
@ConditionalOnProperty(value = "auth.enabled", havingValue = "true")
+@AllArgsConstructor
public class OAuthSecurityConfig {
public static final String REACTIVE_CLIENT_REGISTRATION_REPOSITORY_CLASSNAME =
"org.springframework.security.oauth2.client.registration."
+ "ReactiveClientRegistrationRepository";
- private static final boolean isOAuth2Present = ClassUtils.isPresent(
+ private static final boolean IS_OAUTH2_PRESENT = ClassUtils.isPresent(
REACTIVE_CLIENT_REGISTRATION_REPOSITORY_CLASSNAME,
OAuthSecurityConfig.class.getClassLoader()
);
@@ -31,37 +34,45 @@ public class OAuthSecurityConfig {
"/resources/**",
"/actuator/health",
"/actuator/info",
+ "/auth",
"/login",
"/logout",
"/oauth2/**"
};
- @Autowired
- ApplicationContext context;
+ private final ApplicationContext context;
@Bean
public SecurityWebFilterChain configure(ServerHttpSecurity http) {
http.authorizeExchange()
- .pathMatchers(AUTH_WHITELIST).permitAll()
+ .pathMatchers(
+ AUTH_WHITELIST
+ ).permitAll()
.anyExchange()
.authenticated();
- if (isOAuth2Present && OAuth2ClasspathGuard.shouldConfigure(this.context)) {
+ if (IS_OAUTH2_PRESENT && OAuth2ClasspathGuard.shouldConfigure(this.context)) {
OAuth2ClasspathGuard.configure(this.context, http);
} else {
+ final RedirectServerAuthenticationSuccessHandler handler =
+ new RedirectServerAuthenticationSuccessHandler();
+ handler.setRedirectStrategy(new EmptyRedirectStrategy());
+
http
.httpBasic().and()
- .formLogin();
+ .formLogin()
+ .loginPage("/auth")
+ .authenticationSuccessHandler(handler);
}
- SecurityWebFilterChain result = http.csrf().disable().build();
- return result;
+ return http.csrf().disable().build();
}
private static class OAuth2ClasspathGuard {
static void configure(ApplicationContext context, ServerHttpSecurity http) {
http
- .oauth2Login().and()
+ .oauth2Login()
+ .and()
.oauth2Client();
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ReadOnlyModeFilter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ReadOnlyModeFilter.java
index 35e0f8397bf..b998748fccb 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ReadOnlyModeFilter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ReadOnlyModeFilter.java
@@ -31,7 +31,7 @@ public Mono<Void> filter(ServerWebExchange exchange, @NotNull WebFilterChain cha
return chain.filter(exchange);
}
- var path = exchange.getRequest().getURI().getPath();
+ var path = exchange.getRequest().getPath().pathWithinApplication().value();
var matcher = CLUSTER_NAME_REGEX.matcher(path);
if (!matcher.find()) {
return chain.filter(exchange);
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AuthController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AuthController.java
new file mode 100644
index 00000000000..b847e58f990
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AuthController.java
@@ -0,0 +1,100 @@
+package com.provectus.kafka.ui.controller;
+
+import java.nio.charset.Charset;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.log4j.Log4j2;
+import org.springframework.security.web.server.csrf.CsrfToken;
+import org.springframework.util.MultiValueMap;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.RestController;
+import org.springframework.web.server.ServerWebExchange;
+import reactor.core.publisher.Mono;
+
+@RestController
+@RequiredArgsConstructor
+@Log4j2
+public class AuthController {
+
+ @GetMapping(value = "/auth", produces = { "text/html" })
+ private Mono<byte[]> getAuth(ServerWebExchange exchange) {
+ Mono<CsrfToken> token = exchange.getAttributeOrDefault(CsrfToken.class.getName(), Mono.empty());
+ return token
+ .map(AuthController::csrfToken)
+ .defaultIfEmpty("")
+ .map(csrfTokenHtmlInput -> createPage(exchange, csrfTokenHtmlInput));
+ }
+
+ private byte[] createPage(ServerWebExchange exchange, String csrfTokenHtmlInput) {
+ MultiValueMap<String, String> queryParams = exchange.getRequest()
+ .getQueryParams();
+ String contextPath = exchange.getRequest().getPath().contextPath().value();
+ String page =
+ "<!DOCTYPE html>\n" + "<html lang=\"en\">\n" + " <head>\n"
+ + " <meta charset=\"utf-8\">\n"
+ + " <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, "
+ + "shrink-to-fit=no\">\n"
+ + " <meta name=\"description\" content=\"\">\n"
+ + " <meta name=\"author\" content=\"\">\n"
+ + " <title>Please sign in</title>\n"
+ + " <link href=\"https://maxcdn.bootstrapcdn.com/bootstrap/"
+ + "4.0.0-beta/css/bootstrap.min.css\" rel=\"stylesheet\" "
+ + "integrity=\"sha384-/Y6pD6FV/Vv2HJnA6t+vslU6fwYXjCFtcEpHbNJ0lyAFsXTsjBbfaDjzALeQsN6M\" "
+ + "crossorigin=\"anonymous\">\n"
+ + " <link href=\"https://getbootstrap.com/docs/4.0/examples/signin/signin.css\" "
+ + "rel=\"stylesheet\" crossorigin=\"anonymous\"/>\n"
+ + " </head>\n"
+ + " <body>\n"
+ + " <div class=\"container\">\n"
+ + formLogin(queryParams, contextPath, csrfTokenHtmlInput)
+ + " </div>\n"
+ + " </body>\n"
+ + "</html>";
+
+ return page.getBytes(Charset.defaultCharset());
+ }
+
+ private String formLogin(
+ MultiValueMap<String, String> queryParams,
+ String contextPath, String csrfTokenHtmlInput) {
+
+ boolean isError = queryParams.containsKey("error");
+ boolean isLogoutSuccess = queryParams.containsKey("logout");
+ return
+ " <form class=\"form-signin\" method=\"post\" action=\"" + contextPath + "/auth\">\n"
+ + " <h2 class=\"form-signin-heading\">Please sign in</h2>\n"
+ + createError(isError)
+ + createLogoutSuccess(isLogoutSuccess)
+ + " <p>\n"
+ + " <label for=\"username\" class=\"sr-only\">Username</label>\n"
+ + " <input type=\"text\" id=\"username\" name=\"username\" class=\"form-control\" "
+ + "placeholder=\"Username\" required autofocus>\n"
+ + " </p>\n" + " <p>\n"
+ + " <label for=\"password\" class=\"sr-only\">Password</label>\n"
+ + " <input type=\"password\" id=\"password\" name=\"password\" "
+ + "class=\"form-control\" placeholder=\"Password\" required>\n"
+ + " </p>\n" + csrfTokenHtmlInput
+ + " <button class=\"btn btn-lg btn-primary btn-block\" "
+ + "type=\"submit\">Sign in</button>\n"
+ + " </form>\n";
+ }
+
+ private static String csrfToken(CsrfToken token) {
+ return " <input type=\"hidden\" name=\""
+ + token.getParameterName()
+ + "\" value=\""
+ + token.getToken()
+ + "\">\n";
+ }
+
+ private static String createError(boolean isError) {
+ return isError
+ ? "<div class=\"alert alert-danger\" role=\"alert\">Invalid credentials</div>"
+ : "";
+ }
+
+ private static String createLogoutSuccess(boolean isLogoutSuccess) {
+ return isLogoutSuccess
+ ? "<div class=\"alert alert-success\" role=\"alert\">You have been signed out</div>"
+ : "";
+ }
+}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/StaticController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/StaticController.java
index 2b48d53a7a9..f8278701c06 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/StaticController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/StaticController.java
@@ -1,6 +1,7 @@
package com.provectus.kafka.ui.controller;
import com.provectus.kafka.ui.util.ResourceUtil;
+import java.util.Optional;
import java.util.concurrent.atomic.AtomicReference;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
@@ -11,27 +12,27 @@
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;
+import org.springframework.web.server.ServerWebExchange;
import reactor.core.publisher.Mono;
@RestController
@RequiredArgsConstructor
@Log4j2
public class StaticController {
- private final ServerProperties serverProperties;
@Value("classpath:static/index.html")
private Resource indexFile;
private final AtomicReference<String> renderedIndexFile = new AtomicReference<>();
@GetMapping(value = "/index.html", produces = { "text/html" })
- public Mono<ResponseEntity<String>> getIndex() {
- return Mono.just(ResponseEntity.ok(getRenderedIndexFile()));
+ public Mono<ResponseEntity<String>> getIndex(ServerWebExchange exchange) {
+ return Mono.just(ResponseEntity.ok(getRenderedIndexFile(exchange)));
}
- public String getRenderedIndexFile() {
+ public String getRenderedIndexFile(ServerWebExchange exchange) {
String rendered = renderedIndexFile.get();
if (rendered == null) {
- rendered = buildIndexFile();
+ rendered = buildIndexFile(exchange.getRequest().getPath().contextPath().value());
if (renderedIndexFile.compareAndSet(null, rendered)) {
return rendered;
} else {
@@ -43,9 +44,7 @@ public String getRenderedIndexFile() {
}
@SneakyThrows
- private String buildIndexFile() {
- final String contextPath = serverProperties.getServlet().getContextPath() != null
- ? serverProperties.getServlet().getContextPath() : "";
+ private String buildIndexFile(String contextPath) {
final String staticPath = contextPath + "/static";
return ResourceUtil.readAsString(indexFile)
.replace("href=\"./static", "href=\"" + staticPath)
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/EmptyRedirectStrategy.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/EmptyRedirectStrategy.java
new file mode 100644
index 00000000000..39a4a7f2eb2
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/EmptyRedirectStrategy.java
@@ -0,0 +1,50 @@
+package com.provectus.kafka.ui.util;
+
+import java.net.URI;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.server.reactive.ServerHttpResponse;
+import org.springframework.security.web.server.ServerRedirectStrategy;
+import org.springframework.util.Assert;
+import org.springframework.web.server.ServerWebExchange;
+import reactor.core.publisher.Mono;
+
+public class EmptyRedirectStrategy implements ServerRedirectStrategy {
+
+ private HttpStatus httpStatus = HttpStatus.FOUND;
+
+ private boolean contextRelative = true;
+
+ public Mono<Void> sendRedirect(ServerWebExchange exchange, URI location) {
+ Assert.notNull(exchange, "exchange cannot be null");
+ Assert.notNull(location, "location cannot be null");
+ return Mono.fromRunnable(() -> {
+ ServerHttpResponse response = exchange.getResponse();
+ response.setStatusCode(this.httpStatus);
+ response.getHeaders().setLocation(createLocation(exchange, location));
+ });
+ }
+
+ private URI createLocation(ServerWebExchange exchange, URI location) {
+ if (!this.contextRelative) {
+ return location;
+ }
+
+ String url = location.getPath().isEmpty() ? "/"
+ : location.toASCIIString();
+
+ if (url.startsWith("/")) {
+ String context = exchange.getRequest().getPath().contextPath().value();
+ return URI.create(context + url);
+ }
+ return location;
+ }
+
+ public void setHttpStatus(HttpStatus httpStatus) {
+ Assert.notNull(httpStatus, "httpStatus cannot be null");
+ this.httpStatus = httpStatus;
+ }
+
+ public void setContextRelative(boolean contextRelative) {
+ this.contextRelative = contextRelative;
+ }
+}
| null | train | train | 2021-09-29T11:25:17 | "2021-09-08T08:01:44Z" | asatsi | train |
provectus/kafka-ui/925_926 | provectus/kafka-ui | provectus/kafka-ui/925 | provectus/kafka-ui/926 | [
"timestamp(timedelta=1.0, similarity=0.8869123268721283)",
"connected"
] | 81a6564183c5bd76214ab6b68f30d14f73f85257 | 07a9528d39cd6a40e544d57bef4407ebece83a7c | [] | [] | "2021-10-04T13:50:30Z" | [
"type/bug"
] | Support delete,compact topics | **Describe the bug**
If a topic's cleanup policy is `delete,compact`, the topic list request fails.
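For illustration, a short sketch of the parsing problem: brokers may report the policy as either `compact,delete` or `delete,compact` (sometimes with spaces), so matching must be order-insensitive. The helper below is hypothetical:
```java
// Illustrative only: brokers may report the policy as "compact,delete" or
// "delete,compact" (sometimes with spaces), so parsing must be order-insensitive.
public class CleanupPolicyExample {
  public static void main(String[] args) {
    System.out.println(isCompactAndDelete("compact,delete"));  // true
    System.out.println(isCompactAndDelete("delete, compact")); // true
  }

  static boolean isCompactAndDelete(String raw) {
    String normalized = raw.replace(" ", "");
    return normalized.equals("compact,delete") || normalized.equals("delete,compact");
  }
}
```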
| [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/CleanupPolicy.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/CleanupPolicy.java"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/CleanupPolicy.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/CleanupPolicy.java
index bfbfdf9d81a..6e75c3edecb 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/CleanupPolicy.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/CleanupPolicy.java
@@ -2,27 +2,37 @@
import com.provectus.kafka.ui.exception.IllegalEntityStateException;
import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
public enum CleanupPolicy {
DELETE("delete"),
COMPACT("compact"),
- COMPACT_DELETE("compact,delete"),
+ COMPACT_DELETE(Arrays.asList("compact,delete", "delete,compact")),
UNKNOWN("unknown");
- private final String cleanUpPolicy;
+ private final List<String> cleanUpPolicy;
CleanupPolicy(String cleanUpPolicy) {
+ this(Collections.singletonList(cleanUpPolicy));
+ }
+
+ CleanupPolicy(List<String> cleanUpPolicy) {
this.cleanUpPolicy = cleanUpPolicy;
}
public String getCleanUpPolicy() {
- return cleanUpPolicy;
+ return cleanUpPolicy.get(0);
}
public static CleanupPolicy fromString(String string) {
return Arrays.stream(CleanupPolicy.values())
- .filter(v -> v.cleanUpPolicy.equals(string.replace(" ", "")))
- .findFirst()
+ .filter(v ->
+ v.cleanUpPolicy.stream().anyMatch(
+ s -> s.equals(string.replace(" ", "")
+ )
+ )
+ ).findFirst()
.orElseThrow(() ->
new IllegalEntityStateException("Unknown cleanup policy value: " + string));
}
| null | test | train | 2021-10-03T06:25:33 | "2021-10-04T13:45:55Z" | germanosin | train |
provectus/kafka-ui/937_939 | provectus/kafka-ui | provectus/kafka-ui/937 | provectus/kafka-ui/939 | [
"connected"
] | 790b2a223a3b4e6c8345c8b2d5b13924fab84288 | 9c48e5682a9974a619f6cf0cb51c5ab2b80a5336 | [
"Hi, thanks for reaching out. Have you tried looking at [this example](https://github.com/provectus/kafka-ui/blob/master/docker/kafka-ui.yaml)? It seems like the difference between this example and your configuration is that `bootstrapservers` value contains just one kafka address in our case. Could you try that please?\r\nAlso, what's the status of zookeeper on your kafka-ui instance? Is it on or off? Green or red?",
"The status of zookeeper is off (red)",
"we are getting below error while clicking in broker\r\n\r\n{\"code\":5000,\"message\":\"The broker does not support DESCRIBE_CONFIGS\",\"timestamp\":1633509870299,\"requestId\":\"712f046f\",\"fieldsErrors\":null}",
"> how can we add multiple address in bootstrapservers ?\r\n\r\nyou did it the right way (I guess).\r\n\r\n\r\n\r\n> The status of zookeeper is off (red)\r\n\r\nCould you share application (kafka-ui) logs? There seems to be an issue with that.",
"> we are getting below error while clicking in broker\r\n> \r\n> {\"code\":5000,\"message\":\"The broker does not support DESCRIBE_CONFIGS\",\"timestamp\":1633509870299,\"requestId\":\"712f046f\",\"fieldsErrors\":null}\r\n\r\nSorry missed this comment. We'll take a look into this.",
"It seems like your broker version might be very outdated. Probably the issue is that required API is still not implemented in the version you have present.\r\nIs that possible to upgrade it? Don't you use azure by any chance?\r\n\r\nAlso, could you please provide application logs with that message? I need the stacktraces.",
"Thanks for your support.\r\n\r\nUpgrading broker to latest version resolved my issue.",
"how can we secure this UI with username and password ?",
"> Thanks for your support.\r\n> \r\n> Upgrading broker to latest version resolved my issue.\r\n\r\nThat's great, glad to hear!\r\n\r\n\r\n> how can we secure this UI with username and password ?\r\n\r\nThere are two options you have there:\r\n1) simple username/password auth -- [reference](https://github.com/provectus/kafka-ui/blob/master/docker/kafka-ui-auth-context.yaml#L19)\r\n2) OIDC -- [reference](https://github.com/provectus/kafka-ui/blob/master/guides/SSO.md)",
"Feel free to raise a new issue (or better a new discussion) if you'll have any problems with setting up authentication.",
"<img width=\"1440\" alt=\"Screenshot 2021-10-12 at 12 37 11 PM\" src=\"https://user-images.githubusercontent.com/39396208/136908314-e8259dac-0ee4-4268-973a-94a3ef6ec969.png\">\r\nHi,\r\n\r\nI am getting this error and kafka-ui unable to connect to broker",
"@hasan13579 could you please create a new issue for this? And please describe which auth you use and share the config."
] | [] | "2021-10-06T16:47:19Z" | [
"type/bug",
"status/accepted",
"status/confirmed"
] | unable to see brokers details in kafka-ui | Hi Team,
We have installed 3 zookeeper and 3 kafka node on same instance with different port and producing and consuming message using internal dns name.
Below are the details
2181 - zookeeper1.example
2182 - zookeeper2.example
2183 - zookeeper3.example
9093 - kafka1.example
9094 - kafka2.example
9095 - kafka3.example
We are trying to use kafka-ui for this setup using Docker.
Below is the configuration that we are trying:
"KAFKA_CLUSTERS_0_NAME", "value" : "kafka-dev",
"KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS", "value" : "kafka1.example:9093,kafka2.example:9094,kafka3.example:9095",
"KAFKA_CLUSTERS_0_ZOOKEEPER", "value" : "zookeeper1.example:2181,zookeeper2.example:2182, zookeeper3.example:2183"
But we are unable to see these brokers' details in kafka-ui. Can you please help us with the configuration?
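As the discussion above suggests, very old brokers may not support the DescribeConfigs API at all, in which case AdminClient calls fail with UnsupportedVersionException. A minimal, illustrative sketch (the bootstrap address is an example value) of degrading gracefully instead of failing the whole brokers view:
```java
// Illustrative only: on very old brokers the DescribeConfigs API may be missing,
// so the AdminClient call fails with UnsupportedVersionException. One option is
// to fall back to an empty config view instead of failing the whole brokers page.
// The bootstrap address is an example value.
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.errors.UnsupportedVersionException;

public class BrokerConfigExample {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka1.example:9093");

    try (AdminClient client = AdminClient.create(props)) {
      ConfigResource broker = new ConfigResource(ConfigResource.Type.BROKER, "1");
      try {
        Map<ConfigResource, Config> configs =
            client.describeConfigs(List.of(broker)).all().get();
        System.out.println(configs.get(broker).entries().size() + " config entries");
      } catch (ExecutionException e) {
        if (e.getCause() instanceof UnsupportedVersionException) {
          System.out.println("Broker too old for DescribeConfigs; showing no configs");
        } else {
          throw e;
        }
      }
    }
  }
}
```
The linked fix applies the same idea reactively by resuming with an empty config map when UnsupportedVersionException is raised.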
| [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerServiceImpl.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerServiceImpl.java"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerServiceImpl.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerServiceImpl.java
index ba5d8932a07..236ea36d28f 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerServiceImpl.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerServiceImpl.java
@@ -7,8 +7,8 @@
import com.provectus.kafka.ui.model.InternalBrokerConfig;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.util.ClusterUtil;
-import java.util.ArrayList;
import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
@@ -18,6 +18,7 @@
import org.apache.kafka.clients.admin.DescribeConfigsOptions;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.config.ConfigResource;
+import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
@@ -39,11 +40,15 @@ private Mono<Map<Integer, List<ConfigEntry>>> loadBrokersConfig(
.map(ExtendedAdminClient::getAdminClient)
.flatMap(adminClient ->
ClusterUtil.toMono(adminClient.describeConfigs(resources,
- new DescribeConfigsOptions().includeSynonyms(true)).all())
- .map(config -> config.entrySet().stream()
+ new DescribeConfigsOptions().includeSynonyms(true)).all())
+ .map(config -> config.entrySet()
+ .stream()
.collect(Collectors.toMap(
c -> Integer.valueOf(c.getKey().name()),
- c -> new ArrayList<>(c.getValue().entries())))));
+ c -> List.copyOf(c.getValue().entries())
+ ))
+ ))
+ .onErrorResume(UnsupportedVersionException.class, (e) -> Mono.just(new HashMap<>()));
}
private Mono<List<ConfigEntry>> loadBrokersConfig(
| null | test | train | 2021-10-06T08:59:52 | "2021-10-06T03:41:56Z" | hasan13579 | train |
provectus/kafka-ui/938_940 | provectus/kafka-ui | provectus/kafka-ui/938 | provectus/kafka-ui/940 | [
"keyword_pr_to_issue",
"timestamp(timedelta=0.0, similarity=0.9293281150177739)"
] | 9c48e5682a9974a619f6cf0cb51c5ab2b80a5336 | 962322f20f941f04dcb0bd52511a9cd5244f6bf8 | [
"Hi @Haarolean,\r\n\r\nCan I try to fix it (if so, can you assign the task to me)?\r\n\r\nWhat do you prefer the default for me to replace one with the other?",
"Hey, sure thing! \r\n\r\nI'd suggest we use log4j since it's being used more and and way longer than slf4j."
] | [] | "2021-10-06T21:06:07Z" | [
"good first issue",
"scope/backend",
"type/chore",
"hacktoberfest"
] | Fix different lombok logging annotations | For some reason we use both `@slf4j` and `@log4j2`. | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxClusterUtil.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxPoolFactory.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxClusterUtil.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxPoolFactory.java"
] | [
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/BaseTest.java"
] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java
index 3c21232539f..22730d6c815 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java
@@ -32,7 +32,7 @@
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
-import lombok.extern.slf4j.Slf4j;
+import lombok.extern.log4j.Log4j2;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
@@ -49,7 +49,7 @@
import reactor.core.publisher.Mono;
import reactor.util.function.Tuple2;
-@Slf4j
+@Log4j2
public class ClusterUtil {
private static final String CLUSTER_VERSION_PARAM_KEY = "inter.broker.protocol.version";
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxClusterUtil.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxClusterUtil.java
index 2cf3f46b74b..8112ab6b6a6 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxClusterUtil.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxClusterUtil.java
@@ -19,13 +19,13 @@
import javax.management.remote.JMXConnector;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
-import lombok.extern.slf4j.Slf4j;
+import lombok.extern.log4j.Log4j2;
import org.apache.commons.pool2.KeyedObjectPool;
import org.jetbrains.annotations.Nullable;
import org.springframework.stereotype.Component;
@Component
-@Slf4j
+@Log4j2
@RequiredArgsConstructor
public class JmxClusterUtil {
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxPoolFactory.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxPoolFactory.java
index c5e7f91fe80..405d30b0b47 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxPoolFactory.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxPoolFactory.java
@@ -8,13 +8,13 @@
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
import javax.rmi.ssl.SslRMIClientSocketFactory;
-import lombok.extern.slf4j.Slf4j;
+import lombok.extern.log4j.Log4j2;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.pool2.BaseKeyedPooledObjectFactory;
import org.apache.commons.pool2.PooledObject;
import org.apache.commons.pool2.impl.DefaultPooledObject;
-@Slf4j
+@Log4j2
public class JmxPoolFactory extends BaseKeyedPooledObjectFactory<JmxConnectionInfo, JMXConnector> {
@Override
| diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/BaseTest.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/BaseTest.java
index a5ac78f5660..477f7ed49c5 100644
--- a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/BaseTest.java
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/BaseTest.java
@@ -9,7 +9,7 @@
import io.github.cdimascio.dotenv.Dotenv;
import io.qameta.allure.selenide.AllureSelenide;
import lombok.SneakyThrows;
-import lombok.extern.slf4j.Slf4j;
+import lombok.extern.log4j.Log4j2;
import org.apache.commons.io.FileUtils;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.DisplayNameGeneration;
@@ -22,7 +22,7 @@
import java.io.IOException;
import java.util.Arrays;
-@Slf4j
+@Log4j2
@DisplayNameGeneration(CamelCaseToSpacedDisplayNameGenerator.class)
public class BaseTest {
| test | train | 2021-10-06T20:01:46 | "2021-10-06T14:01:19Z" | Haarolean | train |
provectus/kafka-ui/953_957 | provectus/kafka-ui | provectus/kafka-ui/953 | provectus/kafka-ui/957 | [
"connected"
] | d5289e1d1363030af2184e86d2d4dfac540def58 | e8fb5d324131eaddc73978fb2f6a1ceefe647ac5 | [
"@germanosin this happens when both key and content schemas have the same value for `$id` (`http://unknown.unknown` in this case). But I'm not sure if this is a frontend issue or a backend issue. The lib that we use for validating JSON doesn't allow having two schemas with the same id. This sounds reasonable to me. And I don't know where this `http://unknown.unknown` comes from.",
"Hey @samlangdon, thanks for reaching out. We've fixed this thing, you can pull \"master\" image to get your fix :)",
"Great stuff - many thanks for fixing this so quickly. I've pulled the master image and can confirm this resolves the issue for me :) ",
"You're most welcome!",
"Hey @Haarolean, thanks for responding so quickly on this one. This issue still exists in the latest Docker image. Is it possible that master is not published there yet? Thanks",
"> Hey @Haarolean, thanks for responding so quickly on this one. This issue still exists in the latest Docker image. Is it possible that master is not published there yet? Thanks\r\n\r\nhi, which tag are you pulling? Try `master`, not `latest`.",
"Thank you! That was it. :)",
"You're welcome"
] | [] | "2021-10-11T07:45:59Z" | [
"type/bug",
"scope/backend",
"status/accepted"
] | Error when trying to produce messages through Kafka-UI | **Describe the bug**
When trying to produce a message, I get an error saying 'Content schema with key or id "http://unknown.unknown" already exists'
**Set up**
Install in Kubernetes in Azure (AKS) using a Helm chart, in the same Kubernetes cluster as my Kafka cluster. Accessing kafka-ui in a browser (port 8080) on my workstation by port forwarding in kubectl. Apart from this issue, everything else seems to work fine - I can connect to my brokers, create topics, view messages, etc.
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Go to my cluster (local)
2. Choose a topic (test)
3. Click 'Produce message' button toward top-right (now at URL http://127.0.0.1:8080/ui/clusters/local/topics/test/message)
4. Leave default key and content or change it
5. Click send
6. Red error text appears at bottom of screen saying: 'Content schema with key or id "http://unknown.unknown" already exists'
7. Message is not sent
**Expected behavior**
In step 6, I expect the message to send. Note I can send messages fine from other producers, so don't think there's any issue with my Kafka cluster.
**Screenshots**

| [
"kafka-ui-react-app/src/components/Topics/Topic/SendMessage/validateMessage.ts"
] | [
"kafka-ui-react-app/src/components/Topics/Topic/SendMessage/validateMessage.ts"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/Topic/SendMessage/validateMessage.ts b/kafka-ui-react-app/src/components/Topics/Topic/SendMessage/validateMessage.ts
index 242dd50f9fc..785c9fd2b05 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/SendMessage/validateMessage.ts
+++ b/kafka-ui-react-app/src/components/Topics/Topic/SendMessage/validateMessage.ts
@@ -8,14 +8,17 @@ const validateMessage = async (
setSchemaErrors: React.Dispatch<React.SetStateAction<string[]>>
): Promise<boolean> => {
setSchemaErrors([]);
- const ajv = new Ajv();
+ const keyAjv = new Ajv();
+ const contentAjv = new Ajv();
try {
if (messageSchema) {
let keyIsValid = false;
let contentIsValid = false;
try {
- const validateKey = ajv.compile(JSON.parse(messageSchema.key.schema));
+ const validateKey = keyAjv.compile(
+ JSON.parse(messageSchema.key.schema)
+ );
keyIsValid = validateKey(JSON.parse(key));
if (!keyIsValid) {
const errorString: string[] = [];
@@ -32,7 +35,7 @@ const validateMessage = async (
setSchemaErrors((e) => [...e, `Key ${err.message}`]);
}
try {
- const validateContent = ajv.compile(
+ const validateContent = contentAjv.compile(
JSON.parse(messageSchema.value.schema)
);
contentIsValid = validateContent(JSON.parse(content));
| null | val | train | 2021-10-09T14:31:34 | "2021-10-09T17:30:51Z" | samlangdon | train |
provectus/kafka-ui/792_970 | provectus/kafka-ui | provectus/kafka-ui/792 | provectus/kafka-ui/970 | [
"keyword_pr_to_issue"
] | 576f5d5b15f39700355a20a72f121b2a4caa0f59 | 547863fdb206de06b94afd7c41ecdf87d8884764 | [
"If this hasn't already been fixed yet, I will create a PR.",
"@tuananhlai, thanks for creating this issue. This one is not fixed. If you will create a PR it would be very helpful for us!",
"Hey @tuananhlai, \r\nthis has been fixed (kudos to @IndeedSi) in latest commit. \r\nYou can pull `master` labeled image to try it out. Let us know if there are any other problems."
] | [
"When two subjects use the same schema id, this will conflict in existing code. So I fixed this key duplicate problem"
] | "2021-10-15T01:12:59Z" | [
"type/bug",
"scope/frontend",
"status/confirmed"
] | Cannot view protobuf schema in Schema Registry | **Describe the bug**
(A clear and concise description of what the bug is.)
Kafka UI is able to list the names of all schemas in Schema Registry, but when I want to view a **Protobuf** schema, the app crashes with the following error. I believe this is because the app tries to parse the schema as JSON and fails.
```
SyntaxError: Unexpected token s in JSON at position 0
at JSON.parse (<anonymous>)
at Wa (LatestVersionItem.tsx:38)
at ia (react-dom.production.min.js:157)
at Vs (react-dom.production.min.js:267)
at Eu (react-dom.production.min.js:250)
at Cu (react-dom.production.min.js:250)
at Su (react-dom.production.min.js:250)
at mu (react-dom.production.min.js:243)
at react-dom.production.min.js:123
at t.unstable_runWithPriority (scheduler.production.min.js:18)
SyntaxError: Unexpected token s in JSON at position 0
at JSON.parse (<anonymous>)
at za (SchemaVersion.tsx:20)
at ia (react-dom.production.min.js:157)
at Vs (react-dom.production.min.js:267)
at Eu (react-dom.production.min.js:250)
at Cu (react-dom.production.min.js:250)
at Su (react-dom.production.min.js:250)
at mu (react-dom.production.min.js:243)
at react-dom.production.min.js:123
at t.unstable_runWithPriority (scheduler.production.min.js:18)
```
**Set up**
(How do you run the app?)
1. Setup Kafka UI with Schema Registry
2. Add a proto schema to schema registry (via HTTP, etc...)
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Click on Schema Registry in the side bar
2. Click on a protobuf schema.
**Expected behavior**
The app doesn't crash.
**Screenshots**

**Additional context**
(Add any other context about the problem here) | [
"kafka-ui-react-app/src/components/Schemas/Details/LatestVersionItem.tsx",
"kafka-ui-react-app/src/components/Schemas/Details/SchemaVersion.tsx",
"kafka-ui-react-app/src/components/Schemas/Details/__test__/Details.spec.tsx",
"kafka-ui-react-app/src/components/Schemas/Details/__test__/LatestVersionItem.spec.tsx",
"kafka-ui-react-app/src/components/Schemas/Details/__test__/__snapshots__/Details.spec.tsx.snap",
"kafka-ui-react-app/src/components/Schemas/Details/__test__/fixtures.ts",
"kafka-ui-react-app/src/components/Schemas/List/List.tsx"
] | [
"kafka-ui-react-app/src/components/Schemas/Details/LatestVersionItem.tsx",
"kafka-ui-react-app/src/components/Schemas/Details/SchemaVersion.tsx",
"kafka-ui-react-app/src/components/Schemas/Details/__test__/Details.spec.tsx",
"kafka-ui-react-app/src/components/Schemas/Details/__test__/LatestVersionItem.spec.tsx",
"kafka-ui-react-app/src/components/Schemas/Details/__test__/__snapshots__/Details.spec.tsx.snap",
"kafka-ui-react-app/src/components/Schemas/Details/__test__/fixtures.ts",
"kafka-ui-react-app/src/components/Schemas/List/List.tsx"
] | [] | diff --git a/kafka-ui-react-app/src/components/Schemas/Details/LatestVersionItem.tsx b/kafka-ui-react-app/src/components/Schemas/Details/LatestVersionItem.tsx
index ecc666abd06..cef2e415e18 100644
--- a/kafka-ui-react-app/src/components/Schemas/Details/LatestVersionItem.tsx
+++ b/kafka-ui-react-app/src/components/Schemas/Details/LatestVersionItem.tsx
@@ -35,7 +35,11 @@ const LatestVersionItem: React.FC<LatestVersionProps> = ({
<JSONEditor
isFixedHeight
name="schema"
- value={JSON.stringify(JSON.parse(schema), null, '\t')}
+ value={
+ schema.trim().startsWith('{')
+ ? JSON.stringify(JSON.parse(schema), null, '\t')
+ : schema
+ }
setOptions={{
showLineNumbers: false,
maxLines: 40,
diff --git a/kafka-ui-react-app/src/components/Schemas/Details/SchemaVersion.tsx b/kafka-ui-react-app/src/components/Schemas/Details/SchemaVersion.tsx
index f230cad602c..d6840b1e0e4 100644
--- a/kafka-ui-react-app/src/components/Schemas/Details/SchemaVersion.tsx
+++ b/kafka-ui-react-app/src/components/Schemas/Details/SchemaVersion.tsx
@@ -32,7 +32,11 @@ const SchemaVersion: React.FC<SchemaVersionProps> = ({
<JSONEditor
isFixedHeight
name="schema"
- value={JSON.stringify(JSON.parse(schema), null, '\t')}
+ value={
+ schema.trim().startsWith('{')
+ ? JSON.stringify(JSON.parse(schema), null, '\t')
+ : schema
+ }
setOptions={{
showLineNumbers: false,
maxLines: 40,
diff --git a/kafka-ui-react-app/src/components/Schemas/Details/__test__/Details.spec.tsx b/kafka-ui-react-app/src/components/Schemas/Details/__test__/Details.spec.tsx
index bf67d54262e..1134b40c73c 100644
--- a/kafka-ui-react-app/src/components/Schemas/Details/__test__/Details.spec.tsx
+++ b/kafka-ui-react-app/src/components/Schemas/Details/__test__/Details.spec.tsx
@@ -7,7 +7,7 @@ import ClusterContext from 'components/contexts/ClusterContext';
import DetailsContainer from 'components/Schemas/Details/DetailsContainer';
import Details, { DetailsProps } from 'components/Schemas/Details/Details';
-import { schema, versions } from './fixtures';
+import { jsonSchema, versions } from './fixtures';
const clusterName = 'testCluster';
const fetchSchemaVersionsMock = jest.fn();
@@ -37,8 +37,8 @@ describe('Details', () => {
describe('View', () => {
const setupWrapper = (props: Partial<DetailsProps> = {}) => (
<Details
- subject={schema.subject}
- schema={schema}
+ subject={jsonSchema.subject}
+ schema={jsonSchema}
clusterName={clusterName}
fetchSchemaVersions={fetchSchemaVersionsMock}
deleteSchema={jest.fn()}
@@ -66,7 +66,7 @@ describe('Details', () => {
expect(fetchSchemaVersionsMock).toHaveBeenCalledWith(
clusterName,
- schema.subject
+ jsonSchema.subject
);
});
@@ -114,7 +114,7 @@ describe('Details', () => {
expect(wrapper.exists('LatestVersionItem')).toBeTruthy();
expect(wrapper.exists('button')).toBeTruthy();
expect(wrapper.exists('thead')).toBeTruthy();
- expect(wrapper.find('SchemaVersion').length).toEqual(2);
+ expect(wrapper.find('SchemaVersion').length).toEqual(3);
});
it('matches snapshot', () => {
diff --git a/kafka-ui-react-app/src/components/Schemas/Details/__test__/LatestVersionItem.spec.tsx b/kafka-ui-react-app/src/components/Schemas/Details/__test__/LatestVersionItem.spec.tsx
index 0e0aff8cea6..47b6e38b491 100644
--- a/kafka-ui-react-app/src/components/Schemas/Details/__test__/LatestVersionItem.spec.tsx
+++ b/kafka-ui-react-app/src/components/Schemas/Details/__test__/LatestVersionItem.spec.tsx
@@ -2,18 +2,28 @@ import React from 'react';
import { mount, shallow } from 'enzyme';
import LatestVersionItem from 'components/Schemas/Details/LatestVersionItem';
-import { schema } from './fixtures';
+import { jsonSchema, protoSchema } from './fixtures';
describe('LatestVersionItem', () => {
- it('renders latest version of schema', () => {
- const wrapper = mount(<LatestVersionItem schema={schema} />);
+ it('renders latest version of json schema', () => {
+ const wrapper = mount(<LatestVersionItem schema={jsonSchema} />);
expect(wrapper.find('table').length).toEqual(1);
expect(wrapper.find('td').at(1).text()).toEqual('1');
expect(wrapper.exists('JSONEditor')).toBeTruthy();
});
+ it('renders latest version of proto schema', () => {
+ const wrapper = mount(<LatestVersionItem schema={protoSchema} />);
+
+ expect(wrapper.find('table').length).toEqual(1);
+ expect(wrapper.find('td').at(1).text()).toEqual('2');
+ expect(wrapper.exists('JSONEditor')).toBeTruthy();
+ });
+
it('matches snapshot', () => {
- expect(shallow(<LatestVersionItem schema={schema} />)).toMatchSnapshot();
+ expect(
+ shallow(<LatestVersionItem schema={jsonSchema} />)
+ ).toMatchSnapshot();
});
});
diff --git a/kafka-ui-react-app/src/components/Schemas/Details/__test__/__snapshots__/Details.spec.tsx.snap b/kafka-ui-react-app/src/components/Schemas/Details/__test__/__snapshots__/Details.spec.tsx.snap
index 5ca52fab2a9..61c5096f65a 100644
--- a/kafka-ui-react-app/src/components/Schemas/Details/__test__/__snapshots__/Details.spec.tsx.snap
+++ b/kafka-ui-react-app/src/components/Schemas/Details/__test__/__snapshots__/Details.spec.tsx.snap
@@ -327,6 +327,26 @@ exports[`Details View when page with schema versions loaded when schema has vers
}
}
/>
+ <SchemaVersion
+ key="3"
+ version={
+ Object {
+ "compatibilityLevel": "BACKWARD",
+ "id": 3,
+ "schema": "syntax = \\"proto3\\";
+package com.indeed;
+
+message MyRecord {
+ int32 id = 1;
+ string name = 2;
+}
+",
+ "schemaType": "PROTOBUF",
+ "subject": "test",
+ "version": "3",
+ }
+ }
+ />
</tbody>
</table>
</div>
diff --git a/kafka-ui-react-app/src/components/Schemas/Details/__test__/fixtures.ts b/kafka-ui-react-app/src/components/Schemas/Details/__test__/fixtures.ts
index d53190c9f63..bc0b1f2bd2e 100644
--- a/kafka-ui-react-app/src/components/Schemas/Details/__test__/fixtures.ts
+++ b/kafka-ui-react-app/src/components/Schemas/Details/__test__/fixtures.ts
@@ -1,6 +1,6 @@
import { SchemaSubject, SchemaType } from 'generated-sources';
-export const schema: SchemaSubject = {
+export const jsonSchema: SchemaSubject = {
subject: 'test',
version: '1',
id: 1,
@@ -10,6 +10,16 @@ export const schema: SchemaSubject = {
schemaType: SchemaType.JSON,
};
+export const protoSchema: SchemaSubject = {
+ subject: 'test_proto',
+ version: '1',
+ id: 2,
+ schema:
+ 'syntax = "proto3";\npackage com.indeed;\n\nmessage MyRecord {\n int32 id = 1;\n string name = 2;\n}\n',
+ compatibilityLevel: 'BACKWARD',
+ schemaType: SchemaType.PROTOBUF,
+};
+
export const versions: SchemaSubject[] = [
{
subject: 'test',
@@ -29,4 +39,13 @@ export const versions: SchemaSubject[] = [
compatibilityLevel: 'BACKWARD',
schemaType: SchemaType.JSON,
},
+ {
+ subject: 'test',
+ version: '3',
+ id: 3,
+ schema:
+ 'syntax = "proto3";\npackage com.indeed;\n\nmessage MyRecord {\n int32 id = 1;\n string name = 2;\n}\n',
+ compatibilityLevel: 'BACKWARD',
+ schemaType: SchemaType.PROTOBUF,
+ },
];
diff --git a/kafka-ui-react-app/src/components/Schemas/List/List.tsx b/kafka-ui-react-app/src/components/Schemas/List/List.tsx
index 8b5082077b9..ab2128c8b50 100644
--- a/kafka-ui-react-app/src/components/Schemas/List/List.tsx
+++ b/kafka-ui-react-app/src/components/Schemas/List/List.tsx
@@ -88,7 +88,10 @@ const List: React.FC<ListProps> = ({
</tr>
)}
{schemas.map((subject) => (
- <ListItem key={subject.id} subject={subject} />
+ <ListItem
+ key={[subject.id, subject.subject].join('-')}
+ subject={subject}
+ />
))}
</tbody>
</table>
| null | val | train | 2021-10-14T21:37:58 | "2021-08-13T03:59:33Z" | tuananhlai | train |
provectus/kafka-ui/973_983 | provectus/kafka-ui | provectus/kafka-ui/973 | provectus/kafka-ui/983 | [
"timestamp(timedelta=0.0, similarity=0.9906824177900114)",
"connected"
] | 547863fdb206de06b94afd7c41ecdf87d8884764 | 77226a2144d39e1f8d4576489460bb683a10c0e3 | [] | [] | "2021-10-18T16:05:08Z" | [
"type/bug",
"type/enhancement",
"scope/frontend",
"status/confirmed"
] | Support "compact,delete" cleanup policy | **Describe the bug**
That is one of the commonly used policies and it should be possible to create a topic with this cleanup policy. It is not in the list of available policies (only "compact" and "delete" are available, not "compact,delete").
**Set up**
Docker from master tag `v0.2.2-SNAPSHOT(547863f)`
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Go to Topics -> Create
2. Check available cleanup policies
**Expected behavior**
`compact,delete` is available
| [
"kafka-ui-react-app/src/components/Topics/Topic/Edit/Edit.tsx",
"kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx",
"kafka-ui-react-app/src/redux/actions/thunks/topics.ts",
"kafka-ui-react-app/src/redux/interfaces/topic.ts"
] | [
"kafka-ui-react-app/src/components/Topics/Topic/Edit/Edit.tsx",
"kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx",
"kafka-ui-react-app/src/redux/actions/thunks/topics.ts",
"kafka-ui-react-app/src/redux/interfaces/topic.ts"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Edit/Edit.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Edit/Edit.tsx
index 50074edae1f..0f36612768f 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Edit/Edit.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Edit/Edit.tsx
@@ -13,6 +13,7 @@ import { camelCase } from 'lodash';
import TopicForm from 'components/Topics/shared/Form/TopicForm';
import { clusterTopicPath } from 'lib/paths';
import { useHistory } from 'react-router';
+import { TOPIC_CUSTOM_PARAMS } from 'lib/constants';
import DangerZoneContainer from './DangerZoneContainer';
@@ -65,7 +66,11 @@ const topicParams = (topic: TopicWithDetailedInfo | undefined) => {
partitions: topic.partitionCount || DEFAULTS.partitions,
replicationFactor,
customParams: topic.config
- ?.filter((el) => el.value !== el.defaultValue)
+ ?.filter(
+ (el) =>
+ el.value !== el.defaultValue &&
+ Object.keys(TOPIC_CUSTOM_PARAMS).includes(el.name)
+ )
.map((el) => ({ name: el.name, value: el.value })),
...configs,
};
diff --git a/kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx b/kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx
index 8435ea657ae..9ad5e0a60b9 100644
--- a/kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx
+++ b/kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx
@@ -98,7 +98,7 @@ const TopicForm: React.FC<Props> = ({
type="number"
placeholder="Min In Sync Replicas"
defaultValue="1"
- {...register('minInSyncReplicas', {
+ {...register('minInsyncReplicas', {
required: 'Min In Sync Replicas is required.',
})}
/>
@@ -115,6 +115,7 @@ const TopicForm: React.FC<Props> = ({
<select defaultValue="delete" {...register('cleanupPolicy')}>
<option value="delete">Delete</option>
<option value="compact">Compact</option>
+ <option value="compact,delete">Compact,Delete</option>
</select>
</div>
</div>
diff --git a/kafka-ui-react-app/src/redux/actions/thunks/topics.ts b/kafka-ui-react-app/src/redux/actions/thunks/topics.ts
index b9f6206d8b4..6da2e433158 100644
--- a/kafka-ui-react-app/src/redux/actions/thunks/topics.ts
+++ b/kafka-ui-react-app/src/redux/actions/thunks/topics.ts
@@ -171,7 +171,7 @@ export const formatTopicCreation = (form: TopicFormData): TopicCreation => {
retentionBytes,
retentionMs,
maxMessageBytes,
- minInSyncReplicas,
+ minInsyncReplicas,
customParams,
} = form;
@@ -184,7 +184,7 @@ export const formatTopicCreation = (form: TopicFormData): TopicCreation => {
'retention.ms': retentionMs.toString(),
'retention.bytes': retentionBytes.toString(),
'max.message.bytes': maxMessageBytes.toString(),
- 'min.insync.replicas': minInSyncReplicas.toString(),
+ 'min.insync.replicas': minInsyncReplicas.toString(),
...Object.values(customParams || {}).reduce(topicReducer, {}),
},
};
@@ -196,7 +196,7 @@ const formatTopicUpdate = (form: TopicFormDataRaw): TopicUpdate => {
retentionBytes,
retentionMs,
maxMessageBytes,
- minInSyncReplicas,
+ minInsyncReplicas,
customParams,
} = form;
@@ -206,7 +206,7 @@ const formatTopicUpdate = (form: TopicFormDataRaw): TopicUpdate => {
'retention.ms': retentionMs,
'retention.bytes': retentionBytes,
'max.message.bytes': maxMessageBytes,
- 'min.insync.replicas': minInSyncReplicas,
+ 'min.insync.replicas': minInsyncReplicas,
...Object.values(customParams || {}).reduce(topicReducer, {}),
},
};
diff --git a/kafka-ui-react-app/src/redux/interfaces/topic.ts b/kafka-ui-react-app/src/redux/interfaces/topic.ts
index e9407f8ef99..168faca7643 100644
--- a/kafka-ui-react-app/src/redux/interfaces/topic.ts
+++ b/kafka-ui-react-app/src/redux/interfaces/topic.ts
@@ -62,7 +62,7 @@ export interface TopicFormDataRaw {
name: string;
partitions: number;
replicationFactor: number;
- minInSyncReplicas: number;
+ minInsyncReplicas: number;
cleanupPolicy: string;
retentionMs: number;
retentionBytes: number;
@@ -74,7 +74,7 @@ export interface TopicFormData {
name: string;
partitions: number;
replicationFactor: number;
- minInSyncReplicas: number;
+ minInsyncReplicas: number;
cleanupPolicy: string;
retentionMs: number;
retentionBytes: number;
| null | test | train | 2021-10-15T15:16:45 | "2021-10-18T03:16:07Z" | akamensky | train |
provectus/kafka-ui/825_984 | provectus/kafka-ui | provectus/kafka-ui/825 | provectus/kafka-ui/984 | [
"connected"
] | e4dc1134abe45ee72c10d454eb7e4e326c52e194 | 4ebc74212cea7741dd68362881430517322fd018 | [] | [
"Is it necessary for something?",
"We have `@Slf4j` for logging \r\n`import lombok.extern.slf4j.Slf4j;`\r\n```\r\nimport lombok.extern.slf4j.Slf4j;\r\n@Slf4j\r\npublic class SomePage {\r\n....\r\n @Step\r\n public SomePage goTo(){\r\n log.warn(\"la-la\");\r\n }\r\n```",
"You might want to make sure, you have `IntelliJ Lombok plugin` installed",
"Please, use formatting here and in other files . (`CMD+OPTION+L` by default in IDEA)",
"formatting",
"- comments",
"why sleep?",
"why `extends MainPage`?",
"use camelCase for naming",
"formatting",
"why extends TopicView?",
"silent catches isn't a good idea. You can use @SneakyThrows annotation\r\nAlso, why there's a sleep in first place? ",
"If it's in the extension package, it should be used as an extension \r\nTake a look (https://projectlombok.org/features/experimental/ExtensionMethod) and the usage of `WaitUtils`\r\n",
"I don't think, that using logging in tests is a good idea",
"formatting",
"why hardcoded sleep?",
"Typo: type into content field?",
"Typo: type into header field?",
"camelCase",
"camelCase",
"camelCase",
"will fail, when url isn't localhost:8080, e.g. when it's not run on local browser. Might be one of the reasons, why ci failed(maybe not, safest option would be \r\n```\r\n String path = \"ui/clusters/%s/topics\";\r\n String baseUrl = TestConfiguration.BASE_URL+path.formatted(cluster));\r\n```\r\nAlso BASE_URL naming is already busy, its better to use more specific like topicsListUrl or something",
"formatting",
"Naming is unclear. How the topic gets updated?\r\nWhy creating/removing schemas is in the test about producing messages or what exactly tests this test? It's doing too much. For test that produces message, it doesn't check, that message is created",
"this assertion can be false positive, as you can probably be still on the page of deletion. It's better to assert first, that you're on the list of schemas, then assert, that it doesn't have schema. If you try to do it manually, you'll see that removed schema is still in the list.",
"`return this;`",
"why not `return first.isPresent();`?",
"`setContentField` ?",
"`setHeaderField`?",
"`return this;`",
"`return this;`",
"still here",
"`return this`",
"?",
"is there a reason to refresh the page here?",
"why refresh is here?",
" waiting until page is loaded ",
" waiting until page is loaded .",
"we need to enter value to this field for producing message. Produced message has 3 fields (header, content , key)",
"will do ",
"we need to enter value to this field for producing message. Produced message has 3 fields (header, content , key)",
"will change"
] | "2021-10-18T16:28:30Z" | [
"type/enhancement",
"good first issue",
"scope/QA",
"hacktoberfest"
] | [e2e] add e2e-checks for create/clear messages | [] | [
"kafka-ui-e2e-checks/src/test/resources/producedkey.txt",
"kafka-ui-e2e-checks/src/test/resources/schema_protobuf_value.txt",
"kafka-ui-e2e-checks/src/test/resources/testData.txt"
] | [
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/BaseTest.java",
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/TestConfiguration.java",
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/helpers/ApiHelper.java",
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/MainPage.java",
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/Pages.java",
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/ProduceMessagePage.java",
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/connector/ConnectorUpdateView.java",
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/schema/SchemaCreateView.java",
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/schema/SchemaEditView.java",
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/schema/SchemaRegistryList.java",
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/schema/SchemaView.java",
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/topic/TopicCreateEditSettingsView.java",
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/topic/TopicView.java",
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/tests/ConnectorsTests.java",
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/tests/SchemasTests.java",
"kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/tests/TopicTests.java",
"kafka-ui-e2e-checks/src/test/resources/schema_Json_Value.json",
"kafka-ui-e2e-checks/src/test/resources/schema_avro_for_update.json",
"kafka-ui-e2e-checks/src/test/resources/schema_avro_value.json"
] | diff --git a/kafka-ui-e2e-checks/src/test/resources/producedkey.txt b/kafka-ui-e2e-checks/src/test/resources/producedkey.txt
new file mode 100644
index 00000000000..f1ccb75d55d
--- /dev/null
+++ b/kafka-ui-e2e-checks/src/test/resources/producedkey.txt
@@ -0,0 +1,1 @@
+"key"
\ No newline at end of file
diff --git a/kafka-ui-e2e-checks/src/test/resources/schema_protobuf_value.txt b/kafka-ui-e2e-checks/src/test/resources/schema_protobuf_value.txt
new file mode 100644
index 00000000000..ec0000121c2
--- /dev/null
+++ b/kafka-ui-e2e-checks/src/test/resources/schema_protobuf_value.txt
@@ -0,0 +1,5 @@
+enum SchemaType {
+ AVRO = 0;
+ JSON = 1;
+ PROTOBUF = 2;
+ }
\ No newline at end of file
diff --git a/kafka-ui-e2e-checks/src/test/resources/testData.txt b/kafka-ui-e2e-checks/src/test/resources/testData.txt
new file mode 100644
index 00000000000..9b9d3b66546
--- /dev/null
+++ b/kafka-ui-e2e-checks/src/test/resources/testData.txt
@@ -0,0 +1,1 @@
+"print"
| diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/BaseTest.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/BaseTest.java
index 6a866ee0004..2f41f8a95b3 100644
--- a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/BaseTest.java
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/BaseTest.java
@@ -13,9 +13,8 @@
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.DisplayNameGeneration;
+import org.junit.jupiter.api.*;
+import org.openqa.selenium.Dimension;
import org.openqa.selenium.OutputType;
import org.openqa.selenium.TakesScreenshot;
import org.openqa.selenium.chrome.ChromeOptions;
@@ -23,11 +22,11 @@
import org.testcontainers.Testcontainers;
import org.testcontainers.containers.BrowserWebDriverContainer;
import org.testcontainers.containers.wait.strategy.Wait;
+import org.testcontainers.utility.DockerImageName;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
-import java.time.Duration;
import java.util.Arrays;
@Slf4j
@@ -40,12 +39,9 @@ public class BaseTest {
private Screenshooter screenshooter = new Screenshooter();
- public static BrowserWebDriverContainer<?> webDriverContainer =
- new BrowserWebDriverContainer<>()
- .withCapabilities(new ChromeOptions()
- .addArguments("--no-sandbox")
- .addArguments("--disable-dev-shm-usage"))
- .waitingFor(Wait.defaultWaitStrategy().withStartupTimeout(Duration.ofSeconds(90)));
+ private static final String IMAGE_NAME = TestConfiguration.IMAGE_NAME;
+ private static final String IMAGE_TAG = TestConfiguration.IMAGE_TAG;
+ protected static BrowserWebDriverContainer<?> webDriverContainer = null;
public void compareScreenshots(String name) {
screenshooter.compareScreenshots(name);
@@ -55,13 +51,32 @@ public void compareScreenshots(String name, Boolean shouldUpdateScreenshots) {
screenshooter.compareScreenshots(name, shouldUpdateScreenshots);
}
+ @BeforeEach
+ public void setWebDriver(){
+ RemoteWebDriver remoteWebDriver = webDriverContainer.getWebDriver();
+ WebDriverRunner.setWebDriver(remoteWebDriver);
+ remoteWebDriver.manage().window().setSize(new Dimension(1440, 1024));
+ }
+
@BeforeAll
public static void start() {
+ DockerImageName image = DockerImageName.parse(IMAGE_NAME).withTag(IMAGE_TAG);
+ webDriverContainer = new BrowserWebDriverContainer<>(image)
+ .withCapabilities(new ChromeOptions().addArguments("--disable-dev-shm-usage"))
+ .waitingFor(Wait.forHttp("/"))
+ .waitingFor(Wait.forLogMessage(".*Started Selenium Standalone.*", 1));
Testcontainers.exposeHostPorts(8080);
webDriverContainer.start();
webDriverContainer.isRunning();
- RemoteWebDriver remoteWebDriver = webDriverContainer.getWebDriver();
- WebDriverRunner.setWebDriver(remoteWebDriver);
+ webDriverContainer.isHostAccessible();
+ }
+
+ @AfterAll
+ public static void tearDown(){
+ if(webDriverContainer.isRunning()) {
+ webDriverContainer.close();
+ webDriverContainer.stop();
+ }
}
static {
@@ -80,17 +95,14 @@ public static void start() {
setup();
}
-
@AfterEach
public void afterMethod() {
- webDriverContainer.getWebDriver().manage().deleteAllCookies();
Allure.addAttachment("Screenshot",
new ByteArrayInputStream(((TakesScreenshot) webDriverContainer.getWebDriver()).getScreenshotAs(OutputType.BYTES)));
}
@SneakyThrows
private static void setup() {
-
Configuration.reportsFolder = TestConfiguration.REPORTS_FOLDER;
Configuration.screenshots = TestConfiguration.SCREENSHOTS;
Configuration.savePageSource = TestConfiguration.SAVE_PAGE_SOURCE;
diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/TestConfiguration.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/TestConfiguration.java
index dbc18e228f4..f28533cfa4f 100644
--- a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/TestConfiguration.java
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/TestConfiguration.java
@@ -9,7 +9,7 @@ public class TestConfiguration {
Boolean.parseBoolean(System.getProperty("USE_LOCAL_BROWSER", "true"));
public static String REPORTS_FOLDER = System.getProperty("REPORTS_FOLDER", "allure-results");
public static Boolean SCREENSHOTS =
- Boolean.parseBoolean(System.getProperty("SCREENSHOTS", "false"));
+ Boolean.parseBoolean(System.getProperty("SCREENSHOTS", "true"));
public static Boolean SAVE_PAGE_SOURCE =
Boolean.parseBoolean(System.getProperty("SAVE_PAGE_SOURCE", "false"));
public static Boolean REOPEN_BROWSER_ON_FAIL =
@@ -17,4 +17,7 @@ public class TestConfiguration {
public static String BROWSER = System.getProperty("BROWSER", "chrome");
public static String BROWSER_SIZE = System.getProperty("BROWSER_SIZE", "1920x1080");
public static Boolean ENABLE_VNC = Boolean.parseBoolean(System.getProperty("ENABLE_VNC", "true"));
+ public static String IMAGE_NAME = System.getProperty("SELENIUM_DOCKER_IMAGE", "selenium/standalone-chrome");
+ public static String IMAGE_TAG = System.getProperty("SELENIUM_IMAGE_TAG", "102.0");
+
}
diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/helpers/ApiHelper.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/helpers/ApiHelper.java
index f0b8ff78421..c1394c8218c 100644
--- a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/helpers/ApiHelper.java
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/helpers/ApiHelper.java
@@ -1,15 +1,12 @@
package com.provectus.kafka.ui.helpers;
import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.gson.Gson;
import com.provectus.kafka.ui.api.ApiClient;
import com.provectus.kafka.ui.api.api.KafkaConnectApi;
import com.provectus.kafka.ui.api.api.MessagesApi;
+import com.provectus.kafka.ui.api.api.SchemasApi;
import com.provectus.kafka.ui.api.api.TopicsApi;
-import com.provectus.kafka.ui.api.model.CreateTopicMessage;
-import com.provectus.kafka.ui.api.model.ErrorResponse;
-import com.provectus.kafka.ui.api.model.NewConnector;
-import com.provectus.kafka.ui.api.model.TopicCreation;
+import com.provectus.kafka.ui.api.model.*;
import com.provectus.kafka.ui.base.TestConfiguration;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
@@ -35,7 +32,22 @@ private TopicsApi topicApi() {
return new TopicsApi(new ApiClient().setBasePath(baseURL));
}
+ @SneakyThrows
+ private SchemasApi schemaApi() {
+ return new SchemasApi(new ApiClient().setBasePath(baseURL));
+ }
+
+ @SneakyThrows
+ private KafkaConnectApi connectorApi() {
+ return new KafkaConnectApi(new ApiClient().setBasePath(baseURL));
+ }
+
+ @SneakyThrows
+ private MessagesApi messageApi() {
+ return new MessagesApi(new ApiClient().setBasePath(baseURL));
+ }
+ @SneakyThrows
public void createTopic(String clusterName, String topicName) {
TopicCreation topic = new TopicCreation();
topic.setName(topicName);
@@ -50,26 +62,32 @@ public void createTopic(String clusterName, String topicName) {
}
}
-
public void deleteTopic(String clusterName, String topicName) {
try {
topicApi().deleteTopic(clusterName, topicName).block();
+ } catch (WebClientResponseException ignore) {
+ }
+ }
+
+ @SneakyThrows
+ public void createSchema(String clusterName, String schemaName, SchemaType type, String schemaValue) {
+ NewSchemaSubject schemaSubject = new NewSchemaSubject();
+ schemaSubject.setSubject(schemaName);
+ schemaSubject.setSchema(schemaValue);
+ schemaSubject.setSchemaType(type);
+ try {
+ schemaApi().createNewSchema(clusterName, schemaSubject).block();
} catch (WebClientResponseException ex) {
- ErrorResponse errorResponse = new Gson().fromJson(ex.getResponseBodyAsString(), ErrorResponse.class);
- if (errorResponse.getMessage().startsWith("This server does not host this")) {
- log.info("This server does not host this " + topicName);
- } else {
- throw ex;
- }
+ ex.printStackTrace();
}
}
@SneakyThrows
- private KafkaConnectApi connectorApi() {
- ApiClient defaultClient = new ApiClient();
- defaultClient.setBasePath(baseURL);
- KafkaConnectApi connectorsApi = new KafkaConnectApi(defaultClient);
- return connectorsApi;
+ public void deleteSchema(String clusterName, String schemaName) {
+ try {
+ schemaApi().deleteSchema(clusterName, schemaName).block();
+ } catch (WebClientResponseException ignore) {
+ }
}
@SneakyThrows
@@ -88,7 +106,7 @@ public void createConnector(String clusterName, String connectName, String conne
connector.setConfig(configMap);
try {
connectorApi().deleteConnector(clusterName, connectName, connectorName).block();
- } catch (WebClientResponseException ignored){
+ } catch (WebClientResponseException ignored) {
}
connectorApi().createConnector(clusterName, connectName, connector).block();
}
@@ -97,17 +115,9 @@ public String getFirstConnectName(String clusterName) {
return connectorApi().getConnects(clusterName).blockFirst().getName();
}
- @SneakyThrows
- private MessagesApi messageApi() {
- ApiClient defaultClient = new ApiClient();
- defaultClient.setBasePath(baseURL);
- MessagesApi messagesApi = new MessagesApi(defaultClient);
- return messagesApi;
- }
-
@SneakyThrows
public void sendMessage(String clusterName, String topicName, String messageContentJson,
- String messageKey) {
+ String messageKey) {
CreateTopicMessage createMessage = new CreateTopicMessage();
createMessage.partition(0);
createMessage.setContent(messageContentJson);
diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/MainPage.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/MainPage.java
index 7bc96928621..66a8d37eebd 100644
--- a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/MainPage.java
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/MainPage.java
@@ -42,18 +42,20 @@ public void topicIsNotVisible(String topicName){
}
- public enum SideMenuOptions {
- BROKERS("Brokers"),
- TOPICS("Topics"),
- CONSUMERS("Consumers"),
- SCHEMA_REGISTRY("Schema registry");
+ public enum SideMenuOptions {
+ BROKERS("Brokers"),
+ TOPICS("Topics"),
+ CONSUMERS("Consumers"),
+ SCHEMA_REGISTRY("Schema Registry");
- String value;
+ String value;
- SideMenuOptions(String value) {
- this.value = value;
- }
+ SideMenuOptions(String value) {
+ this.value = value;
}
+ }
+
+
@Step
public MainPage goToSideMenu(String clusterName, SideMenuOptions option) {
diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/Pages.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/Pages.java
index 0044201cc24..298bc22d8cf 100644
--- a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/Pages.java
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/Pages.java
@@ -2,6 +2,7 @@
import com.provectus.kafka.ui.pages.connector.ConnectorsList;
import com.provectus.kafka.ui.pages.connector.ConnectorsView;
+import com.provectus.kafka.ui.pages.schema.SchemaRegistryList;
import com.provectus.kafka.ui.pages.topic.TopicView;
import com.provectus.kafka.ui.pages.topic.TopicsList;
@@ -12,11 +13,13 @@ public class Pages {
public MainPage mainPage = new MainPage();
public TopicsList topicsList = new TopicsList();
public TopicView topicView = new TopicView();
+ public ProduceMessagePage produceMessagePage = new ProduceMessagePage();
public ConnectorsList connectorsList = new ConnectorsList();
public ConnectorsView connectorsView = new ConnectorsView();
+ public SchemaRegistryList schemaRegistry = new SchemaRegistryList();
public MainPage open() {
- return openMainPage();
+ return openMainPage();
}
public MainPage openMainPage() {
diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/ProduceMessagePage.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/ProduceMessagePage.java
new file mode 100644
index 00000000000..3d430bb76e5
--- /dev/null
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/ProduceMessagePage.java
@@ -0,0 +1,43 @@
+package com.provectus.kafka.ui.pages;
+
+import com.codeborne.selenide.Condition;
+import com.codeborne.selenide.SelenideElement;
+import com.provectus.kafka.ui.pages.topic.TopicView;
+import org.openqa.selenium.By;
+import org.openqa.selenium.Keys;
+import org.openqa.selenium.support.ui.ExpectedConditions;
+
+import static com.codeborne.selenide.Selenide.$;
+import static com.codeborne.selenide.Selenide.Wait;
+
+public class ProduceMessagePage{
+
+ private final SelenideElement keyField = $(By.xpath("//div[@id = 'key']/textarea"));
+ private final SelenideElement contentField = $(By.xpath("//div[@id = 'content']/textarea"));
+ private final SelenideElement headersField = $(By.xpath("//div[@id = 'headers']/textarea"));
+ private final SelenideElement sendBtn = $(By.xpath("//button[@type = 'submit']"));
+
+ public ProduceMessagePage setKeyField(String value) {
+ Wait().until(ExpectedConditions.urlContains("message"));
+ keyField.sendKeys(Keys.chord(Keys.DELETE));
+ keyField.setValue(value);
+ return this;
+ }
+
+ public ProduceMessagePage setContentFiled(String value) {
+ Wait().until(ExpectedConditions.urlContains("message"));
+ contentField.sendKeys(Keys.DELETE);
+ contentField.setValue(value);
+ return this;
+ }
+
+ public ProduceMessagePage setHeaderFiled(String value) {
+ headersField.setValue(value);
+ return new ProduceMessagePage();
+ }
+
+ public TopicView submitProduceMessage() {
+ sendBtn.shouldBe(Condition.visible).click();
+ return new TopicView();
+ }
+}
diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/connector/ConnectorUpdateView.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/connector/ConnectorUpdateView.java
index d793cdd03c0..4e7e7b52202 100644
--- a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/connector/ConnectorUpdateView.java
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/connector/ConnectorUpdateView.java
@@ -5,6 +5,7 @@
import com.provectus.kafka.ui.utils.BrowserUtils;
import io.qameta.allure.Step;
import org.openqa.selenium.By;
+import org.openqa.selenium.Keys;
import static com.codeborne.selenide.Selectors.byLinkText;
import static com.codeborne.selenide.Selenide.*;
@@ -33,8 +34,9 @@ public ConnectorUpdateView updateConnectorConfig(String configJson) {
}
@Step("Set connector config JSON")
- public ConnectorsView updConnectorConfig(String configJson) throws InterruptedException {
- contentTextArea.doubleClick();
+ public ConnectorsView updConnectorConfig(String configJson) {
+ $("#config").click();
+ contentTextArea.sendKeys(Keys.LEFT_CONTROL+"a");
contentTextArea.setValue("");
contentTextArea.setValue(String.valueOf(configJson.toCharArray()));
$("#config").click();
diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/schema/SchemaCreateView.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/schema/SchemaCreateView.java
new file mode 100644
index 00000000000..641a08e12aa
--- /dev/null
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/schema/SchemaCreateView.java
@@ -0,0 +1,51 @@
+package com.provectus.kafka.ui.pages.schema;
+
+import com.codeborne.selenide.SelenideElement;
+import com.provectus.kafka.ui.utils.BrowserUtils;
+import org.openqa.selenium.By;
+
+import static com.codeborne.selenide.Selenide.$;
+import static com.codeborne.selenide.Selenide.$x;
+
+public class SchemaCreateView {
+
+ private SelenideElement subjectName = $(By.xpath("//input[@name='subject']"));
+ private SelenideElement schemaField = $(By.xpath("//textarea[@name='schema']"));
+ private SelenideElement submitSchemaButton = $(By.xpath("//button[@type='submit']"));
+
+ public SchemaCreateView selectSchemaTypeFromDropdown(SchemaType schemaType) {
+ $("ul[role='listbox']").click();
+ $x("//li[text()='" + schemaType.getValue() + "']").click();
+ return this;
+ }
+
+ public SchemaView clickSubmit() {
+ BrowserUtils.javaExecutorClick(submitSchemaButton);
+ return new SchemaView();
+ }
+
+ public SchemaCreateView setSubjectName(String name) {
+ subjectName.setValue(name);
+ return this;
+ }
+
+ public SchemaCreateView setSchemaField(String text) {
+ schemaField.setValue(text);
+ return this;
+ }
+
+ public enum SchemaType {
+ AVRO("AVRO"),
+ JSON("JSON"),
+ PROTOBUF("PROTOBUF");
+
+ String value;
+
+ SchemaType(String value) {
+ this.value = value;
+ }
+ public String getValue(){
+ return value;
+ }
+ }
+}
diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/schema/SchemaEditView.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/schema/SchemaEditView.java
new file mode 100644
index 00000000000..87f52d5bbf8
--- /dev/null
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/schema/SchemaEditView.java
@@ -0,0 +1,52 @@
+package com.provectus.kafka.ui.pages.schema;
+
+import com.codeborne.selenide.Condition;
+import com.codeborne.selenide.Selenide;
+import com.codeborne.selenide.SelenideElement;
+import com.provectus.kafka.ui.api.model.CompatibilityLevel;
+import com.provectus.kafka.ui.utils.BrowserUtils;
+import io.qameta.allure.Step;
+import org.openqa.selenium.By;
+import org.openqa.selenium.Keys;
+
+import static com.codeborne.selenide.Selenide.$;
+import static com.codeborne.selenide.Selenide.$x;
+
+public class SchemaEditView {
+
+ SelenideElement newSchemaTextArea = $("#newSchema [wrap]");
+
+
+ public SchemaEditView selectSchemaTypeFromDropdown(SchemaCreateView.SchemaType schemaType) {
+ $x("//ul[@name='schemaType']").click();
+ $x("//li[text()='" + schemaType.getValue() + "']").click();
+ return this;
+ }
+
+ public SchemaEditView selectCompatibilityLevelFromDropdown(CompatibilityLevel.CompatibilityEnum level) {
+ $x("//ul[@name='compatibilityLevel']").click();
+ $x("//li[text()='" + level.getValue() + "']").click();
+ return this;
+ }
+
+ public SchemaView clickSubmit() {
+ BrowserUtils.javaExecutorClick($(By.xpath("//button[@type='submit']")));
+ return new SchemaView();
+ }
+
+ @Step("Set new schema value")
+ public SchemaEditView setNewSchemaValue(String configJson) {
+ $("#newSchema").click();
+ newSchemaTextArea.sendKeys(Keys.CONTROL + "a", Keys.BACK_SPACE);
+ Selenide.executeJavaScript("arguments[0].value = '';", $("#newSchema"));
+ newSchemaTextArea.setValue(configJson);
+ return this;
+ }
+
+
+ public SchemaRegistryList removeSchema() {
+ $(By.xpath("//*[contains(text(),'Remove')]")).click();
+ $(By.xpath("//*[text()='Confirm']")).shouldBe(Condition.visible).click();
+ return new SchemaRegistryList();
+ }
+}
diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/schema/SchemaRegistryList.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/schema/SchemaRegistryList.java
new file mode 100644
index 00000000000..b7fe3f75e34
--- /dev/null
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/schema/SchemaRegistryList.java
@@ -0,0 +1,40 @@
+package com.provectus.kafka.ui.pages.schema;
+
+import com.codeborne.selenide.Condition;
+import com.codeborne.selenide.SelenideElement;
+import com.provectus.kafka.ui.utils.BrowserUtils;
+import io.qameta.allure.Step;
+import lombok.SneakyThrows;
+import org.openqa.selenium.By;
+
+import static com.codeborne.selenide.Selenide.*;
+
+public class SchemaRegistryList {
+
+ private SelenideElement schemaButton = $(By.xpath("//*[contains(text(),'Create Schema')]"));
+
+ public SchemaCreateView clickCreateSchema() {
+ BrowserUtils.javaExecutorClick(schemaButton);
+ return new SchemaCreateView();
+ }
+
+ public SchemaView openSchema(String schemaName) {
+ $(By.xpath("//*[contains(text(),'" + schemaName + "')]")).click();
+ return new SchemaView();
+ }
+
+ @SneakyThrows
+ public SchemaRegistryList isNotVisible(String schemaName) {
+ $x(String.format("//*[contains(text(),'%s')]",schemaName)).shouldNotBe(Condition.visible);
+ return this;
+ }
+
+ @Step
+ public SchemaRegistryList isSchemaVisible(String schemaName) {
+ $$("tbody td>a")
+ .find(Condition.exactText(schemaName))
+ .shouldBe(Condition.visible);
+ return this;
+ }
+}
+
diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/schema/SchemaView.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/schema/SchemaView.java
new file mode 100644
index 00000000000..cce7ecefc4c
--- /dev/null
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/schema/SchemaView.java
@@ -0,0 +1,38 @@
+package com.provectus.kafka.ui.pages.schema;
+
+import com.codeborne.selenide.Condition;
+import com.provectus.kafka.ui.api.model.CompatibilityLevel;
+import com.provectus.kafka.ui.utils.BrowserUtils;
+import io.qameta.allure.Step;
+import org.openqa.selenium.By;
+
+import static com.codeborne.selenide.Selenide.$;
+import static com.codeborne.selenide.Selenide.$x;
+
+public class SchemaView {
+
+ @Step
+ public SchemaView isOnSchemaViewPage() {
+ $("div#schema").shouldBe(Condition.visible);
+ return this;
+ }
+
+ @Step
+ public SchemaView isCompatibility(CompatibilityLevel.CompatibilityEnum compatibility){
+ $x("//div//p[.='" + compatibility.getValue() + "']").shouldBe(Condition.visible);
+ return this;
+ }
+
+ @Step
+ public SchemaEditView openEditSchema(){
+ $x("//button[text()= 'Edit Schema']").click();
+ return new SchemaEditView();
+ }
+
+ public SchemaRegistryList removeSchema() {
+ BrowserUtils.javaExecutorClick($(".dropdown.is-right button"));
+ $(By.xpath("//*[contains(text(),'Remove')]")).click();
+ $(By.xpath("//*[text()='Submit']")).shouldBe(Condition.visible).click();
+ return new SchemaRegistryList();
+ }
+}
diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/topic/TopicCreateEditSettingsView.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/topic/TopicCreateEditSettingsView.java
index 0e850e50722..e2d293632ee 100644
--- a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/topic/TopicCreateEditSettingsView.java
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/topic/TopicCreateEditSettingsView.java
@@ -14,7 +14,7 @@ public class TopicCreateEditSettingsView {
private final SelenideElement timeToRetain = $(By.cssSelector("input#timeToRetain"));
private final SelenideElement maxMessageBytes = $(By.name("maxMessageBytes"));
- public TopicCreateEditSettingsView setTopicName(String topicName){
+ public TopicCreateEditSettingsView setTopicName(String topicName) {
$("input#topicFormName").setValue(topicName);
return this;
}
@@ -68,11 +68,10 @@ public TopicCreateEditSettingsView selectCleanupPolicy(CleanupPolicyValue cleanu
public TopicCreateEditSettingsView selectCleanupPolicy(String cleanupPolicyOptionValue) {
$("ul#topicFormCleanupPolicy").click();
- $x("//li[text()='" + cleanupPolicyOptionValue +"']").click();
+ $x("//li[text()='" + cleanupPolicyOptionValue + "']").click();
return this;
}
-
public TopicCreateEditSettingsView selectRetentionBytes(String visibleValue) {
return selectFromDropDownByVisibleText("retentionBytes", visibleValue);
}
@@ -158,7 +157,7 @@ private static class KafkaUISelectElement {
private SelenideElement selectElement;
public KafkaUISelectElement(String selectElementName) {
- this.selectElement = $("ul[role=listbox][name="+selectElementName+"]");
+ this.selectElement = $("ul[role=listbox][name=" + selectElementName + "]");
}
public KafkaUISelectElement(SelenideElement selectElement) {
diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/topic/TopicView.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/topic/TopicView.java
index 7911e0d240f..25d750387ee 100644
--- a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/topic/TopicView.java
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/pages/topic/TopicView.java
@@ -5,9 +5,12 @@
import com.codeborne.selenide.SelenideElement;
import com.provectus.kafka.ui.base.TestConfiguration;
import com.provectus.kafka.ui.extensions.WaitUtils;
+import com.provectus.kafka.ui.pages.ProduceMessagePage;
+import com.provectus.kafka.ui.utils.BrowserUtils;
import io.qameta.allure.Step;
import lombok.SneakyThrows;
import lombok.experimental.ExtensionMethod;
+import org.openqa.selenium.By;
import static com.codeborne.selenide.Selectors.byLinkText;
import static com.codeborne.selenide.Selenide.*;
@@ -16,7 +19,7 @@
public class TopicView {
private static final String path = "/ui/clusters/%s/topics/%s";
- private final SelenideElement dotMenuHeader = $(".fPWftu.sc-fHYyUA > .dropdown.is-right");
+ private final SelenideElement dotMenuHeader = $$(".dropdown.is-right button").first();
private final SelenideElement dotMenuFooter = $$(".dropdown.is-right button").get(1);
@Step
@@ -27,30 +30,47 @@ public TopicView goTo(String cluster, String topic) {
@Step
public TopicView isOnTopicViewPage() {
- $("nav[role=navigation] a.is-active.is-primary").shouldBe(Condition.visible);
+ $(By.linkText("Overview")).shouldBe(Condition.visible);
return this;
}
@SneakyThrows
public TopicCreateEditSettingsView openEditSettings() {
- dotMenuHeader.click();
- $x("//a[text()= '" + DotMenuHeaderItems.EDIT_SETTINGS.getValue() +"']").click();
+ BrowserUtils.javaExecutorClick(dotMenuHeader);
+ $x("//a[text()= '" + DotMenuHeaderItems.EDIT_SETTINGS.getValue() + "']").click();
return new TopicCreateEditSettingsView();
}
+ @Step
+ public TopicView openTopicMenu(TopicMenu menu) {
+ $(By.linkText(menu.getValue())).shouldBe(Condition.visible).click();
+ return this;
+ }
@SneakyThrows
public TopicsList deleteTopic() {
- dotMenuHeader.click();
+ BrowserUtils.javaExecutorClick(dotMenuHeader);
$("#dropdown-menu").$(byLinkText(DotMenuHeaderItems.REMOVE_TOPIC.getValue())).click();
$$("div[role=\"dialog\"] button").find(Condition.exactText("Submit")).click();
return new TopicsList();
}
+ @SneakyThrows
+ public ProduceMessagePage clickOnButton(String buttonName) {
+ BrowserUtils.javaExecutorClick($(By.xpath("//div//button[text()='%s']".formatted(buttonName))));
+ return new ProduceMessagePage();
+ }
+
+ public boolean isKeyMessageVisible(String keyMessage) {
+ return keyMessage.equals($("td[title]").getText());
+ }
+
+ public boolean isContentMessageVisible(String contentMessage) {
+ return contentMessage.equals($(".bPpPJI.sc-gkdBiK").getText());
+ }
+
private enum DotMenuHeaderItems {
- EDIT_SETTINGS("Edit settings"),
- CLEAR_MESSAGES("Clear messages"),
- REMOVE_TOPIC("Remove topic");
+ EDIT_SETTINGS("Edit settings"), CLEAR_MESSAGES("Clear messages"), REMOVE_TOPIC("Remove topic");
private String value;
@@ -64,9 +84,26 @@ public String getValue() {
@Override
public String toString() {
- return "DotMenuHeaderItems{" +
- "value='" + value + '\'' +
- '}';
+ return "DotMenuHeaderItems{" + "value='" + value + '\'' + '}';
+ }
+ }
+
+ public enum TopicMenu {
+ OVERVIEW("Overview"), MESSAGES("Messages"), CONSUMERS("Consumers"), SETTINGS("Settings");
+
+ private String value;
+
+ TopicMenu(String value) {
+ this.value = value;
+ }
+
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public String toString() {
+ return "TopicMenu{" + "value='" + value + '\'' + '}';
}
}
}
\ No newline at end of file
diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/tests/ConnectorsTests.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/tests/ConnectorsTests.java
index 3731b53a120..da850acb39d 100644
--- a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/tests/ConnectorsTests.java
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/tests/ConnectorsTests.java
@@ -72,7 +72,6 @@ public void createConnector() {
.connectorIsVisibleInList(SINK_CONNECTOR, TOPIC_FOR_CONNECTOR);
}
- //disable test due 500 error during create connector via api
@SneakyThrows
@DisplayName("should update a connector")
@Test
@@ -83,7 +82,8 @@ public void updateConnector() {
pages.connectorsView.connectorIsVisibleOnOverview();
pages.connectorsView.openEditConfig()
.updConnectorConfig(FileUtils.getResourceAsString("config_for_update_connector.json"));
- pages.openConnectorsList(LOCAL_CLUSTER).connectorIsVisibleInList(CONNECTOR_FOR_UPDATE, TOPIC_FOR_UPDATE_CONNECTOR);
+ pages.openConnectorsList(LOCAL_CLUSTER)
+ .connectorIsVisibleInList(CONNECTOR_FOR_UPDATE, TOPIC_FOR_UPDATE_CONNECTOR);
}
@SneakyThrows
diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/tests/SchemasTests.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/tests/SchemasTests.java
new file mode 100644
index 00000000000..c69b83825d0
--- /dev/null
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/tests/SchemasTests.java
@@ -0,0 +1,161 @@
+package com.provectus.kafka.ui.tests;
+
+import com.provectus.kafka.ui.api.model.CompatibilityLevel;
+import com.provectus.kafka.ui.api.model.SchemaType;
+import com.provectus.kafka.ui.base.BaseTest;
+import com.provectus.kafka.ui.helpers.Helpers;
+import com.provectus.kafka.ui.pages.MainPage;
+import com.provectus.kafka.ui.pages.schema.SchemaCreateView;
+import lombok.SneakyThrows;
+import org.junit.jupiter.api.*;
+
+import static org.apache.kafka.common.utils.Utils.readFileAsString;
+
+@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
+public class SchemasTests extends BaseTest {
+
+ public static final String SECOND_LOCAL = "secondLocal";
+ public static final String SCHEMA_AVRO_CREATE = "avro_schema";
+ public static final String SCHEMA_JSON_CREATE = "json_schema";
+ public static final String SCHEMA_PROTOBUF_CREATE = "protobuf_schema";
+ public static final String SCHEMA_AVRO_API_UPDATE = "avro_schema_for_update_api";
+ public static final String SCHEMA_AVRO_API = "avro_schema_api";
+ public static final String SCHEMA_JSON_API = "json_schema_api";
+ public static final String SCHEMA_PROTOBUF_API = "protobuf_schema_api";
+ private static final String PATH_AVRO_VALUE = System.getProperty("user.dir") + "/src/test/resources/schema_avro_value.json";
+ private static final String PATH_AVRO_FOR_UPDATE = System.getProperty("user.dir") + "/src/test/resources/schema_avro_for_update.json";
+ private static final String PATH_PROTOBUF_VALUE = System.getProperty("user.dir") + "/src/test/resources/schema_protobuf_value.txt";
+ private static final String PATH_JSON_VALUE = System.getProperty("user.dir") + "/src/test/resources/schema_Json_Value.json";
+
+ @BeforeAll
+ @SneakyThrows
+ public static void beforeAll() {
+ Helpers.INSTANCE.apiHelper.createSchema(SECOND_LOCAL, SCHEMA_AVRO_API_UPDATE, SchemaType.AVRO, readFileAsString(PATH_AVRO_VALUE));
+ Helpers.INSTANCE.apiHelper.createSchema(SECOND_LOCAL, SCHEMA_AVRO_API, SchemaType.AVRO, readFileAsString(PATH_AVRO_VALUE));
+ Helpers.INSTANCE.apiHelper.createSchema(SECOND_LOCAL, SCHEMA_JSON_API, SchemaType.JSON, readFileAsString(PATH_JSON_VALUE));
+ Helpers.INSTANCE.apiHelper.createSchema(SECOND_LOCAL, SCHEMA_PROTOBUF_API, SchemaType.PROTOBUF, readFileAsString(PATH_PROTOBUF_VALUE));
+ }
+
+ @AfterAll
+ @SneakyThrows
+ public static void afterAll() {
+ Helpers.INSTANCE.apiHelper.deleteSchema(SECOND_LOCAL, SCHEMA_AVRO_CREATE);
+ Helpers.INSTANCE.apiHelper.deleteSchema(SECOND_LOCAL, SCHEMA_JSON_CREATE);
+ Helpers.INSTANCE.apiHelper.deleteSchema(SECOND_LOCAL, SCHEMA_PROTOBUF_CREATE);
+ Helpers.INSTANCE.apiHelper.deleteSchema(SECOND_LOCAL, SCHEMA_AVRO_API_UPDATE);
+ Helpers.INSTANCE.apiHelper.deleteSchema(SECOND_LOCAL, SCHEMA_AVRO_API);
+ Helpers.INSTANCE.apiHelper.deleteSchema(SECOND_LOCAL, SCHEMA_JSON_API);
+ Helpers.INSTANCE.apiHelper.deleteSchema(SECOND_LOCAL, SCHEMA_PROTOBUF_API);
+
+ }
+
+ @SneakyThrows
+ @DisplayName("should create AVRO schema")
+ @Test
+ @Order(1)
+ void createSchemaAvro() {
+ pages.openMainPage()
+ .goToSideMenu(SECOND_LOCAL, MainPage.SideMenuOptions.SCHEMA_REGISTRY);
+ pages.schemaRegistry.clickCreateSchema()
+ .setSubjectName(SCHEMA_AVRO_CREATE)
+ .setSchemaField(readFileAsString(PATH_AVRO_VALUE))
+ .selectSchemaTypeFromDropdown(SchemaCreateView.SchemaType.AVRO)
+ .clickSubmit()
+ .isOnSchemaViewPage();
+ pages.mainPage
+ .goToSideMenu(SECOND_LOCAL, MainPage.SideMenuOptions.SCHEMA_REGISTRY);
+ pages.schemaRegistry.isSchemaVisible(SCHEMA_AVRO_CREATE);
+ }
+
+ @SneakyThrows
+ @DisplayName("should update AVRO schema")
+ @Test
+ @Order(2)
+ void updateSchemaAvro() {
+ pages.openMainPage()
+ .goToSideMenu(SECOND_LOCAL, MainPage.SideMenuOptions.SCHEMA_REGISTRY);
+ pages.schemaRegistry.openSchema(SCHEMA_AVRO_API_UPDATE)
+ .isOnSchemaViewPage()
+ .openEditSchema()
+ .selectCompatibilityLevelFromDropdown(CompatibilityLevel.CompatibilityEnum.NONE)
+ .setNewSchemaValue(readFileAsString(PATH_AVRO_FOR_UPDATE))
+ .clickSubmit()
+ .isOnSchemaViewPage()
+ .isCompatibility(CompatibilityLevel.CompatibilityEnum.NONE);
+ }
+
+ @SneakyThrows
+ @DisplayName("should delete AVRO schema")
+ @Test
+ @Order(3)
+ void deleteSchemaAvro() {
+ pages.openMainPage()
+ .goToSideMenu(SECOND_LOCAL, MainPage.SideMenuOptions.SCHEMA_REGISTRY);
+ pages.schemaRegistry.openSchema(SCHEMA_AVRO_API)
+ .isOnSchemaViewPage()
+ .removeSchema()
+ .isNotVisible(SCHEMA_AVRO_API);
+ }
+
+ @SneakyThrows
+ @DisplayName("should create JSON schema")
+ @Test
+ @Order(4)
+ void createSchemaJson() {
+ pages.openMainPage()
+ .goToSideMenu(SECOND_LOCAL, MainPage.SideMenuOptions.SCHEMA_REGISTRY);
+ pages.schemaRegistry.clickCreateSchema()
+ .setSubjectName(SCHEMA_JSON_CREATE)
+ .setSchemaField(readFileAsString(PATH_JSON_VALUE))
+ .selectSchemaTypeFromDropdown(SchemaCreateView.SchemaType.JSON)
+ .clickSubmit()
+ .isOnSchemaViewPage();
+ pages.mainPage
+ .goToSideMenu(SECOND_LOCAL, MainPage.SideMenuOptions.SCHEMA_REGISTRY);
+ pages.schemaRegistry.isSchemaVisible(SCHEMA_JSON_CREATE);
+ }
+
+ @SneakyThrows
+ @DisplayName("should delete JSON schema")
+ @Test
+ @Order(5)
+ void deleteSchemaJson() {
+ pages.openMainPage()
+ .goToSideMenu(SECOND_LOCAL, MainPage.SideMenuOptions.SCHEMA_REGISTRY);
+ pages.schemaRegistry.openSchema(SCHEMA_JSON_API)
+ .isOnSchemaViewPage()
+ .removeSchema()
+ .isNotVisible(SCHEMA_JSON_API);
+ }
+
+ @SneakyThrows
+ @DisplayName("should create PROTOBUF schema")
+ @Test
+ @Order(6)
+ void createSchemaProtobuf() {
+ pages.openMainPage()
+ .goToSideMenu(SECOND_LOCAL, MainPage.SideMenuOptions.SCHEMA_REGISTRY);
+ pages.schemaRegistry.clickCreateSchema()
+ .setSubjectName(SCHEMA_PROTOBUF_CREATE)
+ .setSchemaField(readFileAsString(PATH_PROTOBUF_VALUE))
+ .selectSchemaTypeFromDropdown(SchemaCreateView.SchemaType.PROTOBUF)
+ .clickSubmit()
+ .isOnSchemaViewPage();
+ pages.mainPage
+ .goToSideMenu(SECOND_LOCAL, MainPage.SideMenuOptions.SCHEMA_REGISTRY);
+ pages.schemaRegistry.isSchemaVisible(SCHEMA_PROTOBUF_CREATE);
+ }
+
+ @SneakyThrows
+ @DisplayName("should delete PROTOBUF schema")
+ @Test
+ @Order(7)
+ void deleteSchemaProtobuf() {
+ pages.openMainPage()
+ .goToSideMenu(SECOND_LOCAL, MainPage.SideMenuOptions.SCHEMA_REGISTRY);
+ pages.schemaRegistry.openSchema(SCHEMA_PROTOBUF_API)
+ .isOnSchemaViewPage()
+ .removeSchema()
+ .isNotVisible(SCHEMA_PROTOBUF_API);
+ }
+}
diff --git a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/tests/TopicTests.java b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/tests/TopicTests.java
index 00d1c4fb34e..fc2c8ccfaa3 100644
--- a/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/tests/TopicTests.java
+++ b/kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/tests/TopicTests.java
@@ -3,10 +3,14 @@
import com.provectus.kafka.ui.base.BaseTest;
import com.provectus.kafka.ui.helpers.Helpers;
import com.provectus.kafka.ui.pages.MainPage;
+import com.provectus.kafka.ui.pages.topic.TopicView;
import io.qameta.allure.Issue;
import lombok.SneakyThrows;
import org.junit.jupiter.api.*;
+import static org.apache.kafka.common.utils.Utils.readFileAsString;
+
+
public class TopicTests extends BaseTest {
public static final String NEW_TOPIC = "new-topic";
@@ -17,6 +21,11 @@ public class TopicTests extends BaseTest {
public static final String UPDATED_TIME_TO_RETAIN_VALUE = "604800001";
public static final String UPDATED_MAX_SIZE_ON_DISK = "20 GB";
public static final String UPDATED_MAX_MESSAGE_BYTES = "1000020";
+ private static final String KEY_TO_PRODUCE_MESSAGE = System.getProperty("user.dir") + "/src/test/resources/producedkey.txt";
+ private static final String CONTENT_TO_PRODUCE_MESSAGE = System.getProperty("user.dir") + "/src/test/resources/testData.txt";
+
+
+
@BeforeAll
@SneakyThrows
@@ -61,6 +70,7 @@ public void updateTopic() {
pages.openTopicsList(SECOND_LOCAL)
.isOnPage();
pages.openTopicView(SECOND_LOCAL, TOPIC_TO_UPDATE)
+ .isOnTopicViewPage()
.openEditSettings()
.selectCleanupPolicy(COMPACT_POLICY_VALUE)
.setMinInsyncReplicas(10)
@@ -93,4 +103,23 @@ public void deleteTopic() {
.isOnPage()
.isTopicNotVisible(TOPIC_TO_DELETE);
}
+
+ @Disabled("Due to issue https://github.com/provectus/kafka-ui/issues/2140 ignore this test")
+ @Issue("2140")
+ @SneakyThrows
+ @DisplayName("produce message")
+ @Test
+ void produceMessage(){
+ pages.openTopicsList(SECOND_LOCAL)
+ .isOnPage()
+ .openTopic(TOPIC_TO_UPDATE)
+ .isOnTopicViewPage()
+ .openTopicMenu(TopicView.TopicMenu.MESSAGES)
+ .clickOnButton("Produce Message")
+ .setContentFiled(readFileAsString(CONTENT_TO_PRODUCE_MESSAGE))
+ .setKeyField(readFileAsString(KEY_TO_PRODUCE_MESSAGE))
+ .submitProduceMessage();
+ Assertions.assertTrue(pages.topicView.isKeyMessageVisible(readFileAsString(KEY_TO_PRODUCE_MESSAGE)));
+ Assertions.assertTrue(pages.topicView.isContentMessageVisible(readFileAsString(CONTENT_TO_PRODUCE_MESSAGE)));
+ }
}
diff --git a/kafka-ui-e2e-checks/src/test/resources/schema_Json_Value.json b/kafka-ui-e2e-checks/src/test/resources/schema_Json_Value.json
new file mode 100644
index 00000000000..5c5b7aba84b
--- /dev/null
+++ b/kafka-ui-e2e-checks/src/test/resources/schema_Json_Value.json
@@ -0,0 +1,7 @@
+{
+ "connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector",
+ "connection.url": "jdbc:postgresql://postgres-db:5432/test",
+ "connection.user": "dev_user",
+ "connection.password": "12345",
+ "topics": "topic_for_connector"
+}
\ No newline at end of file
diff --git a/kafka-ui-e2e-checks/src/test/resources/schema_avro_for_update.json b/kafka-ui-e2e-checks/src/test/resources/schema_avro_for_update.json
new file mode 100644
index 00000000000..2979d69a9bf
--- /dev/null
+++ b/kafka-ui-e2e-checks/src/test/resources/schema_avro_for_update.json
@@ -0,0 +1,24 @@
+{
+ "type": "record",
+ "name": "Message",
+ "namespace": "com.provectus.kafka",
+ "fields": [
+ {
+ "name": "text",
+ "type": [
+ "null",
+ "string"
+ ],
+ "default": null
+ },
+ {
+ "name": "value",
+ "type": [
+ "null",
+ "string",
+ "long"
+ ],
+ "default": null
+ }
+ ]
+}
\ No newline at end of file
diff --git a/kafka-ui-e2e-checks/src/test/resources/schema_avro_value.json b/kafka-ui-e2e-checks/src/test/resources/schema_avro_value.json
new file mode 100644
index 00000000000..1bc30f6d1cf
--- /dev/null
+++ b/kafka-ui-e2e-checks/src/test/resources/schema_avro_value.json
@@ -0,0 +1,15 @@
+{
+ "type": "record",
+ "name": "Student",
+ "namespace": "DataFlair",
+ "fields": [
+ {
+ "name": "Name",
+ "type": "string"
+ },
+ {
+ "name": "Age",
+ "type": "int"
+ }
+ ]
+}
\ No newline at end of file
| test | train | 2022-06-09T13:50:43 | "2021-08-26T14:20:32Z" | antipova926 | train |
|
provectus/kafka-ui/1035_1041 | provectus/kafka-ui | provectus/kafka-ui/1035 | provectus/kafka-ui/1041 | [
"connected",
"timestamp(timedelta=0.0, similarity=0.8678493527938854)"
] | c986bc178cd09f0d332508ae5cd132501280c17e | 45a2fc2b472c64eb69565096b92aea41b0a7b456 | [] | [] | "2021-11-01T14:34:14Z" | [
"type/bug",
"scope/frontend",
"status/accepted"
] | Topic search issue (when pagination enabled) | **Describe the bug**
Go to Topics -> select page 2 -> insert any topic name in search string (to filter only 1 topic) -> table will be empty.
This is because after the search we are still passing the page=2 query param -> we need to reset the 'page' query param to 1 whenever the search criteria are edited
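A minimal sketch of the idea (editorial illustration only; the helper name and the `q` query param are assumptions, not the app's actual code):

```typescript
// Build the topics list URL for a new search, always resetting pagination to page 1,
// so a stale ?page=2 from the previous view cannot empty the filtered result set.
function buildSearchUrl(pathname: string, search: string, perPage: number): string {
  return `${pathname}?q=${encodeURIComponent(search)}&page=1&perPage=${perPage}`;
}

// Example: buildSearchUrl('/ui/clusters/local/topics', 'my-topic', 25)
// -> '/ui/clusters/local/topics?q=my-topic&page=1&perPage=25'
```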
**Set up**
(How do you run the app?)
**Steps to Reproduce**
Steps to reproduce the behavior:
see desc
**Expected behavior**
(A clear and concise description of what you expected to happen)
**Screenshots**
(If applicable, add screenshots to help explain your problem)
**Additional context**
(Add any other context about the problem here
<img width="768" alt="Screenshot 2021-10-31 at 17 38 00" src="https://user-images.githubusercontent.com/702205/139588763-9d41e6b1-70ef-4d6b-a58c-809c75bd8884.png">
)
| [
"kafka-ui-react-app/src/components/Topics/List/List.tsx",
"kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap"
] | [
"kafka-ui-react-app/src/components/Topics/List/List.tsx",
"kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/List/List.tsx b/kafka-ui-react-app/src/components/Topics/List/List.tsx
index 4ba1c0e8160..4fcd0285fb1 100644
--- a/kafka-ui-react-app/src/components/Topics/List/List.tsx
+++ b/kafka-ui-react-app/src/components/Topics/List/List.tsx
@@ -121,6 +121,13 @@ const List: React.FC<TopicsListProps> = ({
closeConfirmationModal();
clearSelectedTopics();
}, [clusterName, selectedTopics]);
+ const searchHandler = React.useCallback(
+ (searchString: string) => {
+ setTopicsSearch(searchString);
+ history.push(`${pathname}?page=1&perPage=${perPage || PER_PAGE}`);
+ },
+ [search, pathname, perPage]
+ );
return (
<div className="section">
@@ -142,7 +149,7 @@ const List: React.FC<TopicsListProps> = ({
</div>
<div className="column">
<Search
- handleSearch={setTopicsSearch}
+ handleSearch={searchHandler}
placeholder="Search by Topic Name"
value={search}
/>
diff --git a/kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap b/kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap
index 59f2f1930e3..d2bb3c690a2 100644
--- a/kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap
+++ b/kafka-ui-react-app/src/components/Topics/List/__tests__/__snapshots__/List.spec.tsx.snap
@@ -111,7 +111,7 @@ exports[`List when it does not have readonly flag matches the snapshot 1`] = `
className="column"
>
<Search
- handleSearch={[MockFunction]}
+ handleSearch={[Function]}
placeholder="Search by Topic Name"
value=""
>
| null | train | train | 2021-10-30T12:42:08 | "2021-10-31T14:42:12Z" | iliax | train |
provectus/kafka-ui/1058_1061 | provectus/kafka-ui | provectus/kafka-ui/1058 | provectus/kafka-ui/1061 | [
"timestamp(timedelta=0.0, similarity=0.873880692284669)",
"connected"
] | 289791ad85535e2d4929e1e1a7877eb5e8809661 | 3c9eb824ca6ea51b6fe19ef6feb68af67544a3ff | [
"Hey, sounds like a great idea. Would be nice to see that!"
] | [] | "2021-11-08T15:53:36Z" | [
"type/enhancement",
"status/accepted",
"scope/infrastructure"
] | Request to add a github action to the repo | ### Is your proposal related to a problem?
<!--
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
-->
(Write your answer here.)
No, rather related to adding a new feature
### Describe the solution you'd like
<!--
Provide a clear and concise description of what you want to happen.
-->
(Describe your proposed solution here.)
I see that the repo is quite engaging.
I would love to see a GitHub Action welcoming all the new contributors on their issues and PRs.
If you love the idea, assign me.
### Describe alternatives you've considered
<!--
Let us know about other solutions you've tried or researched.
-->
(Write your answer here.)
### Additional context
<!--
Is there anything else you can add about the proposal?
You might want to link to related issues here, if you haven't already.
-->
(Write your answer here.)
If you love the idea, assign me
| [] | [
".github/workflows/welcome-first-time-contributors.yml"
] | [] | diff --git a/.github/workflows/welcome-first-time-contributors.yml b/.github/workflows/welcome-first-time-contributors.yml
new file mode 100644
index 00000000000..f0caa2053e1
--- /dev/null
+++ b/.github/workflows/welcome-first-time-contributors.yml
@@ -0,0 +1,30 @@
+name: Welcome first time contributors
+
+on:
+ pull_request_target:
+ types:
+ - opened
+ issues:
+ types:
+ - opened
+
+jobs:
+ welcome:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/first-interaction@v1
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ issue-message: |
+ Hello there ${{ github.actor }}! 👋
+
+ Thank you and congratulations 🎉 for opening your very first issue in this project! 💖
+
+ In case you want to claim this issue, please comment down below! We will try to get back to you as soon as we can. 👀
+
+ pr-message: |
+ Hello there ${{ github.actor }}! 👋
+
+ Thank you and congrats 🎉 for opening your first PR on this project! ✨ 💖
+
+ We will try to review it soon!
| null | train | train | 2021-11-01T18:32:05 | "2021-11-08T15:38:32Z" | Jassi10000-zz | train |
provectus/kafka-ui/1021_1063 | provectus/kafka-ui | provectus/kafka-ui/1021 | provectus/kafka-ui/1063 | [
"timestamp(timedelta=0.0, similarity=0.9451887696750566)",
"connected"
] | 6df2d0b602f416b5b90d306a8cc7125e37cb08ef | 5db2c17994b4d8835aa6c5b52e9aaebf664b5903 | [] | [
"It's a bad idea to have provider-dependent classes. Could we just add a configurable URL for logout?",
"I don't like the idea to have such configs.",
"Not really, this one is required since cognito doesn't work like others oauth providers, this custom logic is required. We have discussed this previously within the same feature in ODD.",
"The same as in ODD as well, though. What do you suggest? We don't have any other markers to identify if the user's using cognito or not besides this introduced variable.",
"done",
"looks dirty",
"extracted a props bean",
"done"
] | "2021-11-08T19:55:13Z" | [
"type/enhancement",
"scope/backend",
"status/accepted"
] | Cognito logout | [
"documentation/compose/DOCKER_COMPOSE.md",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/BasicAuthSecurityConfig.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/DisabledAuthSecurityConfig.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/LdapSecurityConfig.java"
] | [
"documentation/compose/DOCKER_COMPOSE.md",
"documentation/compose/oauth-cognito.yaml",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CognitoOidcLogoutSuccessHandler.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/BasicAuthSecurityConfig.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/CognitoOAuthSecurityConfig.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/DisabledAuthSecurityConfig.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/LdapSecurityConfig.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/props/CognitoProperties.java"
] | [] | diff --git a/documentation/compose/DOCKER_COMPOSE.md b/documentation/compose/DOCKER_COMPOSE.md
index c5fc1f1476c..d019427d747 100644
--- a/documentation/compose/DOCKER_COMPOSE.md
+++ b/documentation/compose/DOCKER_COMPOSE.md
@@ -11,3 +11,4 @@
9. [kafka-ui-reverse-proxy.yaml](./kafka-ui-reverse-proxy.yaml) - An example for using the app behind a proxy (like nginx).
10. [kafka-ui-sasl.yaml](./kafka-ui-sasl.yaml) - SASL auth for Kafka.
11. [kafka-ui-traefik-proxy.yaml](./kafka-ui-traefik-proxy.yaml) - Traefik specific proxy configuration.
+12. [oauth-cognito.yaml](./oauth-cognito.yaml) - OAuth2 with Cognito
diff --git a/documentation/compose/oauth-cognito.yaml b/documentation/compose/oauth-cognito.yaml
new file mode 100644
index 00000000000..a905c9c2ee5
--- /dev/null
+++ b/documentation/compose/oauth-cognito.yaml
@@ -0,0 +1,22 @@
+---
+version: '3.4'
+services:
+
+ kafka-ui:
+ container_name: kafka-ui
+ image: provectuslabs/kafka-ui:local
+ ports:
+ - 8080:8080
+ depends_on:
+ - kafka0 # OMITTED, TAKE UP AN EXAMPLE FROM OTHER COMPOSE FILES
+ environment:
+ KAFKA_CLUSTERS_0_NAME: local
+ KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SSL
+ KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
+ AUTH_TYPE: OAUTH2_COGNITO
+ AUTH_COGNITO_ISSUER_URI: "https://cognito-idp.eu-central-1.amazonaws.com/eu-central-xxxxxx"
+ AUTH_COGNITO_CLIENT_ID: ""
+ AUTH_COGNITO_CLIENT_SECRET: ""
+ AUTH_COGNITO_SCOPE: "openid"
+ AUTH_COGNITO_USER_NAME_ATTRIBUTE: "username"
+ AUTH_COGNITO_LOGOUT_URI: "https://<domain>.auth.eu-central-1.amazoncognito.com/logout"
\ No newline at end of file
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CognitoOidcLogoutSuccessHandler.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CognitoOidcLogoutSuccessHandler.java
new file mode 100644
index 00000000000..52e366f3abf
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CognitoOidcLogoutSuccessHandler.java
@@ -0,0 +1,53 @@
+package com.provectus.kafka.ui.config;
+
+import java.net.URI;
+import java.nio.charset.StandardCharsets;
+import lombok.RequiredArgsConstructor;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.server.reactive.ServerHttpResponse;
+import org.springframework.security.core.Authentication;
+import org.springframework.security.web.server.WebFilterExchange;
+import org.springframework.security.web.server.authentication.logout.ServerLogoutSuccessHandler;
+import org.springframework.security.web.util.UrlUtils;
+import org.springframework.web.server.WebSession;
+import org.springframework.web.util.UriComponents;
+import org.springframework.web.util.UriComponentsBuilder;
+import reactor.core.publisher.Mono;
+
+@RequiredArgsConstructor
+public class CognitoOidcLogoutSuccessHandler implements ServerLogoutSuccessHandler {
+
+ private final String logoutUrl;
+ private final String clientId;
+
+ @Override
+ public Mono<Void> onLogoutSuccess(final WebFilterExchange exchange, final Authentication authentication) {
+ final ServerHttpResponse response = exchange.getExchange().getResponse();
+ response.setStatusCode(HttpStatus.FOUND);
+
+ final var requestUri = exchange.getExchange().getRequest().getURI();
+
+ final var fullUrl = UrlUtils.buildFullRequestUrl(requestUri.getScheme(),
+ requestUri.getHost(), requestUri.getPort(),
+ requestUri.getPath(), requestUri.getQuery());
+
+ final UriComponents baseUrl = UriComponentsBuilder
+ .fromHttpUrl(fullUrl)
+ .replacePath("/")
+ .replaceQuery(null)
+ .fragment(null)
+ .build();
+
+ final var uri = UriComponentsBuilder
+ .fromUri(URI.create(logoutUrl))
+ .queryParam("client_id", clientId)
+ .queryParam("logout_uri", baseUrl)
+ .encode(StandardCharsets.UTF_8)
+ .build()
+ .toUri();
+
+ response.getHeaders().setLocation(uri);
+ return exchange.getExchange().getSession().flatMap(WebSession::invalidate);
+ }
+}
+
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/BasicAuthSecurityConfig.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/BasicAuthSecurityConfig.java
index 6bd56a877fe..ae98dfdd7a3 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/BasicAuthSecurityConfig.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/BasicAuthSecurityConfig.java
@@ -2,7 +2,7 @@
import com.provectus.kafka.ui.util.EmptyRedirectStrategy;
import java.net.URI;
-import lombok.extern.log4j.Log4j2;
+import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
@@ -17,7 +17,7 @@
@Configuration
@EnableWebFluxSecurity
@ConditionalOnProperty(value = "auth.type", havingValue = "LOGIN_FORM")
-@Log4j2
+@Slf4j
public class BasicAuthSecurityConfig extends AbstractAuthSecurityConfig {
public static final String LOGIN_URL = "/auth";
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/CognitoOAuthSecurityConfig.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/CognitoOAuthSecurityConfig.java
new file mode 100644
index 00000000000..9db66e142d7
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/CognitoOAuthSecurityConfig.java
@@ -0,0 +1,80 @@
+package com.provectus.kafka.ui.config.auth;
+
+import com.provectus.kafka.ui.config.CognitoOidcLogoutSuccessHandler;
+import com.provectus.kafka.ui.config.auth.props.CognitoProperties;
+import java.util.Optional;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
+import org.springframework.security.config.web.server.ServerHttpSecurity;
+import org.springframework.security.oauth2.client.registration.ClientRegistration;
+import org.springframework.security.oauth2.client.registration.ClientRegistrations;
+import org.springframework.security.oauth2.client.registration.InMemoryReactiveClientRegistrationRepository;
+import org.springframework.security.web.server.SecurityWebFilterChain;
+import org.springframework.security.web.server.authentication.logout.ServerLogoutSuccessHandler;
+
+@Configuration
+@EnableWebFluxSecurity
+@ConditionalOnProperty(value = "auth.type", havingValue = "OAUTH2_COGNITO")
+@RequiredArgsConstructor
+@Slf4j
+public class CognitoOAuthSecurityConfig extends AbstractAuthSecurityConfig {
+
+ private static final String COGNITO = "cognito";
+
+ @Bean
+ public SecurityWebFilterChain configure(ServerHttpSecurity http, CognitoProperties props) {
+ log.info("Configuring Cognito OAUTH2 authentication.");
+
+ String clientId = props.getClientId();
+ String logoutUrl = props.getLogoutUri();
+
+ final ServerLogoutSuccessHandler logoutHandler = new CognitoOidcLogoutSuccessHandler(logoutUrl, clientId);
+
+ return http.authorizeExchange()
+ .pathMatchers(AUTH_WHITELIST)
+ .permitAll()
+ .anyExchange()
+ .authenticated()
+
+ .and()
+ .oauth2Login()
+
+ .and()
+ .oauth2Client()
+
+ .and()
+ .logout()
+ .logoutSuccessHandler(logoutHandler)
+
+ .and()
+ .csrf().disable()
+ .build();
+ }
+
+ @Bean
+ public InMemoryReactiveClientRegistrationRepository clientRegistrationRepository(CognitoProperties props) {
+ ClientRegistration.Builder builder = ClientRegistrations
+ .fromIssuerLocation(props.getIssuerUri())
+ .registrationId(COGNITO);
+
+ builder.clientId(props.getClientId());
+ builder.clientSecret(props.getClientSecret());
+
+ Optional.ofNullable(props.getScope()).ifPresent(builder::scope);
+ Optional.ofNullable(props.getUserNameAttribute()).ifPresent(builder::userNameAttributeName);
+
+ return new InMemoryReactiveClientRegistrationRepository(builder.build());
+ }
+
+ @Bean
+ @ConfigurationProperties("auth.cognito")
+ public CognitoProperties cognitoProperties() {
+ return new CognitoProperties();
+ }
+
+}
\ No newline at end of file
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/DisabledAuthSecurityConfig.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/DisabledAuthSecurityConfig.java
index d30aa4631bd..4b1cc9a9330 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/DisabledAuthSecurityConfig.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/DisabledAuthSecurityConfig.java
@@ -1,6 +1,6 @@
package com.provectus.kafka.ui.config.auth;
-import lombok.extern.log4j.Log4j2;
+import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.ApplicationContext;
@@ -14,7 +14,7 @@
@Configuration
@EnableWebFluxSecurity
@ConditionalOnProperty(value = "auth.type", havingValue = "DISABLED")
-@Log4j2
+@Slf4j
public class DisabledAuthSecurityConfig extends AbstractAuthSecurityConfig {
@Bean
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/LdapSecurityConfig.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/LdapSecurityConfig.java
index 9681c36bc9f..62fdde4bf02 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/LdapSecurityConfig.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/LdapSecurityConfig.java
@@ -1,7 +1,7 @@
package com.provectus.kafka.ui.config.auth;
import java.util.List;
-import lombok.extern.log4j.Log4j2;
+import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
@@ -25,7 +25,7 @@
@Configuration
@EnableWebFluxSecurity
@ConditionalOnProperty(value = "auth.type", havingValue = "LDAP")
-@Log4j2
+@Slf4j
public class LdapSecurityConfig extends AbstractAuthSecurityConfig {
@Value("${spring.ldap.urls}")
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/props/CognitoProperties.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/props/CognitoProperties.java
new file mode 100644
index 00000000000..4eb4508b97e
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/props/CognitoProperties.java
@@ -0,0 +1,44 @@
+package com.provectus.kafka.ui.config.auth.props;
+
+import lombok.Data;
+import lombok.ToString;
+import org.jetbrains.annotations.Nullable;
+
+@Data
+@ToString(exclude = "clientSecret")
+public class CognitoProperties {
+
+ String clientId;
+ String logoutUri;
+ String issuerUri;
+ String clientSecret;
+ @Nullable
+ String scope;
+ @Nullable
+ String userNameAttribute;
+
+ public String getClientId() {
+ return clientId;
+ }
+
+ public String getLogoutUri() {
+ return logoutUri;
+ }
+
+ public String getIssuerUri() {
+ return issuerUri;
+ }
+
+ public String getClientSecret() {
+ return clientSecret;
+ }
+
+ public @Nullable String getScope() {
+ return scope;
+ }
+
+ public @Nullable String getUserNameAttribute() {
+ return userNameAttribute;
+ }
+
+}
| null | train | train | 2022-09-15T03:07:45 | "2021-10-27T10:11:07Z" | Haarolean | train |
|
provectus/kafka-ui/1060_1065 | provectus/kafka-ui | provectus/kafka-ui/1060 | provectus/kafka-ui/1065 | [
"keyword_pr_to_issue"
] | 3c9eb824ca6ea51b6fe19ef6feb68af67544a3ff | 7603199e5323302a804b99be473b55176d76e8a6 | [
"Hey, thanks for reaching out. We'll get a look at this."
] | [] | "2021-11-09T11:19:11Z" | [
"type/bug",
"scope/frontend",
"status/accepted"
] | Calculation error in Time to retain data [topic creation] | **Describe the bug**
(A clear and concise description of what the bug is.)
Time to retain data tab displays 168d when 4w is selected; 28d should be displayed instead; the retention value in ms is miscalculated
**Set up**
(How do you run the app?)
docker-compose -f ./docker/kafka-ui.yaml up
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Topics
2. Add a Topic
3. Navigate to Time to retain data
4. Select 4w
5. 168d (supposedly 14515200000ms) is displayed
**Expected behavior**
(A clear and concise description of what you expected to happen)
28d (2.419e+9ms) should be displayed when 4w is selected
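A quick check of the numbers above (illustrative TypeScript, not the app's code; the constant name mirrors the one used in the component):

```typescript
const MILLISECONDS_IN_DAY = 24 * 60 * 60 * 1000; // 86_400_000

const expected4w = MILLISECONDS_IN_DAY * 7 * 4;  // 2_419_200_000 ms ~= 28 days
const displayed  = MILLISECONDS_IN_DAY * 7 * 24; // 14_515_200_000 ms  = 168 days
```

So the displayed value matches a `* 24` (hours) factor where a `* 4` (weeks) factor is expected.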
**Screenshots**
(If applicable, add screenshots to help explain your problem)
**Additional context**
(Add any other context about the problem here)
| [
"kafka-ui-react-app/src/components/Topics/shared/Form/TimeToRetainBtns.tsx"
] | [
"kafka-ui-react-app/src/components/Topics/shared/Form/TimeToRetainBtns.tsx"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/shared/Form/TimeToRetainBtns.tsx b/kafka-ui-react-app/src/components/Topics/shared/Form/TimeToRetainBtns.tsx
index afd974b151b..17221bf8e7c 100644
--- a/kafka-ui-react-app/src/components/Topics/shared/Form/TimeToRetainBtns.tsx
+++ b/kafka-ui-react-app/src/components/Topics/shared/Form/TimeToRetainBtns.tsx
@@ -29,7 +29,7 @@ const TimeToRetainBtns: React.FC<Props> = ({ name }) => (
<TimeToRetainBtn
text="4w"
inputName={name}
- value={MILLISECONDS_IN_DAY * 7 * 24}
+ value={MILLISECONDS_IN_DAY * 7 * 4}
/>
</div>
);
| null | test | train | 2021-11-08T20:47:27 | "2021-11-08T15:52:08Z" | Khakha-A | train |
provectus/kafka-ui/1016_1085 | provectus/kafka-ui | provectus/kafka-ui/1016 | provectus/kafka-ui/1085 | [
"keyword_pr_to_issue"
] | 01f04d38d4fd39786aa73dddf54a195ce2c72e74 | 6c9f7f7b770d738c65952ac712ab0eeafaacb185 | [
"Broken since #931"
] | [] | "2021-11-13T15:29:26Z" | [
"type/bug",
"good first issue",
"scope/QA",
"status/accepted",
"status/confirmed",
"hacktoberfest"
] | Topic tests failing | ```
[ERROR] com.provectus.kafka.ui.topics.TopicTests Time elapsed: 0.483 s <<< ERROR!
java.lang.NoSuchMethodError: 'java.lang.String io.netty.util.internal.ObjectUtil.checkNonEmptyAfterTrim(java.lang.String, java.lang.String)'
at com.provectus.kafka.ui.topics.TopicTests.beforeAll(TopicTests.java:27)
``` | [
"kafka-ui-e2e-checks/pom.xml",
"pom.xml"
] | [
"kafka-ui-e2e-checks/pom.xml",
"pom.xml"
] | [] | diff --git a/kafka-ui-e2e-checks/pom.xml b/kafka-ui-e2e-checks/pom.xml
index 64e66748910..7353423b10d 100644
--- a/kafka-ui-e2e-checks/pom.xml
+++ b/kafka-ui-e2e-checks/pom.xml
@@ -11,7 +11,7 @@
<artifactId>kafka-ui-e2e-checks</artifactId>
<properties>
- <kafka-ui-contract>${project.version}</kafka-ui-contract>
+ <kafka-ui-contract>${project.version}</kafka-ui-contract>
<junit.version>5.7.0</junit.version>
<aspectj.version>1.9.6</aspectj.version>
<allure.version>2.13.7</allure.version>
@@ -31,6 +31,8 @@
<allure.screendiff-plugin.version>2.13.9</allure.screendiff-plugin.version>
<maven.surefire-plugin.version>2.22.2</maven.surefire-plugin.version>
<allure-maven.version>2.10.0</allure-maven.version>
+ <kafka.version>3.0.0</kafka.version>
+ <netty.version>4.1.69.Final</netty.version>
</properties>
<dependencies>
@@ -43,7 +45,87 @@
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.13</artifactId>
<version>${kafka.version}</version>
+ <exclusions> <!-- could be removed when kafka version will contain zookeeper with netty 4.1.69 -->
+ <exclusion>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-buffer</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-common</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-codec</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-handler</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-resolver</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-transport</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-transport-native-epoll</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-transport-native-unix-common</artifactId>
+ </exclusion>
+ </exclusions>
</dependency>
+
+ <!--
+ whole netty dependency block could be removed
+ when kafka version will contain zookeeper with netty 4.1.69
+ -->
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-buffer</artifactId>
+ <version>${netty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-common</artifactId>
+ <version>${netty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-codec</artifactId>
+ <version>${netty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-handler</artifactId>
+ <version>${netty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-resolver</artifactId>
+ <version>${netty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-transport</artifactId>
+ <version>${netty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-transport-native-epoll</artifactId>
+ <version>${netty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-transport-native-unix-common</artifactId>
+ <version>${netty.version}</version>
+ </dependency>
+
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
diff --git a/pom.xml b/pom.xml
index 8550242a0d4..44ee572a213 100644
--- a/pom.xml
+++ b/pom.xml
@@ -18,9 +18,9 @@
<jackson-databind-nullable.version>0.2.1</jackson-databind-nullable.version>
<org.mapstruct.version>1.4.2.Final</org.mapstruct.version>
<org.projectlombok.version>1.18.20</org.projectlombok.version>
- <org.projectlombok.e2e-checks.version>1.18.20</org.projectlombok.e2e-checks.version>
+ <org.projectlombok.e2e-checks.version>1.18.20</org.projectlombok.e2e-checks.version>
<git.revision>latest</git.revision>
- <zookeper.version>3.5.7</zookeper.version>
+ <zookeper.version>3.7.0</zookeper.version>
<kafka-clients.version>2.8.0</kafka-clients.version>
<node.version>v14.17.1</node.version>
<dockerfile-maven-plugin.version>1.4.10</dockerfile-maven-plugin.version>
@@ -32,7 +32,6 @@
<openapi-generator-maven-plugin.version>4.3.0</openapi-generator-maven-plugin.version>
<swagger-annotations.version>1.6.0</swagger-annotations.version>
<springdoc-openapi-webflux-ui.version>1.2.32</springdoc-openapi-webflux-ui.version>
- <kafka.version>2.8.0</kafka.version>
<avro.version>1.9.2</avro.version>
<confluent.version>5.5.1</confluent.version>
<apache.commons.version>2.2</apache.commons.version>
| null | train | train | 2021-11-12T22:40:39 | "2021-10-26T10:42:11Z" | Haarolean | train |
provectus/kafka-ui/1084_1090 | provectus/kafka-ui | provectus/kafka-ui/1084 | provectus/kafka-ui/1090 | [
"keyword_pr_to_issue"
] | d6ada34a7e30a9b95d4ce206fc05b0af37472f9e | 352760b1b334aa5907e52110bfbb694f20347cd8 | [
"Hi,\r\n\r\nthere's nothing wrong with using older versions of dependencies (implying there are no known security vulnerabilities, which is true for 1.19). On the other hand, bumping dependency version may require additional testing overhead, because sometimes even minor bumps can cause some bugs/problems/instabilities.\r\nConsidering these facts, I'm closing this since there's no need in bumping this dependency so far.",
"Discussed in #1083, that's a transitive dependency."
] | [] | "2021-11-15T14:07:21Z" | [
"scope/backend",
"type/security",
"status/accepted"
] | bump apache-commons-compress from 1.19 to 1.21 in kafka-ui-api | Current version in use is almost two years old.
https://mvnrepository.com/artifact/org.apache.commons/commons-compress | [
"pom.xml"
] | [
"pom.xml"
] | [] | diff --git a/pom.xml b/pom.xml
index 44ee572a213..cb5bf344660 100644
--- a/pom.xml
+++ b/pom.xml
@@ -32,7 +32,7 @@
<openapi-generator-maven-plugin.version>4.3.0</openapi-generator-maven-plugin.version>
<swagger-annotations.version>1.6.0</swagger-annotations.version>
<springdoc-openapi-webflux-ui.version>1.2.32</springdoc-openapi-webflux-ui.version>
- <avro.version>1.9.2</avro.version>
+ <avro.version>1.11.0</avro.version>
<confluent.version>5.5.1</confluent.version>
<apache.commons.version>2.2</apache.commons.version>
<test.containers.version>1.16.2</test.containers.version>
| null | val | train | 2021-11-16T01:20:26 | "2021-11-13T00:52:55Z" | rgsurfs | train |
provectus/kafka-ui/1106_1107 | provectus/kafka-ui | provectus/kafka-ui/1106 | provectus/kafka-ui/1107 | [
"keyword_pr_to_issue",
"connected"
] | 352760b1b334aa5907e52110bfbb694f20347cd8 | d712d33c79cee6bd1c04f24f2ea04d3d0470dc6a | [] | [] | "2021-11-18T19:45:55Z" | [
"type/bug",
"scope/backend",
"scope/frontend",
"status/accepted",
"status/confirmed"
] | Topic disk usage size is incorrect in topic details view | **Describe the bug**
If the topic size exceeds int.max, the topic details page will display "-bytes". This is happening because the segmentSize field is a 32-bit int on the TopicDetails object.
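Rough illustration of the limit involved (editorial sketch in TypeScript; the 5 GB figure is just an example, not taken from the report):

```typescript
const INT32_MAX = 2 ** 31 - 1;          // 2_147_483_647 bytes, roughly 2 GiB
const topicSizeBytes = 5_000_000_000;   // e.g. a 5 GB topic

// Any size above INT32_MAX cannot be represented in a 32-bit segmentSize,
// so the value overflows and the UI ends up rendering "-... bytes".
console.log(topicSizeBytes > INT32_MAX); // true
```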
| [
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml"
] | [
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml"
] | [] | diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
index cb882ef078a..d7c1fe30824 100644
--- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
+++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
@@ -1707,6 +1707,7 @@ components:
type: number
segmentSize:
type: integer
+ format: int64
segmentCount:
type: integer
underReplicatedPartitions:
| null | train | train | 2021-11-17T22:24:41 | "2021-11-18T19:42:51Z" | germanosin | train |
provectus/kafka-ui/1121_1122 | provectus/kafka-ui | provectus/kafka-ui/1121 | provectus/kafka-ui/1122 | [
"connected"
] | 35c4de66e1d08d4435bad76bd40186e8785d4a58 | 5e1e87140a9e10884a28ee570b82b64032f27d96 | [
"Hi , this PR will fix using personal deployment creds https://github.com/provectus/kafka-ui/pull/1122 "
] | [] | "2021-11-22T12:20:17Z" | [
"type/enhancement",
"scope/infrastructure"
] | Create a separate technical account for branch deployments | With separate credentials
Discussed [here](https://github.com/provectus/kafka-ui/pull/1108#discussion_r752768474) | [
".github/workflows/branch-deploy.yml",
".github/workflows/branch-remove.yml",
".github/workflows/master.yaml"
] | [
".github/workflows/branch-deploy.yml",
".github/workflows/branch-remove.yml",
".github/workflows/master.yaml"
] | [] | diff --git a/.github/workflows/branch-deploy.yml b/.github/workflows/branch-deploy.yml
index 25f59849f58..62a77919d26 100644
--- a/.github/workflows/branch-deploy.yml
+++ b/.github/workflows/branch-deploy.yml
@@ -78,14 +78,14 @@ jobs:
steps:
- name: clone
run: |
- git clone https://azsafin:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
+ git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
- name: create deployment
run: |
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
echo "Branch:${{ needs.build.outputs.tag }}"
./kafka-ui-deployment-from-branch.sh ${{ needs.build.outputs.tag }}
- git config --global user.email "[email protected]"
- git config --global user.name "azsafin"
+ git config --global user.email "[email protected]"
+ git config --global user.name "kafka-ui-infra"
git add ../kafka-ui-from-branch/
git commit -m "added env:${{ needs.build.outputs.deploy }}" && git push || true
- name: make comment with deployment link
diff --git a/.github/workflows/branch-remove.yml b/.github/workflows/branch-remove.yml
index 848825e5e58..cef13859e81 100644
--- a/.github/workflows/branch-remove.yml
+++ b/.github/workflows/branch-remove.yml
@@ -22,14 +22,14 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: clone
run: |
- git clone https://azsafin:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
+ git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
- name: remove env
run: |
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
echo "Branch:${{ steps.extract_branch.outputs.tag }}"
./delete-env.sh ${{ steps.extract_branch.outputs.tag }}
- git config --global user.email "[email protected]"
- git config --global user.name "azsafin"
+ git config --global user.email "[email protected]"
+ git config --global user.name "kafka-ui-infra"
git add ../kafka-ui-from-branch/
git commit -m "removed env:${{ needs.build.outputs.deploy }}" && git push || true
- name: make comment with deployment link
diff --git a/.github/workflows/master.yaml b/.github/workflows/master.yaml
index 421a8f33752..0417c5203b1 100644
--- a/.github/workflows/master.yaml
+++ b/.github/workflows/master.yaml
@@ -67,11 +67,11 @@ jobs:
#################################
- name: update-master-deployment
run: |
- git clone https://azsafin:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git --branch master
+ git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git --branch master
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
echo "Image digest is:${{ steps.docker_build_and_push.outputs.digest }}"
./kafka-ui-update-master-digest.sh ${{ steps.docker_build_and_push.outputs.digest }}
- git config --global user.email "[email protected]"
- git config --global user.name "azsafin"
+ git config --global user.email "[email protected]"
+ git config --global user.name "kafka-ui-infra"
git add ../kafka-ui/*
git commit -m "updated master image digest: ${{ steps.docker_build_and_push.outputs.digest }}" && git push
| null | test | train | 2021-11-19T09:49:33 | "2021-11-22T11:43:31Z" | Haarolean | train |
provectus/kafka-ui/1128_1130 | provectus/kafka-ui | provectus/kafka-ui/1128 | provectus/kafka-ui/1130 | [
"connected"
] | 53a641cb5886095acd9ab9f1c770c8d410379fca | d674808f3c7d4403ab17b8206e9a5218080c468a | [
"@akamensky fixed, please try master version",
"@iliax I've tried the latest master version, but I get this error on service start (and it crashes):\r\n\r\n```\r\n05:05:36.501 [main] ERROR org.springframework.boot.SpringApplication - Application run failed\r\norg.springframework.beans.factory.BeanCreationException: Error creating bean with name 'clustersMetricsScheduler': Invocation of init method failed; nested exception is org.apache.kafka.common.errors.TimeoutException: Call(callName=listNodes, deadlineMs=1638075933368, tries=1, nextAllowedTryMs=1638075933469) timed out at 1638075933369 after 1 attempt(s)\r\n\tat org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:160) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\tat org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.applyBeanPostProcessorsBeforeInitialization(AbstractAutowireCapableBeanFactory.java:440) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\tat org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1796) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\tat org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:620) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\tat org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:542) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\tat org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:335) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\tat org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\tat org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:333) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\tat org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:208) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\tat org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:944) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\tat org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:918) ~[spring-context-5.3.12.jar!/:5.3.12]\r\n\tat org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:583) ~[spring-context-5.3.12.jar!/:5.3.12]\r\n\tat org.springframework.boot.web.reactive.context.ReactiveWebServerApplicationContext.refresh(ReactiveWebServerApplicationContext.java:64) ~[spring-boot-2.5.6.jar!/:2.5.6]\r\n\tat org.springframework.boot.SpringApplication.refresh(SpringApplication.java:754) [spring-boot-2.5.6.jar!/:2.5.6]\r\n\tat org.springframework.boot.SpringApplication.refreshContext(SpringApplication.java:434) [spring-boot-2.5.6.jar!/:2.5.6]\r\n\tat org.springframework.boot.SpringApplication.run(SpringApplication.java:338) [spring-boot-2.5.6.jar!/:2.5.6]\r\n\tat org.springframework.boot.SpringApplication.run(SpringApplication.java:1343) [spring-boot-2.5.6.jar!/:2.5.6]\r\n\tat org.springframework.boot.SpringApplication.run(SpringApplication.java:1332) [spring-boot-2.5.6.jar!/:2.5.6]\r\n\tat com.provectus.kafka.ui.KafkaUiApplication.main(KafkaUiApplication.java:14) [classes!/:?]\r\n\tat 
jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]\r\n\tat jdk.internal.reflect.NativeMethodAccessorImpl.invoke(Unknown Source) ~[?:?]\r\n\tat jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source) ~[?:?]\r\n\tat java.lang.reflect.Method.invoke(Unknown Source) ~[?:?]\r\n\tat org.springframework.boot.loader.MainMethodRunner.run(MainMethodRunner.java:49) [kafka-ui-api.jar:?]\r\n\tat org.springframework.boot.loader.Launcher.launch(Launcher.java:108) [kafka-ui-api.jar:?]\r\n\tat org.springframework.boot.loader.Launcher.launch(Launcher.java:58) [kafka-ui-api.jar:?]\r\n\tat org.springframework.boot.loader.JarLauncher.main(JarLauncher.java:88) [kafka-ui-api.jar:?]\r\nCaused by: org.apache.kafka.common.errors.TimeoutException: Call(callName=listNodes, deadlineMs=1638075933368, tries=1, nextAllowedTryMs=1638075933469) timed out at 1638075933369 after 1 attempt(s)\r\n\tSuppressed: java.lang.Exception: #block terminated with an error\r\n\t\tat reactor.core.publisher.BlockingSingleSubscriber.blockingGet(BlockingSingleSubscriber.java:99) ~[reactor-core-3.4.11.jar!/:3.4.11]\r\n\t\tat reactor.core.publisher.Mono.block(Mono.java:1706) ~[reactor-core-3.4.11.jar!/:3.4.11]\r\n\t\tat com.provectus.kafka.ui.service.ClustersMetricsScheduler.updateMetrics(ClustersMetricsScheduler.java:35) ~[classes!/:?]\r\n\t\tat jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]\r\n\t\tat jdk.internal.reflect.NativeMethodAccessorImpl.invoke(Unknown Source) ~[?:?]\r\n\t\tat jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source) ~[?:?]\r\n\t\tat java.lang.reflect.Method.invoke(Unknown Source) ~[?:?]\r\n\t\tat org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleElement.invoke(InitDestroyAnnotationBeanPostProcessor.java:389) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\t\tat org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleMetadata.invokeInitMethods(InitDestroyAnnotationBeanPostProcessor.java:333) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\t\tat org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor.postProcessBeforeInitialization(InitDestroyAnnotationBeanPostProcessor.java:157) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\t\tat org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.applyBeanPostProcessorsBeforeInitialization(AbstractAutowireCapableBeanFactory.java:440) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\t\tat org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1796) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\t\tat org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:620) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\t\tat org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:542) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\t\tat org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:335) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\t\tat org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\t\tat org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:333) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\t\tat 
org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:208) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\t\tat org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:944) ~[spring-beans-5.3.12.jar!/:5.3.12]\r\n\t\tat org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:918) ~[spring-context-5.3.12.jar!/:5.3.12]\r\n\t\tat org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:583) ~[spring-context-5.3.12.jar!/:5.3.12]\r\n\t\tat org.springframework.boot.web.reactive.context.ReactiveWebServerApplicationContext.refresh(ReactiveWebServerApplicationContext.java:64) ~[spring-boot-2.5.6.jar!/:2.5.6]\r\n\t\tat org.springframework.boot.SpringApplication.refresh(SpringApplication.java:754) [spring-boot-2.5.6.jar!/:2.5.6]\r\n\t\tat org.springframework.boot.SpringApplication.refreshContext(SpringApplication.java:434) [spring-boot-2.5.6.jar!/:2.5.6]\r\n\t\tat org.springframework.boot.SpringApplication.run(SpringApplication.java:338) [spring-boot-2.5.6.jar!/:2.5.6]\r\n\t\tat org.springframework.boot.SpringApplication.run(SpringApplication.java:1343) [spring-boot-2.5.6.jar!/:2.5.6]\r\n\t\tat org.springframework.boot.SpringApplication.run(SpringApplication.java:1332) [spring-boot-2.5.6.jar!/:2.5.6]\r\n\t\tat com.provectus.kafka.ui.KafkaUiApplication.main(KafkaUiApplication.java:14) [classes!/:?]\r\n\t\tat jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]\r\n\t\tat jdk.internal.reflect.NativeMethodAccessorImpl.invoke(Unknown Source) ~[?:?]\r\n\t\tat jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source) ~[?:?]\r\n\t\tat java.lang.reflect.Method.invoke(Unknown Source) ~[?:?]\r\n\t\tat org.springframework.boot.loader.MainMethodRunner.run(MainMethodRunner.java:49) [kafka-ui-api.jar:?]\r\n\t\tat org.springframework.boot.loader.Launcher.launch(Launcher.java:108) [kafka-ui-api.jar:?]\r\n\t\tat org.springframework.boot.loader.Launcher.launch(Launcher.java:58) [kafka-ui-api.jar:?]\r\n\t\tat org.springframework.boot.loader.JarLauncher.main(JarLauncher.java:88) [kafka-ui-api.jar:?]\r\nCaused by: org.apache.kafka.common.errors.TimeoutException: Timed out waiting for a node assignment. Call: listNodes\r\n```",
"@akamensky \r\nIn current master we do first metrics collection on application startup and fail if we cant handle any of clusters - I [just fixed ](https://github.com/provectus/kafka-ui/pull/1136) logging issue - please refresh master and try again - error message should be more descriptive. \r\n\r\nBTW: this eager initialization can be a problem in some situations when you have a lot of cluster, we will discuss and maybe add option to disable it. ",
"@iliax I think at least it shouldn't crash if any of the clusters are unreachable. Logging that is good. Showing in UI a red dot instead of green (as it was working previously) even better. But definitely shouldn't crash with exception IMO. ",
"@akamensky thank you for feedback, we will rework initialization soon",
"Thanks, looks like with the fix it works as expected now -- the error messages shown in log, and the application is accessible via UI.\r\n\r\nAs for the original issue -- ordering looks good now. Thanks @iliax "
] | [] | "2021-11-26T10:39:48Z" | [
"type/bug",
"scope/backend",
"status/accepted",
"status/confirmed"
] | Cluster order is not respected | **Describe the bug**
Previously we had only 3 clusters defined in the Docker service config, and with that the order was preserved (from 0 to 2, as per the env var index numbers). After evaluation we added the remaining 12 clusters (yes, we have a total of 15 Kafka clusters). The order is now completely random. **This happens in both the nav menu (left side of the UI with the vertical list) and in the dashboard view**.
**Set up**
Docker off of main branch using `master` tag in Docker hub.
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Add 15 or more clusters to config using user-friendly name in `KAFKA_CLUSTERS_0_NAME:`
2. Look at UI
**Expected behavior**
Order is preserved as in Config
**Actual behavior**
Order seems to be random
**Screenshot**
Cannot share because of private information.
**Additional context**
It is definitely not ordered by anything else. I checked, it does not follow index ordering, nor alphabetic ordering, it is just random, but once the "random" order is shown this exact ordering will persist between reloads.
| [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersMetricsScheduler.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersMetricsScheduler.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersMetricsScheduler.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersMetricsScheduler.java
index 0555243bb17..712077d604b 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersMetricsScheduler.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersMetricsScheduler.java
@@ -1,6 +1,5 @@
package com.provectus.kafka.ui.service;
-import java.util.Map;
import javax.annotation.PostConstruct;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
@@ -24,10 +23,9 @@ public class ClustersMetricsScheduler {
initialDelayString = "${kafka.update-metrics-rate-millis:30000}"
)
public void updateMetrics() {
- Flux.fromIterable(clustersStorage.getKafkaClustersMap().entrySet())
+ Flux.fromIterable(clustersStorage.getKafkaClusters())
.parallel()
.runOn(Schedulers.parallel())
- .map(Map.Entry::getValue)
.flatMap(cluster -> {
log.debug("Start getting metrics for kafkaCluster: {}", cluster.getName());
return metricsService.updateCache(cluster)
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java
index b77135f034b..08b71b4d318 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java
@@ -1,33 +1,22 @@
package com.provectus.kafka.ui.service;
+import com.google.common.collect.ImmutableMap;
import com.provectus.kafka.ui.config.ClustersProperties;
import com.provectus.kafka.ui.mapper.ClusterMapper;
import com.provectus.kafka.ui.model.KafkaCluster;
import java.util.Collection;
-import java.util.Map;
import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-import javax.annotation.PostConstruct;
-import lombok.RequiredArgsConstructor;
-import org.mapstruct.factory.Mappers;
import org.springframework.stereotype.Component;
@Component
-@RequiredArgsConstructor
public class ClustersStorage {
- private final Map<String, KafkaCluster> kafkaClusters = new ConcurrentHashMap<>();
+ private final ImmutableMap<String, KafkaCluster> kafkaClusters;
- private final ClustersProperties clusterProperties;
-
- private final ClusterMapper clusterMapper = Mappers.getMapper(ClusterMapper.class);
-
- @PostConstruct
- public void init() {
- for (ClustersProperties.Cluster clusterProperties : clusterProperties.getClusters()) {
- KafkaCluster cluster = clusterMapper.toKafkaCluster(clusterProperties);
- kafkaClusters.put(clusterProperties.getName(), cluster);
- }
+ public ClustersStorage(ClustersProperties properties, ClusterMapper mapper) {
+ var builder = ImmutableMap.<String, KafkaCluster>builder();
+ properties.getClusters().forEach(c -> builder.put(c.getName(), mapper.toKafkaCluster(c)));
+ this.kafkaClusters = builder.build();
}
public Collection<KafkaCluster> getKafkaClusters() {
@@ -37,8 +26,4 @@ public Collection<KafkaCluster> getKafkaClusters() {
public Optional<KafkaCluster> getClusterByName(String clusterName) {
return Optional.ofNullable(kafkaClusters.get(clusterName));
}
-
- public Map<String, KafkaCluster> getKafkaClustersMap() {
- return kafkaClusters;
- }
}
| null | val | train | 2021-11-24T18:34:53 | "2021-11-26T08:43:04Z" | akamensky | train |
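The patch in the record above swaps a `ConcurrentHashMap` for a Guava `ImmutableMap` built in configuration order. As a rough illustration (not project code: the cluster names are made up and Guava is assumed to be on the classpath), `ImmutableMap` iterates entries in insertion order, while a plain hash map gives exactly the "random but stable" ordering the reporter saw:

```java
import com.google.common.collect.ImmutableMap;
import java.util.HashMap;
import java.util.Map;

public class ClusterOrderSketch {
    public static void main(String[] args) {
        // ImmutableMap iterates entries in the order they were added, so listing
        // clusters in the order they appear in the configuration "just works".
        Map<String, String> ordered = ImmutableMap.<String, String>builder()
                .put("cluster-a", "defined first")
                .put("cluster-b", "defined second")
                .put("cluster-c", "defined third")
                .build();
        ordered.keySet().forEach(System.out::println); // cluster-a, cluster-b, cluster-c

        // A hash-based map makes no ordering promise; iteration order is unspecified
        // but stable for a given instance, matching the reported behaviour.
        Map<String, String> unordered = new HashMap<>(ordered);
        unordered.keySet().forEach(System.out::println); // order not guaranteed
    }
}
```

An order-preserving `LinkedHashMap` would behave similarly for iteration; the immutable variant additionally makes the cluster set read-only after startup.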
provectus/kafka-ui/1095_1135 | provectus/kafka-ui | provectus/kafka-ui/1095 | provectus/kafka-ui/1135 | [
"keyword_pr_to_issue",
"timestamp(timedelta=0.0, similarity=0.8758304854922276)"
] | d674808f3c7d4403ab17b8206e9a5218080c468a | 960f0a050d836f43b5e58b21319dca3149315d5c | [
"This one is tricky (https://github.com/OpenAPITools/jackson-databind-nullable/issues/25). Will fix a bit later."
] | [] | "2021-11-29T09:15:46Z" | [
"scope/backend",
"type/security",
"status/accepted"
] | bump jackson-databind from 2.10.2 to 2.13.0 in the kafka-ui-api | Request review and update the bump jackson-databind from 2.10.2 to latest supported version.
Latest versions may be found here:
https://mvnrepository.com/artifact/com.fasterxml.jackson.core/jackson-databind
jackson-databind 2.10.2 is vulnerable to the following CVEs:
https://nvd.nist.gov/vuln/detail/CVE-2020-25649
Vulnerabilities identified using Prisma Twistlock Scans against the kafka-ui-api-0.2.1.jar file conducted on 10 November 2021.
Jar file was extracted and files were located here: \kafka-ui-api-0.2.1\BOOT-INF\lib\
note: This jar also contains 1.3.61 versions for these related files which may also need updating: jackson-annotations, jackson-core, jackson-dataformat-cbor, jackson-datatype-guava, jackson-datatype-jdk8, jackson-datatype-joda,jackson-datatype-jsr310 and jackson-module-parameter-names | [
"pom.xml"
] | [
"pom.xml"
] | [] | diff --git a/pom.xml b/pom.xml
index cb5bf344660..a410b6bff07 100644
--- a/pom.xml
+++ b/pom.xml
@@ -15,7 +15,7 @@
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<spring-boot.version>2.5.6</spring-boot.version>
- <jackson-databind-nullable.version>0.2.1</jackson-databind-nullable.version>
+ <jackson-databind-nullable.version>0.2.2</jackson-databind-nullable.version>
<org.mapstruct.version>1.4.2.Final</org.mapstruct.version>
<org.projectlombok.version>1.18.20</org.projectlombok.version>
<org.projectlombok.e2e-checks.version>1.18.20</org.projectlombok.e2e-checks.version>
| null | train | train | 2021-11-26T12:50:32 | "2021-11-15T21:56:49Z" | rgsurfs | train |
provectus/kafka-ui/1017_1181 | provectus/kafka-ui | provectus/kafka-ui/1017 | provectus/kafka-ui/1181 | [
"connected"
] | f90433c82c00a35b7fb2bf11b7b33f92575e383e | 92edf8036b2653be7be2f4d80f49fae50d07b822 | [
"Hi, thank you for the feedback (and contribution). We'll reflect the changes in the documentation soon.",
"Thanks once again, done.",
"Thanks @Haarolean . I hate to be a PITA, but a few days ago I attempted to deploy the latest build of `master` and this guidance was no longer working for me. Digging deeper, it appears as if the `server.use-forward-headers` option was deprecated in Spring Boot 2.2 (and my guess is that `kafka-ui` has recently updated to a version of Spring Boot that finally has removed the setting).\r\n\r\nThe guidance from that project now is to use `server.forward-headers-strategy=native`, which would be setting `SERVER_FORWARDHEADERSSTRATEGY=native` in this project.\r\n\r\nSee https://github.com/spring-projects/spring-boot/wiki/Spring-Boot-2.2-Release-Notes#deprecations-in-spring-boot-22.",
"@clocklear hey, yeah, we've updated boot some time ago. Thanks for this information, I'll update the docs."
] | [] | "2021-12-07T12:48:31Z" | [
"type/documentation",
"good first issue",
"status/accepted",
"hacktoberfest"
] | Proposal: Add info about `SERVER_USEFORWARDHEADERS` to SSO guide | ### Is your proposal related to a problem?
I recently set up kafka-ui with MSK in ECS on AWS with the intent of using SSO with Auth0 to secure the application. I am terminating SSL on the load balancer; connections between the load balancer and the kafka-ui container are HTTP. Following the SSO guide, I set up only the environment variables that were relevant to SSO:
```
AUTH_ENABLED=true
SECURITY_BASIC_ENABLED=true
SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=uhvaPKIHU4ZF8Ne4B6PGvF0hWW6OcUSB
SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTSECRET=YXfRjmodifiedTujnkVr7zuW9ECCAK4TcnCio-i
SPRING_SECURITY_OAUTH2_CLIENT_PROVIDER_AUTH0_ISSUER_URI=https://dev-a63ggcut.auth0.com/
```
However, what I observed is that the redirect URI that was formed by the application and passed to Auth0 used the HTTP scheme instead of HTTPS, ostensibly because the connection between the LB and the app is non-TLS.
### Describe the solution you'd like
After digging through a few articles online (and deducing that Spring Security is being used to facilitate authN/Z), I found that there is a Spring setting that must be enabled in my particular configuration that causes Spring to use the `X-Forwarded-For` headers to determine the originating scheme in order to form the correct redirect URI to pass to Auth0. By setting `SERVER_USEFORWARDHEADERS` to `true`, the application now properly handles my SSO authentication with Auth0. I propose that the SSO guide be augmented in such a way to call out the scenario in which SSL is terminated prior to the application (such as on a load balancer), the app _may_ need to be configured to respect the `X-Forwarded-For` headers (which is apparently not the default).
(tl;dr -- the docs for SSO should suggest setting `SERVER_USEFORWARDHEADERS` to `true` when TLS is terminated by a proxy.)
### Describe alternatives you've considered
None.
### Additional context
More info can be found in this SO answer: https://stackoverflow.com/a/40879393 | [
"guides/SSO.md"
] | [
"guides/SSO.md"
] | [] | diff --git a/guides/SSO.md b/guides/SSO.md
index 2cb2e7fc603..2f1e49892cc 100644
--- a/guides/SSO.md
+++ b/guides/SSO.md
@@ -46,3 +46,6 @@ docker run -p 8080:8080 -v `pwd`/cert:/opt/cert -e AUTH_ENABLED=true \
-e SPRING_SECURITY_OAUTH2_CLIENT_PROVIDER_AUTH0_ISSUER_URI=https://dev-a63ggcut.auth0.com/ \
provectuslabs/kafka-ui:0.1.0
```
+
+#### Step 4 (optional)
+If you're using load balancer/proxy and use HTTP between the proxy and the app, you might wanna set `SERVER_USEFORWARDHEADERS` to true as well, for more info refer to [this issue](https://github.com/provectus/kafka-ui/issues/1017).
| null | train | train | 2021-12-06T14:15:09 | "2021-10-26T13:38:12Z" | clocklear | train |
provectus/kafka-ui/1183_1187 | provectus/kafka-ui | provectus/kafka-ui/1183 | provectus/kafka-ui/1187 | [
"connected"
] | 5df5cfb6ff3cadc070952faf6877626ff4b224ea | c9811e4f60682dceda5b9167be593b972a677360 | [
"Looking at YAML configuration file I've tried to use following in Docker stack envvars:\r\n\r\n```\r\n KAFKA_ADMIN-CLIENT-TIMEOUT: 30000\r\n ZOOKEEPER_CONNECTION-TIMEOUT: 30000\r\n```\r\n\r\nWhich works, but again for some clusters it timeouts (but fewer this time). I don't understand why it needs such a high value, because in fact the connections are not that slow to those clusters, bundled-in Kafka scripts complete their requests in under a second to the most remote cluster (tested from the same host where UI runs).\r\n\r\nIt looks to me something with connection logic is off here.",
"@akamensky please try to incerease `KAFKA_ADMIN-CLIENT-TIMEOUT` to 60000 or more and check behaviour. Note that it is not connection timeout - it is API operation timeout. If it will help it mean that problem is in high latency. Otherwise maybe some connections error can take place (like closed ports or smth). ",
"@iliax yes, already done that and it does work with 60000, but it is still quite high value I feel and perhaps default value is too low.\r\n\r\nBut overall I think this issue is more about documenting the configuration options and perhaps make them uniform (replace `-` with `_` for envvar options). But would be nice to perhaps raise default values as well to something higher as some operations may be slow. In our case it is the network link speed, because this connection goes over \"management\" link, which is slow (to leave capacity for actual data link).",
"@akamensky I increased default timeout to 30s (it was 5s) and added doc for this property"
] | [] | "2021-12-08T10:11:47Z" | [
"type/bug",
"status/pending",
"scope/backend",
"status/accepted"
] | Timeouts are not configurable in envvars | **Describe the bug**
Default timeouts are way too short and there is no documented way to configure them when running from official Docker image. In our system only clusters that are physically nearby the UI are "online", the rest are all "offline" with logs:
```
01:35:06.481 [kafka-admin-client-thread | adminclient-11] ERROR com.provectus.kafka.ui.service.MetricsService - Failed to collect cluster CLUSTER_NAME info
org.apache.kafka.common.errors.TimeoutException: Call(callName=describeConfigs, deadlineMs=1638927306473, tries=12, nextAllowedTryMs=1638927306574) timed out at 1638927306474 after 12 attempt(s)
```
Meanwhile, I can use Kafka's own CLI tools just fine to connect to those clusters and get all the information I need, so it is just that the timeouts are way too short and there is no way to configure them.
**Set up**
* Globally distributed large number of clusters
* Single UI to manage all of them
* Private (MPLS) networking between sites
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Have a cluster that is physically on the other side of the planet
2. Run the cluster in Docker swarm using the official "master" image
3. Add the clusters to the UI cluster list using env vars
4. ....
5. Observe timeouts on every connect
**Expected behavior**
Works
**Actual behavior**
Timeouts on remote clusters
**Screenshots**
N/A
**Additional context**
N/A
| [
"README.md",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/AdminClientServiceImpl.java",
"kafka-ui-api/src/main/resources/application-local.yml",
"kafka-ui-api/src/main/resources/application-sdp.yml",
"kafka-ui-api/src/main/resources/application.yml"
] | [
"README.md",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/AdminClientServiceImpl.java",
"kafka-ui-api/src/main/resources/application-local.yml",
"kafka-ui-api/src/main/resources/application-sdp.yml",
"kafka-ui-api/src/main/resources/application.yml"
] | [
"kafka-ui-api/src/test/resources/application-test.yml"
] | diff --git a/README.md b/README.md
index dcfa9f652a5..7eb9a4f5859 100644
--- a/README.md
+++ b/README.md
@@ -162,7 +162,11 @@ For example, if you want to use an environment variable to set the `name` parame
|Name |Description
|-----------------------|-------------------------------
-|`SERVER_SERVLET_CONTEXT_PATH` | URI basePath
+|`SERVER_SERVLET_CONTEXT_PATH` | URI basePath
+|`LOGGING_LEVEL_ROOT` | Setting log level (trace, debug, info, warn, error, fatal, off). Default: debug
+|`LOGGING_LEVEL_COM_PROVECTUS` |Setting log level (trace, debug, info, warn, error, fatal, off). Default: debug
+|`SERVER_PORT` |Port for the embedded server. Default: `8080`
+|`KAFKA_ADMIN-CLIENT-TIMEOUT` | Kafka API timeout in ms. Default: `30000`
|`KAFKA_CLUSTERS_0_NAME` | Cluster name
|`KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS` |Address where to connect
|`KAFKA_CLUSTERS_0_ZOOKEEPER` | Zookeeper service address
@@ -177,9 +181,6 @@ For example, if you want to use an environment variable to set the `name` parame
|`KAFKA_CLUSTERS_0_DISABLELOGDIRSCOLLECTION` |Disable collecting segments information. It should be true for confluent cloud. Default: false
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME` |Given name for the Kafka Connect cluster
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS` |Address of the Kafka Connect service endpoint
-|`LOGGING_LEVEL_ROOT` | Setting log level (trace, debug, info, warn, error, fatal, off). Default: debug
-|`LOGGING_LEVEL_COM_PROVECTUS` |Setting log level (trace, debug, info, warn, error, fatal, off). Default: debug
-|`SERVER_PORT` |Port for the embedded server. Default `8080`
|`KAFKA_CLUSTERS_0_JMXSSL` |Enable SSL for JMX? `true` or `false`. For advanced setup, see `kafka-ui-jmx-secured.yml`
|`KAFKA_CLUSTERS_0_JMXUSERNAME` |Username for JMX authentication
|`KAFKA_CLUSTERS_0_JMXPASSWORD` |Password for JMX authentication
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/AdminClientServiceImpl.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/AdminClientServiceImpl.java
index 88cc259b999..f92ea1e1d5b 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/AdminClientServiceImpl.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/AdminClientServiceImpl.java
@@ -20,7 +20,7 @@
public class AdminClientServiceImpl implements AdminClientService, Closeable {
private final Map<String, ReactiveAdminClient> adminClientCache = new ConcurrentHashMap<>();
@Setter // used in tests
- @Value("${kafka.admin-client-timeout}")
+ @Value("${kafka.admin-client-timeout:30000}")
private int clientTimeout;
@Override
diff --git a/kafka-ui-api/src/main/resources/application-local.yml b/kafka-ui-api/src/main/resources/application-local.yml
index 93a5745c388..54bdb59f5b5 100644
--- a/kafka-ui-api/src/main/resources/application-local.yml
+++ b/kafka-ui-api/src/main/resources/application-local.yml
@@ -27,9 +27,6 @@ kafka:
# protobufMessageNameByTopic:
# input-topic: InputMessage
# output-topic: OutputMessage
- admin-client-timeout: 5000
-zookeeper:
- connection-timeout: 1000
spring:
jmx:
enabled: true
diff --git a/kafka-ui-api/src/main/resources/application-sdp.yml b/kafka-ui-api/src/main/resources/application-sdp.yml
index 2de30ccb7ca..f8d4445fedb 100644
--- a/kafka-ui-api/src/main/resources/application-sdp.yml
+++ b/kafka-ui-api/src/main/resources/application-sdp.yml
@@ -11,6 +11,3 @@ kafka:
# zookeeper: zookeeper1:2181
# bootstrapServers: kafka1:29092
# schemaRegistry: http://schemaregistry1:8085
- admin-client-timeout: 5000
-zookeeper:
- connection-timeout: 1000
diff --git a/kafka-ui-api/src/main/resources/application.yml b/kafka-ui-api/src/main/resources/application.yml
index 2cda55ac2a9..54d4c090117 100644
--- a/kafka-ui-api/src/main/resources/application.yml
+++ b/kafka-ui-api/src/main/resources/application.yml
@@ -1,7 +1,3 @@
-kafka:
- admin-client-timeout: 5000
-zookeeper:
- connection-timeout: 1000
auth:
enabled: false
management:
@@ -13,4 +9,4 @@ management:
endpoints:
web:
exposure:
- include: "info,health"
+ include: "info,health"
\ No newline at end of file
| diff --git a/kafka-ui-api/src/test/resources/application-test.yml b/kafka-ui-api/src/test/resources/application-test.yml
index ec67d666a01..63dada67238 100644
--- a/kafka-ui-api/src/test/resources/application-test.yml
+++ b/kafka-ui-api/src/test/resources/application-test.yml
@@ -6,9 +6,6 @@ kafka:
zookeeper: localhost:2181
schemaRegistry: http://localhost:8081
jmxPort: 9997
- admin-client-timeout: 5000
-zookeeper:
- connection-timeout: 1000
spring:
jmx:
enabled: true
| train | train | 2021-12-08T09:28:28 | "2021-12-08T01:50:26Z" | akamensky | train |
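A hypothetical compose fragment for the timeout documented in the README change above; the cluster name and bootstrap address are placeholders, and 60000 ms is simply the value the reporter found sufficient over a slow management link:

```yaml
# Sketch only: the bootstrap address is a placeholder.
services:
  kafka-ui:
    image: provectuslabs/kafka-ui:master
    environment:
      KAFKA_CLUSTERS_0_NAME: "far-away-cluster"
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: "kafka.example.internal:9092"
      # Kafka API operation timeout in ms; the patch raises the default to 30000,
      # but high-latency links may still need more.
      KAFKA_ADMIN-CLIENT-TIMEOUT: 60000
```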
provectus/kafka-ui/1215_1220 | provectus/kafka-ui | provectus/kafka-ui/1215 | provectus/kafka-ui/1220 | [
"keyword_pr_to_issue",
"timestamp(timedelta=0.0, similarity=0.841488973242652)"
] | 452969ddf904e3262170ab316ec1d46ed264d433 | 242f85aa2e2426f6410aa06b6ccae7486a66b5aa | [
"Hello there moritzluedtke! 👋\n\nThank you and congratulations 🎉 for opening your very first issue in this project! 💖\n\nIn case you want to claim this issue, please comment down below! We will try to get back to you as soon as we can. 👀",
"Hi, thanks for raising the issue. We'll take a look into this.",
"@moritzluedtke it will take some time for us to update the chart on artifacthub. It will happen presumably near 19th of December.",
"@Haarolean no problem, thanks for fixing this! 👍 "
] | [] | "2021-12-10T10:55:24Z" | [
"type/bug",
"status/accepted",
"scope/infrastructure",
"scope/k8s"
] | Image registry is missing in k8s deployment files | **Describe the bug**
When using custom private registrys the Helm chart doesn't consider the image registry input.
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Add the following (as an example) in the `values.yml` file:
```
image:
registry: YOUR_REGISTRY
repository: kafka-ui
tag: 0.2.1
```
2. Deploy the helm chart to your cluster.
2. The deployment file is missing the registry in the image name.
**Expected behavior**
The image registry should be added to the image name in the k8s `deployment.yml`.
**Fix**
In the template file `deployment.yaml` replace the line for `containers.image` with the following:
```
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
``` | [
"charts/kafka-ui/templates/deployment.yaml"
] | [
"charts/kafka-ui/templates/deployment.yaml"
] | [] | diff --git a/charts/kafka-ui/templates/deployment.yaml b/charts/kafka-ui/templates/deployment.yaml
index b1aae57b6fa..0f995b837d8 100644
--- a/charts/kafka-ui/templates/deployment.yaml
+++ b/charts/kafka-ui/templates/deployment.yaml
@@ -36,7 +36,7 @@ spec:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
- image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
envFrom:
{{- if .Values.existingConfigMap }}
| null | train | train | 2021-12-09T15:47:52 | "2021-12-09T14:45:28Z" | moritzluedtke | train |
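A hypothetical `values.yaml` fragment to go with the template fix above; the registry host is a placeholder:

```yaml
# Sketch only: the registry host is a placeholder.
image:
  registry: registry.example.com
  repository: kafka-ui
  tag: 0.2.1
  pullPolicy: IfNotPresent
# With the corrected template the container image renders as
#   registry.example.com/kafka-ui:0.2.1
# (previously the registry prefix was silently dropped).
```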
provectus/kafka-ui/1207_1247 | provectus/kafka-ui | provectus/kafka-ui/1207 | provectus/kafka-ui/1247 | [
"connected",
"timestamp(timedelta=1.0, similarity=0.8626846678582087)"
] | 6e198167dce24b99fee83f744869a0088b40fd7b | f5d421d9f04a13ea01653ff62bb8e0732d3928a0 | [] | [] | "2021-12-14T08:32:02Z" | [
"type/bug",
"good first issue",
"scope/frontend",
"status/accepted",
"status/confirmed"
] | Wrong counter value for Connectors | **Describe the bug**
The counter on the page Connectors displays the wrong value (always "1")
**Set up**
(How do you run the app?
Which version of the app are you running? Provide either docker image version or check commit hash at the top left corner. We won't be able to help you without this information.)
http://redesign.internal.kafka-ui.provectus.io/
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Navigate to menu item Kafka Connect
2. Create more than 1 connector if they do not exist yet
3. Observe the counter in the left upper corner of the right panel - Connects 1 (see attached screenshot)
**Expected behavior**
The value of connects counter equals the number of connectors.
**Screenshots**

**Additional context**
(Add any other context about the problem here)
Wrong value for the counter present in both versions - old (https://www.kafka-ui.provectus.io/ui/clusters/local/connectors) and redesign (http://redesign.internal.kafka-ui.provectus.io/ui/clusters/local/connectors) | [
"kafka-ui-react-app/src/components/Connect/List/List.tsx"
] | [
"kafka-ui-react-app/src/components/Connect/List/List.tsx"
] | [] | diff --git a/kafka-ui-react-app/src/components/Connect/List/List.tsx b/kafka-ui-react-app/src/components/Connect/List/List.tsx
index 5fa45a7c84a..65323914778 100644
--- a/kafka-ui-react-app/src/components/Connect/List/List.tsx
+++ b/kafka-ui-react-app/src/components/Connect/List/List.tsx
@@ -24,7 +24,6 @@ export interface ListProps {
const List: React.FC<ListProps> = ({
connectors,
- connects,
areConnectsFetching,
areConnectorsFetching,
fetchConnects,
@@ -55,7 +54,7 @@ const List: React.FC<ListProps> = ({
title="Connects"
fetching={areConnectsFetching}
>
- {connects.length}
+ {connectors.length}
</Indicator>
<div className="column">
| null | train | train | 2021-12-13T13:54:52 | "2021-12-09T10:59:15Z" | agolosen | train |
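The one-line fix above changes which collection feeds the indicator. A tiny TypeScript sketch (the types are simplified stand-ins, not the generated API models) of the distinction:

```typescript
// Simplified stand-ins for the real models.
interface Connect { name: string }            // a Kafka Connect cluster
interface FullConnectorInfo { name: string }  // a connector running on it

// The "Connects" indicator was previously fed the number of Connect clusters
// (usually 1); the fix counts the connectors themselves.
const indicatorValue = (
  connectors: FullConnectorInfo[],
  _connects: Connect[]
): number => connectors.length;
```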
provectus/kafka-ui/1144_1256 | provectus/kafka-ui | provectus/kafka-ui/1144 | provectus/kafka-ui/1256 | [
"connected"
] | acfc59863cf5730c78bbc220d2e739413f6c578a | 9ee1a28c8b099767e81c584a3ac726ec29c316c1 | [
"Done."
] | [] | "2021-12-15T14:25:45Z" | [
"status/accepted",
"scope/infrastructure",
"type/feature"
] | Implement a workflow to bring up a temp demo external env | Implement a mechanism to bring up a separate public feature testing environment.
Purpose: demonstrating that a certain feature doesn't work to someone (e.g. an outside collaborator) without creating a VPN account for them.
1) Should be triggered in a PR by the label `status/feature_testing_public`.
2) Should be accessible without VPN but basic auth should be present. | [
".github/workflows/branch-deploy.yml",
".github/workflows/master.yaml"
] | [
".github/workflows/branch-deploy.yml",
".github/workflows/master.yaml"
] | [] | diff --git a/.github/workflows/branch-deploy.yml b/.github/workflows/branch-deploy.yml
index f3a51fc4282..b75bd5cfa50 100644
--- a/.github/workflows/branch-deploy.yml
+++ b/.github/workflows/branch-deploy.yml
@@ -1,6 +1,15 @@
name: DeployFromBranch
on:
workflow_dispatch:
+ inputs:
+ public:
+ required: true
+ description: Will this environment be publicly available?
+ default: "no"
+ kafka-ui-password:
+ required: true
+ description: Password that will be used to login to branch env. If the env is not publicly accessible, this password will not be used.
+ default: ""
pull_request:
types: ['labeled']
jobs:
@@ -83,17 +92,24 @@ jobs:
run: |
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
echo "Branch:${{ needs.build.outputs.tag }}"
- ./kafka-ui-deployment-from-branch.sh ${{ needs.build.outputs.tag }} ${{ secrets.GITHUB_TOKEN }}
+ ./kafka-ui-deployment-from-branch.sh ${{ needs.build.outputs.tag }} ${{ github.event.inputs.public }} ${{ github.event.inputs.kafka-ui-password }}
git config --global user.email "[email protected]"
git config --global user.name "kafka-ui-infra"
git add ../kafka-ui-from-branch/
git commit -m "added env:${{ needs.build.outputs.deploy }}" && git push || true
- - name: make comment with deployment link
+
+ - name: make comment with private deployment link
+ if: ${{ github.event.inputs.public == 'no' }}
uses: peter-evans/[email protected]
with:
issue-number: ${{ github.event.pull_request.number }}
body: |
Custom deployment will be available at http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io
-
-
+ - name: make comment with public deployment link
+ if: ${{ github.event.inputs.public == 'yes' }}
+ uses: peter-evans/[email protected]
+ with:
+ issue-number: ${{ github.event.pull_request.number }}
+ body: |
+ Custom deployment will be available at http://${{ needs.build.outputs.tag }}.kafka-ui.provectus.io
diff --git a/.github/workflows/master.yaml b/.github/workflows/master.yaml
index 0a5cc8c0466..0417c5203b1 100644
--- a/.github/workflows/master.yaml
+++ b/.github/workflows/master.yaml
@@ -70,7 +70,7 @@ jobs:
git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git --branch master
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
echo "Image digest is:${{ steps.docker_build_and_push.outputs.digest }}"
- ./kafka-ui-update-master-digest.sh ${{ steps.docker_build_and_push.outputs.digest }} ${{ secrets.GITHUB_TOKEN }}
+ ./kafka-ui-update-master-digest.sh ${{ steps.docker_build_and_push.outputs.digest }}
git config --global user.email "[email protected]"
git config --global user.name "kafka-ui-infra"
git add ../kafka-ui/*
| null | train | train | 2021-12-15T11:02:47 | "2021-11-30T08:54:01Z" | Haarolean | train |
provectus/kafka-ui/1144_1260 | provectus/kafka-ui | provectus/kafka-ui/1144 | provectus/kafka-ui/1260 | [
"connected"
] | 9ee1a28c8b099767e81c584a3ac726ec29c316c1 | 20e1d28fb4e1d31bf15c569d5200f172f0ff067b | [
"Done."
] | [] | "2021-12-16T08:52:23Z" | [
"status/accepted",
"scope/infrastructure",
"type/feature"
] | Implement a workflow to bring up a temp demo external env | Implement a mechanism to bring up a separate public feature testing environment.
Purpose: demonstrating that a certain feature doesn't work to someone (e.g. an outside collaborator) without creating a VPN account for them.
1) Should be triggered in a PR by the label `status/feature_testing_public`.
2) Should be accessible without VPN but basic auth should be present. | [
".github/workflows/branch-deploy.yml"
] | [
".github/workflows/branch-deploy.yml"
] | [] | diff --git a/.github/workflows/branch-deploy.yml b/.github/workflows/branch-deploy.yml
index b75bd5cfa50..996ba05da6e 100644
--- a/.github/workflows/branch-deploy.yml
+++ b/.github/workflows/branch-deploy.yml
@@ -1,20 +1,12 @@
name: DeployFromBranch
on:
workflow_dispatch:
- inputs:
- public:
- required: true
- description: Will this environment be publicly available?
- default: "no"
- kafka-ui-password:
- required: true
- description: Password that will be used to login to branch env. If the env is not publicly accessible, this password will not be used.
- default: ""
+
pull_request:
types: ['labeled']
jobs:
build:
- if: ${{ github.event.label.name == 'status/feature_testing' }}
+ if: ${{ github.event.label.name == 'status/feature_testing' || github.event.label.name == 'status/feature_testing_public' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
@@ -92,14 +84,14 @@ jobs:
run: |
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
echo "Branch:${{ needs.build.outputs.tag }}"
- ./kafka-ui-deployment-from-branch.sh ${{ needs.build.outputs.tag }} ${{ github.event.inputs.public }} ${{ github.event.inputs.kafka-ui-password }}
+ ./kafka-ui-deployment-from-branch.sh ${{ needs.build.outputs.tag }} ${{ github.event.label.name }} ${{ secrets.FEATURE_TESTING_UI_PASSWORD }}
git config --global user.email "[email protected]"
git config --global user.name "kafka-ui-infra"
git add ../kafka-ui-from-branch/
git commit -m "added env:${{ needs.build.outputs.deploy }}" && git push || true
- name: make comment with private deployment link
- if: ${{ github.event.inputs.public == 'no' }}
+ if: ${{ github.event.label.name == 'status/feature_testing' }}
uses: peter-evans/[email protected]
with:
issue-number: ${{ github.event.pull_request.number }}
@@ -107,7 +99,7 @@ jobs:
Custom deployment will be available at http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io
- name: make comment with public deployment link
- if: ${{ github.event.inputs.public == 'yes' }}
+ if: ${{ github.event.label.name == 'status/feature_testing_public' }}
uses: peter-evans/[email protected]
with:
issue-number: ${{ github.event.pull_request.number }}
| null | train | train | 2021-12-15T20:56:49 | "2021-11-30T08:54:01Z" | Haarolean | train |
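A stripped-down sketch of the label-driven pattern these workflow changes converge on; the real build and deploy steps are omitted and the branch tag in the comment body is a placeholder:

```yaml
# Sketch only: deployment steps and secrets are omitted.
name: DeployFromBranch
on:
  pull_request:
    types: ['labeled']
jobs:
  build:
    # Run for either the private or the public feature-testing label.
    if: ${{ github.event.label.name == 'status/feature_testing' || github.event.label.name == 'status/feature_testing_public' }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: comment with the public deployment link
        if: ${{ github.event.label.name == 'status/feature_testing_public' }}
        uses: peter-evans/[email protected]
        with:
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            Custom deployment will be available at http://<branch-tag>.kafka-ui.provectus.io
```

Removing the label triggers the companion branch-remove workflow, which tears the environment down again.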
provectus/kafka-ui/1144_1261 | provectus/kafka-ui | provectus/kafka-ui/1144 | provectus/kafka-ui/1261 | [
"connected"
] | 20e1d28fb4e1d31bf15c569d5200f172f0ff067b | 7804a6eb9b34c67af1af9d0ca7d464634ffca65c | [
"Done."
] | [] | "2021-12-16T09:23:01Z" | [
"status/accepted",
"scope/infrastructure",
"type/feature"
] | Implement a workflow to bring up a temp demo external env | Implement a mechanism to bring up a separate public feature testing environment.
Purpose: demonstrating that a certain feature doesn't work to someone (e.g. an outside collaborator) without creating a VPN account for them.
1) Should be triggered in a PR by the label `status/feature_testing_public`.
2) Should be accessible without VPN but basic auth should be present. | [
".github/workflows/branch-remove.yml"
] | [
".github/workflows/branch-remove.yml"
] | [] | diff --git a/.github/workflows/branch-remove.yml b/.github/workflows/branch-remove.yml
index cef13859e81..ad6e63aa71e 100644
--- a/.github/workflows/branch-remove.yml
+++ b/.github/workflows/branch-remove.yml
@@ -5,7 +5,7 @@ on:
types: ['unlabeled']
jobs:
remove:
- if: ${{ github.event.label.name == 'status/feature_testing' }}
+ if: ${{ github.event.label.name == 'status/feature_testing' || github.event.label.name == 'status/feature_testing_public' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
| null | val | train | 2021-12-16T09:58:55 | "2021-11-30T08:54:01Z" | Haarolean | train |
provectus/kafka-ui/1250_1270 | provectus/kafka-ui | provectus/kafka-ui/1250 | provectus/kafka-ui/1270 | [
"timestamp(timedelta=0.0, similarity=0.9136179052251852)",
"connected"
] | a170cce4fa875ca1f23ce77e626dcdf74bc81dce | 49285bee9b1ae669e3ab2bd19b967aa344db3a29 | [] | [
"Could we state the minimum required version of SR which supports schemaType here please?",
"Updated exception message with the minimum required version of SR. @Haarolean Could you check it, pls?",
"Nice, thanks"
] | "2021-12-17T15:42:20Z" | [
"type/bug",
"good first issue",
"scope/backend",
"status/accepted",
"status/confirmed"
] | Schema registry back compatibility | **Describe the bug**
There are incompatibility problems with the schema registry
**Set up**
docker-compose kafka-ui.yaml
**Steps to Reproduce**
Cluster 1's schema registry (5.2.4): creating a protobuf schema doesn't work.
Schema example:
```
syntax = "proto3";
package com.provectus;
message TestProtoRecord {
string f1 = 1;
int32 f2 = 2;
}
```
Cluster 2's schema registry (5.5.0) works fine.
**Additional context**
```
{code: 4006, message: "Unrecognized field: schemaType", timestamp: 1639472891435,…}
code: 4006
fieldsErrors: null
message: "Unrecognized field: schemaType"
requestId: "51c9accd-15"
stackTrace: "com.provectus.kafka.ui.exception.UnprocessableEntityException: Unrecognized field: schemaType\n\tat com.provectus.kafka.ui.service.SchemaRegistryService.lambda$checkSchemaOnDuplicate$15(SchemaRegistryService.java:234)\n\tSuppressed: reactor.core.publisher.FluxOnAssembly$OnAssemblyException: \nError has been observed at the following site(s):\n\t*__checkpoint ⇢ 422 from POST http://schemaregistry0:8085/subjects/proto1 [DefaultWebClient]\n\t*__checkpoint ⇢
``` | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/SchemaTypeIsNotSupportedException.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/SchemaTypeIsNotSupportedException.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/SchemaTypeIsNotSupportedException.java
new file mode 100644
index 00000000000..eabaaf97e52
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/SchemaTypeIsNotSupportedException.java
@@ -0,0 +1,12 @@
+package com.provectus.kafka.ui.exception;
+
+public class SchemaTypeIsNotSupportedException extends UnprocessableEntityException {
+
+ private static final String REQUIRED_SCHEMA_REGISTRY_VERSION = "5.5.0";
+
+ public SchemaTypeIsNotSupportedException() {
+ super(String.format("Current version of Schema Registry does "
+ + "not support provided schema type,"
+ + " version %s or later is required here.", REQUIRED_SCHEMA_REGISTRY_VERSION));
+ }
+}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java
index 52db7d7ee30..16677e4c6c2 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java
@@ -5,6 +5,7 @@
import com.provectus.kafka.ui.exception.DuplicateEntityException;
import com.provectus.kafka.ui.exception.SchemaNotFoundException;
+import com.provectus.kafka.ui.exception.SchemaTypeIsNotSupportedException;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.mapper.ClusterMapper;
@@ -51,6 +52,8 @@ public class SchemaRegistryService {
private static final String URL_SUBJECT_BY_VERSION = "/subjects/{schemaName}/versions/{version}";
private static final String LATEST = "latest";
+ private static final String UNRECOGNIZED_FIELD_SCHEMA_TYPE = "Unrecognized field: schemaType";
+
private final ClusterMapper mapper;
private final WebClient webClient;
@@ -195,7 +198,9 @@ private Mono<SubjectIdResponse> submitNewSchema(String subject,
.retrieve()
.onStatus(UNPROCESSABLE_ENTITY::equals,
r -> r.bodyToMono(ErrorResponse.class)
- .flatMap(x -> Mono.error(new UnprocessableEntityException(x.getMessage()))))
+ .flatMap(x -> Mono.error(isUnrecognizedFieldSchemaTypeMessage(x.getMessage())
+ ? new SchemaTypeIsNotSupportedException()
+ : new UnprocessableEntityException(x.getMessage()))))
.bodyToMono(SubjectIdResponse.class);
}
@@ -213,7 +218,9 @@ private Mono<SchemaSubjectDTO> checkSchemaOnDuplicate(String subject,
.onStatus(NOT_FOUND::equals, res -> Mono.empty())
.onStatus(UNPROCESSABLE_ENTITY::equals,
r -> r.bodyToMono(ErrorResponse.class)
- .flatMap(x -> Mono.error(new UnprocessableEntityException(x.getMessage()))))
+ .flatMap(x -> Mono.error(isUnrecognizedFieldSchemaTypeMessage(x.getMessage())
+ ? new SchemaTypeIsNotSupportedException()
+ : new UnprocessableEntityException(x.getMessage()))))
.bodyToMono(SchemaSubjectDTO.class)
.filter(s -> Objects.isNull(s.getId()))
.switchIfEmpty(Mono.error(new DuplicateEntityException("Such schema already exists")));
@@ -321,4 +328,8 @@ private WebClient.RequestBodySpec configuredWebClient(InternalSchemaRegistry sch
.uri(schemaRegistry.getFirstUrl() + uri, params)
.headers(headers -> setBasicAuthIfEnabled(schemaRegistry, headers));
}
+
+ private boolean isUnrecognizedFieldSchemaTypeMessage(String errorMessage) {
+ return errorMessage.contains(UNRECOGNIZED_FIELD_SCHEMA_TYPE);
+ }
}
| null | val | train | 2021-12-17T10:59:58 | "2021-12-14T10:11:08Z" | Haarolean | train |
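A standalone sketch of the error-translation idea in the patch above, using plain `IllegalStateException`s instead of the project's own exception hierarchy; the wording of the friendly message is illustrative:

```java
public class SchemaRegistryErrorTranslator {
    private static final String UNRECOGNIZED_FIELD_SCHEMA_TYPE = "Unrecognized field: schemaType";

    // Older Schema Registry (< 5.5.0) rejects the "schemaType" field that JSON/PROTOBUF
    // registration sends; translate that 422 body into a clearer message instead of
    // echoing the raw error back to the user.
    public static RuntimeException translate(String registryErrorMessage) {
        if (registryErrorMessage != null
                && registryErrorMessage.contains(UNRECOGNIZED_FIELD_SCHEMA_TYPE)) {
            return new IllegalStateException(
                "This Schema Registry does not support the provided schema type; "
                    + "version 5.5.0 or later is required.");
        }
        return new IllegalStateException(registryErrorMessage);
    }

    public static void main(String[] args) {
        System.out.println(translate("Unrecognized field: schemaType").getMessage());
        System.out.println(translate("Schema being registered is incompatible").getMessage());
    }
}
```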
provectus/kafka-ui/1275_1286 | provectus/kafka-ui | provectus/kafka-ui/1275 | provectus/kafka-ui/1286 | [
"connected"
] | 4c39f1386907469c868b76fa79cb0024064729b3 | e4c1114e423a99430b70baef02880e265041e423 | [
"Hello there fockboi-lgtm! 👋\n\nThank you and congratulations 🎉 for opening your very first issue in this project! 💖\n\nIn case you want to claim this issue, please comment down below! We will try to get back to you as soon as we can. 👀",
"Hi, thanks for your concern. We'll get a look into this soon."
] | [] | "2021-12-20T09:25:31Z" | [
"type/security",
"status/accepted",
"scope/infrastructure"
] | [Security] Workflow backend.yml is using vulnerable action Sibz/github-status-action | The workflow backend.yml references the action Sibz/github-status-action using reference v1. However, this reference is missing the commit [650dd1a882a76dbbbc4576fb5974b8d22f29847f](https://github.com/Sibz/github-status-action/commits/650dd1a882a76dbbbc4576fb5974b8d22f29847f), which may contain a fix for some vulnerability.
The vulnerability fix missing from this action version could be related to:
(1) CVE fix
(2) upgrade of vulnerable dependency
(3) fix to secret leak and others.
Please consider updating the reference to the action. | [
".github/workflows/e2e-checks.yaml"
] | [
".github/workflows/e2e-checks.yaml"
] | [] | diff --git a/.github/workflows/e2e-checks.yaml b/.github/workflows/e2e-checks.yaml
index 11072bfca8d..b6c44e72e1b 100644
--- a/.github/workflows/e2e-checks.yaml
+++ b/.github/workflows/e2e-checks.yaml
@@ -64,7 +64,7 @@ jobs:
destination_dir: ./allure
- name: Post the link to allure report
if: always()
- uses: Sibz/github-status-action@v1
+ uses: Sibz/[email protected]
with:
authToken: ${{secrets.GITHUB_TOKEN}}
context: 'Test report'
@@ -73,4 +73,4 @@ jobs:
target_url: https://${{ github.repository_owner }}.github.io/kafka-ui/allure/allure-results/${{ github.run_number }}
- name: Dump docker logs on failure
if: failure()
- uses: jwalton/[email protected]
\ No newline at end of file
+ uses: jwalton/[email protected]
| null | test | train | 2021-12-19T14:49:58 | "2021-12-20T00:27:08Z" | fockboi-lgtm | train |
provectus/kafka-ui/1210_1307 | provectus/kafka-ui | provectus/kafka-ui/1210 | provectus/kafka-ui/1307 | [
"keyword_pr_to_issue"
] | 317d956c3c843b4cbd3d266b329a0cf417a083c7 | 6351361c38b5ca9b546fe24691900d270411446d | [] | [] | "2021-12-21T19:27:45Z" | [
"type/bug",
"good first issue",
"scope/frontend",
"status/accepted",
"status/confirmed"
] | Confusing process of updating Global Compatibility Level | ### Is your proposal related to a problem?
<!--
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
-->
When selected and submitted, the Global Compatibility Level for Schema Registry doesn't update automatically. You need to update the page to see the changes.
### Describe the solution you'd like
<!--
Provide a clear and concise description of what you want to happen.
-->
The Global Compatibility Level changes automatically once a new option from the drop down menu is selected and submitted; no updates of the page, going back and forth between tabs is needed.
### Describe alternatives you've considered
<!--
Let us know about other solutions you've tried or researched.
-->
--
### Additional context
<!--
Is there anything else you can add about the proposal?
You might want to link to related issues here, if you haven't already.
-->
When Global Compatibility Level is selected and submitted, there is a strange visual bug -- as if a pop-up tries to open but fails.
| [
"kafka-ui-react-app/package-lock.json",
"kafka-ui-react-app/src/components/Schemas/List/GlobalSchemaSelector/GlobalSchemaSelector.tsx",
"kafka-ui-react-app/src/redux/actions/__test__/thunks/schemas.spec.ts",
"kafka-ui-react-app/src/redux/actions/thunks/schemas.ts"
] | [
"kafka-ui-react-app/package-lock.json",
"kafka-ui-react-app/src/components/Schemas/List/GlobalSchemaSelector/GlobalSchemaSelector.tsx",
"kafka-ui-react-app/src/redux/actions/__test__/thunks/schemas.spec.ts",
"kafka-ui-react-app/src/redux/actions/thunks/schemas.ts"
] | [] | diff --git a/kafka-ui-react-app/package-lock.json b/kafka-ui-react-app/package-lock.json
index ff880549b08..9b45d9e98e0 100644
--- a/kafka-ui-react-app/package-lock.json
+++ b/kafka-ui-react-app/package-lock.json
@@ -3159,7 +3159,6 @@
"version": "5.1.18",
"resolved": "https://registry.npmjs.org/@types/styled-components/-/styled-components-5.1.18.tgz",
"integrity": "sha512-xPTYmWP7Mxk5TAD3pYsqjwA9G5fAI8e/S51QUJEl7EQD1siKCdiYXIWiH2lzoHRl+QqbQCJMcGv3YTF3OmyPdQ==",
- "dev": true,
"requires": {
"@types/hoist-non-react-statics": "*",
"@types/react": "*",
diff --git a/kafka-ui-react-app/src/components/Schemas/List/GlobalSchemaSelector/GlobalSchemaSelector.tsx b/kafka-ui-react-app/src/components/Schemas/List/GlobalSchemaSelector/GlobalSchemaSelector.tsx
index 3a667b6d590..000e01fe84c 100644
--- a/kafka-ui-react-app/src/components/Schemas/List/GlobalSchemaSelector/GlobalSchemaSelector.tsx
+++ b/kafka-ui-react-app/src/components/Schemas/List/GlobalSchemaSelector/GlobalSchemaSelector.tsx
@@ -1,5 +1,4 @@
import ConfirmationModal from 'components/common/ConfirmationModal/ConfirmationModal';
-import PageLoader from 'components/common/PageLoader/PageLoader';
import Select from 'components/common/Select/Select';
import { CompatibilityLevelCompatibilityEnum } from 'generated-sources';
import React from 'react';
@@ -48,6 +47,7 @@ const GlobalSchemaSelector: React.FC<GlobalSchemaSelectorProps> = ({
selectSize="M"
defaultValue={globalSchemaCompatibilityLevel}
onChange={() => setUpdateCompatibilityConfirmationVisible(true)}
+ disabled={methods.formState.isSubmitting}
>
{Object.keys(CompatibilityLevelCompatibilityEnum).map(
(level: string) => (
@@ -61,13 +61,10 @@ const GlobalSchemaSelector: React.FC<GlobalSchemaSelectorProps> = ({
isOpen={isUpdateCompatibilityConfirmationVisible}
onCancel={() => setUpdateCompatibilityConfirmationVisible(false)}
onConfirm={methods.handleSubmit(onCompatibilityLevelUpdate)}
+ isConfirming={methods.formState.isSubmitting}
>
- {methods.formState.isSubmitting ? (
- <PageLoader />
- ) : (
- `Are you sure you want to update the global compatibility level?
- This may affect the compatibility levels of the schemas.`
- )}
+ Are you sure you want to update the global compatibility level? This
+ may affect the compatibility levels of the schemas.
</ConfirmationModal>
</GlobalSchemaSelectorWrapper>
</FormProvider>
diff --git a/kafka-ui-react-app/src/redux/actions/__test__/thunks/schemas.spec.ts b/kafka-ui-react-app/src/redux/actions/__test__/thunks/schemas.spec.ts
index ff9bb73742c..b8eb4ac538b 100644
--- a/kafka-ui-react-app/src/redux/actions/__test__/thunks/schemas.spec.ts
+++ b/kafka-ui-react-app/src/redux/actions/__test__/thunks/schemas.spec.ts
@@ -227,6 +227,8 @@ describe('Thunks', () => {
`/api/clusters/${clusterName}/schemas/compatibility`,
200
);
+ fetchMock.getOnce(`/api/clusters/${clusterName}/schemas`, 200);
+
await store.dispatch(
thunks.updateGlobalSchemaCompatibilityLevel(
clusterName,
@@ -235,6 +237,7 @@ describe('Thunks', () => {
);
expect(store.getActions()).toEqual([
actions.updateGlobalSchemaCompatibilityLevelAction.request(),
+ actions.fetchSchemasByClusterNameAction.request(),
actions.updateGlobalSchemaCompatibilityLevelAction.success(
CompatibilityLevelCompatibilityEnum.FORWARD
),
diff --git a/kafka-ui-react-app/src/redux/actions/thunks/schemas.ts b/kafka-ui-react-app/src/redux/actions/thunks/schemas.ts
index 03f1c20960a..5cfb0aab64b 100644
--- a/kafka-ui-react-app/src/redux/actions/thunks/schemas.ts
+++ b/kafka-ui-react-app/src/redux/actions/thunks/schemas.ts
@@ -78,6 +78,7 @@ export const updateGlobalSchemaCompatibilityLevel =
clusterName,
compatibilityLevel: { compatibility: compatibilityLevel },
});
+ dispatch(fetchSchemasByClusterName(clusterName));
dispatch(
actions.updateGlobalSchemaCompatibilityLevelAction.success(
compatibilityLevel
| null | train | train | 2021-12-21T10:12:39 | "2021-12-09T12:42:22Z" | Khakha-A | train |
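A simplified sketch of the "refetch after a successful update" pattern introduced above; the API client shape and action type strings are stand-ins, not the project's generated client or action creators:

```typescript
// Sketch only: the dispatch/action shapes below are illustrative.
type Dispatch = (action: unknown) => void;

interface SchemasApi {
  updateGlobalCompatibilityLevel(level: string): Promise<void>;
  fetchSchemas(clusterName: string): Promise<unknown>;
}

const updateGlobalCompatibilityLevel =
  (api: SchemasApi, clusterName: string, level: string) =>
  async (dispatch: Dispatch): Promise<void> => {
    dispatch({ type: 'UPDATE_COMPATIBILITY__REQUEST' });
    try {
      await api.updateGlobalCompatibilityLevel(level);
      // Re-fetch the schema list so the UI reflects the new level
      // without a manual page reload.
      const schemas = await api.fetchSchemas(clusterName);
      dispatch({ type: 'FETCH_SCHEMAS__SUCCESS', payload: schemas });
      dispatch({ type: 'UPDATE_COMPATIBILITY__SUCCESS', payload: level });
    } catch {
      dispatch({ type: 'UPDATE_COMPATIBILITY__FAILURE' });
    }
  };
```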
provectus/kafka-ui/1225_1312 | provectus/kafka-ui | provectus/kafka-ui/1225 | provectus/kafka-ui/1312 | [
"timestamp(timedelta=0.0, similarity=0.951033727843653)",
"connected"
] | 6351361c38b5ca9b546fe24691900d270411446d | 25d987eb6239d555d80d19f6130552b01dc88a1d | [] | [
"We should use `isValid` instead of checking `Object.values(errors).length > 0`\r\nhttps://react-hook-form.com/api/useform/formstate"
] | "2021-12-22T11:53:39Z" | [
"type/enhancement",
"scope/frontend",
"status/accepted",
"status/confirmed"
] | Disable negative values | ### Is your proposal related to a problem?
<!--
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
-->
(Write your answer here.)
Negative value offset is available when trying to Reset offsets by Reset type 'Offset' on the page Consumer Groups/[Consumer group ID]/Reset Offsets
### Describe the solution you'd like
<!--
Provide a clear and concise description of what you want to happen.
-->
(Describe your proposed solution here.)
Disable the selection of any negative values in Offsets for Reset type 'Offset' in Reset offsets for Consumer group.
### Describe alternatives you've considered
<!--
Let us know about other solutions you've tried or researched.
-->
(Write your answer here.)
### Additional context
<!--
Is there anything else you can add about the proposal?
You might want to link to related issues here, if you haven't already.
-->

(Write your answer here.)
| [
"kafka-ui-react-app/src/components/ConsumerGroups/Details/ResetOffsets/ResetOffsets.tsx"
] | [
"kafka-ui-react-app/src/components/ConsumerGroups/Details/ResetOffsets/ResetOffsets.tsx"
] | [] | diff --git a/kafka-ui-react-app/src/components/ConsumerGroups/Details/ResetOffsets/ResetOffsets.tsx b/kafka-ui-react-app/src/components/ConsumerGroups/Details/ResetOffsets/ResetOffsets.tsx
index 8319fac8c4c..913641ad0ce 100644
--- a/kafka-ui-react-app/src/components/ConsumerGroups/Details/ResetOffsets/ResetOffsets.tsx
+++ b/kafka-ui-react-app/src/components/ConsumerGroups/Details/ResetOffsets/ResetOffsets.tsx
@@ -67,6 +67,7 @@ const ResetOffsets: React.FC = () => {
);
const methods = useForm<FormType>({
+ mode: 'onChange',
defaultValues: {
resetType: ConsumerGroupOffsetsResetType.EARLIEST,
topic: '',
@@ -80,7 +81,7 @@ const ResetOffsets: React.FC = () => {
control,
setError,
clearErrors,
- formState: { errors },
+ formState: { errors, isValid },
} = methods;
const { fields } = useFieldArray({
control,
@@ -127,7 +128,7 @@ const ResetOffsets: React.FC = () => {
partition: number;
}[],
};
- let isValid = true;
+ let isValidAugmentedData = true;
if (augmentedData.resetType === ConsumerGroupOffsetsResetType.OFFSET) {
augmentedData.partitionsOffsets.forEach((offset, index) => {
if (!offset.offset) {
@@ -135,7 +136,7 @@ const ResetOffsets: React.FC = () => {
type: 'manual',
message: "This field shouldn't be empty!",
});
- isValid = false;
+ isValidAugmentedData = false;
}
});
} else if (
@@ -146,10 +147,10 @@ const ResetOffsets: React.FC = () => {
type: 'manual',
message: "This field shouldn't be empty!",
});
- isValid = false;
+ isValidAugmentedData = false;
}
}
- if (isValid) {
+ if (isValidAugmentedData) {
dispatch(
resetConsumerGroupOffsets({
clusterName,
@@ -258,7 +259,13 @@ const ResetOffsets: React.FC = () => {
id={`partitionsOffsets.${index}.offset`}
type="number"
name={`partitionsOffsets.${index}.offset` as const}
- hookFormOptions={{ shouldUnregister: true }}
+ hookFormOptions={{
+ shouldUnregister: true,
+ min: {
+ value: 0,
+ message: 'must be greater than or equal to 0',
+ },
+ }}
defaultValue={field.offset}
/>
<ErrorMessage
@@ -277,7 +284,7 @@ const ResetOffsets: React.FC = () => {
buttonSize="M"
buttonType="primary"
type="submit"
- disabled={selectedPartitions.length === 0}
+ disabled={!isValid || selectedPartitions.length === 0}
>
Submit
</Button>
| null | train | train | 2021-12-21T20:51:18 | "2021-12-10T11:28:38Z" | agolosen | train |
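A minimal react-hook-form sketch of the validation added above (the offset must be at least 0, and submit is disabled while the form is invalid); the field name and messages are illustrative only, not the actual component:

```tsx
import React from 'react';
import { useForm } from 'react-hook-form';

interface FormValues {
  offset: number;
}

const OffsetForm: React.FC<{ onSubmit: (v: FormValues) => void }> = ({ onSubmit }) => {
  const {
    register,
    handleSubmit,
    formState: { errors, isValid },
  } = useForm<FormValues>({ mode: 'onChange' });

  return (
    <form onSubmit={handleSubmit(onSubmit)}>
      <input
        type="number"
        {...register('offset', {
          required: "This field shouldn't be empty!",
          min: { value: 0, message: 'must be greater than or equal to 0' },
        })}
      />
      {errors.offset && <span>{errors.offset.message}</span>}
      {/* Keeping the button disabled while invalid prevents submitting negative offsets. */}
      <button type="submit" disabled={!isValid}>
        Submit
      </button>
    </form>
  );
};

export default OffsetForm;
```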
provectus/kafka-ui/1219_1317 | provectus/kafka-ui | provectus/kafka-ui/1219 | provectus/kafka-ui/1317 | [
"keyword_pr_to_issue",
"connected"
] | 25d987eb6239d555d80d19f6130552b01dc88a1d | f2709ea5c4a12b15aac154a0cdaf049e8d21b17e | [] | [] | "2021-12-24T10:42:58Z" | [
"type/enhancement",
"scope/frontend",
"status/accepted",
"status/confirmed"
] | Disable 0 and negative values in Topic Creation | ### Is your proposal related to a problem?
<!--
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
-->
When creating a new topic, you can select 0, -1, etc. as values for Replication Factor and Min In Sync Replicas, though these values are not allowed by default -- pop-up messages instruct the user about that.
### Describe the solution you'd like
<!--
Provide a clear and concise description of what you want to happen.
-->
Disable the selection of 0 and other negative values in Replication Factor and Min In Sync Replicas.
### Describe alternatives you've considered
<!--
Let us know about other solutions you've tried or researched.
-->
(Write your answer here.)
### Additional context
<!--
Is there anything else you can add about the proposal?
You might want to link to related issues here, if you haven't already.
-->
(Write your answer here.)
| [
"kafka-ui-react-app/src/components/Topics/New/New.tsx",
"kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx",
"kafka-ui-react-app/src/lib/yupExtended.ts"
] | [
"kafka-ui-react-app/src/components/Topics/New/New.tsx",
"kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx",
"kafka-ui-react-app/src/lib/yupExtended.ts"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/New/New.tsx b/kafka-ui-react-app/src/components/Topics/New/New.tsx
index 5fb9fbac743..de4075912c2 100644
--- a/kafka-ui-react-app/src/components/Topics/New/New.tsx
+++ b/kafka-ui-react-app/src/components/Topics/New/New.tsx
@@ -21,6 +21,7 @@ interface RouterParams {
const New: React.FC = () => {
const methods = useForm<TopicFormData>({
+ mode: 'onTouched',
resolver: yupResolver(topicFormValidationSchema),
});
const { clusterName } = useParams<RouterParams>();
diff --git a/kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx b/kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx
index 52f96e9c67f..a81f213a246 100644
--- a/kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx
+++ b/kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx
@@ -70,6 +70,7 @@ const TopicForm: React.FC<Props> = ({
<Input
type="number"
placeholder="Number of partitions"
+ min="1"
defaultValue="1"
name="partitions"
inputSize="M"
@@ -89,6 +90,7 @@ const TopicForm: React.FC<Props> = ({
<Input
type="number"
placeholder="Replication Factor"
+ min="1"
defaultValue="1"
name="replicationFactor"
inputSize="M"
@@ -104,6 +106,7 @@ const TopicForm: React.FC<Props> = ({
<Input
type="number"
placeholder="Min In Sync Replicas"
+ min="1"
defaultValue="1"
name="minInsyncReplicas"
inputSize="M"
@@ -149,6 +152,7 @@ const TopicForm: React.FC<Props> = ({
<InputLabel>Maximum message size in bytes *</InputLabel>
<Input
type="number"
+ min="1"
defaultValue="1000012"
name="maxMessageBytes"
inputSize="M"
diff --git a/kafka-ui-react-app/src/lib/yupExtended.ts b/kafka-ui-react-app/src/lib/yupExtended.ts
index 77382b6bef0..fab2a290018 100644
--- a/kafka-ui-react-app/src/lib/yupExtended.ts
+++ b/kafka-ui-react-app/src/lib/yupExtended.ts
@@ -50,13 +50,13 @@ export const topicFormValidationSchema = yup.object().shape({
TOPIC_NAME_VALIDATION_PATTERN,
'Only alphanumeric, _, -, and . allowed'
),
- partitions: yup.number().required(),
- replicationFactor: yup.number().required(),
- minInsyncReplicas: yup.number().required(),
+ partitions: yup.number().min(1).required(),
+ replicationFactor: yup.number().min(1).required(),
+ minInsyncReplicas: yup.number().min(1).required(),
cleanupPolicy: yup.string().required(),
retentionMs: yup.number().min(-1, 'Must be greater than or equal to -1'),
retentionBytes: yup.number(),
- maxMessageBytes: yup.number().required(),
+ maxMessageBytes: yup.number().min(1).required(),
customParams: yup.array().of(
yup.object().shape({
name: yup.string().required(),
| null | val | train | 2021-12-23T20:01:22 | "2021-12-10T10:16:16Z" | Khakha-A | train |
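A standalone yup sketch of the tightened validation above: numeric topic settings must be at least 1. The field set is trimmed down from the real schema, and the sample values are made up:

```typescript
import * as yup from 'yup';

// Numeric topic settings must be at least 1, mirroring the .min(1) calls added above.
const topicFormSketchSchema = yup.object().shape({
  name: yup.string().required(),
  partitions: yup.number().min(1).required(),
  replicationFactor: yup.number().min(1).required(),
  minInsyncReplicas: yup.number().min(1).required(),
});

// Zero/negative values are rejected with yup's default min message.
topicFormSketchSchema
  .validate({ name: 'demo', partitions: 0, replicationFactor: 1, minInsyncReplicas: 1 })
  .catch((e: yup.ValidationError) => console.log(e.errors));
// -> ["partitions must be greater than or equal to 1"]
```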
provectus/kafka-ui/1229_1324 | provectus/kafka-ui | provectus/kafka-ui/1229 | provectus/kafka-ui/1324 | [
"keyword_issue_to_pr",
"timestamp(timedelta=82242.0, similarity=0.8536223079047901)"
] | 0b112003a5999d8c0390514d9551c4b1f08d00ed | b5aa86cf4c4ac5a638a9b60d9fec3108f8864b2a | [
"Fixed within #1324"
] | [] | "2021-12-27T07:47:07Z" | [
"type/enhancement",
"scope/frontend",
"status/accepted",
"status/confirmed"
] | Pop up for Clear Messages (not clear/remove at once) | ### Is your proposal related to a problem?
<!--
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
-->
When accessing the Messages tab in a specific topic, you have the option to clear messages

When clicked, all messages are removed at once - no warning pop up is displayed; you cannot restore deleted messages.
### Describe the solution you'd like
<!--
Provide a clear and concise description of what you want to happen.
-->
Please add a warning pop-up so that users don't accidentally delete all messages, since there is no way to restore them.
### Describe alternatives you've considered
<!--
Let us know about other solutions you've tried or researched.
-->
--
### Additional context
<!--
Is there anything else you can add about the proposal?
You might want to link to related issues here, if you haven't already.
-->
--
| [
"kafka-ui-react-app/src/components/Topics/Topic/Details/Details.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/Details.spec.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/__snapshots__/Details.spec.tsx.snap"
] | [
"kafka-ui-react-app/src/components/Topics/Topic/Details/Details.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/Details.spec.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/__snapshots__/Details.spec.tsx.snap"
] | [] | diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/Details.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Details/Details.tsx
index 7328f8a532a..cfa97253125 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/Details.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/Details.tsx
@@ -59,6 +59,8 @@ const Details: React.FC<Props> = ({
React.useContext(ClusterContext);
const [isDeleteTopicConfirmationVisible, setDeleteTopicConfirmationVisible] =
React.useState(false);
+ const [isClearTopicConfirmationVisible, setClearTopicConfirmationVisible] =
+ React.useState(false);
const deleteTopicHandler = React.useCallback(() => {
deleteTopic(clusterName, topicName);
}, [clusterName, topicName]);
@@ -72,6 +74,7 @@ const Details: React.FC<Props> = ({
const clearTopicMessagesHandler = React.useCallback(() => {
clearTopicMessages(clusterName, topicName);
+ setClearTopicConfirmationVisible(false);
}, [clusterName, topicName]);
return (
@@ -103,7 +106,7 @@ const Details: React.FC<Props> = ({
</DropdownItem>
<DropdownItem
style={{ color: Colors.red[50] }}
- onClick={clearTopicMessagesHandler}
+ onClick={() => setClearTopicConfirmationVisible(true)}
>
Clear messages
</DropdownItem>
@@ -127,6 +130,13 @@ const Details: React.FC<Props> = ({
>
Are you sure want to remove <b>{topicName}</b> topic?
</ConfirmationModal>
+ <ConfirmationModal
+ isOpen={isClearTopicConfirmationVisible}
+ onCancel={() => setClearTopicConfirmationVisible(false)}
+ onConfirm={clearTopicMessagesHandler}
+ >
+ Are you sure want to clear topic messages?
+ </ConfirmationModal>
<Navbar role="navigation">
<NavLink
exact
diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/Details.spec.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/Details.spec.tsx
index 608405fbce7..50eb93990e0 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/Details.spec.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/Details.spec.tsx
@@ -1,12 +1,16 @@
import React from 'react';
import { mount } from 'enzyme';
import { StaticRouter } from 'react-router-dom';
+import { screen } from '@testing-library/react';
+import userEvent from '@testing-library/user-event';
import ClusterContext from 'components/contexts/ClusterContext';
import Details from 'components/Topics/Topic/Details/Details';
import { internalTopicPayload } from 'redux/reducers/topics/__test__/fixtures';
import { Provider } from 'react-redux';
import { store } from 'redux/store';
import { ThemeProvider } from 'styled-components';
+import { render } from 'lib/testHelpers';
+import { clusterTopicPath } from 'lib/paths';
import theme from 'theme/theme';
describe('Details', () => {
@@ -15,6 +19,30 @@ describe('Details', () => {
const mockClearTopicMessages = jest.fn();
const mockInternalTopicPayload = internalTopicPayload.internal;
+ const setupComponent = (pathname: string) =>
+ render(
+ <StaticRouter location={{ pathname }}>
+ <ClusterContext.Provider
+ value={{
+ isReadOnly: false,
+ hasKafkaConnectConfigured: true,
+ hasSchemaRegistryConfigured: true,
+ isTopicDeletionAllowed: true,
+ }}
+ >
+ <Details
+ clusterName={mockClusterName}
+ topicName={internalTopicPayload.name}
+ name={internalTopicPayload.name}
+ isInternal={false}
+ deleteTopic={mockDelete}
+ clearTopicMessages={mockClearTopicMessages}
+ isDeleted={false}
+ />
+ </ClusterContext.Provider>
+ </StaticRouter>
+ );
+
describe('when it has readonly flag', () => {
it('does not render the Action button a Topic', () => {
const component = mount(
@@ -48,4 +76,17 @@ describe('Details', () => {
expect(component).toMatchSnapshot();
});
});
+
+ it('shows a confirmation popup on deleting topic messages', () => {
+ setupComponent(
+ clusterTopicPath(mockClusterName, internalTopicPayload.name)
+ );
+ const { getByText } = screen;
+ const clearMessagesButton = getByText(/Clear messages/i);
+ userEvent.click(clearMessagesButton);
+
+ expect(
+ getByText(/Are you sure want to clear topic messages?/i)
+ ).toBeInTheDocument();
+ });
});
diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/__snapshots__/Details.spec.tsx.snap b/kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/__snapshots__/Details.spec.tsx.snap
index 56295bf19ed..264390262f9 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/__snapshots__/Details.spec.tsx.snap
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/__snapshots__/Details.spec.tsx.snap
@@ -326,6 +326,11 @@ exports[`Details when it has readonly flag does not render the Action button a T
onCancel={[Function]}
onConfirm={[Function]}
/>
+ <ConfirmationModal
+ isOpen={false}
+ onCancel={[Function]}
+ onConfirm={[Function]}
+ />
<styled.nav
role="navigation"
>
| null | val | train | 2021-12-25T10:01:47 | "2021-12-10T13:52:23Z" | Khakha-A | train |
provectus/kafka-ui/1268_1336 | provectus/kafka-ui | provectus/kafka-ui/1268 | provectus/kafka-ui/1336 | [
"connected"
] | 0326cf7c46a7e2da128367fdab1099aa8e0de338 | bdf01e4e699254fabde760877210683b8ff57bc7 | [] | [
"@NelyDavtyan this change should not occur. Please regen your types locally first `npm run gen:sources`\r\nping: @Haarolean",
"@workshur, thanks.",
"pls cover your code with tests",
"there is no snapshot as I see",
"Actually we need to add specs for this particular line of [code](https://sonarcloud.io/component_measures?id=provectus_kafka-ui_frontend&metric=new_coverage&pullRequest=1336&selected=provectus_kafka-ui_frontend%3Asrc%2Fcomponents%2FTopics%2FTopic%2FDetails%2FOverview%2FOverview.tsx&view=list) ",
"To my mind 82-83 rows are unnecessary, because on row 84, you check the same thing",
"Maybe we should move this constants to fixtures. And also create an object rather then many strings, type this object with some interface(or use one from Overview component props) and pass as props into renderComponent",
"As Oleg mentioned in my PR, that we should not use explicitly 'Colors', but import them as theme, I think that here we should also use theme like this:\r\nhttps://github.com/provectus/kafka-ui/blob/master/kafka-ui-react-app/src/components/common/Pagination/__tests__/Pagination.spec.tsx#L83",
"Oops, you're right, thanks. 😊",
"Yes, you are right. I will delete.",
"I've fixed it.",
" Do we really need shallow from enzyme here? Or we can rewrite this test with renderComponent from testing library",
"Yeah It looks like easy win",
"Decided to rewrite it in another issue"
] | "2021-12-29T08:56:13Z" | [
"type/bug",
"scope/frontend",
"status/accepted"
] | Topic stats look strange | <img width="307" alt="Screenshot 2021-12-17 at 13 39 40" src="https://user-images.githubusercontent.com/1096273/146532288-a54d7e14-8616-4063-9388-4339ec09fae2.png">
1. URP = 0 is good, so it should green/black if 0 and red if greater than 0
2. In sync replicas marked with dot, what does it mean? 1 of 1 looks correct for me. | [
"kafka-ui-react-app/src/components/Connect/Details/Overview/__tests__/__snapshots__/Overview.spec.tsx.snap",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/__test__/Overview.spec.tsx",
"kafka-ui-react-app/src/components/common/Metrics/Indicator.tsx",
"kafka-ui-react-app/src/components/common/Metrics/Metrics.styled.tsx",
"kafka-ui-react-app/src/components/common/Metrics/__tests__/Indicator.spec.tsx"
] | [
"kafka-ui-react-app/src/components/Connect/Details/Overview/__tests__/__snapshots__/Overview.spec.tsx.snap",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/__test__/Overview.spec.tsx",
"kafka-ui-react-app/src/components/common/Metrics/Indicator.tsx",
"kafka-ui-react-app/src/components/common/Metrics/Metrics.styled.tsx",
"kafka-ui-react-app/src/components/common/Metrics/__tests__/Indicator.spec.tsx"
] | [] | diff --git a/kafka-ui-react-app/src/components/Connect/Details/Overview/__tests__/__snapshots__/Overview.spec.tsx.snap b/kafka-ui-react-app/src/components/Connect/Details/Overview/__tests__/__snapshots__/Overview.spec.tsx.snap
index ebe3e8573a9..d704ed4b835 100644
--- a/kafka-ui-react-app/src/components/Connect/Details/Overview/__tests__/__snapshots__/Overview.spec.tsx.snap
+++ b/kafka-ui-react-app/src/components/Connect/Details/Overview/__tests__/__snapshots__/Overview.spec.tsx.snap
@@ -211,6 +211,7 @@ exports[`Overview view matches snapshot 1`] = `
<svg
className="c6"
+ role="svg"
viewBox="0 0 4 4"
xmlns="http://www.w3.org/2000/svg"
>
@@ -219,6 +220,7 @@ exports[`Overview view matches snapshot 1`] = `
cx={2}
cy={2}
r={2}
+ role="circle"
/>
</svg>
</div>
diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx
index abce1702b59..81bcc3456eb 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/Overview.tsx
@@ -11,7 +11,7 @@ import VerticalElipsisIcon from 'components/common/Icons/VerticalElipsisIcon';
import * as Metrics from 'components/common/Metrics';
import TagStyled from 'components/common/Tag/Tag.styled';
-interface Props extends Topic, TopicDetails {
+export interface Props extends Topic, TopicDetails {
clusterName: ClusterName;
topicName: TopicName;
clearTopicMessages(
@@ -52,6 +52,7 @@ const Overview: React.FC<Props> = ({
label="URP"
title="Under replicated partitions"
isAlert
+ alertType={underReplicatedPartitions === 0 ? 'error' : 'success'}
>
<Metrics.RedText>{underReplicatedPartitions}</Metrics.RedText>
</Metrics.Indicator>
diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/__test__/Overview.spec.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/__test__/Overview.spec.tsx
index 3bd9fd920a2..f0b8f58303b 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/__test__/Overview.spec.tsx
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/Overview/__test__/Overview.spec.tsx
@@ -1,9 +1,11 @@
import React from 'react';
import { shallow } from 'enzyme';
+import { screen } from '@testing-library/react';
+import { render } from 'lib/testHelpers';
import Overview from 'components/Topics/Topic/Details/Overview/Overview';
+import theme from 'theme/theme';
describe('Overview', () => {
- const mockInternal = false;
const mockClusterName = 'local';
const mockTopicName = 'topic';
const mockClearTopicMessages = jest.fn();
@@ -23,13 +25,32 @@ describe('Overview', () => {
},
];
+ const renderComponent = ({
+ underReplicatedPartitions = 1,
+ inSyncReplicas = 1,
+ replicas = 1,
+ } = {}) =>
+ render(
+ <Overview
+ name={mockTopicName}
+ partitions={mockPartitions}
+ internal={undefined}
+ clusterName={mockClusterName}
+ topicName={mockTopicName}
+ clearTopicMessages={mockClearTopicMessages}
+ underReplicatedPartitions={underReplicatedPartitions}
+ inSyncReplicas={inSyncReplicas}
+ replicas={replicas}
+ />
+ );
+
describe('when it has internal flag', () => {
it('does not render the Action button a Topic', () => {
const component = shallow(
<Overview
name={mockTopicName}
partitions={mockPartitions}
- internal={mockInternal}
+ internal={false}
clusterName={mockClusterName}
topicName={mockTopicName}
clearTopicMessages={mockClearTopicMessages}
@@ -44,7 +65,7 @@ describe('Overview', () => {
<Overview
name={mockTopicName}
partitions={[]}
- internal={mockInternal}
+ internal
clusterName={mockClusterName}
topicName={mockTopicName}
clearTopicMessages={mockClearTopicMessages}
@@ -54,4 +75,27 @@ describe('Overview', () => {
expect(componentEmpty.find('td').text()).toEqual('No Partitions found');
});
});
+
+ describe('should render circular alert', () => {
+ it('should be in document', () => {
+ renderComponent();
+ const circles = screen.getAllByRole('circle');
+ expect(circles.length).toEqual(2);
+ });
+
+ it('should be the appropriate color', () => {
+ renderComponent({
+ underReplicatedPartitions: 0,
+ inSyncReplicas: 1,
+ replicas: 2,
+ });
+ const circles = screen.getAllByRole('circle');
+ expect(circles[0]).toHaveStyle(
+ `fill: ${theme.circularAlert.color.error}`
+ );
+ expect(circles[1]).toHaveStyle(
+ `fill: ${theme.circularAlert.color.error}`
+ );
+ });
+ });
});
diff --git a/kafka-ui-react-app/src/components/common/Metrics/Indicator.tsx b/kafka-ui-react-app/src/components/common/Metrics/Indicator.tsx
index 5b98b645287..3431b67394a 100644
--- a/kafka-ui-react-app/src/components/common/Metrics/Indicator.tsx
+++ b/kafka-ui-react-app/src/components/common/Metrics/Indicator.tsx
@@ -3,7 +3,7 @@ import { AlertType } from 'redux/interfaces';
import * as S from './Metrics.styled';
-interface Props {
+export interface Props {
fetching?: boolean;
isAlert?: boolean;
label: React.ReactNode;
diff --git a/kafka-ui-react-app/src/components/common/Metrics/Metrics.styled.tsx b/kafka-ui-react-app/src/components/common/Metrics/Metrics.styled.tsx
index 8384e12aa3f..6feb0a3da93 100644
--- a/kafka-ui-react-app/src/components/common/Metrics/Metrics.styled.tsx
+++ b/kafka-ui-react-app/src/components/common/Metrics/Metrics.styled.tsx
@@ -78,6 +78,7 @@ export const RedText = styled.span`
`;
export const CircularAlertWrapper = styled.svg.attrs({
+ role: 'svg',
viewBox: '0 0 4 4',
xmlns: 'http://www.w3.org/2000/svg',
})`
@@ -87,7 +88,12 @@ export const CircularAlertWrapper = styled.svg.attrs({
height: 4px;
`;
-export const CircularAlert = styled.circle.attrs({ cx: 2, cy: 2, r: 2 })<{
+export const CircularAlert = styled.circle.attrs({
+ role: 'circle',
+ cx: 2,
+ cy: 2,
+ r: 2,
+})<{
$type: AlertType;
}>(
({ theme, $type }) => css`
diff --git a/kafka-ui-react-app/src/components/common/Metrics/__tests__/Indicator.spec.tsx b/kafka-ui-react-app/src/components/common/Metrics/__tests__/Indicator.spec.tsx
index 10fcaa85675..07bb6f6853d 100644
--- a/kafka-ui-react-app/src/components/common/Metrics/__tests__/Indicator.spec.tsx
+++ b/kafka-ui-react-app/src/components/common/Metrics/__tests__/Indicator.spec.tsx
@@ -2,20 +2,61 @@ import React from 'react';
import { Indicator } from 'components/common/Metrics';
import { screen } from '@testing-library/react';
import { render } from 'lib/testHelpers';
+import { Props } from 'components/common/Metrics/Indicator';
+import theme from 'theme/theme';
const title = 'Test Title';
const label = 'Test Label';
const child = 'Child';
describe('Indicator', () => {
- it('matches the snapshot', () => {
+ const setupComponent = (props: Partial<Props> = {}) =>
render(
- <Indicator title={title} label="Test Label">
+ <Indicator title={props.title} label={props.label} {...props}>
{child}
</Indicator>
);
+
+ it('renders indicator', () => {
+ setupComponent({ title, label });
expect(screen.getByTitle(title)).toBeInTheDocument();
expect(screen.getByText(label)).toBeInTheDocument();
expect(screen.getByText(child)).toBeInTheDocument();
});
+
+ describe('should render circular alert', () => {
+ it('should be in document', () => {
+ setupComponent({ title, label, isAlert: true });
+ expect(screen.getByRole('svg')).toBeInTheDocument();
+ expect(screen.getByRole('circle')).toBeInTheDocument();
+ });
+
+ it('success alert', () => {
+ setupComponent({ title, label, isAlert: true, alertType: 'success' });
+ expect(screen.getByRole('circle')).toHaveStyle(
+ `fill: ${theme.circularAlert.color.success}`
+ );
+ });
+
+ it('error alert', () => {
+ setupComponent({ title, label, isAlert: true, alertType: 'error' });
+ expect(screen.getByRole('circle')).toHaveStyle(
+ `fill: ${theme.circularAlert.color.error}`
+ );
+ });
+
+ it('warning alert', () => {
+ setupComponent({ title, label, isAlert: true, alertType: 'warning' });
+ expect(screen.getByRole('circle')).toHaveStyle(
+ `fill: ${theme.circularAlert.color.warning}`
+ );
+ });
+
+ it('info alert', () => {
+ setupComponent({ title, label, isAlert: true, alertType: 'info' });
+ expect(screen.getByRole('circle')).toHaveStyle(
+ `fill: ${theme.circularAlert.color.info}`
+ );
+ });
+ });
});
| null | train | train | 2022-01-12T10:14:26 | "2021-12-17T10:43:10Z" | germanosin | train |
provectus/kafka-ui/1329_1339 | provectus/kafka-ui | provectus/kafka-ui/1329 | provectus/kafka-ui/1339 | [
"keyword_issue_to_pr"
] | 92555d435cdcad2d18c6a0a392f84b8d2044456f | 108242aeb2aaf9ee6ca89879c827c1f551cf1a72 | [
"Hello there nandacamargo! 👋\n\nThank you and congratulations 🎉 for opening your very first issue in this project! 💖\n\nIn case you want to claim this issue, please comment down below! We will try to get back to you as soon as we can. 👀",
"Hi, thanks for reaching out.\r\nDoes the app work itself? Which authentication mechanism (if any) are you using?",
"Hi!\r\n\r\nYes, the app works and an additional information that might help is that besides testing with Kafka-UI version 0.3.0, we currently have the same app working in the cluster running with version 0.2.0. It works using the old liveness/readiness probe:\r\n`path: /index.html`.\r\n\r\nSince there are some changes/fixes in Kafka-UI version 0.3.0, we decided to update our deployment to keep up to date. So initially we tried using the probes with `/index.html` path for version 0.3.0 as well, but they failed, so we tried with different [paths](https://github.com/provectus/kafka-ui/blob/master/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/AbstractAuthSecurityConfig.java) mentioned previously, without success.\r\n\r\nRegarding to the authentication mechanism, we are using environment variable `AUTH_TYPE`, with value `LOGIN_FORM`, passing an username and password.\r\n\r\nThanks!",
"@nandacamargo thanks much for the detailed feedback!\r\nThis is to be fixed within #1339 and we'll release a separate build shortly after.",
"Great! Thank you so much! ",
"@Haarolean Hi. I set `SERVER_SERVLET_CONTEXT_PATH: \"/kafka-ui\"` \r\n\r\nMy logs:\r\n\r\n```\r\n2023-01-29 18:22:41,591 INFO [background-preinit] o.h.v.i.u.Version: HV000001: Hibernate Validator 6.2.5.Final\r\n2023-01-29 18:22:41,905 INFO [main] c.p.k.u.KafkaUiApplication: Starting KafkaUiApplication using Java 17.0.5 on kafka-ui-5d458c5d-v2pfq with PID 1 (/kafka-ui-api.jar started by kafkaui in /)\r\n2023-01-29 18:22:41,907 DEBUG [main] c.p.k.u.KafkaUiApplication: Running with Spring Boot v2.7.5, Spring v5.3.23\r\n2023-01-29 18:22:43,028 INFO [main] c.p.k.u.KafkaUiApplication: No active profile set, falling back to 1 default profile: \"default\"\r\n2023-01-29 18:24:12,400 DEBUG [main] c.p.k.u.s.SerdesInitializer: Configuring serdes for cluster shortlink\r\n```\r\n\r\nI get:\r\n\r\n```\r\nReadiness probe failed: Get \"http://10.233.102.248:8080/kafka-ui/actuator/health\"\r\nLiveness probe failed: Get \"http://10.233.102.248:8080/kafka-ui/actuator/health\":\r\n```\r\n\r\nWhat can we do about this problem?\r\n\r\nsource: https://github.com/shortlink-org/shortlink/blob/main/ops/Helm/addons/mq/kafka/values.yaml",
"@batazor hey, please raise a new discussion (or feel free to chat with me on [our discord](https://discord.gg/4DWzD7pGE5)), and provide the info regarding what you see/get when you visit the probe URLs"
] | [] | "2021-12-29T10:11:21Z" | [
"type/bug",
"status/pending",
"status/accepted",
"scope/infrastructure",
"scope/k8s"
] | Liveness and Readiness probes failing | Hello!
First of all, congrats to you all, kafka-ui is such a nice project!
We wrote a Helm Chart to deploy Kafka UI in a Kubernetes cluster, using version 0.3.0.
We are facing the following issue when using liveness and readiness probes in the k8s deployment:
`Readiness probe failed: HTTP probe failed with statuscode: 404`
Even using different paths like `/actuator/health`, `/actuator/info` and `login` that are declared [here](https://github.com/provectus/kafka-ui/blob/master/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/AbstractAuthSecurityConfig.java) the probes are failing. Also, we checked the Helm Chart [deployment](https://github.com/provectus/kafka-ui/blob/master/charts/kafka-ui/templates/deployment.yaml#L54-L73) you provided.
**Set up**
- Kafka UI - Version: 0.3.0
- Docker image: provectuslabs/kafka-ui:0.3.0
- The Helm Chart applied in the cluster has the following liveness and readiness probes:
```
ports:
- name: view
containerPort: 8080
protocol: TCP
livenessProbe:
httpGet:
path: /actuator/health
port: view
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /actuator/health
port: view
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
```
Removing the probes, it is possible to use Kafka UI normally in an integration environment in Kubernetes cluster, but we really need the probes before deploying in production.
Thanks a lot!
| [
"kafka-ui-api/src/main/resources/application.yml"
] | [
"kafka-ui-api/src/main/resources/application.yml"
] | [] | diff --git a/kafka-ui-api/src/main/resources/application.yml b/kafka-ui-api/src/main/resources/application.yml
index a68453bf337..a6a4c8e9716 100644
--- a/kafka-ui-api/src/main/resources/application.yml
+++ b/kafka-ui-api/src/main/resources/application.yml
@@ -1,6 +1,7 @@
auth:
type: DISABLED
- management:
+
+management:
endpoint:
info:
enabled: true
@@ -10,6 +11,9 @@ auth:
web:
exposure:
include: "info,health"
+ health:
+ ldap:
+ enabled: false
logging:
level:
| null | train | train | 2021-12-28T18:01:23 | "2021-12-28T18:50:58Z" | nandacamargo | train |
provectus/kafka-ui/1332_1342 | provectus/kafka-ui | provectus/kafka-ui/1332 | provectus/kafka-ui/1342 | [
"keyword_pr_to_issue"
] | 2f5e484a690567091c44ae4c318703a0c5df2f04 | 24f7e4c19083cf1170699b3fe67335e4a78f9d0e | [
"Hello there gusevalexey0! 👋\n\nThank you and congratulations 🎉 for opening your very first issue in this project! 💖\n\nIn case you want to claim this issue, please comment down below! We will try to get back to you as soon as we can. 👀",
"Hey, thanks for reaching out.\r\nSeems like we missed that bug in one of the latest commits before the release.\r\nWe'll fix it today.",
"Actually, after a more deeper look, I couldn't reproduce your issue. Could you please check your logs for other errors preceding this one? The best way to check it up is to restart the app and check all the logs since the startup time.",
"@Haarolean I am also facing the same issue, looks like you can reproduce this by using Amazon MSK. ",
"Full log from the startup\r\n\r\n```java\r\n _ __ __ _ _ _ ___\r\n | |/ /__ _ / _| | ____ _ | | | |_ _|\r\n | ' // _` | |_| |/ / _` |_____| | | || |\r\n | . \\ (_| | _| < (_| |_____| |_| || |\r\n |_|\\_\\__,_|_| |_|\\_\\__,_| \\___/|___|\r\n\r\n\r\n2021-12-29 08:25:34,311 INFO [background-preinit] o.h.v.i.u.Version: HV000001: Hibernate Validator 6.2.0.Final\r\n2021-12-29 08:25:34,346 INFO [main] c.p.k.u.KafkaUiApplication: Starting KafkaUiApplication using Java 13.0.9 on e241faf6965e with PID 1 (/kafka-ui-api.jar started by root in /)\r\n2021-12-29 08:25:34,349 DEBUG [main] c.p.k.u.KafkaUiApplication: Running with Spring Boot v2.5.6, Spring v5.3.12\r\n2021-12-29 08:25:34,350 INFO [main] c.p.k.u.KafkaUiApplication: No active profile set, falling back to default profiles: default\r\n2021-12-29 08:25:36,783 INFO [main] o.s.d.r.c.RepositoryConfigurationDelegate: Bootstrapping Spring Data LDAP repositories in DEFAULT mode.\r\n2021-12-29 08:25:36,853 INFO [main] o.s.d.r.c.RepositoryConfigurationDelegate: Finished Spring Data repository scanning in 57 ms. Found 0 LDAP repository interfaces.\r\n2021-12-29 08:25:38,199 INFO [main] c.p.k.u.s.DeserializationService: Using SchemaRegistryAwareRecordSerDe for cluster 'local'\r\n2021-12-29 08:25:39,110 INFO [main] o.s.b.a.s.r.ReactiveUserDetailsServiceAutoConfiguration: \r\n\r\nUsing generated security password: 44b802f3-9423-48cc-8656-5fca76f472be\r\n\r\n2021-12-29 08:25:39,313 WARN [main] c.p.k.u.c.a.DisabledAuthSecurityConfig: Authentication is disabled. Access will be unrestricted.\r\n2021-12-29 08:25:39,687 INFO [main] o.s.l.c.s.AbstractContextSource: Property 'userDn' not set - anonymous context will be used for read-write operations\r\n2021-12-29 08:25:39,811 INFO [main] o.s.b.a.e.w.EndpointLinksResolver: Exposing 1 endpoint(s) beneath base path '/actuator'\r\n2021-12-29 08:25:40,793 INFO [main] o.s.b.w.e.n.NettyWebServer: Netty started on port 8080\r\n2021-12-29 08:25:40,830 INFO [main] c.p.k.u.KafkaUiApplication: Started KafkaUiApplication in 7.519 seconds (JVM running for 8.596)\r\n2021-12-29 08:25:40,880 DEBUG [parallel-1] c.p.k.u.s.ClustersMetricsScheduler: Start getting metrics for kafkaCluster: local\r\n2021-12-29 08:25:40,908 INFO [parallel-1] o.a.k.c.a.AdminClientConfig: AdminClientConfig values: \r\n\tbootstrap.servers = [kafka:19092]\r\n\tclient.dns.lookup = use_all_dns_ips\r\n\tclient.id = \r\n\tconnections.max.idle.ms = 300000\r\n\tdefault.api.timeout.ms = 60000\r\n\tmetadata.max.age.ms = 300000\r\n\tmetric.reporters = []\r\n\tmetrics.num.samples = 2\r\n\tmetrics.recording.level = INFO\r\n\tmetrics.sample.window.ms = 30000\r\n\treceive.buffer.bytes = 65536\r\n\treconnect.backoff.max.ms = 1000\r\n\treconnect.backoff.ms = 50\r\n\trequest.timeout.ms = 30000\r\n\tretries = 2147483647\r\n\tretry.backoff.ms = 100\r\n\tsasl.client.callback.handler.class = null\r\n\tsasl.jaas.config = null\r\n\tsasl.kerberos.kinit.cmd = /usr/bin/kinit\r\n\tsasl.kerberos.min.time.before.relogin = 60000\r\n\tsasl.kerberos.service.name = null\r\n\tsasl.kerberos.ticket.renew.jitter = 0.05\r\n\tsasl.kerberos.ticket.renew.window.factor = 0.8\r\n\tsasl.login.callback.handler.class = null\r\n\tsasl.login.class = null\r\n\tsasl.login.refresh.buffer.seconds = 300\r\n\tsasl.login.refresh.min.period.seconds = 60\r\n\tsasl.login.refresh.window.factor = 0.8\r\n\tsasl.login.refresh.window.jitter = 0.05\r\n\tsasl.mechanism = GSSAPI\r\n\tsecurity.protocol = PLAINTEXT\r\n\tsecurity.providers = null\r\n\tsend.buffer.bytes = 131072\r\n\tsocket.connection.setup.timeout.max.ms = 
30000\r\n\tsocket.connection.setup.timeout.ms = 10000\r\n\tssl.cipher.suites = null\r\n\tssl.enabled.protocols = [TLSv1.2, TLSv1.3]\r\n\tssl.endpoint.identification.algorithm = https\r\n\tssl.engine.factory.class = null\r\n\tssl.key.password = null\r\n\tssl.keymanager.algorithm = SunX509\r\n\tssl.keystore.certificate.chain = null\r\n\tssl.keystore.key = null\r\n\tssl.keystore.location = null\r\n\tssl.keystore.password = null\r\n\tssl.keystore.type = JKS\r\n\tssl.protocol = TLSv1.3\r\n\tssl.provider = null\r\n\tssl.secure.random.implementation = null\r\n\tssl.trustmanager.algorithm = PKIX\r\n\tssl.truststore.certificates = null\r\n\tssl.truststore.location = null\r\n\tssl.truststore.password = null\r\n\tssl.truststore.type = JKS\r\n\r\n2021-12-29 08:25:41,034 INFO [parallel-1] o.a.k.c.u.AppInfoParser: Kafka version: 2.8.0\r\n2021-12-29 08:25:41,034 INFO [parallel-1] o.a.k.c.u.AppInfoParser: Kafka commitId: ebb1d6e21cc92130\r\n2021-12-29 08:25:41,034 INFO [parallel-1] o.a.k.c.u.AppInfoParser: Kafka startTimeMs: 1640766341032\r\n2021-12-29 08:25:41,701 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:zookeeper.version=3.7.0-e3704b390a6697bfdf4b0bef79e3da7a4f6bac4b, built on 2021-03-17 09:46 UTC\r\n2021-12-29 08:25:41,701 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:host.name=e241faf6965e\r\n2021-12-29 08:25:41,701 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:java.version=13.0.9\r\n2021-12-29 08:25:41,701 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:java.vendor=Alpine\r\n2021-12-29 08:25:41,701 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:java.home=/usr/lib/jvm/java-13-openjdk\r\n2021-12-29 08:25:41,701 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:java.class.path=kafka-ui-api.jar\r\n2021-12-29 08:25:41,701 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:java.library.path=/usr/lib/jvm/java-13-openjdk/lib/server:/usr/lib/jvm/java-13-openjdk/lib:/usr/lib/jvm/java-13-openjdk/../lib:/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib\r\n2021-12-29 08:25:41,701 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:java.io.tmpdir=/tmp\r\n2021-12-29 08:25:41,701 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:java.compiler=<NA>\r\n2021-12-29 08:25:41,702 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:os.name=Linux\r\n2021-12-29 08:25:41,702 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:os.arch=amd64\r\n2021-12-29 08:25:41,702 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:os.version=5.10.16.3-microsoft-standard-WSL2\r\n2021-12-29 08:25:41,702 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:user.name=root\r\n2021-12-29 08:25:41,702 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:user.home=/root\r\n2021-12-29 08:25:41,702 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:user.dir=/\r\n2021-12-29 08:25:41,702 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:os.memory.free=61MB\r\n2021-12-29 08:25:41,702 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client 
environment:os.memory.max=3166MB\r\n2021-12-29 08:25:41,702 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Client environment:os.memory.total=93MB\r\n2021-12-29 08:25:41,708 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ZooKeeper: Initiating client connection, connectString=zookeeper:2181 sessionTimeout=60000 watcher=com.provectus.kafka.ui.service.ZookeeperService$$Lambda$1151/0x0000000801216040@7edd3a6a\r\n2021-12-29 08:25:41,717 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.c.X509Util: Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation\r\n2021-12-29 08:25:41,722 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ClientCnxnSocket: jute.maxbuffer value is 1048575 Bytes\r\n2021-12-29 08:25:41,737 INFO [kafka-admin-client-thread | adminclient-1] o.a.z.ClientCnxn: zookeeper.request.timeout value is 0. feature enabled=false\r\n2021-12-29 08:25:41,738 DEBUG [kafka-admin-client-thread | adminclient-1] c.p.k.u.s.ZookeeperService: Start getting Zookeeper metrics for kafkaCluster: local\r\n2021-12-29 08:25:41,750 INFO [kafka-admin-client-thread | adminclient-1-SendThread(zookeeper:2181)] o.a.z.ClientCnxn: Opening socket connection to server zookeeper/172.19.0.3:2181.\r\n2021-12-29 08:25:41,750 INFO [kafka-admin-client-thread | adminclient-1-SendThread(zookeeper:2181)] o.a.z.ClientCnxn: SASL config status: Will not attempt to authenticate using SASL (unknown error)\r\n2021-12-29 08:25:41,752 INFO [kafka-admin-client-thread | adminclient-1-SendThread(zookeeper:2181)] o.a.z.ClientCnxn: Socket connection established, initiating session, client: /172.19.0.2:44364, server: zookeeper/172.19.0.3:2181\r\n2021-12-29 08:25:41,762 INFO [kafka-admin-client-thread | adminclient-1-SendThread(zookeeper:2181)] o.a.z.ClientCnxn: Session establishment complete on server zookeeper/172.19.0.3:2181, session id = 0x100000b29dc0009, negotiated timeout = 40000\r\n2021-12-29 08:25:41,856 DEBUG [kafka-admin-client-thread | adminclient-1] c.p.k.u.s.ClustersMetricsScheduler: Metrics updated for cluster: local\r\n2021-12-29 08:26:04,583 WARN [parallel-7] c.p.k.u.e.ErrorCode: Multiple class com.provectus.kafka.ui.exception.ErrorCode values refer to code 4001\r\n2021-12-29 08:26:10,827 DEBUG [parallel-4] c.p.k.u.s.ClustersMetricsScheduler: Start getting metrics for kafkaCluster: local\r\n2021-12-29 08:26:10,831 DEBUG [kafka-admin-client-thread | adminclient-1] c.p.k.u.s.ZookeeperService: Start getting Zookeeper metrics for kafkaCluster: local\r\n2021-12-29 08:26:10,862 DEBUG [kafka-admin-client-thread | adminclient-1] c.p.k.u.s.ClustersMetricsScheduler: Metrics updated for cluster: local\r\n2021-12-29 08:26:11,482 WARN [parallel-5] o.h.v.i.p.j.JavaBeanExecutable: HV000254: Missing parameter metadata for SeekTypeDTO(String, int, String), which declares implicit or synthetic parameters. Automatic resolution of generic type information for method parameters may yield incorrect results if multiple parameters have the same erasure. To solve this, compile your code with the '-parameters' flag.\r\n2021-12-29 08:26:11,559 WARN [parallel-5] o.h.v.i.p.j.JavaBeanExecutable: HV000254: Missing parameter metadata for SeekDirectionDTO(String, int, String), which declares implicit or synthetic parameters. Automatic resolution of generic type information for method parameters may yield incorrect results if multiple parameters have the same erasure. 
To solve this, compile your code with the '-parameters' flag.\r\n2021-12-29 08:26:11,618 INFO [elastic-4] o.a.k.c.c.ConsumerConfig: ConsumerConfig values: \r\n\tallow.auto.create.topics = true\r\n\tauto.commit.interval.ms = 5000\r\n\tauto.offset.reset = earliest\r\n\tbootstrap.servers = [kafka:19092]\r\n\tcheck.crcs = true\r\n\tclient.dns.lookup = use_all_dns_ips\r\n\tclient.id = kafka-ui-4dc7012a-c89f-4804-a8ed-94715418201b\r\n\tclient.rack = \r\n\tconnections.max.idle.ms = 540000\r\n\tdefault.api.timeout.ms = 60000\r\n\tenable.auto.commit = true\r\n\texclude.internal.topics = true\r\n\tfetch.max.bytes = 52428800\r\n\tfetch.max.wait.ms = 500\r\n\tfetch.min.bytes = 1\r\n\tgroup.id = null\r\n\tgroup.instance.id = null\r\n\theartbeat.interval.ms = 3000\r\n\tinterceptor.classes = []\r\n\tinternal.leave.group.on.close = true\r\n\tinternal.throw.on.fetch.stable.offset.unsupported = false\r\n\tisolation.level = read_uncommitted\r\n\tkey.deserializer = class org.apache.kafka.common.serialization.BytesDeserializer\r\n\tmax.partition.fetch.bytes = 1048576\r\n\tmax.poll.interval.ms = 300000\r\n\tmax.poll.records = 500\r\n\tmetadata.max.age.ms = 300000\r\n\tmetric.reporters = []\r\n\tmetrics.num.samples = 2\r\n\tmetrics.recording.level = INFO\r\n\tmetrics.sample.window.ms = 30000\r\n\tpartition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]\r\n\treceive.buffer.bytes = 65536\r\n\treconnect.backoff.max.ms = 1000\r\n\treconnect.backoff.ms = 50\r\n\trequest.timeout.ms = 30000\r\n\tretry.backoff.ms = 100\r\n\tsasl.client.callback.handler.class = null\r\n\tsasl.jaas.config = null\r\n\tsasl.kerberos.kinit.cmd = /usr/bin/kinit\r\n\tsasl.kerberos.min.time.before.relogin = 60000\r\n\tsasl.kerberos.service.name = null\r\n\tsasl.kerberos.ticket.renew.jitter = 0.05\r\n\tsasl.kerberos.ticket.renew.window.factor = 0.8\r\n\tsasl.login.callback.handler.class = null\r\n\tsasl.login.class = null\r\n\tsasl.login.refresh.buffer.seconds = 300\r\n\tsasl.login.refresh.min.period.seconds = 60\r\n\tsasl.login.refresh.window.factor = 0.8\r\n\tsasl.login.refresh.window.jitter = 0.05\r\n\tsasl.mechanism = GSSAPI\r\n\tsecurity.protocol = PLAINTEXT\r\n\tsecurity.providers = null\r\n\tsend.buffer.bytes = 131072\r\n\tsession.timeout.ms = 10000\r\n\tsocket.connection.setup.timeout.max.ms = 30000\r\n\tsocket.connection.setup.timeout.ms = 10000\r\n\tssl.cipher.suites = null\r\n\tssl.enabled.protocols = [TLSv1.2, TLSv1.3]\r\n\tssl.endpoint.identification.algorithm = https\r\n\tssl.engine.factory.class = null\r\n\tssl.key.password = null\r\n\tssl.keymanager.algorithm = SunX509\r\n\tssl.keystore.certificate.chain = null\r\n\tssl.keystore.key = null\r\n\tssl.keystore.location = null\r\n\tssl.keystore.password = null\r\n\tssl.keystore.type = JKS\r\n\tssl.protocol = TLSv1.3\r\n\tssl.provider = null\r\n\tssl.secure.random.implementation = null\r\n\tssl.trustmanager.algorithm = PKIX\r\n\tssl.truststore.certificates = null\r\n\tssl.truststore.location = null\r\n\tssl.truststore.password = null\r\n\tssl.truststore.type = JKS\r\n\tvalue.deserializer = class org.apache.kafka.common.serialization.BytesDeserializer\r\n\r\n2021-12-29 08:26:11,655 INFO [elastic-4] o.a.k.c.u.AppInfoParser: Kafka version: 2.8.0\r\n2021-12-29 08:26:11,655 INFO [elastic-4] o.a.k.c.u.AppInfoParser: Kafka commitId: ebb1d6e21cc92130\r\n2021-12-29 08:26:11,655 INFO [elastic-4] o.a.k.c.u.AppInfoParser: Kafka startTimeMs: 1640766371655\r\n2021-12-29 08:26:11,684 INFO [elastic-4] o.a.k.c.Metadata: [Consumer 
clientId=kafka-ui-4dc7012a-c89f-4804-a8ed-94715418201b, groupId=null] Cluster ID: NA2c93fwSAu943GjgeiF_Q\r\n2021-12-29 08:26:11,687 INFO [elastic-4] c.p.k.u.u.OffsetsSeek: Positioning consumer for topic test with ConsumerPosition(seekType=OFFSET, seekTo={test-0=0}, seekDirection=FORWARD)\r\n2021-12-29 08:26:11,717 INFO [elastic-4] o.a.k.c.c.KafkaConsumer: [Consumer clientId=kafka-ui-4dc7012a-c89f-4804-a8ed-94715418201b, groupId=null] Unsubscribed all topics or patterns and assigned partitions\r\n2021-12-29 08:26:11,718 INFO [elastic-4] c.p.k.u.u.OffsetsSeek: Assignment: []\r\n2021-12-29 08:26:11,723 INFO [elastic-4] c.p.k.u.e.ForwardRecordEmitter: Polling finished\r\n2021-12-29 08:26:11,724 INFO [elastic-4] o.a.k.c.m.Metrics: Metrics scheduler closed\r\n2021-12-29 08:26:11,724 INFO [elastic-4] o.a.k.c.m.Metrics: Closing reporter org.apache.kafka.common.metrics.JmxReporter\r\n2021-12-29 08:26:11,725 INFO [elastic-4] o.a.k.c.m.Metrics: Metrics reporters closed\r\n2021-12-29 08:26:11,737 INFO [elastic-4] o.a.k.c.u.AppInfoParser: App info kafka.consumer for kafka-ui-4dc7012a-c89f-4804-a8ed-94715418201b unregistered\r\n2021-12-29 08:26:27,553 INFO [elastic-4] o.a.k.c.c.ConsumerConfig: ConsumerConfig values: \r\n\tallow.auto.create.topics = true\r\n\tauto.commit.interval.ms = 5000\r\n\tauto.offset.reset = earliest\r\n\tbootstrap.servers = [kafka:19092]\r\n\tcheck.crcs = true\r\n\tclient.dns.lookup = use_all_dns_ips\r\n\tclient.id = kafka-ui-f301102a-e7cc-4773-8d9b-dd0e51d5e0f7\r\n\tclient.rack = \r\n\tconnections.max.idle.ms = 540000\r\n\tdefault.api.timeout.ms = 60000\r\n\tenable.auto.commit = true\r\n\texclude.internal.topics = true\r\n\tfetch.max.bytes = 52428800\r\n\tfetch.max.wait.ms = 500\r\n\tfetch.min.bytes = 1\r\n\tgroup.id = null\r\n\tgroup.instance.id = null\r\n\theartbeat.interval.ms = 3000\r\n\tinterceptor.classes = []\r\n\tinternal.leave.group.on.close = true\r\n\tinternal.throw.on.fetch.stable.offset.unsupported = false\r\n\tisolation.level = read_uncommitted\r\n\tkey.deserializer = class org.apache.kafka.common.serialization.BytesDeserializer\r\n\tmax.partition.fetch.bytes = 1048576\r\n\tmax.poll.interval.ms = 300000\r\n\tmax.poll.records = 500\r\n\tmetadata.max.age.ms = 300000\r\n\tmetric.reporters = []\r\n\tmetrics.num.samples = 2\r\n\tmetrics.recording.level = INFO\r\n\tmetrics.sample.window.ms = 30000\r\n\tpartition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]\r\n\treceive.buffer.bytes = 65536\r\n\treconnect.backoff.max.ms = 1000\r\n\treconnect.backoff.ms = 50\r\n\trequest.timeout.ms = 30000\r\n\tretry.backoff.ms = 100\r\n\tsasl.client.callback.handler.class = null\r\n\tsasl.jaas.config = null\r\n\tsasl.kerberos.kinit.cmd = /usr/bin/kinit\r\n\tsasl.kerberos.min.time.before.relogin = 60000\r\n\tsasl.kerberos.service.name = null\r\n\tsasl.kerberos.ticket.renew.jitter = 0.05\r\n\tsasl.kerberos.ticket.renew.window.factor = 0.8\r\n\tsasl.login.callback.handler.class = null\r\n\tsasl.login.class = null\r\n\tsasl.login.refresh.buffer.seconds = 300\r\n\tsasl.login.refresh.min.period.seconds = 60\r\n\tsasl.login.refresh.window.factor = 0.8\r\n\tsasl.login.refresh.window.jitter = 0.05\r\n\tsasl.mechanism = GSSAPI\r\n\tsecurity.protocol = PLAINTEXT\r\n\tsecurity.providers = null\r\n\tsend.buffer.bytes = 131072\r\n\tsession.timeout.ms = 10000\r\n\tsocket.connection.setup.timeout.max.ms = 30000\r\n\tsocket.connection.setup.timeout.ms = 10000\r\n\tssl.cipher.suites = null\r\n\tssl.enabled.protocols = [TLSv1.2, 
TLSv1.3]\r\n\tssl.endpoint.identification.algorithm = https\r\n\tssl.engine.factory.class = null\r\n\tssl.key.password = null\r\n\tssl.keymanager.algorithm = SunX509\r\n\tssl.keystore.certificate.chain = null\r\n\tssl.keystore.key = null\r\n\tssl.keystore.location = null\r\n\tssl.keystore.password = null\r\n\tssl.keystore.type = JKS\r\n\tssl.protocol = TLSv1.3\r\n\tssl.provider = null\r\n\tssl.secure.random.implementation = null\r\n\tssl.trustmanager.algorithm = PKIX\r\n\tssl.truststore.certificates = null\r\n\tssl.truststore.location = null\r\n\tssl.truststore.password = null\r\n\tssl.truststore.type = JKS\r\n\tvalue.deserializer = class org.apache.kafka.common.serialization.BytesDeserializer\r\n\r\n2021-12-29 08:26:27,563 INFO [elastic-4] o.a.k.c.u.AppInfoParser: Kafka version: 2.8.0\r\n2021-12-29 08:26:27,564 INFO [elastic-4] o.a.k.c.u.AppInfoParser: Kafka commitId: ebb1d6e21cc92130\r\n2021-12-29 08:26:27,564 INFO [elastic-4] o.a.k.c.u.AppInfoParser: Kafka startTimeMs: 1640766387563\r\n2021-12-29 08:26:27,579 INFO [elastic-4] o.a.k.c.Metadata: [Consumer clientId=kafka-ui-f301102a-e7cc-4773-8d9b-dd0e51d5e0f7, groupId=null] Cluster ID: NA2c93fwSAu943GjgeiF_Q\r\n2021-12-29 08:26:27,582 INFO [elastic-4] c.p.k.u.u.OffsetsSeek: Positioning consumer for topic testtest with ConsumerPosition(seekType=OFFSET, seekTo={testtest-0=0}, seekDirection=FORWARD)\r\n2021-12-29 08:26:27,600 INFO [elastic-4] o.a.k.c.c.KafkaConsumer: [Consumer clientId=kafka-ui-f301102a-e7cc-4773-8d9b-dd0e51d5e0f7, groupId=null] Unsubscribed all topics or patterns and assigned partitions\r\n2021-12-29 08:26:27,600 INFO [elastic-4] c.p.k.u.u.OffsetsSeek: Assignment: []\r\n2021-12-29 08:26:27,601 INFO [elastic-4] c.p.k.u.e.ForwardRecordEmitter: Polling finished\r\n2021-12-29 08:26:27,602 INFO [elastic-4] o.a.k.c.m.Metrics: Metrics scheduler closed\r\n2021-12-29 08:26:27,602 INFO [elastic-4] o.a.k.c.m.Metrics: Closing reporter org.apache.kafka.common.metrics.JmxReporter\r\n2021-12-29 08:26:27,602 INFO [elastic-4] o.a.k.c.m.Metrics: Metrics reporters closed\r\n2021-12-29 08:26:27,611 INFO [elastic-4] o.a.k.c.u.AppInfoParser: App info kafka.consumer for kafka-ui-f301102a-e7cc-4773-8d9b-dd0e51d5e0f7 unregistered\r\n2021-12-29 08:26:28,694 ERROR [parallel-4] o.s.b.a.w.r.e.AbstractErrorWebExceptionHandler: [20cbd8dc-12] 500 Server Error for HTTP GET \"/api/clusters/local/topics/testtest/messages/schema\"\r\njava.lang.NullPointerException: null\r\n\tat com.provectus.kafka.ui.util.jsonschema.JsonSchema.toJson(JsonSchema.java:28)\r\n\tSuppressed: reactor.core.publisher.FluxOnAssembly$OnAssemblyException: \r\nError has been observed at the following site(s):\r\n\t*__checkpoint ⇢ com.provectus.kafka.ui.config.CustomWebFilter [DefaultWebFilterChain]\r\n\t*__checkpoint ⇢ com.provectus.kafka.ui.config.ReadOnlyModeFilter [DefaultWebFilterChain]\r\n\t*__checkpoint ⇢ org.springframework.security.web.server.authorization.AuthorizationWebFilter [DefaultWebFilterChain]\r\n\t*__checkpoint ⇢ org.springframework.security.web.server.authorization.ExceptionTranslationWebFilter [DefaultWebFilterChain]\r\n\t*__checkpoint ⇢ org.springframework.security.web.server.authentication.logout.LogoutWebFilter [DefaultWebFilterChain]\r\n\t*__checkpoint ⇢ org.springframework.security.web.server.savedrequest.ServerRequestCacheWebFilter [DefaultWebFilterChain]\r\n\t*__checkpoint ⇢ org.springframework.security.web.server.context.SecurityContextServerWebExchangeWebFilter [DefaultWebFilterChain]\r\n\t*__checkpoint ⇢ 
org.springframework.security.web.server.context.ReactorContextWebFilter [DefaultWebFilterChain]\r\n\t*__checkpoint ⇢ org.springframework.security.web.server.header.HttpHeaderWriterWebFilter [DefaultWebFilterChain]\r\n\t*__checkpoint ⇢ org.springframework.security.config.web.server.ServerHttpSecurity$ServerWebExchangeReactorContextWebFilter [DefaultWebFilterChain]\r\n\t*__checkpoint ⇢ org.springframework.security.web.server.WebFilterChainProxy [DefaultWebFilterChain]\r\n\t*__checkpoint ⇢ org.springframework.boot.actuate.metrics.web.reactive.server.MetricsWebFilter [DefaultWebFilterChain]\r\n\t*__checkpoint ⇢ HTTP GET \"/api/clusters/local/topics/testtest/messages/schema\" [ExceptionHandlingWebHandler]\r\nStack trace:\r\n\t\tat com.provectus.kafka.ui.util.jsonschema.JsonSchema.toJson(JsonSchema.java:28)\r\n\t\tat com.provectus.kafka.ui.serde.schemaregistry.SchemaRegistryAwareRecordSerDe.lambda$getTopicSchema$0(SchemaRegistryAwareRecordSerDe.java:196)\r\n\t\tat java.base/java.util.Optional.orElseGet(Optional.java:362)\r\n\t\tat com.provectus.kafka.ui.serde.schemaregistry.SchemaRegistryAwareRecordSerDe.getTopicSchema(SchemaRegistryAwareRecordSerDe.java:196)\r\n\t\tat com.provectus.kafka.ui.service.TopicsService.getTopicSchema(TopicsService.java:392)\r\n\t\tat com.provectus.kafka.ui.controller.MessagesController.getTopicSchema(MessagesController.java:62)\r\n\t\tat com.provectus.kafka.ui.controller.MessagesController$$FastClassBySpringCGLIB$$8951e2d8.invoke(<generated>)\r\n\t\tat org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)\r\n\t\tat org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:783)\r\n\t\tat org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163)\r\n\t\tat org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:753)\r\n\t\tat org.springframework.validation.beanvalidation.MethodValidationInterceptor.invoke(MethodValidationInterceptor.java:123)\r\n\t\tat org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186)\r\n\t\tat org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:753)\r\n\t\tat org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:698)\r\n\t\tat com.provectus.kafka.ui.controller.MessagesController$$EnhancerBySpringCGLIB$$4fc4bcd9.getTopicSchema(<generated>)\r\n\t\tat java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\r\n\t\tat java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\r\n\t\tat java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\r\n\t\tat java.base/java.lang.reflect.Method.invoke(Method.java:567)\r\n\t\tat org.springframework.web.reactive.result.method.InvocableHandlerMethod.lambda$invoke$0(InvocableHandlerMethod.java:144)\r\n\t\tat reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:125)\r\n\t\tat reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1816)\r\n\t\tat reactor.core.publisher.MonoZip$ZipCoordinator.signal(MonoZip.java:251)\r\n\t\tat reactor.core.publisher.MonoZip$ZipInner.onNext(MonoZip.java:336)\r\n\t\tat reactor.core.publisher.MonoPeekTerminal$MonoTerminalPeekSubscriber.onNext(MonoPeekTerminal.java:180)\r\n\t\tat 
reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2398)\r\n\t\tat reactor.core.publisher.MonoPeekTerminal$MonoTerminalPeekSubscriber.request(MonoPeekTerminal.java:139)\r\n\t\tat reactor.core.publisher.MonoZip$ZipInner.onSubscribe(MonoZip.java:325)\r\n\t\tat reactor.core.publisher.MonoPeekTerminal$MonoTerminalPeekSubscriber.onSubscribe(MonoPeekTerminal.java:152)\r\n\t\tat reactor.core.publisher.MonoJust.subscribe(MonoJust.java:55)\r\n\t\tat reactor.core.publisher.Mono.subscribe(Mono.java:4399)\r\n\t\tat reactor.core.publisher.MonoZip.subscribe(MonoZip.java:128)\r\n\t\tat reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64)\r\n\t\tat reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)\r\n\t\tat reactor.core.publisher.MonoIgnoreThen$ThenIgnoreMain.subscribeNext(MonoIgnoreThen.java:236)\r\n\t\tat reactor.core.publisher.MonoIgnoreThen$ThenIgnoreMain.onComplete(MonoIgnoreThen.java:203)\r\n\t\tat reactor.core.publisher.MonoFlatMap$FlatMapMain.onComplete(MonoFlatMap.java:181)\r\n\t\tat reactor.core.publisher.Operators.complete(Operators.java:137)\r\n\t\tat reactor.core.publisher.MonoZip.subscribe(MonoZip.java:120)\r\n\t\tat reactor.core.publisher.Mono.subscribe(Mono.java:4399)\r\n\t\tat reactor.core.publisher.MonoIgnoreThen$ThenIgnoreMain.subscribeNext(MonoIgnoreThen.java:255)\r\n\t\tat reactor.core.publisher.MonoIgnoreThen.subscribe(MonoIgnoreThen.java:51)\r\n\t\tat reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64)\r\n\t\tat reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:157)\r\n\t\tat reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onNext(FluxSwitchIfEmpty.java:74)\r\n\t\tat reactor.core.publisher.MonoNext$NextSubscriber.onNext(MonoNext.java:82)\r\n\t\tat reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.innerNext(FluxConcatMap.java:282)\r\n\t\tat reactor.core.publisher.FluxConcatMap$ConcatMapInner.onNext(FluxConcatMap.java:861)\r\n\t\tat reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onNext(FluxMapFuseable.java:127)\r\n\t\tat reactor.core.publisher.MonoPeekTerminal$MonoTerminalPeekSubscriber.onNext(MonoPeekTerminal.java:180)\r\n\t\tat reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2398)\r\n\t\tat reactor.core.publisher.MonoPeekTerminal$MonoTerminalPeekSubscriber.request(MonoPeekTerminal.java:139)\r\n\t\tat reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.request(FluxMapFuseable.java:169)\r\n\t\tat reactor.core.publisher.Operators$MultiSubscriptionSubscriber.set(Operators.java:2194)\r\n\t\tat reactor.core.publisher.Operators$MultiSubscriptionSubscriber.onSubscribe(Operators.java:2068)\r\n\t\tat reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onSubscribe(FluxMapFuseable.java:96)\r\n\t\tat reactor.core.publisher.MonoPeekTerminal$MonoTerminalPeekSubscriber.onSubscribe(MonoPeekTerminal.java:152)\r\n\t\tat reactor.core.publisher.MonoJust.subscribe(MonoJust.java:55)\r\n\t\tat reactor.core.publisher.Mono.subscribe(Mono.java:4399)\r\n\t\tat reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.drain(FluxConcatMap.java:449)\r\n\t\tat reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.onSubscribe(FluxConcatMap.java:219)\r\n\t\tat reactor.core.publisher.FluxIterable.subscribe(FluxIterable.java:165)\r\n\t\tat reactor.core.publisher.FluxIterable.subscribe(FluxIterable.java:87)\r\n\t\tat reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64)\r\n\t\tat 
reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)\r\n\t\tat reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64)\r\n\t\tat reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)\r\n\t\tat reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64)\r\n\t\tat reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)\r\n\t\tat reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)\r\n\t\tat reactor.core.publisher.Mono.subscribe(Mono.java:4399)\r\n\t\tat reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onComplete(FluxSwitchIfEmpty.java:82)\r\n\t\tat reactor.core.publisher.MonoPeekTerminal$MonoTerminalPeekSubscriber.onComplete(MonoPeekTerminal.java:299)\r\n\t\tat reactor.core.publisher.MonoPeekTerminal$MonoTerminalPeekSubscriber.onComplete(MonoPeekTerminal.java:299)\r\n\t\tat reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:148)\r\n\t\tat reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onNext(FluxSwitchIfEmpty.java:74)\r\n\t\tat reactor.core.publisher.FluxFilter$FilterSubscriber.onNext(FluxFilter.java:113)\r\n\t\tat reactor.core.publisher.FluxDefaultIfEmpty$DefaultIfEmptySubscriber.onNext(FluxDefaultIfEmpty.java:101)\r\n\t\tat reactor.core.publisher.MonoNext$NextSubscriber.onNext(MonoNext.java:82)\r\n\t\tat reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.innerNext(FluxConcatMap.java:282)\r\n\t\tat reactor.core.publisher.FluxConcatMap$ConcatMapInner.onNext(FluxConcatMap.java:861)\r\n\t\tat reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1816)\r\n\t\tat reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:151)\r\n\t\tat reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onNext(FluxMapFuseable.java:127)\r\n\t\tat reactor.core.publisher.FluxFilterFuseable$FilterFuseableSubscriber.onNext(FluxFilterFuseable.java:118)\r\n\t\tat reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2398)\r\n\t\tat reactor.core.publisher.FluxFilterFuseable$FilterFuseableSubscriber.request(FluxFilterFuseable.java:191)\r\n\t\tat reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.request(FluxMapFuseable.java:169)\r\n\t\tat reactor.core.publisher.MonoFlatMap$FlatMapMain.onSubscribe(MonoFlatMap.java:110)\r\n\t\tat reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onSubscribe(FluxMapFuseable.java:96)\r\n\t\tat reactor.core.publisher.FluxFilterFuseable$FilterFuseableSubscriber.onSubscribe(FluxFilterFuseable.java:87)\r\n\t\tat reactor.core.publisher.MonoJust.subscribe(MonoJust.java:55)\r\n\t\tat reactor.core.publisher.Mono.subscribe(Mono.java:4399)\r\n\t\tat reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.drain(FluxConcatMap.java:449)\r\n\t\tat reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.onSubscribe(FluxConcatMap.java:219)\r\n\t\tat reactor.core.publisher.FluxIterable.subscribe(FluxIterable.java:165)\r\n\t\tat reactor.core.publisher.FluxIterable.subscribe(FluxIterable.java:87)\r\n\t\tat reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64)\r\n\t\tat reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)\r\n\t\tat reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64)\r\n\t\tat reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)\r\n\t\tat reactor.core.publisher.Mono.subscribe(Mono.java:4399)\r\n\t\tat 
reactor.core.publisher.MonoIgnoreThen$ThenIgnoreMain.subscribeNext(MonoIgnoreThen.java:255)\r\n\t\tat reactor.core.publisher.MonoIgnoreThen.subscribe(MonoIgnoreThen.java:51)\r\n\t\tat reactor.core.publisher.Mono.subscribe(Mono.java:4399)\r\n\t\tat reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onComplete(FluxSwitchIfEmpty.java:82)\r\n\t\tat reactor.core.publisher.FluxFilter$FilterSubscriber.onComplete(FluxFilter.java:166)\r\n\t\tat reactor.core.publisher.FluxPeekFuseable$PeekConditionalSubscriber.onComplete(FluxPeekFuseable.java:940)\r\n\t\tat reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onComplete(FluxSwitchIfEmpty.java:85)\r\n\t\tat reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2400)\r\n\t\tat reactor.core.publisher.Operators$MultiSubscriptionSubscriber.set(Operators.java:2194)\r\n\t\tat reactor.core.publisher.Operators$MultiSubscriptionSubscriber.onSubscribe(Operators.java:2068)\r\n\t\tat reactor.core.publisher.MonoJust.subscribe(MonoJust.java:55)\r\n\t\tat reactor.core.publisher.Mono.subscribe(Mono.java:4399)\r\n\t\tat reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onComplete(FluxSwitchIfEmpty.java:82)\r\n\t\tat reactor.core.publisher.MonoNext$NextSubscriber.onComplete(MonoNext.java:102)\r\n\t\tat reactor.core.publisher.FluxFilter$FilterSubscriber.onComplete(FluxFilter.java:166)\r\n\t\tat reactor.core.publisher.FluxFlatMap$FlatMapMain.checkTerminated(FluxFlatMap.java:846)\r\n\t\tat reactor.core.publisher.FluxFlatMap$FlatMapMain.drainLoop(FluxFlatMap.java:608)\r\n\t\tat reactor.core.publisher.FluxFlatMap$FlatMapMain.drain(FluxFlatMap.java:588)\r\n\t\tat reactor.core.publisher.FluxFlatMap$FlatMapMain.onComplete(FluxFlatMap.java:465)\r\n\t\tat reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onComplete(FluxPeekFuseable.java:277)\r\n\t\tat reactor.core.publisher.FluxIterable$IterableSubscription.slowPath(FluxIterable.java:294)\r\n\t\tat reactor.core.publisher.FluxIterable$IterableSubscription.request(FluxIterable.java:230)\r\n\t\tat reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.request(FluxPeekFuseable.java:144)\r\n\t\tat reactor.core.publisher.FluxFlatMap$FlatMapMain.onSubscribe(FluxFlatMap.java:371)\r\n\t\tat reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onSubscribe(FluxPeekFuseable.java:178)\r\n\t\tat reactor.core.publisher.FluxIterable.subscribe(FluxIterable.java:165)\r\n\t\tat reactor.core.publisher.FluxIterable.subscribe(FluxIterable.java:87)\r\n\t\tat reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64)\r\n\t\tat reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)\r\n\t\tat reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:157)\r\n\t\tat reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1816)\r\n\t\tat reactor.core.publisher.FluxDefaultIfEmpty$DefaultIfEmptySubscriber.onComplete(FluxDefaultIfEmpty.java:109)\r\n\t\tat reactor.core.publisher.FluxMap$MapSubscriber.onComplete(FluxMap.java:142)\r\n\t\tat reactor.core.publisher.FluxMap$MapSubscriber.onComplete(FluxMap.java:142)\r\n\t\tat reactor.core.publisher.FluxFilter$FilterSubscriber.onComplete(FluxFilter.java:166)\r\n\t\tat reactor.core.publisher.FluxMap$MapConditionalSubscriber.onComplete(FluxMap.java:269)\r\n\t\tat reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1817)\r\n\t\tat reactor.core.publisher.MonoCacheTime$CoordinatorSubscriber.signalCached(MonoCacheTime.java:337)\r\n\t\tat 
reactor.core.publisher.MonoCacheTime$CoordinatorSubscriber.onNext(MonoCacheTime.java:354)\r\n\t\tat reactor.core.publisher.FluxPeek$PeekSubscriber.onNext(FluxPeek.java:200)\r\n\t\tat reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onNext(FluxSwitchIfEmpty.java:74)\r\n\t\tat reactor.core.publisher.MonoPublishOn$PublishOnSubscriber.run(MonoPublishOn.java:181)\r\n\t\tat reactor.core.scheduler.SchedulerTask.call(SchedulerTask.java:68)\r\n\t\tat reactor.core.scheduler.SchedulerTask.call(SchedulerTask.java:28)\r\n\t\tat java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)\r\n\t\tat java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304)\r\n\t\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)\r\n\t\tat java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)\r\n\t\tat java.base/java.lang.Thread.run(Thread.java:830)\r\n```",
"Also have this issue",
"@amonsat hi, please share the details on how you use the app",
"@Haarolean I have a docker-compose.yml\r\n` zookeeper:\r\n image: confluentinc/cp-zookeeper:latest\r\n environment:\r\n ZOOKEEPER_CLIENT_PORT: 2181\r\n ZOOKEEPER_TICK_TIME: 2000\r\n ports:\r\n - \"22181:2181\"\r\n\r\n kafka:\r\n image: confluentinc/cp-kafka:latest\r\n depends_on:\r\n - zookeeper\r\n ports:\r\n - \"29092:9092\"\r\n environment:\r\n KAFKA_BROKER_ID: 1\r\n KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181\r\n KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092\r\n KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT\r\n KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT\r\n KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1\r\n\r\n kafka-ui:\r\n image: provectuslabs/kafka-ui:latest\r\n depends_on:\r\n - kafka\r\n ports:\r\n - \"9002:8080\"\r\n environment:\r\n - KAFKA_CLUSTERS_0_NAME=local\r\n - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092`\r\n\r\nAnd when I click produce message I have same issue. But the rest of functionality works well.",
"Thanks, I was able to reproduce the issue. ",
"Will be available in `:master` labeled image and in `:latest` shortly after."
] | [] | "2021-12-29T16:23:41Z" | [
"type/bug",
"scope/backend",
"status/accepted",
"status/confirmed",
"type/regression"
] | NullPointerException on getting message schema | **Describe the bug**
It is impossible to produce message to topic due to NullPointerException
**Set up**
Run latest image in docker:
```
...
kafka-ui:
image: provectuslabs/kafka-ui:latest
container_name: kafka-ui
ports:
- "8080:8080"
restart: always
depends_on:
- kafka
environment:
- KAFKA_CLUSTERS_0_NAME=local
- KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
- KAFKA_CLUSTERS_0_ZOOKEEPER=zookeeper:2181
...
```
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Run kafka and kafka-ui
2. Create topic `test`
3. Select created topic. Choose "messages" panel. Click "Produce Message" button
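For reference, the failing request can also be reproduced directly against the API (host and port below assume the `8080:8080` mapping from the compose snippet above; the path is taken from the stack trace):
```bash
# Hits the same endpoint that appears in the stack trace below
curl http://localhost:8080/api/clusters/local/topics/test/messages/schema
```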
**Expected behavior**
The form for producing a message opens
**Screenshots**

**Additional context**
Error in logs:
```java
ERROR [parallel-8] o.s.b.a.w.r.e.AbstractErrorWebExceptionHandler: [24abdea9-4] 500 Server Error for HTTP GET "/api/clusters/local/topics/test/messages/schema"
java.lang.NullPointerException: null
at com.provectus.kafka.ui.util.jsonschema.JsonSchema.toJson(JsonSchema.java:28)
Suppressed: reactor.core.publisher.FluxOnAssembly$OnAssemblyException:
Error has been observed at the following site(s):
*__checkpoint ⇢ com.provectus.kafka.ui.config.CustomWebFilter [DefaultWebFilterChain]
*__checkpoint ⇢ com.provectus.kafka.ui.config.ReadOnlyModeFilter [DefaultWebFilterChain]
*__checkpoint ⇢ org.springframework.security.web.server.authorization.AuthorizationWebFilter [DefaultWebFilterChain]
*__checkpoint ⇢ org.springframework.security.web.server.authorization.ExceptionTranslationWebFilter [DefaultWebFilterChain]
*__checkpoint ⇢ org.springframework.security.web.server.authentication.logout.LogoutWebFilter [DefaultWebFilterChain]
*__checkpoint ⇢ org.springframework.security.web.server.savedrequest.ServerRequestCacheWebFilter [DefaultWebFilterChain]
*__checkpoint ⇢ org.springframework.security.web.server.context.SecurityContextServerWebExchangeWebFilter [DefaultWebFilterChain]
*__checkpoint ⇢ org.springframework.security.web.server.context.ReactorContextWebFilter [DefaultWebFilterChain]
*__checkpoint ⇢ org.springframework.security.web.server.header.HttpHeaderWriterWebFilter [DefaultWebFilterChain]
*__checkpoint ⇢ org.springframework.security.config.web.server.ServerHttpSecurity$ServerWebExchangeReactorContextWebFilter [DefaultWebFilterChain]
*__checkpoint ⇢ org.springframework.security.web.server.WebFilterChainProxy [DefaultWebFilterChain]
*__checkpoint ⇢ org.springframework.boot.actuate.metrics.web.reactive.server.MetricsWebFilter [DefaultWebFilterChain]
*__checkpoint ⇢ HTTP GET "/api/clusters/local/topics/test/messages/schema" [ExceptionHandlingWebHandler]
Stack trace:
at com.provectus.kafka.ui.util.jsonschema.JsonSchema.toJson(JsonSchema.java:28)
at com.provectus.kafka.ui.serde.schemaregistry.SchemaRegistryAwareRecordSerDe.lambda$getTopicSchema$0(SchemaRegistryAwareRecordSerDe.java:196)
at java.base/java.util.Optional.orElseGet(Optional.java:362)
at com.provectus.kafka.ui.serde.schemaregistry.SchemaRegistryAwareRecordSerDe.getTopicSchema(SchemaRegistryAwareRecordSerDe.java:196)
at com.provectus.kafka.ui.service.TopicsService.getTopicSchema(TopicsService.java:392)
at com.provectus.kafka.ui.controller.MessagesController.getTopicSchema(MessagesController.java:62)
at com.provectus.kafka.ui.controller.MessagesController$$FastClassBySpringCGLIB$$8951e2d8.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:783)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163)
at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:753)
at org.springframework.validation.beanvalidation.MethodValidationInterceptor.invoke(MethodValidationInterceptor.java:123)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186)
at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:753)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:698)
at com.provectus.kafka.ui.controller.MessagesController$$EnhancerBySpringCGLIB$$4fc4bcd9.getTopicSchema(<generated>)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:567)
at org.springframework.web.reactive.result.method.InvocableHandlerMethod.lambda$invoke$0(InvocableHandlerMethod.java:144)
at reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:125)
at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1816)
at reactor.core.publisher.MonoZip$ZipCoordinator.signal(MonoZip.java:251)
at reactor.core.publisher.MonoZip$ZipInner.onNext(MonoZip.java:336)
at reactor.core.publisher.MonoPeekTerminal$MonoTerminalPeekSubscriber.onNext(MonoPeekTerminal.java:180)
at reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2398)
at reactor.core.publisher.MonoPeekTerminal$MonoTerminalPeekSubscriber.request(MonoPeekTerminal.java:139)
at reactor.core.publisher.MonoZip$ZipInner.onSubscribe(MonoZip.java:325)
at reactor.core.publisher.MonoPeekTerminal$MonoTerminalPeekSubscriber.onSubscribe(MonoPeekTerminal.java:152)
at reactor.core.publisher.MonoJust.subscribe(MonoJust.java:55)
at reactor.core.publisher.Mono.subscribe(Mono.java:4399)
at reactor.core.publisher.MonoZip.subscribe(MonoZip.java:128)
at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64)
at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)
at reactor.core.publisher.MonoIgnoreThen$ThenIgnoreMain.subscribeNext(MonoIgnoreThen.java:236)
at reactor.core.publisher.MonoIgnoreThen$ThenIgnoreMain.onComplete(MonoIgnoreThen.java:203)
at reactor.core.publisher.MonoFlatMap$FlatMapMain.onComplete(MonoFlatMap.java:181)
at reactor.core.publisher.Operators.complete(Operators.java:137)
at reactor.core.publisher.MonoZip.subscribe(MonoZip.java:120)
at reactor.core.publisher.Mono.subscribe(Mono.java:4399)
at reactor.core.publisher.MonoIgnoreThen$ThenIgnoreMain.subscribeNext(MonoIgnoreThen.java:255)
at reactor.core.publisher.MonoIgnoreThen.subscribe(MonoIgnoreThen.java:51)
at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64)
at reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:157)
at reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onNext(FluxSwitchIfEmpty.java:74)
at reactor.core.publisher.MonoNext$NextSubscriber.onNext(MonoNext.java:82)
at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.innerNext(FluxConcatMap.java:282)
at reactor.core.publisher.FluxConcatMap$ConcatMapInner.onNext(FluxConcatMap.java:861)
at reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onNext(FluxMapFuseable.java:127)
at reactor.core.publisher.MonoPeekTerminal$MonoTerminalPeekSubscriber.onNext(MonoPeekTerminal.java:180)
at reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2398)
at reactor.core.publisher.MonoPeekTerminal$MonoTerminalPeekSubscriber.request(MonoPeekTerminal.java:139)
at reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.request(FluxMapFuseable.java:169)
at reactor.core.publisher.Operators$MultiSubscriptionSubscriber.set(Operators.java:2194)
at reactor.core.publisher.Operators$MultiSubscriptionSubscriber.onSubscribe(Operators.java:2068)
at reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onSubscribe(FluxMapFuseable.java:96)
at reactor.core.publisher.MonoPeekTerminal$MonoTerminalPeekSubscriber.onSubscribe(MonoPeekTerminal.java:152)
at reactor.core.publisher.MonoJust.subscribe(MonoJust.java:55)
at reactor.core.publisher.Mono.subscribe(Mono.java:4399)
at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.drain(FluxConcatMap.java:449)
at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.onSubscribe(FluxConcatMap.java:219)
at reactor.core.publisher.FluxIterable.subscribe(FluxIterable.java:165)
at reactor.core.publisher.FluxIterable.subscribe(FluxIterable.java:87)
at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64)
at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)
at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64)
at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)
at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64)
at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)
at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)
at reactor.core.publisher.Mono.subscribe(Mono.java:4399)
at reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onComplete(FluxSwitchIfEmpty.java:82)
at reactor.core.publisher.MonoPeekTerminal$MonoTerminalPeekSubscriber.onComplete(MonoPeekTerminal.java:299)
at reactor.core.publisher.MonoPeekTerminal$MonoTerminalPeekSubscriber.onComplete(MonoPeekTerminal.java:299)
at reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:148)
at reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onNext(FluxSwitchIfEmpty.java:74)
at reactor.core.publisher.FluxFilter$FilterSubscriber.onNext(FluxFilter.java:113)
at reactor.core.publisher.FluxDefaultIfEmpty$DefaultIfEmptySubscriber.onNext(FluxDefaultIfEmpty.java:101)
at reactor.core.publisher.MonoNext$NextSubscriber.onNext(MonoNext.java:82)
at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.innerNext(FluxConcatMap.java:282)
at reactor.core.publisher.FluxConcatMap$ConcatMapInner.onNext(FluxConcatMap.java:861)
at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1816)
at reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:151)
at reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onNext(FluxMapFuseable.java:127)
at reactor.core.publisher.FluxFilterFuseable$FilterFuseableSubscriber.onNext(FluxFilterFuseable.java:118)
at reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2398)
at reactor.core.publisher.FluxFilterFuseable$FilterFuseableSubscriber.request(FluxFilterFuseable.java:191)
at reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.request(FluxMapFuseable.java:169)
at reactor.core.publisher.MonoFlatMap$FlatMapMain.onSubscribe(MonoFlatMap.java:110)
at reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onSubscribe(FluxMapFuseable.java:96)
at reactor.core.publisher.FluxFilterFuseable$FilterFuseableSubscriber.onSubscribe(FluxFilterFuseable.java:87)
at reactor.core.publisher.MonoJust.subscribe(MonoJust.java:55)
at reactor.core.publisher.Mono.subscribe(Mono.java:4399)
at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.drain(FluxConcatMap.java:449)
at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.onSubscribe(FluxConcatMap.java:219)
at reactor.core.publisher.FluxIterable.subscribe(FluxIterable.java:165)
at reactor.core.publisher.FluxIterable.subscribe(FluxIterable.java:87)
at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64)
at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)
at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64)
at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)
at reactor.core.publisher.Mono.subscribe(Mono.java:4399)
at reactor.core.publisher.MonoIgnoreThen$ThenIgnoreMain.subscribeNext(MonoIgnoreThen.java:255)
at reactor.core.publisher.MonoIgnoreThen.subscribe(MonoIgnoreThen.java:51)
at reactor.core.publisher.Mono.subscribe(Mono.java:4399)
at reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onComplete(FluxSwitchIfEmpty.java:82)
at reactor.core.publisher.FluxFilter$FilterSubscriber.onComplete(FluxFilter.java:166)
at reactor.core.publisher.FluxPeekFuseable$PeekConditionalSubscriber.onComplete(FluxPeekFuseable.java:940)
at reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onComplete(FluxSwitchIfEmpty.java:85)
at reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2400)
at reactor.core.publisher.Operators$MultiSubscriptionSubscriber.set(Operators.java:2194)
at reactor.core.publisher.Operators$MultiSubscriptionSubscriber.onSubscribe(Operators.java:2068)
at reactor.core.publisher.MonoJust.subscribe(MonoJust.java:55)
at reactor.core.publisher.Mono.subscribe(Mono.java:4399)
at reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onComplete(FluxSwitchIfEmpty.java:82)
at reactor.core.publisher.MonoNext$NextSubscriber.onComplete(MonoNext.java:102)
at reactor.core.publisher.FluxFilter$FilterSubscriber.onComplete(FluxFilter.java:166)
at reactor.core.publisher.FluxFlatMap$FlatMapMain.checkTerminated(FluxFlatMap.java:846)
at reactor.core.publisher.FluxFlatMap$FlatMapMain.drainLoop(FluxFlatMap.java:608)
at reactor.core.publisher.FluxFlatMap$FlatMapMain.drain(FluxFlatMap.java:588)
at reactor.core.publisher.FluxFlatMap$FlatMapMain.onComplete(FluxFlatMap.java:465)
at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onComplete(FluxPeekFuseable.java:277)
at reactor.core.publisher.FluxIterable$IterableSubscription.slowPath(FluxIterable.java:294)
at reactor.core.publisher.FluxIterable$IterableSubscription.request(FluxIterable.java:230)
at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.request(FluxPeekFuseable.java:144)
at reactor.core.publisher.FluxFlatMap$FlatMapMain.onSubscribe(FluxFlatMap.java:371)
at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onSubscribe(FluxPeekFuseable.java:178)
at reactor.core.publisher.FluxIterable.subscribe(FluxIterable.java:165)
at reactor.core.publisher.FluxIterable.subscribe(FluxIterable.java:87)
at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:64)
at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)
at reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:157)
at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1816)
at reactor.core.publisher.FluxDefaultIfEmpty$DefaultIfEmptySubscriber.onComplete(FluxDefaultIfEmpty.java:109)
at reactor.core.publisher.FluxMap$MapSubscriber.onComplete(FluxMap.java:142)
at reactor.core.publisher.FluxMap$MapSubscriber.onComplete(FluxMap.java:142)
at reactor.core.publisher.FluxFilter$FilterSubscriber.onComplete(FluxFilter.java:166)
at reactor.core.publisher.FluxMap$MapConditionalSubscriber.onComplete(FluxMap.java:269)
at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1817)
at reactor.core.publisher.MonoCacheTime$CoordinatorSubscriber.signalCached(MonoCacheTime.java:337)
at reactor.core.publisher.MonoCacheTime$CoordinatorSubscriber.onNext(MonoCacheTime.java:354)
at reactor.core.publisher.FluxPeek$PeekSubscriber.onNext(FluxPeek.java:200)
at reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber.onNext(FluxSwitchIfEmpty.java:74)
at reactor.core.publisher.MonoPublishOn$PublishOnSubscriber.run(MonoPublishOn.java:181)
at reactor.core.scheduler.SchedulerTask.call(SchedulerTask.java:68)
at reactor.core.scheduler.SchedulerTask.call(SchedulerTask.java:28)
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
at java.base/java.lang.Thread.run(Thread.java:830)
```
| [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java
index c754b2b68c9..098e2fab670 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java
@@ -62,14 +62,12 @@ public class SchemaRegistryAwareRecordSerDe implements RecordSerDe {
@Nullable
private final JsonSchemaMessageFormatter jsonSchemaMessageFormatter;
- private ObjectMapper objectMapper;
+ private final ObjectMapper objectMapper;
- private SchemaRegistryClient createSchemaRegistryClient(KafkaCluster cluster,
- ObjectMapper objectMapper) {
+ private SchemaRegistryClient createSchemaRegistryClient(KafkaCluster cluster) {
if (cluster.getSchemaRegistry() == null) {
throw new ValidationException("schemaRegistry is not specified");
}
- this.objectMapper = objectMapper;
List<SchemaProvider> schemaProviders =
List.of(new AvroSchemaProvider(), new ProtobufSchemaProvider(), new JsonSchemaProvider());
@@ -98,8 +96,9 @@ private SchemaRegistryClient createSchemaRegistryClient(KafkaCluster cluster,
public SchemaRegistryAwareRecordSerDe(KafkaCluster cluster, ObjectMapper objectMapper) {
this.cluster = cluster;
+ this.objectMapper = objectMapper;
this.schemaRegistryClient = cluster.getSchemaRegistry() != null
- ? createSchemaRegistryClient(cluster, objectMapper)
+ ? createSchemaRegistryClient(cluster)
: null;
if (schemaRegistryClient != null) {
this.avroFormatter = new AvroMessageFormatter(schemaRegistryClient);
| null | train | train | 2021-12-29T13:33:23 | "2021-12-28T22:37:34Z" | gusevalexey0 | train |
provectus/kafka-ui/1347_1350 | provectus/kafka-ui | provectus/kafka-ui/1347 | provectus/kafka-ui/1350 | [
"connected"
] | 269ace82e1bd2e55bde400f1175cb4bdc489287c | 40b5b52bd9ad3399daf6567e1f294bc7f8471b5d | [
"Hello there JoeryH! 👋\n\nThank you and congratulations 🎉 for opening your very first issue in this project! 💖\n\nIn case you want to claim this issue, please comment down below! We will try to get back to you as soon as we can. 👀",
"Hi 👋 ",
"Hi Joery, \nthanks for your interest in kafka-ui. And thank you for the solution! \nWe’re having a long holidays streak here, so we’ll get back to this issue somewhere around 10th of January. \nHappy holidays!\n\n> On 31 Dec 2021, at 17:09, Joery ***@***.***> wrote:\n> \n> \n> Hi 👋\n> \n> —\n> Reply to this email directly, view it on GitHub, or unsubscribe.\n> Triage notifications on the go with GitHub Mobile for iOS or Android. \n> You are receiving this because you are subscribed to this thread.\n",
"Hi @JoeryH, we fixed this bug in current master - you can use docker image with \"master\" tag (sha256:5c1346c11a876af0d25bd2871409631b01a0d7d15c0ef22c40db16390aa1f919) ",
"hey, the latest version works for me! Thank you for the quick fix!",
"@JoeryH glad it works! "
] | [
"What about maven dependencies mentioned in the issue?",
"@Haarolean nothing connected with them - all of them already included in kafka-client lib. We just need to add missing os libs. I checked topic all compressions with this change - all work fine",
"Great, thanks!"
] | "2022-01-04T09:18:37Z" | [
"type/bug",
"scope/backend",
"status/accepted"
] | Snappy support missing | Hi guys,
My Avro data in Kafka is compressed with Snappy. Kafka-ui prints this stack trace when I load messages:
```txt
java.lang.NoClassDefFoundError: Could not initialize class org.xerial.snappy.Snappy
at org.xerial.snappy.SnappyInputStream.hasNextChunk(SnappyInputStream.java:435)
at org.xerial.snappy.SnappyInputStream.read(SnappyInputStream.java:466)
at java.base/java.io.DataInputStream.readByte(DataInputStream.java:270)
```
I fixed this locally by adding
```xml
<dependency>
<groupId>org.xerial.snappy</groupId>
<artifactId>snappy-java</artifactId>
<version>1.1.7.3</version>
</dependency>
```
Which gave me something like:
```txt
java.lang.UnsatisfiedLinkError: /tmp/snappy-1.1.7-5cec5405-2ce7-4046-a8bd-922ce96534a0-libsnappyjava.so:
Error loading shared library ld-linux-x86-64.so.2: No such file or directory
```
Which I solved by adding `gcompat` to the `apk add` command.
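For illustration, a minimal sketch of the Dockerfile change described above (these are the packages from my local fix; the exact base image tag is up to the maintainers):
```dockerfile
FROM alpine:latest
# gcompat provides the glibc compatibility shim that the bundled libsnappyjava.so needs
RUN apk add --no-cache openjdk13-jre gcompat
```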
Would be awesome to have this added to this amazing project.
| [
"kafka-ui-api/Dockerfile"
] | [
"kafka-ui-api/Dockerfile"
] | [] | diff --git a/kafka-ui-api/Dockerfile b/kafka-ui-api/Dockerfile
index 2bc496b1f1b..81cc6c9a549 100644
--- a/kafka-ui-api/Dockerfile
+++ b/kafka-ui-api/Dockerfile
@@ -1,5 +1,5 @@
-FROM alpine:latest
-RUN apk add openjdk13-jre
+FROM alpine:3.15.0
+RUN apk add --no-cache openjdk13-jre libc6-compat gcompat
VOLUME /tmp
ARG JAR_FILE
COPY "/target/${JAR_FILE}" "/kafka-ui-api.jar"
| null | test | train | 2021-12-30T10:02:50 | "2021-12-31T14:07:20Z" | JoeryH | train |
provectus/kafka-ui/1316_1360 | provectus/kafka-ui | provectus/kafka-ui/1316 | provectus/kafka-ui/1360 | [
"connected"
] | ce8f828394772057cbde03e749e45636d1878547 | ce8627ea59be60367c3d824375dd3feca6f8479a | [
"fixed within https://github.com/provectus/kafka-ui/pull/1360",
"It's not"
] | [
"@jonasgeiregat \r\nMono[Void] cant be mapped, you should use thenReturn(..) or then() on Mono[Void]. \r\nIn general I think its better to make all schemaRegistryService.deleteXXX methods return Mono[Void] and do .thenReturn(ResponseEntity.ok().build()); in SchemasController",
"use then() instead of .flatMap(r -> Mono.empty()); Also apply this return type change to all deleteXXX methods ",
"Thanks for pointing this out. I improved the PR according to your input.\r\nI also believe the mapping to a ResponseEntity should be done in the controller and definitely not forward the ResponseEntity from a previous Rest call.",
"lets remove this one and extend SchemaFailedToDeleteException from CustomBaseException. I don't think we need another hierarchy level for this"
] | "2022-01-07T15:08:31Z" | [
"type/bug",
"scope/backend",
"status/accepted",
"status/confirmed"
] | Schema registry: delete request doesn't return a response | **Describe the bug**
Schema registry: delete request doesn't return a response
**Set up**
docker-compose -f kafka-ui.yaml up
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Create a schema
2. Delete a schema
**Expected behavior**
Backend returns a response. In fact, the request stays pending until a timeout occurs. | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/SchemasController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/SchemasController.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/SchemaFailedToDeleteException.java",
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/SchemasController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/SchemasController.java
index c9ba613a3dc..e738216b49a 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/SchemasController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/SchemasController.java
@@ -69,7 +69,8 @@ public Mono<ResponseEntity<Void>> deleteLatestSchema(
@Override
public Mono<ResponseEntity<Void>> deleteSchema(
String clusterName, String subjectName, ServerWebExchange exchange) {
- return schemaRegistryService.deleteSchemaSubjectEntirely(getCluster(clusterName), subjectName);
+ return schemaRegistryService.deleteSchemaSubjectEntirely(getCluster(clusterName), subjectName)
+ .thenReturn(ResponseEntity.ok().build());
}
@Override
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java
index 20df819ec9f..38ab56e8920 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java
@@ -25,7 +25,8 @@ public enum ErrorCode {
DIR_NOT_FOUND(4012, HttpStatus.BAD_REQUEST),
TOPIC_OR_PARTITION_NOT_FOUND(4013, HttpStatus.BAD_REQUEST),
INVALID_REQUEST(4014, HttpStatus.BAD_REQUEST),
- RECREATE_TOPIC_TIMEOUT(4015, HttpStatus.REQUEST_TIMEOUT);
+ RECREATE_TOPIC_TIMEOUT(4015, HttpStatus.REQUEST_TIMEOUT),
+ SCHEMA_NOT_DELETED(4015, HttpStatus.INTERNAL_SERVER_ERROR);
static {
// codes uniqueness check
@@ -33,7 +34,7 @@ public enum ErrorCode {
for (ErrorCode value : ErrorCode.values()) {
if (!codes.add(value.code())) {
LoggerFactory.getLogger(ErrorCode.class)
- .warn("Multiple {} values refer to code {}", ErrorCode.class, value.code);
+ .warn("Multiple {} values refer to code {}", ErrorCode.class, value.code);
}
}
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/SchemaFailedToDeleteException.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/SchemaFailedToDeleteException.java
new file mode 100644
index 00000000000..c88ee86af76
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/SchemaFailedToDeleteException.java
@@ -0,0 +1,13 @@
+package com.provectus.kafka.ui.exception;
+
+public class SchemaFailedToDeleteException extends CustomBaseException {
+
+ public SchemaFailedToDeleteException(String schemaName) {
+ super(String.format("Unable to delete schema with name %s", schemaName));
+ }
+
+ @Override
+ public ErrorCode getErrorCode() {
+ return ErrorCode.SCHEMA_NOT_DELETED;
+ }
+}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java
index 84aba4b81b1..10e0a0a147c 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java
@@ -4,6 +4,7 @@
import static org.springframework.http.HttpStatus.UNPROCESSABLE_ENTITY;
import com.provectus.kafka.ui.exception.DuplicateEntityException;
+import com.provectus.kafka.ui.exception.SchemaFailedToDeleteException;
import com.provectus.kafka.ui.exception.SchemaNotFoundException;
import com.provectus.kafka.ui.exception.SchemaTypeIsNotSupportedException;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
@@ -32,6 +33,7 @@
import org.jetbrains.annotations.NotNull;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
+import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.stereotype.Service;
@@ -156,16 +158,16 @@ private Mono<ResponseEntity<Void>> deleteSchemaSubject(KafkaCluster cluster, Str
).toBodilessEntity();
}
- public Mono<ResponseEntity<Void>> deleteSchemaSubjectEntirely(KafkaCluster cluster,
+ public Mono<Void> deleteSchemaSubjectEntirely(KafkaCluster cluster,
String schemaName) {
return configuredWebClient(
cluster,
HttpMethod.DELETE,
URL_SUBJECT, schemaName)
.retrieve()
- .onStatus(NOT_FOUND::equals,
- throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName)))
- .toBodilessEntity();
+ .onStatus(HttpStatus::isError, errorOnSchemaDeleteFailure(schemaName))
+ .toBodilessEntity()
+ .then();
}
/**
@@ -335,4 +337,13 @@ private WebClient.RequestBodySpec configuredWebClient(InternalSchemaRegistry sch
private boolean isUnrecognizedFieldSchemaTypeMessage(String errorMessage) {
return errorMessage.contains(UNRECOGNIZED_FIELD_SCHEMA_TYPE);
}
+
+ private Function<ClientResponse, Mono<? extends Throwable>> errorOnSchemaDeleteFailure(String schemaName) {
+ return resp -> {
+ if (NOT_FOUND.equals(resp.statusCode())) {
+ return Mono.error(new SchemaNotFoundException(schemaName));
+ }
+ return Mono.error(new SchemaFailedToDeleteException(schemaName));
+ };
+ }
}
| null | test | train | 2022-02-14T15:57:31 | "2021-12-23T11:10:39Z" | Haarolean | train |
provectus/kafka-ui/1185_1364 | provectus/kafka-ui | provectus/kafka-ui/1185 | provectus/kafka-ui/1364 | [
"connected"
] | 6828a412427534305fdd1b0d501821e2cf8b1c5e | a4fce3cd818b58fc3e2249606b633bd9afde5595 | [] | [
"We're trying to use own styles with the styled-components and without classnames, aren't we?",
"Are these colors from figma?",
"remove unused className prop, please"
] | "2022-01-10T13:47:08Z" | [
"type/bug",
"scope/frontend",
"status/accepted",
"type/regression"
] | readonly label disappeared from UI | ### Is your proposal related to a problem?
We are running the UI in read-only mode, and it works as expected (data can be read, but no changes can be made). Previously there was a label that clearly showed which clusters are read-only. Now it is gone.
### Describe the solution you'd like
It would be good to show users that what they are accessing is read-only.
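For illustration, a minimal sketch of what such a marker could look like next to the cluster name (the `Tag` stand-in below is simplified; component and prop names are assumptions, not the exact implementation):
```tsx
import React from 'react';

// Minimal stand-in for the app's Tag component (the real one is a styled element)
const Tag: React.FC<{ color: string; children: React.ReactNode }> = ({ children }) => (
  <span className="tag">{children}</span>
);

// Shows a "readonly" marker next to the cluster name when the cluster is read-only
export const ClusterName: React.FC<{ name: string; readOnly?: boolean }> = ({
  name,
  readOnly,
}) => (
  <>
    {readOnly && <Tag color="blue">readonly</Tag>} {name}
  </>
);
```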
### Describe alternatives you've considered
N/A
### Additional context
It is a low priority in my opinion, just a nice feature that somehow got removed.
| [
"kafka-ui-react-app/src/components/Connect/List/__tests__/__snapshots__/ListItem.spec.tsx.snap",
"kafka-ui-react-app/src/components/Dashboard/ClustersWidget/ClustersWidget.tsx",
"kafka-ui-react-app/src/components/Dashboard/ClustersWidget/__test__/ClustersWidget.spec.tsx",
"kafka-ui-react-app/src/components/Dashboard/ClustersWidget/__test__/fixtures.ts",
"kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/__snapshots__/Details.spec.tsx.snap",
"kafka-ui-react-app/src/components/common/Tag/Tag.styled.tsx",
"kafka-ui-react-app/src/theme/theme.ts"
] | [
"kafka-ui-react-app/src/components/Connect/List/__tests__/__snapshots__/ListItem.spec.tsx.snap",
"kafka-ui-react-app/src/components/Dashboard/ClustersWidget/ClustersWidget.tsx",
"kafka-ui-react-app/src/components/Dashboard/ClustersWidget/__test__/ClustersWidget.spec.tsx",
"kafka-ui-react-app/src/components/Dashboard/ClustersWidget/__test__/fixtures.ts",
"kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/__snapshots__/Details.spec.tsx.snap",
"kafka-ui-react-app/src/components/common/Tag/Tag.styled.tsx",
"kafka-ui-react-app/src/theme/theme.ts"
] | [] | diff --git a/kafka-ui-react-app/src/components/Connect/List/__tests__/__snapshots__/ListItem.spec.tsx.snap b/kafka-ui-react-app/src/components/Connect/List/__tests__/__snapshots__/ListItem.spec.tsx.snap
index 2fbbed56bb5..68d4d62415a 100644
--- a/kafka-ui-react-app/src/components/Connect/List/__tests__/__snapshots__/ListItem.spec.tsx.snap
+++ b/kafka-ui-react-app/src/components/Connect/List/__tests__/__snapshots__/ListItem.spec.tsx.snap
@@ -246,6 +246,7 @@ exports[`Connectors ListItem matches snapshot 1`] = `
},
"tagStyles": Object {
"backgroundColor": Object {
+ "blue": "#e3f2fd",
"gray": "#F1F2F3",
"green": "#D6F5E0",
"red": "#FAD1D1",
diff --git a/kafka-ui-react-app/src/components/Dashboard/ClustersWidget/ClustersWidget.tsx b/kafka-ui-react-app/src/components/Dashboard/ClustersWidget/ClustersWidget.tsx
index 110d134d07f..14b520348f0 100644
--- a/kafka-ui-react-app/src/components/Dashboard/ClustersWidget/ClustersWidget.tsx
+++ b/kafka-ui-react-app/src/components/Dashboard/ClustersWidget/ClustersWidget.tsx
@@ -84,7 +84,10 @@ const ClustersWidget: React.FC<Props> = ({
<tbody>
{chunkItem.data.map((cluster) => (
<tr key={cluster.name}>
- <td>{cluster.name}</td>
+ <td>
+ {cluster.readOnly && <Tag color="blue">readonly</Tag>}{' '}
+ {cluster.name}
+ </td>
<td>{cluster.version}</td>
<td>{cluster.brokerCount}</td>
<td>{cluster.onlinePartitionCount}</td>
diff --git a/kafka-ui-react-app/src/components/Dashboard/ClustersWidget/__test__/ClustersWidget.spec.tsx b/kafka-ui-react-app/src/components/Dashboard/ClustersWidget/__test__/ClustersWidget.spec.tsx
index c787abeaf0c..00bb747e803 100644
--- a/kafka-ui-react-app/src/components/Dashboard/ClustersWidget/__test__/ClustersWidget.spec.tsx
+++ b/kafka-ui-react-app/src/components/Dashboard/ClustersWidget/__test__/ClustersWidget.spec.tsx
@@ -27,4 +27,8 @@ describe('ClustersWidget', () => {
userEvent.click(screen.getByRole('checkbox'));
expect(screen.getAllByRole('row').length).toBe(2);
});
+
+ it('when cluster is read-only', () => {
+ expect(screen.getByText('readonly')).toBeInTheDocument();
+ });
});
diff --git a/kafka-ui-react-app/src/components/Dashboard/ClustersWidget/__test__/fixtures.ts b/kafka-ui-react-app/src/components/Dashboard/ClustersWidget/__test__/fixtures.ts
index 9084de39e24..74714e81079 100644
--- a/kafka-ui-react-app/src/components/Dashboard/ClustersWidget/__test__/fixtures.ts
+++ b/kafka-ui-react-app/src/components/Dashboard/ClustersWidget/__test__/fixtures.ts
@@ -9,6 +9,7 @@ export const onlineCluster: Cluster = {
topicCount: 3,
bytesInPerSec: 0.000030618196853764715,
bytesOutPerSec: 5.737800890036267075817,
+ readOnly: false,
};
export const offlineCluster: Cluster = {
@@ -20,6 +21,7 @@ export const offlineCluster: Cluster = {
topicCount: 2,
bytesInPerSec: 8000.0000067376808542600021,
bytesOutPerSec: 0.8153063567297119490871,
+ readOnly: true,
};
export const clusters: Cluster[] = [onlineCluster, offlineCluster];
diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/__snapshots__/Details.spec.tsx.snap b/kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/__snapshots__/Details.spec.tsx.snap
index 117c5269190..b6e1ccbaf2f 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/__snapshots__/Details.spec.tsx.snap
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/__test__/__snapshots__/Details.spec.tsx.snap
@@ -257,6 +257,7 @@ exports[`Details when it has readonly flag does not render the Action button a T
},
"tagStyles": Object {
"backgroundColor": Object {
+ "blue": "#e3f2fd",
"gray": "#F1F2F3",
"green": "#D6F5E0",
"red": "#FAD1D1",
diff --git a/kafka-ui-react-app/src/components/common/Tag/Tag.styled.tsx b/kafka-ui-react-app/src/components/common/Tag/Tag.styled.tsx
index 14a8f48afdd..9ba447387e0 100644
--- a/kafka-ui-react-app/src/components/common/Tag/Tag.styled.tsx
+++ b/kafka-ui-react-app/src/components/common/Tag/Tag.styled.tsx
@@ -1,7 +1,7 @@
import styled from 'styled-components';
interface Props {
- color: 'green' | 'gray' | 'yellow' | 'red' | 'white';
+ color: 'green' | 'gray' | 'yellow' | 'red' | 'white' | 'blue';
}
export const Tag = styled.p<Props>`
diff --git a/kafka-ui-react-app/src/theme/theme.ts b/kafka-ui-react-app/src/theme/theme.ts
index 6737c6739ab..c1103242c10 100644
--- a/kafka-ui-react-app/src/theme/theme.ts
+++ b/kafka-ui-react-app/src/theme/theme.ts
@@ -34,6 +34,12 @@ export const Colors = {
yellow: {
'10': '#FFEECC',
},
+ blue: {
+ '10': '#e3f2fd',
+ '20': '#bbdefb',
+ '30': '#90caf9',
+ '40': '#64b5f6',
+ },
};
const theme = {
@@ -177,6 +183,7 @@ const theme = {
yellow: Colors.yellow[10],
white: Colors.neutral[10],
red: Colors.red[10],
+ blue: Colors.blue[10],
},
color: Colors.neutral[90],
},
| null | train | train | 2022-01-17T11:58:33 | "2021-12-08T04:04:36Z" | akamensky | train |
provectus/kafka-ui/1369_1370 | provectus/kafka-ui | provectus/kafka-ui/1369 | provectus/kafka-ui/1370 | [
"timestamp(timedelta=30.0, similarity=0.9999999999999998)",
"connected"
] | b5aa86cf4c4ac5a638a9b60d9fec3108f8864b2a | 08d7216718bbff4f7398bd0d14c637c9255169c6 | [] | [] | "2022-01-11T08:46:53Z" | [
"type/enhancement",
"scope/k8s"
] | Support for ingressClassName in helm chart | Currently the helm chart doesn't support specifying ingressClassName in the ingress object.
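For illustration, the kind of configuration a user would expect to be able to set (the class name and host below are only example values):
```yaml
ingress:
  enabled: true
  ingressClassName: nginx   # example value; currently there is no way to set this
  host: kafka-ui.example.com
```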
Need to support it. | [
"charts/kafka-ui/templates/ingress.yaml",
"charts/kafka-ui/values.yaml"
] | [
"charts/kafka-ui/templates/ingress.yaml",
"charts/kafka-ui/values.yaml"
] | [] | diff --git a/charts/kafka-ui/templates/ingress.yaml b/charts/kafka-ui/templates/ingress.yaml
index 6c823c0ab05..641cfd9b67e 100644
--- a/charts/kafka-ui/templates/ingress.yaml
+++ b/charts/kafka-ui/templates/ingress.yaml
@@ -24,6 +24,9 @@ spec:
- {{ .Values.ingress.host }}
secretName: {{ .Values.ingress.tls.secretName }}
{{- end }}
+ {{- if .Values.ingress.ingressClassName }}
+ ingressClassName: {{ .Values.ingress.ingressClassName }}
+ {{- end }}
rules:
- http:
paths:
diff --git a/charts/kafka-ui/values.yaml b/charts/kafka-ui/values.yaml
index 50e54038500..3fc0754aff5 100644
--- a/charts/kafka-ui/values.yaml
+++ b/charts/kafka-ui/values.yaml
@@ -75,6 +75,9 @@ ingress:
# Annotations for the Ingress
annotations: {}
+ # ingressClassName for the Ingress
+ ingressClassName: ""
+
# The path for the Ingress
path: ""
| null | train | train | 2022-01-10T14:34:36 | "2022-01-11T08:33:39Z" | 5hin0bi | train |
provectus/kafka-ui/1375_1376 | provectus/kafka-ui | provectus/kafka-ui/1375 | provectus/kafka-ui/1376 | [
"timestamp(timedelta=0.0, similarity=0.9412043585622443)",
"connected"
] | 0326cf7c46a7e2da128367fdab1099aa8e0de338 | b6c876275f56f03b5be0adafd388451864fbf8d9 | [
"Hello there Hurenka! 👋\n\nThank you and congratulations 🎉 for opening your very first issue in this project! 💖\n\nIn case you want to claim this issue, please comment down below! We will try to get back to you as soon as we can. 👀"
] | [
"Sonar added this line to code smells list. pls check if we can fix that"
] | "2022-01-12T10:24:50Z" | [
"scope/frontend",
"scope/QA",
"status/accepted"
] | Fix GlobalSchemaSelector tests | [
"kafka-ui-react-app/src/components/Schemas/List/GlobalSchemaSelector/GlobalSchemaSelector.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/Filters/__tests__/__snapshots__/Filters.spec.tsx.snap",
"kafka-ui-react-app/src/components/common/Select/Select.tsx"
] | [
"kafka-ui-react-app/src/components/Schemas/List/GlobalSchemaSelector/GlobalSchemaSelector.tsx",
"kafka-ui-react-app/src/components/Schemas/List/GlobalSchemaSelector/__test__/GlobalSchemaSelector.spec.tsx",
"kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/Filters/__tests__/__snapshots__/Filters.spec.tsx.snap",
"kafka-ui-react-app/src/components/common/Select/Select.tsx"
] | [] | diff --git a/kafka-ui-react-app/src/components/Schemas/List/GlobalSchemaSelector/GlobalSchemaSelector.tsx b/kafka-ui-react-app/src/components/Schemas/List/GlobalSchemaSelector/GlobalSchemaSelector.tsx
index 6447b2915c2..66edb6f0465 100644
--- a/kafka-ui-react-app/src/components/Schemas/List/GlobalSchemaSelector/GlobalSchemaSelector.tsx
+++ b/kafka-ui-react-app/src/components/Schemas/List/GlobalSchemaSelector/GlobalSchemaSelector.tsx
@@ -63,6 +63,9 @@ const GlobalSchemaSelector: React.FC = () => {
compatibilityLevel: { compatibility: nextCompatibilityLevel },
});
dispatch(fetchSchemas(clusterName));
+ setCurrentCompatibilityLevel(nextCompatibilityLevel);
+ setNextCompatibilityLevel(undefined);
+ setIsConfirmationVisible(false);
} catch (e) {
const err = await getResponse(e as Response);
dispatch(serverErrorAlertAdded(err));
@@ -78,7 +81,7 @@ const GlobalSchemaSelector: React.FC = () => {
<div>Global Compatibility Level: </div>
<Select
selectSize="M"
- defaultValue={currentCompatibilityLevel}
+ value={currentCompatibilityLevel}
onChange={handleChangeCompatibilityLevel}
disabled={isFetching || isUpdating || isConfirmationVisible}
>
diff --git a/kafka-ui-react-app/src/components/Schemas/List/GlobalSchemaSelector/__test__/GlobalSchemaSelector.spec.tsx b/kafka-ui-react-app/src/components/Schemas/List/GlobalSchemaSelector/__test__/GlobalSchemaSelector.spec.tsx
new file mode 100644
index 00000000000..0d8c6715c0f
--- /dev/null
+++ b/kafka-ui-react-app/src/components/Schemas/List/GlobalSchemaSelector/__test__/GlobalSchemaSelector.spec.tsx
@@ -0,0 +1,90 @@
+import React from 'react';
+import { screen, waitFor } from '@testing-library/react';
+import { render } from 'lib/testHelpers';
+import { CompatibilityLevelCompatibilityEnum } from 'generated-sources';
+import GlobalSchemaSelector from 'components/Schemas/List/GlobalSchemaSelector/GlobalSchemaSelector';
+import userEvent from '@testing-library/user-event';
+import { clusterSchemasPath } from 'lib/paths';
+import { Route } from 'react-router';
+import fetchMock from 'fetch-mock';
+
+const clusterName = 'testClusterName';
+
+const selectForwardOption = () =>
+ userEvent.selectOptions(
+ screen.getByRole('listbox'),
+ CompatibilityLevelCompatibilityEnum.FORWARD
+ );
+
+const expectOptionIsSelected = (option: string) => {
+ const optionElement: HTMLOptionElement = screen.getByText(option);
+ expect(optionElement.selected).toBeTruthy();
+};
+
+describe('GlobalSchemaSelector', () => {
+ const renderComponent = () =>
+ render(
+ <Route path={clusterSchemasPath(':clusterName')}>
+ <GlobalSchemaSelector />
+ </Route>,
+ {
+ pathname: clusterSchemasPath(clusterName),
+ }
+ );
+
+ beforeEach(async () => {
+ const fetchGlobalCompatibilityLevelMock = fetchMock.getOnce(
+ `api/clusters/${clusterName}/schemas/compatibility`,
+ { compatibility: CompatibilityLevelCompatibilityEnum.FULL }
+ );
+ renderComponent();
+ await waitFor(() =>
+ expect(fetchGlobalCompatibilityLevelMock.called()).toBeTruthy()
+ );
+ });
+
+ afterEach(() => {
+ fetchMock.reset();
+ });
+
+ it('renders with initial prop', () => {
+ expectOptionIsSelected(CompatibilityLevelCompatibilityEnum.FULL);
+ });
+
+ it('shows popup when select value is changed', async () => {
+ expectOptionIsSelected(CompatibilityLevelCompatibilityEnum.FULL);
+ selectForwardOption();
+ expect(screen.getByText('Confirm the action')).toBeInTheDocument();
+ });
+
+ it('resets select value when cancel is clicked', () => {
+ selectForwardOption();
+ userEvent.click(screen.getByText('Cancel'));
+ expect(screen.queryByText('Confirm the action')).not.toBeInTheDocument();
+ expectOptionIsSelected(CompatibilityLevelCompatibilityEnum.FULL);
+ });
+
+ it('sets new schema when confirm is clicked', async () => {
+ selectForwardOption();
+ const putNewCompatibilityMock = fetchMock.putOnce(
+ `api/clusters/${clusterName}/schemas/compatibility`,
+ 200,
+ {
+ body: {
+ compatibility: CompatibilityLevelCompatibilityEnum.FORWARD,
+ },
+ }
+ );
+ const getSchemasMock = fetchMock.getOnce(
+ `api/clusters/${clusterName}/schemas`,
+ 200
+ );
+ await waitFor(() => {
+ userEvent.click(screen.getByText('Submit'));
+ });
+ await waitFor(() => expect(putNewCompatibilityMock.called()).toBeTruthy());
+ await waitFor(() => expect(getSchemasMock.called()).toBeTruthy());
+ expect(screen.queryByText('Confirm the action')).not.toBeInTheDocument();
+ expectOptionIsSelected(CompatibilityLevelCompatibilityEnum.FORWARD);
+ });
+});
diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/Filters/__tests__/__snapshots__/Filters.spec.tsx.snap b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/Filters/__tests__/__snapshots__/Filters.spec.tsx.snap
index 312b0ec70d8..97693628fb4 100644
--- a/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/Filters/__tests__/__snapshots__/Filters.spec.tsx.snap
+++ b/kafka-ui-react-app/src/components/Topics/Topic/Details/Messages/Filters/__tests__/__snapshots__/Filters.spec.tsx.snap
@@ -464,6 +464,7 @@ exports[`Filters component matches the snapshot 1`] = `
<select
class="c7"
id="selectSeekType"
+ role="listbox"
>
<option
value="OFFSET"
@@ -562,6 +563,7 @@ exports[`Filters component matches the snapshot 1`] = `
>
<select
class="c11"
+ role="listbox"
>
<option
value="FORWARD"
@@ -1098,6 +1100,7 @@ exports[`Filters component when fetching matches the snapshot 1`] = `
<select
class="c7"
id="selectSeekType"
+ role="listbox"
>
<option
value="OFFSET"
@@ -1196,6 +1199,7 @@ exports[`Filters component when fetching matches the snapshot 1`] = `
>
<select
class="c11"
+ role="listbox"
>
<option
value="FORWARD"
diff --git a/kafka-ui-react-app/src/components/common/Select/Select.tsx b/kafka-ui-react-app/src/components/common/Select/Select.tsx
index e4ad1554827..8a8b07f13e7 100644
--- a/kafka-ui-react-app/src/components/common/Select/Select.tsx
+++ b/kafka-ui-react-app/src/components/common/Select/Select.tsx
@@ -38,7 +38,12 @@ const Select: React.FC<SelectProps> = ({
{children}
</S.Select>
) : (
- <S.Select selectSize={selectSize} isLive={isLive} {...props}>
+ <S.Select
+ role="listbox"
+ selectSize={selectSize}
+ isLive={isLive}
+ {...props}
+ >
{children}
</S.Select>
)}
| null | train | train | 2022-01-12T10:14:26 | "2022-01-12T10:17:46Z" | Hurenka | train |
|
provectus/kafka-ui/1139_1378 | provectus/kafka-ui | provectus/kafka-ui/1139 | provectus/kafka-ui/1378 | [
"connected"
] | 6d6bce65aa06a9218f5d5c0ce85df330c796135c | 0a7d64de7861e43f0289f01cd23281d89c4d9c3f | [
"Confirmed that replacing spaces in names with underscores \"resolves\" this issue.\r\n\r\nI think this either need to be documented or fixed by using URL encoding everywhere. Or using original name for display and replacing all non-supported characters with underscore for URLs.",
"Hmm, that's pretty unnatural to put spaces there since these are just environment variables."
] | [] | "2022-01-12T20:09:32Z" | [
"type/bug",
"good first issue",
"scope/backend",
"status/accepted"
] | Bad handling of cluster names | **Describe the bug**
We have switched to using user-friendly cluster names in `KAFKA_CLUSTERS_NNN_NAME`, so that the names are human-readable and contain spaces. After we changed to those names, no operations can be performed on the cluster (browsing still works, though).
**Set up**
Docker latest master tag
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Use user-friendly names such as `KAFKA_CLUSTERS_0_NAME: "Cluster name"`
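For reference, in docker-compose terms this is just a cluster name environment variable containing a space (the bootstrap server value below is only an example):
```yaml
environment:
  - KAFKA_CLUSTERS_0_NAME=Cluster name
  - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
```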
**Expected behavior**
Everything works as expected.
**Actual behavior**
Error message in UI (I truncated the screenshot to avoid exposing private information):

And errors in the logs:
```
02:47:19.596 [parallel-3] WARN com.provectus.kafka.ui.exception.ErrorCode - Multiple class com.provectus.kafka.ui.exception.ErrorCode values refer to code 4001
02:47:19.614 [parallel-3] DEBUG org.springframework.http.codec.json.Jackson2JsonEncoder - [1296e0bd-3] Encoding [class ErrorResponseDTO {<LF> code: 4007<LF> message: No cluster for name 'Cluster%20name'<LF> timestam (truncated)...]
0
```
Meanwhile, browsing all topics, data, etc. works just fine.
| [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ReadOnlyModeFilter.java"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ReadOnlyModeFilter.java"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ReadOnlyModeFilter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ReadOnlyModeFilter.java
index b998748fccb..41b6cc408f5 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ReadOnlyModeFilter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ReadOnlyModeFilter.java
@@ -3,6 +3,8 @@
import com.provectus.kafka.ui.exception.ClusterNotFoundException;
import com.provectus.kafka.ui.exception.ReadOnlyModeException;
import com.provectus.kafka.ui.service.ClustersStorage;
+import java.net.URLDecoder;
+import java.nio.charset.StandardCharsets;
import java.util.regex.Pattern;
import lombok.RequiredArgsConstructor;
import org.jetbrains.annotations.NotNull;
@@ -32,7 +34,8 @@ public Mono<Void> filter(ServerWebExchange exchange, @NotNull WebFilterChain cha
}
var path = exchange.getRequest().getPath().pathWithinApplication().value();
- var matcher = CLUSTER_NAME_REGEX.matcher(path);
+ var decodedPath = URLDecoder.decode(path, StandardCharsets.UTF_8);
+ var matcher = CLUSTER_NAME_REGEX.matcher(decodedPath);
if (!matcher.find()) {
return chain.filter(exchange);
}
| null | train | train | 2022-01-12T16:27:08 | "2021-11-30T02:54:03Z" | akamensky | train |
provectus/kafka-ui/1217_1430 | provectus/kafka-ui | provectus/kafka-ui/1217 | provectus/kafka-ui/1430 | [
"connected"
] | 86394034486ee20f50a91bb52bb792695be7ab07 | 2b79fee1e4b2574bf34dee0a2c66df01bfc04b96 | [] | [
"We have either to remove `aria-label=\"form\"` or change `aria-label` to something meaningful \r\nIn your case its something like `Connector config edit` (I'm not sure on this text)\r\n\r\nReasoning: Aria label has to be meaningful and unique \r\n[MDN role form](https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/Roles/form_role)\r\n[MDN aria-label](https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/Attributes/aria-label)",
"I thought we are getting rid of snapshots\r\n@workshur can clarify this",
"```js\r\nawait waitFor(() => fireEvent.submit(screen.getByRole('form')));\r\n```\r\nIs use of `fireEvent.submit` - desired approach?\r\n\r\nI thought about case where someone deletes `Submit` button and this test won't fail\r\n\r\nI'm not sure about this\r\n```js\r\nuserEvent.click(screen.getByText('Submit'));\r\n```",
"got a lot of problems with submitting react hook form because of connector\r\nthis is the only approach which works"
] | "2022-01-19T16:19:01Z" | [
"type/bug",
"scope/frontend",
"status/accepted",
"status/confirmed"
] | Space sensitive syntax for Create Connector | **Describe the bug**
If there is a space before the connector config code, the Submit button is inactive and the warning 'config is not JSON object' is displayed. If you delete the space, the code is accepted. This behavior is confusing because it's not obvious that the problem is just the space (for instance, for Schema it works well with a space before the code).
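For reference, a minimal sketch of the kind of handling that avoids this (illustrative only; the function name is made up, but the trim-before-parse idea matches the fix below):
```typescript
// Trim surrounding whitespace before validating/parsing the connector config,
// so a leading space no longer makes an otherwise valid JSON object be rejected.
const parseConnectorConfig = (raw: string): Record<string, unknown> =>
  JSON.parse(raw.trim());
```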
**Set up**
(How do you run the app?
Which version of the app are you running? Provide either docker image version or check commit hash at the top left corner. We won't be able to help you without this information.)
http://redesign.internal.kafka-ui.provectus.io/
**Steps to Reproduce**
Steps to reproduce the behavior:
1. Navigate to the Kafka Connect menu item
2. Click the Create connector button
3. Enter a Name and a Config with a space before the code
**Expected behavior**
A connector is created regardless of leading whitespace in the config
**Screenshots**
With space

Without space (the same code)

**Additional context**
(Add any other context about the problem here)
The same bug in the old and redesigned versions https://www.kafka-ui.provectus.io/ui http://redesign.internal.kafka-ui.provectus.io/ | [
"kafka-ui-react-app/src/components/Connect/Edit/Edit.tsx",
"kafka-ui-react-app/src/components/Connect/Edit/__tests__/Edit.spec.tsx",
"kafka-ui-react-app/src/components/Connect/Edit/__tests__/__snapshots__/Edit.spec.tsx.snap",
"kafka-ui-react-app/src/components/Connect/New/New.tsx",
"kafka-ui-react-app/src/components/Connect/New/__tests__/New.spec.tsx",
"kafka-ui-react-app/src/components/Connect/New/__tests__/__snapshots__/New.spec.tsx.snap",
"kafka-ui-react-app/src/lib/yupExtended.ts"
] | [
"kafka-ui-react-app/src/components/Connect/Edit/Edit.tsx",
"kafka-ui-react-app/src/components/Connect/Edit/__tests__/Edit.spec.tsx",
"kafka-ui-react-app/src/components/Connect/New/New.styled.ts",
"kafka-ui-react-app/src/components/Connect/New/New.tsx",
"kafka-ui-react-app/src/components/Connect/New/__tests__/New.spec.tsx",
"kafka-ui-react-app/src/lib/yupExtended.ts"
] | [] | diff --git a/kafka-ui-react-app/src/components/Connect/Edit/Edit.tsx b/kafka-ui-react-app/src/components/Connect/Edit/Edit.tsx
index 3e8cb1dce97..baf933cfdef 100644
--- a/kafka-ui-react-app/src/components/Connect/Edit/Edit.tsx
+++ b/kafka-ui-react-app/src/components/Connect/Edit/Edit.tsx
@@ -88,7 +88,7 @@ const Edit: React.FC<EditProps> = ({
clusterName,
connectName,
connectorName,
- JSON.parse(values.config)
+ JSON.parse(values.config.trim())
);
if (connector) {
history.push(
@@ -116,7 +116,7 @@ const Edit: React.FC<EditProps> = ({
accidentally breaking your connector config!
</ConnectEditWarningMessageStyled>
)}
- <form onSubmit={handleSubmit(onSubmit)}>
+ <form onSubmit={handleSubmit(onSubmit)} aria-label="Edit connect form">
<div>
<Controller
control={control}
diff --git a/kafka-ui-react-app/src/components/Connect/Edit/__tests__/Edit.spec.tsx b/kafka-ui-react-app/src/components/Connect/Edit/__tests__/Edit.spec.tsx
index 44e4268fe4e..54c45619c67 100644
--- a/kafka-ui-react-app/src/components/Connect/Edit/__tests__/Edit.spec.tsx
+++ b/kafka-ui-react-app/src/components/Connect/Edit/__tests__/Edit.spec.tsx
@@ -1,8 +1,5 @@
import React from 'react';
-import { create } from 'react-test-renderer';
-import { mount } from 'enzyme';
-import { act } from 'react-dom/test-utils';
-import { containerRendersView, TestRouterWrapper } from 'lib/testHelpers';
+import { containerRendersView, render } from 'lib/testHelpers';
import {
clusterConnectConnectorConfigPath,
clusterConnectConnectorEditPath,
@@ -10,8 +7,9 @@ import {
import EditContainer from 'components/Connect/Edit/EditContainer';
import Edit, { EditProps } from 'components/Connect/Edit/Edit';
import { connector } from 'redux/reducers/connect/__test__/fixtures';
-import { ThemeProvider } from 'styled-components';
-import theme from 'theme/theme';
+import { Route } from 'react-router';
+import { waitFor } from '@testing-library/dom';
+import { fireEvent, screen } from '@testing-library/react';
jest.mock('components/common/PageLoader/PageLoader', () => 'mock-PageLoader');
@@ -38,12 +36,9 @@ describe('Edit', () => {
const connectName = 'my-connect';
const connectorName = 'my-connector';
- const setupWrapper = (props: Partial<EditProps> = {}) => (
- <ThemeProvider theme={theme}>
- <TestRouterWrapper
- pathname={pathname}
- urlParams={{ clusterName, connectName, connectorName }}
- >
+ const renderComponent = (props: Partial<EditProps> = {}) =>
+ render(
+ <Route path={pathname}>
<Edit
fetchConfig={jest.fn()}
isConfigFetching={false}
@@ -51,30 +46,19 @@ describe('Edit', () => {
updateConfig={jest.fn()}
{...props}
/>
- </TestRouterWrapper>
- </ThemeProvider>
- );
-
- it('matches snapshot', () => {
- const wrapper = create(setupWrapper());
- expect(wrapper.toJSON()).toMatchSnapshot();
- });
-
- it('matches snapshot when fetching config', () => {
- const wrapper = create(setupWrapper({ isConfigFetching: true }));
- expect(wrapper.toJSON()).toMatchSnapshot();
- });
-
- it('matches snapshot when config has credentials', () => {
- const wrapper = create(
- setupWrapper({ config: { ...connector.config, password: '******' } })
+ </Route>,
+ {
+ pathname: clusterConnectConnectorEditPath(
+ clusterName,
+ connectName,
+ connectorName
+ ),
+ }
);
- expect(wrapper.toJSON()).toMatchSnapshot();
- });
it('fetches config on mount', () => {
const fetchConfig = jest.fn();
- mount(setupWrapper({ fetchConfig }));
+ renderComponent({ fetchConfig });
expect(fetchConfig).toHaveBeenCalledTimes(1);
expect(fetchConfig).toHaveBeenCalledWith(
clusterName,
@@ -85,10 +69,8 @@ describe('Edit', () => {
it('calls updateConfig on form submit', async () => {
const updateConfig = jest.fn();
- const wrapper = mount(setupWrapper({ updateConfig }));
- await act(async () => {
- wrapper.find('form').simulate('submit');
- });
+ renderComponent({ updateConfig });
+ await waitFor(() => fireEvent.submit(screen.getByRole('form')));
expect(updateConfig).toHaveBeenCalledTimes(1);
expect(updateConfig).toHaveBeenCalledWith(
clusterName,
@@ -100,10 +82,8 @@ describe('Edit', () => {
it('redirects to connector config view on successful submit', async () => {
const updateConfig = jest.fn().mockResolvedValueOnce(connector);
- const wrapper = mount(setupWrapper({ updateConfig }));
- await act(async () => {
- wrapper.find('form').simulate('submit');
- });
+ renderComponent({ updateConfig });
+ await waitFor(() => fireEvent.submit(screen.getByRole('form')));
expect(mockHistoryPush).toHaveBeenCalledTimes(1);
expect(mockHistoryPush).toHaveBeenCalledWith(
clusterConnectConnectorConfigPath(
@@ -116,10 +96,8 @@ describe('Edit', () => {
it('does not redirect to connector config view on unsuccessful submit', async () => {
const updateConfig = jest.fn().mockResolvedValueOnce(undefined);
- const wrapper = mount(setupWrapper({ updateConfig }));
- await act(async () => {
- wrapper.find('form').simulate('submit');
- });
+ renderComponent({ updateConfig });
+ await waitFor(() => fireEvent.submit(screen.getByRole('form')));
expect(mockHistoryPush).not.toHaveBeenCalled();
});
});
diff --git a/kafka-ui-react-app/src/components/Connect/Edit/__tests__/__snapshots__/Edit.spec.tsx.snap b/kafka-ui-react-app/src/components/Connect/Edit/__tests__/__snapshots__/Edit.spec.tsx.snap
deleted file mode 100644
index f8dd0c4fb6b..00000000000
--- a/kafka-ui-react-app/src/components/Connect/Edit/__tests__/__snapshots__/Edit.spec.tsx.snap
+++ /dev/null
@@ -1,209 +0,0 @@
-// Jest Snapshot v1, https://goo.gl/fbAQLP
-
-exports[`Edit view matches snapshot 1`] = `
-.c1 {
- display: -webkit-box;
- display: -webkit-flex;
- display: -ms-flexbox;
- display: flex;
- -webkit-flex-direction: row;
- -ms-flex-direction: row;
- flex-direction: row;
- -webkit-align-items: center;
- -webkit-box-align: center;
- -ms-flex-align: center;
- align-items: center;
- -webkit-box-pack: center;
- -webkit-justify-content: center;
- -ms-flex-pack: center;
- justify-content: center;
- padding: 0px 12px;
- border: none;
- border-radius: 4px;
- white-space: nowrap;
- background: #4F4FFF;
- color: #FFFFFF;
- font-size: 14px;
- font-weight: 500;
- height: 32px;
-}
-
-.c1:hover:enabled {
- background: #1717CF;
- color: #FFFFFF;
- cursor: pointer;
-}
-
-.c1:active:enabled {
- background: #1414B8;
- color: #FFFFFF;
-}
-
-.c1:disabled {
- opacity: 0.5;
- cursor: not-allowed;
-}
-
-.c1 a {
- color: white;
-}
-
-.c1 i {
- margin-right: 7px;
-}
-
-.c0 {
- margin: 16px;
-}
-
-.c0 form > *:last-child {
- margin-top: 16px;
-}
-
-<div
- className="c0"
->
- <form
- onSubmit={[Function]}
- >
- <div>
- <mock-Editor
- name="config"
- onBlur={[Function]}
- onChange={[Function]}
- readOnly={false}
- value="{
- \\"connector.class\\": \\"FileStreamSource\\",
- \\"tasks.max\\": \\"10\\",
- \\"topic\\": \\"test-topic\\",
- \\"file\\": \\"/some/file\\"
-}"
- />
- </div>
- <div />
- <button
- className="c1"
- disabled={true}
- type="submit"
- >
- Submit
- </button>
- </form>
-</div>
-`;
-
-exports[`Edit view matches snapshot when config has credentials 1`] = `
-.c2 {
- display: -webkit-box;
- display: -webkit-flex;
- display: -ms-flexbox;
- display: flex;
- -webkit-flex-direction: row;
- -ms-flex-direction: row;
- flex-direction: row;
- -webkit-align-items: center;
- -webkit-box-align: center;
- -ms-flex-align: center;
- align-items: center;
- -webkit-box-pack: center;
- -webkit-justify-content: center;
- -ms-flex-pack: center;
- justify-content: center;
- padding: 0px 12px;
- border: none;
- border-radius: 4px;
- white-space: nowrap;
- background: #4F4FFF;
- color: #FFFFFF;
- font-size: 14px;
- font-weight: 500;
- height: 32px;
-}
-
-.c2:hover:enabled {
- background: #1717CF;
- color: #FFFFFF;
- cursor: pointer;
-}
-
-.c2:active:enabled {
- background: #1414B8;
- color: #FFFFFF;
-}
-
-.c2:disabled {
- opacity: 0.5;
- cursor: not-allowed;
-}
-
-.c2 a {
- color: white;
-}
-
-.c2 i {
- margin-right: 7px;
-}
-
-.c0 {
- margin: 16px;
-}
-
-.c0 form > *:last-child {
- margin-top: 16px;
-}
-
-.c1 {
- height: 48px;
- display: -webkit-box;
- display: -webkit-flex;
- display: -ms-flexbox;
- display: flex;
- -webkit-align-items: center;
- -webkit-box-align: center;
- -ms-flex-align: center;
- align-items: center;
- background-color: #FFEECC;
- border-radius: 8px;
- padding: 8px;
- margin-bottom: 16px;
-}
-
-<div
- className="c0"
->
- <div
- className="c1"
- >
- Please replace ****** with the real credential values to avoid accidentally breaking your connector config!
- </div>
- <form
- onSubmit={[Function]}
- >
- <div>
- <mock-Editor
- name="config"
- onBlur={[Function]}
- onChange={[Function]}
- readOnly={false}
- value="{
- \\"connector.class\\": \\"FileStreamSource\\",
- \\"tasks.max\\": \\"10\\",
- \\"topic\\": \\"test-topic\\",
- \\"file\\": \\"/some/file\\",
- \\"password\\": \\"******\\"
-}"
- />
- </div>
- <div />
- <button
- className="c2"
- disabled={true}
- type="submit"
- >
- Submit
- </button>
- </form>
-</div>
-`;
-
-exports[`Edit view matches snapshot when fetching config 1`] = `<mock-PageLoader />`;
diff --git a/kafka-ui-react-app/src/components/Connect/New/New.styled.ts b/kafka-ui-react-app/src/components/Connect/New/New.styled.ts
new file mode 100644
index 00000000000..c24eff561a3
--- /dev/null
+++ b/kafka-ui-react-app/src/components/Connect/New/New.styled.ts
@@ -0,0 +1,12 @@
+import styled from 'styled-components';
+
+export const NewConnectFormStyled = styled.form`
+ padding: 0 16px 16px;
+ display: flex;
+ flex-direction: column;
+ gap: 16px;
+
+ & > button:last-child {
+ align-self: flex-start;
+ }
+`;
diff --git a/kafka-ui-react-app/src/components/Connect/New/New.tsx b/kafka-ui-react-app/src/components/Connect/New/New.tsx
index 205acb50013..0e25e934cbc 100644
--- a/kafka-ui-react-app/src/components/Connect/New/New.tsx
+++ b/kafka-ui-react-app/src/components/Connect/New/New.tsx
@@ -14,9 +14,10 @@ import Select from 'components/common/Select/Select';
import { FormError } from 'components/common/Input/Input.styled';
import Input from 'components/common/Input/Input';
import { Button } from 'components/common/Button/Button';
-import styled from 'styled-components';
import PageHeading from 'components/common/PageHeading/PageHeading';
+import * as S from './New.styled';
+
const validationSchema = yup.object().shape({
name: yup.string().required(),
config: yup.string().required().isJsonObject(),
@@ -26,17 +27,6 @@ interface RouterParams {
clusterName: ClusterName;
}
-const NewConnectFormStyled = styled.form`
- padding: 16px;
- padding-top: 0;
- display: flex;
- flex-direction: column;
- gap: 16px;
- & > button:last-child {
- align-self: flex-start;
- }
-`;
-
export interface NewProps {
fetchConnects(clusterName: ClusterName): void;
areConnectsFetching: boolean;
@@ -99,7 +89,7 @@ const New: React.FC<NewProps> = ({
async (values: FormValues) => {
const connector = await createConnector(clusterName, values.connectName, {
name: values.name,
- config: JSON.parse(values.config),
+ config: JSON.parse(values.config.trim()),
});
if (connector) {
history.push(
@@ -125,7 +115,10 @@ const New: React.FC<NewProps> = ({
return (
<FormProvider {...methods}>
<PageHeading text="Create new connector" />
- <NewConnectFormStyled onSubmit={handleSubmit(onSubmit)}>
+ <S.NewConnectFormStyled
+ onSubmit={handleSubmit(onSubmit)}
+ aria-label="Create connect form"
+ >
<div className={['field', connectNameFieldClassName].join(' ')}>
<InputLabel>Connect *</InputLabel>
<Select selectSize="M" name="connectName" disabled={isSubmitting}>
@@ -175,7 +168,7 @@ const New: React.FC<NewProps> = ({
>
Submit
</Button>
- </NewConnectFormStyled>
+ </S.NewConnectFormStyled>
</FormProvider>
);
};
diff --git a/kafka-ui-react-app/src/components/Connect/New/__tests__/New.spec.tsx b/kafka-ui-react-app/src/components/Connect/New/__tests__/New.spec.tsx
index aa9b73d440a..f333197ae92 100644
--- a/kafka-ui-react-app/src/components/Connect/New/__tests__/New.spec.tsx
+++ b/kafka-ui-react-app/src/components/Connect/New/__tests__/New.spec.tsx
@@ -1,8 +1,5 @@
import React from 'react';
-import { create, act as rendererAct } from 'react-test-renderer';
-import { mount, ReactWrapper } from 'enzyme';
-import { act } from 'react-dom/test-utils';
-import { containerRendersView, TestRouterWrapper } from 'lib/testHelpers';
+import { containerRendersView, render } from 'lib/testHelpers';
import {
clusterConnectConnectorPath,
clusterConnectorNewPath,
@@ -10,12 +7,19 @@ import {
import NewContainer from 'components/Connect/New/NewContainer';
import New, { NewProps } from 'components/Connect/New/New';
import { connects, connector } from 'redux/reducers/connect/__test__/fixtures';
-import { ThemeProvider } from 'styled-components';
-import theme from 'theme/theme';
+import { Route } from 'react-router';
+import { act, fireEvent, screen } from '@testing-library/react';
+import userEvent from '@testing-library/user-event';
+import { waitFor } from '@testing-library/dom';
+import { ControllerRenderProps } from 'react-hook-form';
jest.mock('components/common/PageLoader/PageLoader', () => 'mock-PageLoader');
-
-jest.mock('components/common/Editor/Editor', () => 'mock-Editor');
+jest.mock(
+ 'components/common/Editor/Editor',
+ () => (props: ControllerRenderProps) => {
+ return <textarea {...props} placeholder="json" />;
+ }
+);
const mockHistoryPush = jest.fn();
jest.mock('react-router-dom', () => ({
@@ -29,22 +33,27 @@ describe('New', () => {
containerRendersView(<NewContainer />, New);
describe('view', () => {
- const pathname = clusterConnectorNewPath(':clusterName');
const clusterName = 'my-cluster';
- const simulateFormSubmit = (wrapper: ReactWrapper) =>
- act(async () => {
- wrapper.find('input[name="name"]').simulate('change', {
- target: { name: 'name', value: 'my-connector' },
- });
- wrapper
- .find('mock-Editor')
- .simulate('change', { target: { value: '{"class":"MyClass"}' } });
- wrapper.find('button[type="submit"]').simulate('submit');
+ const simulateFormSubmit = async () => {
+ userEvent.type(
+ screen.getByPlaceholderText('Connector Name'),
+ 'my-connector'
+ );
+ userEvent.type(
+ screen.getByPlaceholderText('json'),
+ '{"class":"MyClass"}'.replace(/[{[]/g, '$&$&')
+ );
+ expect(screen.getByPlaceholderText('json')).toHaveValue(
+ '{"class":"MyClass"}'
+ );
+ await waitFor(() => {
+ fireEvent.submit(screen.getByRole('form'));
});
+ };
- const setupWrapper = (props: Partial<NewProps> = {}) => (
- <ThemeProvider theme={theme}>
- <TestRouterWrapper pathname={pathname} urlParams={{ clusterName }}>
+ const renderComponent = (props: Partial<NewProps> = {}) =>
+ render(
+ <Route path={clusterConnectorNewPath(':clusterName')}>
<New
fetchConnects={jest.fn()}
areConnectsFetching={false}
@@ -52,30 +61,14 @@ describe('New', () => {
createConnector={jest.fn()}
{...props}
/>
- </TestRouterWrapper>
- </ThemeProvider>
- );
-
- it('matches snapshot', async () => {
- let wrapper = create(<div />);
- await rendererAct(async () => {
- wrapper = create(setupWrapper());
- });
- expect(wrapper.toJSON()).toMatchSnapshot();
- });
-
- it('matches snapshot when fetching connects', async () => {
- let wrapper = create(<div />);
- await rendererAct(async () => {
- wrapper = create(setupWrapper({ areConnectsFetching: true }));
- });
- expect(wrapper.toJSON()).toMatchSnapshot();
- });
+ </Route>,
+ { pathname: clusterConnectorNewPath(clusterName) }
+ );
it('fetches connects on mount', async () => {
const fetchConnects = jest.fn();
await act(async () => {
- mount(setupWrapper({ fetchConnects }));
+ renderComponent({ fetchConnects });
});
expect(fetchConnects).toHaveBeenCalledTimes(1);
expect(fetchConnects).toHaveBeenCalledWith(clusterName);
@@ -83,8 +76,8 @@ describe('New', () => {
it('calls createConnector on form submit', async () => {
const createConnector = jest.fn();
- const wrapper = mount(setupWrapper({ createConnector }));
- await simulateFormSubmit(wrapper);
+ renderComponent({ createConnector });
+ await simulateFormSubmit();
expect(createConnector).toHaveBeenCalledTimes(1);
expect(createConnector).toHaveBeenCalledWith(
clusterName,
@@ -98,8 +91,8 @@ describe('New', () => {
it('redirects to connector details view on successful submit', async () => {
const createConnector = jest.fn().mockResolvedValue(connector);
- const wrapper = mount(setupWrapper({ createConnector }));
- await simulateFormSubmit(wrapper);
+ renderComponent({ createConnector });
+ await simulateFormSubmit();
expect(mockHistoryPush).toHaveBeenCalledTimes(1);
expect(mockHistoryPush).toHaveBeenCalledWith(
clusterConnectConnectorPath(
@@ -112,8 +105,8 @@ describe('New', () => {
it('does not redirect to connector details view on unsuccessful submit', async () => {
const createConnector = jest.fn().mockResolvedValueOnce(undefined);
- const wrapper = mount(setupWrapper({ createConnector }));
- await simulateFormSubmit(wrapper);
+ renderComponent({ createConnector });
+ await simulateFormSubmit();
expect(mockHistoryPush).not.toHaveBeenCalled();
});
});
diff --git a/kafka-ui-react-app/src/components/Connect/New/__tests__/__snapshots__/New.spec.tsx.snap b/kafka-ui-react-app/src/components/Connect/New/__tests__/__snapshots__/New.spec.tsx.snap
deleted file mode 100644
index 6030c7d78e2..00000000000
--- a/kafka-ui-react-app/src/components/Connect/New/__tests__/__snapshots__/New.spec.tsx.snap
+++ /dev/null
@@ -1,336 +0,0 @@
-// Jest Snapshot v1, https://goo.gl/fbAQLP
-
-exports[`New view matches snapshot 1`] = `
-Array [
- .c0 {
- height: 56px;
- display: -webkit-box;
- display: -webkit-flex;
- display: -ms-flexbox;
- display: flex;
- -webkit-box-pack: justify;
- -webkit-justify-content: space-between;
- -ms-flex-pack: justify;
- justify-content: space-between;
- -webkit-align-items: center;
- -webkit-box-align: center;
- -ms-flex-align: center;
- align-items: center;
- padding: 0px 16px;
-}
-
-.c0 h1 {
- font-size: 24px;
- font-weight: 500;
- line-height: 32px;
- color: #000;
-}
-
-.c0 > div {
- display: -webkit-box;
- display: -webkit-flex;
- display: -ms-flexbox;
- display: flex;
- gap: 16px;
-}
-
-<div
- className="c0"
- >
- <h1>
- Create new connector
- </h1>
- <div />
- </div>,
- .c1 {
- font-weight: 500;
- font-size: 12px;
- line-height: 20px;
- color: #454F54;
-}
-
-.c3 {
- height: 32px;
- border: 1px #ABB5BA solid;
- border-radius: 4px;
- font-size: 14px;
- width: 100%;
- padding-left: 12px;
- padding-right: 16px;
- color: #171A1C;
- min-width: auto;
- background-image: url('data:image/svg+xml,%3Csvg width="10" height="6" viewBox="0 0 10 6" fill="none" xmlns="http://www.w3.org/2000/svg"%3E%3Cpath d="M1 1L5 5L9 1" stroke="%23454F54"/%3E%3C/svg%3E%0A') !important;
- background-repeat: no-repeat !important;
- background-position-x: calc(100% - 8px) !important;
- background-position-y: 55% !important;
- -webkit-appearance: none !important;
- -moz-appearance: none !important;
- appearance: none !important;
-}
-
-.c3:hover {
- color: #171A1C;
- border-color: #73848C;
-}
-
-.c3:focus {
- outline: none;
- color: #171A1C;
- border-color: #454F54;
-}
-
-.c3:disabled {
- color: #ABB5BA;
- border-color: #E3E6E8;
- cursor: not-allowed;
-}
-
-.c2 {
- position: relative;
-}
-
-.c6 {
- border: 1px #ABB5BA solid;
- border-radius: 4px;
- height: 32px;
- width: 100%;
- padding-left: 12px;
- font-size: 14px;
-}
-
-.c6::-webkit-input-placeholder {
- color: #ABB5BA;
- font-size: 14px;
-}
-
-.c6::-moz-placeholder {
- color: #ABB5BA;
- font-size: 14px;
-}
-
-.c6:-ms-input-placeholder {
- color: #ABB5BA;
- font-size: 14px;
-}
-
-.c6::placeholder {
- color: #ABB5BA;
- font-size: 14px;
-}
-
-.c6:hover {
- border-color: #73848C;
-}
-
-.c6:focus {
- outline: none;
- border-color: #454F54;
-}
-
-.c6:focus::-webkit-input-placeholder {
- color: transparent;
-}
-
-.c6:focus::-moz-placeholder {
- color: transparent;
-}
-
-.c6:focus:-ms-input-placeholder {
- color: transparent;
-}
-
-.c6:focus::placeholder {
- color: transparent;
-}
-
-.c6:disabled {
- color: #ABB5BA;
- border-color: #E3E6E8;
- cursor: not-allowed;
-}
-
-.c6:read-only {
- color: #171A1C;
- border: none;
- background-color: #F1F2F3;
- cursor: not-allowed;
-}
-
-.c6:-moz-read-only:focus::placeholder {
- color: #ABB5BA;
-}
-
-.c6:read-only:focus::placeholder {
- color: #ABB5BA;
-}
-
-.c4 {
- color: #E51A1A;
- font-size: 12px;
-}
-
-.c5 {
- position: relative;
-}
-
-.c7 {
- display: -webkit-box;
- display: -webkit-flex;
- display: -ms-flexbox;
- display: flex;
- -webkit-flex-direction: row;
- -ms-flex-direction: row;
- flex-direction: row;
- -webkit-align-items: center;
- -webkit-box-align: center;
- -ms-flex-align: center;
- align-items: center;
- -webkit-box-pack: center;
- -webkit-justify-content: center;
- -ms-flex-pack: center;
- justify-content: center;
- padding: 0px 12px;
- border: none;
- border-radius: 4px;
- white-space: nowrap;
- background: #4F4FFF;
- color: #FFFFFF;
- font-size: 14px;
- font-weight: 500;
- height: 32px;
-}
-
-.c7:hover:enabled {
- background: #1717CF;
- color: #FFFFFF;
- cursor: pointer;
-}
-
-.c7:active:enabled {
- background: #1414B8;
- color: #FFFFFF;
-}
-
-.c7:disabled {
- opacity: 0.5;
- cursor: not-allowed;
-}
-
-.c7 a {
- color: white;
-}
-
-.c7 i {
- margin-right: 7px;
-}
-
-.c0 {
- padding: 16px;
- padding-top: 0;
- display: -webkit-box;
- display: -webkit-flex;
- display: -ms-flexbox;
- display: flex;
- -webkit-flex-direction: column;
- -ms-flex-direction: column;
- flex-direction: column;
- gap: 16px;
-}
-
-.c0 > button:last-child {
- -webkit-align-self: flex-start;
- -ms-flex-item-align: start;
- align-self: flex-start;
-}
-
-<form
- className="c0"
- onSubmit={[Function]}
- >
- <div
- className="field "
- >
- <label
- className="c1"
- >
- Connect *
- </label>
- <div
- className="select-wrapper c2"
- >
- <select
- className="c3"
- disabled={false}
- name="connectName"
- onBlur={[Function]}
- onChange={[Function]}
- role="listbox"
- >
- <option
- value="first"
- >
- first
- </option>
- <option
- value="second"
- >
- second
- </option>
- </select>
- </div>
- <p
- className="c4"
- />
- </div>
- <div>
- <label
- className="c1"
- >
- Name *
- </label>
- <div
- className="c5"
- >
- <input
- autoComplete="off"
- className="c6 c5"
- disabled={false}
- name="name"
- onBlur={[Function]}
- onChange={[Function]}
- placeholder="Connector Name"
- />
- </div>
- <p
- className="c4"
- />
- </div>
- <div>
- <label
- className="c1"
- >
- Config *
- </label>
- <mock-Editor
- name="config"
- onBlur={[Function]}
- onChange={[Function]}
- readOnly={false}
- value=""
- />
- <p
- className="c4"
- />
- </div>
- <button
- className="c7"
- disabled={true}
- type="submit"
- >
- Submit
- </button>
- </form>,
-]
-`;
-
-exports[`New view matches snapshot when fetching connects 1`] = `<mock-PageLoader />`;
diff --git a/kafka-ui-react-app/src/lib/yupExtended.ts b/kafka-ui-react-app/src/lib/yupExtended.ts
index fab2a290018..91f88c3c279 100644
--- a/kafka-ui-react-app/src/lib/yupExtended.ts
+++ b/kafka-ui-react-app/src/lib/yupExtended.ts
@@ -16,11 +16,13 @@ declare module 'yup' {
export const isValidJsonObject = (value?: string) => {
try {
if (!value) return false;
+
+ const trimmedValue = value.trim();
if (
- value.indexOf('{') === 0 &&
- value.lastIndexOf('}') === value.length - 1
+ trimmedValue.indexOf('{') === 0 &&
+ trimmedValue.lastIndexOf('}') === trimmedValue.length - 1
) {
- JSON.parse(value);
+ JSON.parse(trimmedValue);
return true;
}
} catch {
| null | train | train | 2022-01-20T10:39:23 | "2021-12-09T15:42:49Z" | agolosen | train |
provectus/kafka-ui/1444_1445 | provectus/kafka-ui | provectus/kafka-ui/1444 | provectus/kafka-ui/1445 | [
"connected"
] | 79442a7e821bb0d789995cb0ca74a63a39e5923e | f85a340b79af8a21b8a903808f50c2e01d00d453 | [
"@5hin0bi we currently only restart **connector instance**, not tasks on this button click. I will change this to restart both connector and all its tasks. I think we will also need to rename this button to smth like \"Restart connector\". \r\ncc @Haarolean ",
"@5hin0bi can you please verify it works, and reopen issue if not",
"Checked on our dev environment. The button is now called \"Restart Connector\", but failed tasks themselves are not restarted.",
"To do: create 3 buttons",
"Need to implement frontend",
"I'm seeing a similar error with KafkaConnect where it's trying to hit an api endpoint:\r\n```500 Server Error for HTTP GET \"/api/clusters/local/connectors?search=\"```\r\nthat also doesn't seem to exist, per Confluent's API. ",
"> I'm seeing a similar error with KafkaConnect where it's trying to hit an api endpoint: `500 Server Error for HTTP GET \"/api/clusters/local/connectors?search=\"` that also doesn't seem to exist, per Confluent's API.\r\n\r\nThat's our backend with our API, not confluent's. Please raise a new issue."
] | [] | "2022-01-21T09:47:58Z" | [
"type/enhancement",
"scope/backend",
"scope/frontend",
"status/accepted",
"status/confirmed"
] | Connector "Restart All Tasks" button has no effect | When choosing a specific connector there are several options you can choose:
<img width="1083" alt="image" src="https://user-images.githubusercontent.com/94184844/150494050-67a4692f-2133-4c78-94e3-c74857b65057.png">
Restart all tasks sends the next POST request that actually does do nothing.
https://xxx/api/clusters/local/connects/local/connectors/reddit-source-json/action/RESTART
According to the REST API documentation https://docs.confluent.io/platform/current/connect/references/restapi.html there is no such request available. And the correct one is this:
<img width="904" alt="image" src="https://user-images.githubusercontent.com/94184844/150494299-2cffab5d-abb7-4e05-af35-f3af1a57a050.png">
Restarting the specific task using the triple dot context menu works great tho. | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml",
"kafka-ui-react-app/src/components/Connect/Details/Actions/Actions.tsx",
"kafka-ui-react-app/src/components/Connect/Details/Actions/__tests__/Actions.spec.tsx",
"kafka-ui-react-app/src/components/Connect/Details/Actions/__tests__/__snapshots__/Actions.spec.tsx.snap"
] | [
"kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java",
"kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml",
"kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml",
"kafka-ui-react-app/src/components/Connect/Details/Actions/Actions.tsx",
"kafka-ui-react-app/src/components/Connect/Details/Actions/__tests__/Actions.spec.tsx",
"kafka-ui-react-app/src/components/Connect/Details/Actions/__tests__/__snapshots__/Actions.spec.tsx.snap"
] | [] | diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java
index 74730e29f68..2d82a54217b 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java
@@ -266,7 +266,8 @@ public Mono<Void> updateConnectorState(KafkaCluster cluster, String connectName,
switch (action) {
case RESTART:
kafkaClientCall =
- connect -> KafkaConnectClients.withBaseUrl(connect).restartConnector(connectorName);
+ connect -> KafkaConnectClients.withBaseUrl(connect)
+ .restartConnector(connectorName, true, false);
break;
case PAUSE:
kafkaClientCall =
diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml
index 14c996ee545..bd96bf0c463 100644
--- a/kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml
+++ b/kafka-ui-contract/src/main/resources/swagger/kafka-connect-api.yaml
@@ -169,7 +169,7 @@ paths:
post:
tags:
- KafkaConnectClient
- summary: restart the connector
+ summary: restart the connector and its tasks
operationId: restartConnector
parameters:
- name: connectorName
@@ -177,6 +177,21 @@ paths:
required: true
schema:
type: string
+ - name: includeTasks
+ in: query
+ required: false
+ schema:
+ type: boolean
+ default: false
+ description: Specifies whether to restart the connector instance and task instances or just the connector instance
+ - name: onlyFailed
+ in: query
+ required: false
+ schema:
+ type: boolean
+ default: false
+ description: Specifies whether to restart just the instances with a FAILED status or all instances
+
responses:
200:
description: OK
@@ -406,6 +421,7 @@ components:
- RUNNING
- FAILED
- PAUSED
+ - RESTARTING
- UNASSIGNED
worker_id:
type: string
diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
index 70af448952a..b0633f9b0b5 100644
--- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
+++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
@@ -2396,6 +2396,7 @@ components:
- RUNNING
- FAILED
- PAUSED
+ - RESTARTING
- UNASSIGNED
ConnectorState:
diff --git a/kafka-ui-react-app/src/components/Connect/Details/Actions/Actions.tsx b/kafka-ui-react-app/src/components/Connect/Details/Actions/Actions.tsx
index 0599c1a4cb5..7f8ce91852b 100644
--- a/kafka-ui-react-app/src/components/Connect/Details/Actions/Actions.tsx
+++ b/kafka-ui-react-app/src/components/Connect/Details/Actions/Actions.tsx
@@ -126,7 +126,7 @@ const Actions: React.FC<ActionsProps> = ({
<span>
<i className="fas fa-sync-alt" />
</span>
- <span>Restart All Tasks</span>
+ <span>Restart Connector</span>
</Button>
<Button
buttonSize="M"
diff --git a/kafka-ui-react-app/src/components/Connect/Details/Actions/__tests__/Actions.spec.tsx b/kafka-ui-react-app/src/components/Connect/Details/Actions/__tests__/Actions.spec.tsx
index 0d31cc0a7a6..509bc5606c3 100644
--- a/kafka-ui-react-app/src/components/Connect/Details/Actions/__tests__/Actions.spec.tsx
+++ b/kafka-ui-react-app/src/components/Connect/Details/Actions/__tests__/Actions.spec.tsx
@@ -148,7 +148,7 @@ describe('Actions', () => {
it('calls restartConnector when restart button clicked', () => {
const restartConnector = jest.fn();
const wrapper = mount(setupWrapper({ restartConnector }));
- wrapper.find({ children: 'Restart All Tasks' }).simulate('click');
+ wrapper.find({ children: 'Restart Connector' }).simulate('click');
expect(restartConnector).toHaveBeenCalledTimes(1);
expect(restartConnector).toHaveBeenCalledWith(
clusterName,
diff --git a/kafka-ui-react-app/src/components/Connect/Details/Actions/__tests__/__snapshots__/Actions.spec.tsx.snap b/kafka-ui-react-app/src/components/Connect/Details/Actions/__tests__/__snapshots__/Actions.spec.tsx.snap
index ee145bdf7c4..57c8cb4f906 100644
--- a/kafka-ui-react-app/src/components/Connect/Details/Actions/__tests__/__snapshots__/Actions.spec.tsx.snap
+++ b/kafka-ui-react-app/src/components/Connect/Details/Actions/__tests__/__snapshots__/Actions.spec.tsx.snap
@@ -141,7 +141,7 @@ exports[`Actions view matches snapshot 1`] = `
/>
</span>
<span>
- Restart All Tasks
+ Restart Connector
</span>
</button>
<a
@@ -334,7 +334,7 @@ exports[`Actions view matches snapshot when deleting connector 1`] = `
/>
</span>
<span>
- Restart All Tasks
+ Restart Connector
</span>
</button>
<a
@@ -512,7 +512,7 @@ exports[`Actions view matches snapshot when failed 1`] = `
/>
</span>
<span>
- Restart All Tasks
+ Restart Connector
</span>
</button>
<a
@@ -705,7 +705,7 @@ exports[`Actions view matches snapshot when paused 1`] = `
/>
</span>
<span>
- Restart All Tasks
+ Restart Connector
</span>
</button>
<a
@@ -898,7 +898,7 @@ exports[`Actions view matches snapshot when running connector action 1`] = `
/>
</span>
<span>
- Restart All Tasks
+ Restart Connector
</span>
</button>
<a
@@ -1076,7 +1076,7 @@ exports[`Actions view matches snapshot when unassigned 1`] = `
/>
</span>
<span>
- Restart All Tasks
+ Restart Connector
</span>
</button>
<a
| null | val | train | 2022-01-21T08:47:48 | "2022-01-21T08:39:54Z" | 5hin0bi | train |
provectus/kafka-ui/1455_1467 | provectus/kafka-ui | provectus/kafka-ui/1455 | provectus/kafka-ui/1467 | [
"connected"
] | 86dad04448a47ef47f5d82b0c5959ce7398f9631 | 05174fd684398cd19ce889574cc4f41854517e96 | [
"Hello there dmakeroam! 👋\n\nThank you and congratulations 🎉 for opening your very first issue in this project! 💖\n\nIn case you want to claim this issue, please comment down below! We will try to get back to you as soon as we can. 👀",
"Can you help to review this ? @workshur ",
"We've reproduced the issue We'll release a minor patch to address the problem soon.",
"Available in `master`-labeled image."
] | [] | "2022-01-24T13:15:47Z" | [
"type/bug",
"scope/frontend",
"status/accepted",
"status/confirmed",
"type/regression"
] | Cannot create a Kafka topic in Kafka-UI for Confluent Kafka | **Describe the bug**
I cannot create a Kafka topic in Kafka-UI for Confluent Kafka.
**Set up**
Docker images:
Kafka UI: provectuslabs/kafka-ui:master
Confluent Kafka: confluentinc/cp-kafka:6.1.0
**Steps to Reproduce**
1. Go to Kafka UI topic creation page -> http://[local-ip]:[local-port]/ui/clusters/[cluster-name]/topics/create-new
2. Fill in the topic name such as "go"
3. Click "Send" to create the topic
4. Nothing will happen for you
**Expected behavior**
The topic name "go" should be created.
**Screenshots**

**Additional context**
I used Confluent Kafka on premise which setup with kubeadm | [
"kafka-ui-react-app/src/components/Connect/New/New.tsx",
"kafka-ui-react-app/src/components/Schemas/Edit/Edit.tsx",
"kafka-ui-react-app/src/components/Schemas/New/New.tsx",
"kafka-ui-react-app/src/components/Topics/New/New.tsx",
"kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx"
] | [
"kafka-ui-react-app/src/components/Connect/New/New.tsx",
"kafka-ui-react-app/src/components/Schemas/Edit/Edit.tsx",
"kafka-ui-react-app/src/components/Schemas/New/New.tsx",
"kafka-ui-react-app/src/components/Topics/New/New.tsx",
"kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx"
] | [] | diff --git a/kafka-ui-react-app/src/components/Connect/New/New.tsx b/kafka-ui-react-app/src/components/Connect/New/New.tsx
index 88b1efef59f..18b80313fad 100644
--- a/kafka-ui-react-app/src/components/Connect/New/New.tsx
+++ b/kafka-ui-react-app/src/components/Connect/New/New.tsx
@@ -127,6 +127,7 @@ const New: React.FC<NewProps> = ({
<div className={['field', connectNameFieldClassName].join(' ')}>
<InputLabel>Connect *</InputLabel>
<Controller
+ defaultValue={connectOptions[0].value}
control={control}
name="connectName"
render={({ field: { name, onChange } }) => (
diff --git a/kafka-ui-react-app/src/components/Schemas/Edit/Edit.tsx b/kafka-ui-react-app/src/components/Schemas/Edit/Edit.tsx
index 5e59075b0af..59a0b75ba8a 100644
--- a/kafka-ui-react-app/src/components/Schemas/Edit/Edit.tsx
+++ b/kafka-ui-react-app/src/components/Schemas/Edit/Edit.tsx
@@ -86,6 +86,7 @@ const Edit: React.FC = () => {
<div>
<InputLabel>Type</InputLabel>
<Controller
+ defaultValue={schema.schemaType}
control={control}
rules={{ required: true }}
name="schemaType"
@@ -108,6 +109,9 @@ const Edit: React.FC = () => {
<div>
<InputLabel>Compatibility level</InputLabel>
<Controller
+ defaultValue={
+ schema.compatibilityLevel as CompatibilityLevelCompatibilityEnum
+ }
control={control}
name="compatibilityLevel"
render={({ field: { name, onChange } }) => (
diff --git a/kafka-ui-react-app/src/components/Schemas/New/New.tsx b/kafka-ui-react-app/src/components/Schemas/New/New.tsx
index db143566301..6f753e38b24 100644
--- a/kafka-ui-react-app/src/components/Schemas/New/New.tsx
+++ b/kafka-ui-react-app/src/components/Schemas/New/New.tsx
@@ -99,6 +99,7 @@ const New: React.FC = () => {
<div>
<InputLabel>Schema Type *</InputLabel>
<Controller
+ defaultValue={SchemaTypeOptions[0].value as SchemaType}
control={control}
rules={{ required: 'Schema Type is required.' }}
name="schemaType"
diff --git a/kafka-ui-react-app/src/components/Topics/New/New.tsx b/kafka-ui-react-app/src/components/Topics/New/New.tsx
index 7d596e63c1a..7eaccd51e09 100644
--- a/kafka-ui-react-app/src/components/Topics/New/New.tsx
+++ b/kafka-ui-react-app/src/components/Topics/New/New.tsx
@@ -21,9 +21,10 @@ interface RouterParams {
const New: React.FC = () => {
const methods = useForm<TopicFormData>({
- mode: 'onTouched',
+ mode: 'all',
resolver: yupResolver(topicFormValidationSchema),
});
+
const { clusterName } = useParams<RouterParams>();
const history = useHistory();
const dispatch = useDispatch();
diff --git a/kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx b/kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx
index 95042574085..3d214953914 100644
--- a/kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx
+++ b/kafka-ui-react-app/src/components/Topics/shared/Form/TopicForm.tsx
@@ -115,6 +115,7 @@ const TopicForm: React.FC<Props> = ({
<div>
<InputLabel>Cleanup policy</InputLabel>
<Controller
+ defaultValue={CleanupPolicyOptions[0].value}
control={control}
name="cleanupPolicy"
render={({ field: { name, onChange } }) => (
@@ -142,6 +143,7 @@ const TopicForm: React.FC<Props> = ({
<Controller
control={control}
name="retentionBytes"
+ defaultValue={0}
render={({ field: { name, onChange } }) => (
<Select
name={name}
| null | train | train | 2022-01-23T14:56:38 | "2022-01-23T18:03:55Z" | dmakeroam | train |