Posts
Docs
Sign up
Log in
0%
20%
40%
60%
80%
100%
21:20:00
21:21:00
21:22:00
21:23:00
21:24:00
JVM System
JVM User
Machine Total CPU
Heap usage
Heap reserved
Allocation rate
Compilations
14.71
%
GC Pause Time
46.68
%
User CPU (avg)
1.35
%
System CPU (avg)
732.6
MiB/s
Allocation Rate (avg)
3.1/4.0
GiB
Heap Usage (max)
About
JVM Info
CPU
Allocations
Compilations
Top Allocators
Garbage Collections
Top Down
Bottom Up
Split by Method
Split by Lines
Total
Thread.run()
ThreadPoolExecutor$Worker.run()
ThreadPoolExecutor.runWorker(ThreadPoolExecutor$Worker)
AbstractRunnable.run()
ThreadContext$ContextPreservingAbstractRunnable.doRun()
TimedRunnable.doRun()
AbstractRunnable.run()
ActionRunnable$1.doRun()
1635648924.accept(Object)
SearchService.lambda$rewriteShardRequest$7(ShardSearchRequest, ActionListener)
ActionListener$1.onResponse(Object)
770920601.accept(Object)
ActionListener.lambda$map$2(ActionListener, CheckedFunction, Object)
725108641.apply(Object)
SearchService.lambda$executeQueryPhase$1(SearchTask, ShardSearchRequest)
SearchService.executeQueryPhase(ShardSearchRequest, SearchTask)
SearchService.loadOrExecuteQueryPhase(ShardSearchRequest, SearchContext)
QueryPhase.execute(SearchContext)
QueryPhase.execute(SearchContext, IndexSearcher, Consumer)
IndexSearcher.search(Query, Collector)
ContextIndexSearcher.createWeight(Query, ScoreMode, float)
IndexSearcher.createWeight(Query, ScoreMode, float)
SpanNearQuery.createWeight(IndexSearcher, ScoreMode, float)
SpanNearQuery.createWeight(IndexSearcher, ScoreMode, float)
SpanOrQuery.createWeight(IndexSearcher, ScoreMode, float)
SpanTermQuery.createWeight(IndexSearcher, ScoreMode, float)
TermStates.build(IndexReaderContext, Term, boolean)
TermStates.loadTermsEnum(LeafReaderContext, Term)
SegmentTermsEnum.seekExact(BytesRef)
SegmentTermsEnum.pushFrame(FST$Arc, BytesRef, int)
SegmentTermsEnum.getFrame(int)
SegmentTermsEnumFrame.<init>(SegmentTermsEnum, int)
Lucene50PostingsReader.newTermState()
SegmentTermsEnumFrame.loadBlock()
SegmentTermsEnum.initIndexInput()
ByteBufferIndexInput$SingleBufferImpl.clone()
ByteBufferIndexInput.clone()
ByteBufferIndexInput.buildSlice(String, long, long)
ByteBufferIndexInput.newCloneInstance(String, ByteBuffer[], int, long)
DirectByteBufferR.slice()
ByteBufferIndexInput.buildSlice(ByteBuffer[], long, long)
DirectByteBufferR.duplicate()
SegmentTermsEnum.getArc(int)
FST.findTargetArc(int, FST$Arc, FST$Arc, FST$BytesReader)
FST.findTargetArc(int, FST$Arc, FST$Arc, FST$BytesReader, boolean)
FST.readNextRealArc(FST$Arc, FST$BytesReader)
Outputs.readFinalOutput(DataInput)
ByteSequenceOutputs.read(DataInput)
ByteSequenceOutputs.read(DataInput)
BytesRef.<init>(int)
FieldReader.iterator()
SegmentTermsEnum.<init>(FieldReader)
SegmentTermsEnumFrame.<init>(SegmentTermsEnum, int)
Lucene50PostingsReader.newTermState()
FST.getBytesReader()
OffHeapFSTStore.getReverseBytesReader()
IndexInput.randomAccessSlice(long, long)
ByteBufferIndexInput$SingleBufferImpl.slice(String, long, long)
ByteBufferIndexInput.slice(String, long, long)
ByteBufferIndexInput.buildSlice(String, long, long)
IndexInput.getFullSliceDescription(String)
ByteBufferIndexInput.newCloneInstance(String, ByteBuffer[], int, long)
DirectByteBufferR.slice()
ByteBufferIndexInput.buildSlice(ByteBuffer[], long, long)
DirectByteBufferR.duplicate()
SpanTermQuery$SpanTermWeight.<init>(SpanTermQuery, TermStates, IndexSearcher, Map, float)
SpanWeight.<init>(SpanQuery, IndexSearcher, Map, float)
SpanWeight.buildSimWeight(SpanQuery, IndexSearcher, Map, float)
PerFieldSimilarityWrapper.scorer(float, CollectionStatistics, TermStatistics[])
LegacyBM25Similarity.scorer(float, CollectionStatistics, TermStatistics[])
BM25Similarity.scorer(float, CollectionStatistics, TermStatistics[])
PhraseQuery.createWeight(IndexSearcher, ScoreMode, float)
PhraseQuery$1.<init>(PhraseQuery, Query, String, IndexSearcher, ScoreMode, float)
PhraseWeight.<init>(Query, String, IndexSearcher, ScoreMode)
PhraseQuery$1.getStats(IndexSearcher)
TermStates.build(IndexReaderContext, Term, boolean)
TermStates.loadTermsEnum(LeafReaderContext, Term)
SegmentTermsEnum.seekExact(BytesRef)
SegmentTermsEnum.pushFrame(FST$Arc, BytesRef, int)
SegmentTermsEnum.getFrame(int)
SegmentTermsEnumFrame.<init>(SegmentTermsEnum, int)
FieldReader.iterator()
SegmentTermsEnum.<init>(FieldReader)
TermQuery.createWeight(IndexSearcher, ScoreMode, float)
TermStates.build(IndexReaderContext, Term, boolean)
TermStates.loadTermsEnum(LeafReaderContext, Term)
SegmentTermsEnum.seekExact(BytesRef)
ContextIndexSearcher.search(List, Weight, Collector)
ContextIndexSearcher.searchInternal(List, Weight, Collector)
ContextIndexSearcher$1.bulkScorer(LeafReaderContext)
MultiTermQueryConstantScoreWrapper$1.bulkScorer(LeafReaderContext)
MultiTermQueryConstantScoreWrapper$1.rewrite(LeafReaderContext)
DocIdSetBuilder.build()
DocIdSetBuilder.concat(List)
ArrayUtil.growExact(int[], int)
LSBRadixSorter.sort(int, int[], int)
ArrayUtil.grow(int[], int)
ArrayUtil.growExact(int[], int)
IntersectTermsEnum.next()
IntersectTermsEnum._next()
IntersectTermsEnum.pushFrame(int)
FST.findTargetArc(int, FST$Arc, FST$Arc, FST$BytesReader)
FST.findTargetArc(int, FST$Arc, FST$Arc, FST$BytesReader, boolean)
FST.readNextRealArc(FST$Arc, FST$BytesReader)
ByteSequenceOutputs.read(DataInput)
ByteSequenceOutputs.read(DataInput)
FST.readFirstRealTargetArc(long, FST$Arc, FST$BytesReader)
FST.readNextRealArc(FST$Arc, FST$BytesReader)
ByteSequenceOutputs.read(DataInput)
ByteSequenceOutputs.read(DataInput)
IntersectTermsEnum.getFrame(int)
DocIdSetBuilder.add(DocIdSetIterator)
DocIdSetBuilder.grow(int)
DocIdSetBuilder.ensureBufferCapacity(int)
DocIdSetBuilder.addBuffer(int)
DocIdSetBuilder$Buffer.<init>(int)
DocIdSetBuilder.growBuffer(DocIdSetBuilder$Buffer, int)
ArrayUtil.growExact(int[], int)
MultiTermQueryConstantScoreWrapper$1.collectTerms(LeafReaderContext, TermsEnum, List)
MultiTermQuery.getTermsEnum(Terms)
AutomatonQuery.getTermsEnum(Terms, AttributeSource)
CompiledAutomaton.getTermsEnum(Terms)
FieldReader.intersect(CompiledAutomaton, BytesRef)
IntersectTermsEnum.<init>(FieldReader, Automaton, RunAutomaton, BytesRef, BytesRef)
SegmentTermsEnum.postings(PostingsEnum, int)
Lucene50PostingsReader.postings(FieldInfo, BlockTermState, PostingsEnum, int)
Weight.bulkScorer(LeafReaderContext)
SpanWeight.scorer(LeafReaderContext)
SpanWeight.scorer(LeafReaderContext)
SpanNearQuery$SpanNearWeight.getSpans(LeafReaderContext, SpanWeight$Postings)
SpanOrQuery$SpanOrWeight.getSpans(LeafReaderContext, SpanWeight$Postings)
SpanTermQuery$SpanTermWeight.getSpans(LeafReaderContext, SpanWeight$Postings)
SegmentTermsEnum.postings(PostingsEnum, int)
Lucene50PostingsReader.postings(FieldInfo, BlockTermState, PostingsEnum, int)
Lucene50PostingsReader$EverythingEnum.<init>(Lucene50PostingsReader, FieldInfo)
SpanWeight.getSimScorer(LeafReaderContext)
LeafSimScorer.<init>(Similarity$SimScorer, LeafReader, String, boolean)
FilterLeafReader.getNormValues(String)
CodecReader.getNormValues(String)
Lucene80NormsProducer.getNorms(FieldInfo)
FieldReader.iterator()
SegmentTermsEnum.<init>(FieldReader)
PhraseWeight.scorer(LeafReaderContext)
PhraseQuery$1.getPhraseMatcher(LeafReaderContext, Similarity$SimScorer, boolean)
SegmentTermsEnum.impacts(int)
Lucene50PostingsReader.impacts(FieldInfo, BlockTermState, int)
Lucene50PostingsReader$BlockImpactsPostingsEnum.<init>(Lucene50PostingsReader, FieldInfo, Lucene50PostingsFormat$IntBlockTermState)
Lucene50PostingsReader.postings(FieldInfo, BlockTermState, PostingsEnum, int)
Lucene50PostingsReader$EverythingEnum.<init>(Lucene50PostingsReader, FieldInfo)
ExactPhraseMatcher.<init>(PhraseQuery$PostingsAndFreq[], ScoreMode, Similarity$SimScorer, float)
FieldReader.iterator()
LeafSimScorer.<init>(Similarity$SimScorer, LeafReader, String, boolean)
FilterLeafReader.getNormValues(String)
CodecReader.getNormValues(String)
Lucene80NormsProducer.getNorms(FieldInfo)
TermQuery$TermWeight.scorer(LeafReaderContext)
SegmentTermsEnum.impacts(int)
Lucene50PostingsReader.impacts(FieldInfo, BlockTermState, int)
BulkScorer.score(LeafCollector, Bits)
CancellableBulkScorer.score(LeafCollector, Bits, int, int)
MatchAllDocsQuery$1$1.score(LeafCollector, Bits, int, int)
MultiCollector$MultiLeafCollector.collect(int)
LeafBucketCollector.collect(int)
StringTermsAggregator$1.collect(int, long)
BytesRefHash.add(BytesRef)
BytesRefHash.add(BytesRef, int)
BytesRefHash.set(BytesRef, int, long)
BytesRefHash.append(long, BytesRef, int)
Weight$DefaultBulkScorer.score(LeafCollector, Bits, int, int)
TopDocsCollectorContext.createTopDocsCollectorContext(SearchContext, IndexReader, boolean)
TopDocsCollectorContext$1.<init>(IndexReader, Query, SortAndFormats, ScoreDoc, int, boolean, int, boolean, boolean)
TopDocsCollectorContext$SimpleTopDocsCollectorContext.<init>(IndexReader, Query, SortAndFormats, ScoreDoc, int, boolean, int, boolean, TopDocsCollectorContext$1)
TopDocsCollectorContext$SimpleTopDocsCollectorContext.<init>(IndexReader, Query, SortAndFormats, ScoreDoc, int, boolean, int, boolean)
TopDocsCollectorContext.shortcutTotalHitCount(IndexReader, Query)
LeafReader.docFreq(Term)
SegmentTermsEnum.seekExact(BytesRef)
 