try replacing doxygen commands automatically
bkietz committed Nov 13, 2024
1 parent d7bc378 commit ee586e8
Showing 416 changed files with 9,110 additions and 8,688 deletions.
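
The commit title suggests these edits were generated by a script rather than made by hand. That script is not included in this page, so the sketch below is only a hypothetical reconstruction of the kind of single-pass, regex-based rewrite the title implies; the file name doxygen_rewrite.cc and the exact patterns are illustrative assumptions, not the commit's actual tooling. It covers the three transformations visible throughout the diff: \brief is dropped, \param name (with an optional [in]/[out] direction) becomes :param name:, and \return becomes :return:.

// doxygen_rewrite.cc -- hypothetical sketch; the real tool behind this
// commit is not shown here. Rewrites Doxygen commands in /// comments:
//   /// \brief Foo             ->  /// Foo
//   /// \param name desc       ->  /// :param name: desc
//   /// @param[out] name desc  ->  /// :param name[out]: desc
//   /// \return desc           ->  /// :return: desc
#include <fstream>
#include <iostream>
#include <regex>
#include <string>

int main(int argc, char** argv) {
  if (argc != 2) {
    std::cerr << "usage: doxygen_rewrite <file>\n";
    return 1;
  }
  std::ifstream in(argv[1]);
  // Both the backslash and at-sign command forms appear in the tree.
  const std::regex brief{R"((///\s*)[\\@]brief\s+)"};
  const std::regex param{R"((///\s*)[\\@]param(\[\w+\])?\s+(\w+)\s+)"};
  const std::regex ret{R"((///\s*)[\\@]returns?\s+)"};
  std::string line;
  while (std::getline(in, line)) {
    line = std::regex_replace(line, brief, "$1");
    line = std::regex_replace(line, param, "$1:param $3$2: ");
    line = std::regex_replace(line, ret, "$1:return: ");
    std::cout << line << '\n';
  }
  return 0;
}

Run over every .h and .cc file in the tree, a pass like this would account for the mechanical changes below; edits such as dropping the \cond FALSE/\endcond pair in filesystem_definition_example.cc would still need manual follow-up.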
40 changes: 20 additions & 20 deletions cpp/examples/arrow/execution_plan_documentation_examples.cc
@@ -93,7 +93,7 @@ arrow::Result<std::shared_ptr<arrow::RecordBatch>> GetSampleRecordBatch(
return record_batch->FromStructArray(struct_result);
}

/// \brief Create a sample table
/// Create a sample table
/// The table's contents will be:
/// a,b
/// 1,null
@@ -106,7 +106,7 @@ arrow::Result<std::shared_ptr<arrow::RecordBatch>> GetSampleRecordBatch(
/// 6,false
/// 7,false
/// 8,true
/// \return The created table
/// :return: The created table

arrow::Result<std::shared_ptr<arrow::Table>> GetTable() {
auto null_long = std::numeric_limits<int64_t>::quiet_NaN();
@@ -136,8 +136,8 @@ arrow::Result<std::shared_ptr<arrow::Table>> GetTable() {
return table;
}

/// \brief Create a sample dataset
/// \return An in-memory dataset based on GetTable()
/// Create a sample dataset
/// :return: An in-memory dataset based on GetTable()
arrow::Result<std::shared_ptr<arrow::dataset::Dataset>> GetDataset() {
ARROW_ASSIGN_OR_RAISE(auto table, GetTable());
auto ds = std::make_shared<arrow::dataset::InMemoryDataset>(table);
@@ -269,7 +269,7 @@ arrow::Status ExecutePlanAndCollectAsTable(ac::Declaration plan) {

// (Doc section: Scan Example)

/// \brief An example demonstrating a scan and sink node
/// An example demonstrating a scan and sink node
///
/// Scan-Table
/// This example shows how a scan operation can be applied on a dataset.
@@ -292,7 +292,7 @@ arrow::Status ScanSinkExample() {

// (Doc section: Source Example)

/// \brief An example demonstrating a source and sink node
/// An example demonstrating a source and sink node
///
/// Source-Table Example
/// This example shows how a custom source can be used
@@ -315,7 +315,7 @@ arrow::Status SourceSinkExample() {

// (Doc section: Table Source Example)

/// \brief An example showing a table source node
/// An example showing a table source node
///
/// TableSource-Table Example
/// This example shows how a table_source can be used
@@ -338,7 +338,7 @@ arrow::Status TableSourceSinkExample() {

// (Doc section: Filter Example)

/// \brief An example showing a filter node
/// An example showing a filter node
///
/// Source-Filter-Table
/// This example shows how a filter can be used in an execution plan,
@@ -379,7 +379,7 @@ arrow::Status ScanFilterSinkExample() {

// (Doc section: Project Example)

/// \brief An example showing a project node
/// An example showing a project node
///
/// Scan-Project-Table
/// This example shows how a Scan operation can be used to load the data
@@ -428,7 +428,7 @@ arrow::Status ScanProjectSequenceSinkExample() {

// (Doc section: Scalar Aggregate Example)

/// \brief An example showing an aggregation node to aggregate an entire table
/// An example showing an aggregation node to aggregate an entire table
///
/// Source-Aggregation-Table
/// This example shows how an aggregation operation can be applied on a
@@ -453,7 +453,7 @@ arrow::Status SourceScalarAggregateSinkExample() {

// (Doc section: Group Aggregate Example)

/// \brief An example showing an aggregation node to perform a group-by operation
/// An example showing an aggregation node to perform a group-by operation
///
/// Source-Aggregation-Table
/// This example shows how an aggregation operation can be applied on a
@@ -482,7 +482,7 @@ arrow::Status SourceGroupAggregateSinkExample() {

// (Doc section: ConsumingSink Example)

/// \brief An example showing a consuming sink node
/// An example showing a consuming sink node
///
/// Source-Consuming-Sink
/// This example shows how the data can be consumed within the execution plan
@@ -571,7 +571,7 @@ arrow::Status ExecutePlanAndCollectAsTableWithCustomSink(
return future.status();
}

/// \brief An example showing an order-by node
/// An example showing an order-by node
///
/// Source-OrderBy-Sink
/// In this example, the data enters through the source node
@@ -602,7 +602,7 @@ arrow::Status SourceOrderBySinkExample() {

// (Doc section: HashJoin Example)

/// \brief An example showing a hash join node
/// An example showing a hash join node
///
/// Source-HashJoin-Table
/// This example shows how the source node gets the data and how a self-join
@@ -629,7 +629,7 @@ arrow::Status SourceHashJoinSinkExample() {

// (Doc section: KSelect Example)

/// \brief An example showing a select-k node
/// An example showing a select-k node
///
/// Source-KSelect
/// This example shows how K elements can be selected
@@ -661,8 +661,8 @@ arrow::Status SourceKSelectExample() {

// (Doc section: Write Example)

/// \brief An example showing a write node
/// \param file_path The destination to write to
/// An example showing a write node
/// :param file_path: The destination to write to
///
/// Scan-Filter-Write
/// This example shows how a scan node can be used to load the data
@@ -723,7 +723,7 @@ arrow::Status ScanFilterWriteExample(const std::string& file_path) {

// (Doc section: Union Example)

/// \brief An example showing a union node
/// An example showing a union node
///
/// Source-Union-Table
/// This example shows how a union operation can be applied on two
@@ -747,7 +747,7 @@ arrow::Status SourceUnionSinkExample() {

// (Doc section: Table Sink Example)

/// \brief An example showing a table sink node
/// An example showing a table sink node
///
/// TableSink Example
/// This example shows how a table_sink can be used
@@ -787,7 +787,7 @@ arrow::Status TableSinkExample() {

// (Doc section: RecordBatchReaderSource Example)

/// \brief An example showing the usage of a RecordBatchReader as the data source.
/// An example showing the usage of a RecordBatchReader as the data source.
///
/// RecordBatchReaderSourceSink Example
/// This example shows how a record_batch_reader_source can be used
2 changes: 0 additions & 2 deletions cpp/examples/arrow/filesystem_definition_example.cc
@@ -65,13 +65,11 @@ class ExampleFileSystem : public fs::FileSystem {
return type_name() == other.type_name();
}

/// \cond FALSE
using FileSystem::CreateDir;
using FileSystem::DeleteDirContents;
using FileSystem::GetFileInfo;
using FileSystem::OpenAppendStream;
using FileSystem::OpenOutputStream;
/// \endcond

Result<fs::FileInfo> GetFileInfo(const std::string& path) override {
if (path == kPath) {
18 changes: 9 additions & 9 deletions cpp/examples/arrow/rapidjson_row_converter.cc
@@ -50,7 +50,7 @@

const rapidjson::Value kNullJsonSingleton = rapidjson::Value();

/// \brief Builder that holds state for a single conversion.
/// Builder that holds state for a single conversion.
///
/// Implements Visit() methods for each type of Arrow Array that set the values
/// of the corresponding fields in each row.
@@ -66,10 +66,10 @@ class RowBatchBuilder {
}
}

/// \brief Set which field to convert.
/// Set which field to convert.
void SetField(const arrow::Field* field) { field_ = field; }

/// \brief Retrieve converted rows from builder.
/// Retrieve converted rows from builder.
std::vector<rapidjson::Document> Rows() && { return std::move(rows_); }

// Default implementation
@@ -212,7 +212,7 @@ class ArrowToDocumentConverter {
}
}; // ArrowToDocumentConverter

/// \brief Iterator over row values of a document for a given field
/// Iterator over row values of a document for a given field
///
/// path and array_levels are used to address each field in a JSON document. As
/// an example, consider this JSON document:
@@ -230,9 +230,9 @@ class ArrowToDocumentConverter {
/// },
class DocValuesIterator {
public:
/// \param rows vector of rows
/// \param path field names to enter
/// \param array_levels number of arrays to enter
/// :param rows: vector of rows
/// :param path: field names to enter
/// :param array_levels: number of arrays to enter
DocValuesIterator(const std::vector<rapidjson::Document>& rows,
std::vector<std::string> path, int64_t array_levels)
: rows(rows), path(std::move(path)), array_levels(array_levels) {}
@@ -329,12 +329,12 @@ class JsonValueConverter {
const std::vector<std::string>& root_path, int64_t array_levels)
: rows_(rows), root_path_(root_path), array_levels_(array_levels) {}

/// \brief For field passed in, append corresponding values to builder
/// For field passed in, append corresponding values to builder
arrow::Status Convert(const arrow::Field& field, arrow::ArrayBuilder* builder) {
return Convert(field, field.name(), builder);
}

/// \brief For field passed in, append corresponding values to builder
/// For field passed in, append corresponding values to builder
arrow::Status Convert(const arrow::Field& field, const std::string& field_name,
arrow::ArrayBuilder* builder) {
field_name_ = field_name;
8 changes: 4 additions & 4 deletions cpp/src/arrow/acero/accumulation_queue.h
@@ -31,7 +31,7 @@ namespace util {

using arrow::compute::ExecBatch;

/// \brief A container that accumulates batches until they are ready to
/// A container that accumulates batches until they are ready to
/// be processed.
class AccumulationQueue {
public:
@@ -84,7 +84,7 @@ class SequencingQueue {
/// safe to do things that rely on order but minimal time should be spent here
/// to avoid becoming a bottleneck.
///
/// \return a follow-up task that will be scheduled. The follow-up task(s) are
/// :return: a follow-up task that will be scheduled. The follow-up task(s) are
/// not guaranteed to run in any particular order. If nullopt is
/// returned then nothing will be scheduled.
virtual Result<std::optional<Task>> Process(ExecBatch batch) = 0;
@@ -109,7 +109,7 @@ class SequencingQueue {
virtual Status InsertBatch(ExecBatch batch) = 0;

/// Create a queue
/// \param processor describes how to process the batches, must outlive the queue
/// :param processor: describes how to process the batches, must outlive the queue
static std::unique_ptr<SequencingQueue> Make(Processor* processor);
};

@@ -152,7 +152,7 @@ class SerialSequencingQueue {
virtual Status InsertBatch(ExecBatch batch) = 0;

/// Create a queue
/// \param processor describes how to process the batches, must outlive the queue
/// :param processor: describes how to process the batches, must outlive the queue
static std::unique_ptr<SerialSequencingQueue> Make(Processor* processor);
};

20 changes: 10 additions & 10 deletions cpp/src/arrow/acero/aggregate_internal.h
@@ -142,10 +142,10 @@ Status HandleSegments(RowSegmenter* segmenter, const ExecBatch& batch,
return Status::OK();
}

/// @brief Extract values of segment keys from a segment batch
/// @param[out] values_ptr Vector to store the extracted segment key values
/// @param[in] input_batch Segment batch. Must have a constant value for segment key
/// @param[in] field_ids Segment key field ids
/// Extract values of segment keys from a segment batch
/// :param values_ptr[out]: Vector to store the extracted segment key values
/// :param input_batch: Segment batch. Must have a constant value for segment key
/// :param field_ids: Segment key field ids
Status ExtractSegmenterValues(std::vector<Datum>* values_ptr,
const ExecBatch& input_batch,
const std::vector<int>& field_ids);
@@ -233,7 +233,7 @@ class ScalarAggregateNode : public ExecNode, public TracedNode {
std::vector<std::vector<std::unique_ptr<KernelState>>> states_;

AtomicCounter input_counter_;
/// \brief Total number of output batches produced
/// Total number of output batches produced
int total_output_batches_ = 0;
};

@@ -328,22 +328,22 @@ class GroupByNode : public ExecNode, public TracedNode {
}

int output_task_group_id_;
/// \brief A segmenter for the segment-keys
/// A segmenter for the segment-keys
std::unique_ptr<RowSegmenter> segmenter_;
/// \brief Holds values of the current batch that were selected for the segment-keys
/// Holds values of the current batch that were selected for the segment-keys
std::vector<Datum> segmenter_values_;

const std::vector<int> key_field_ids_;
/// \brief Field indices corresponding to the segment-keys
/// Field indices corresponding to the segment-keys
const std::vector<int> segment_key_field_ids_;
/// \brief Types of input fields per aggregate
/// Types of input fields per aggregate
const std::vector<std::vector<TypeHolder>> agg_src_types_;
const std::vector<std::vector<int>> agg_src_fieldsets_;
const std::vector<Aggregate> aggs_;
const std::vector<const HashAggregateKernel*> agg_kernels_;

AtomicCounter input_counter_;
/// \brief Total number of output batches produced
/// Total number of output batches produced
int total_output_batches_ = 0;

std::vector<ThreadLocalState> local_states_;
12 changes: 6 additions & 6 deletions cpp/src/arrow/acero/aggregate_node.h
@@ -36,17 +36,17 @@ using compute::Aggregate;
using compute::default_exec_context;
using compute::ExecContext;

/// \brief Make the output schema of an aggregate node
/// Make the output schema of an aggregate node
///
/// The output schema is determined by the aggregation kernels, which may depend on the
/// ExecContext argument. To guarantee correct results, the same ExecContext argument
/// should be used in execution.
///
/// \param[in] input_schema the schema of the input to the node
/// \param[in] keys the grouping keys for the aggregation
/// \param[in] segment_keys the segmenting keys for the aggregation
/// \param[in] aggregates the aggregates for the aggregation
/// \param[in] exec_ctx the execution context for the aggregation
/// :param input_schema: the schema of the input to the node
/// :param keys: the grouping keys for the aggregation
/// :param segment_keys: the segmenting keys for the aggregation
/// :param aggregates: the aggregates for the aggregation
/// :param exec_ctx: the execution context for the aggregation
ARROW_ACERO_EXPORT Result<std::shared_ptr<Schema>> MakeOutputSchema(
const std::shared_ptr<Schema>& input_schema, const std::vector<FieldRef>& keys,
const std::vector<FieldRef>& segment_keys, const std::vector<Aggregate>& aggregates,
8 changes: 4 additions & 4 deletions cpp/src/arrow/acero/asof_join_node.cc
@@ -1237,11 +1237,11 @@ class AsofJoinNode : public ExecNode {
}
}

/// \brief Make the output schema of an as-of-join node
/// Make the output schema of an as-of-join node
///
/// \param[in] input_schema the schema of each input to the node
/// \param[in] indices_of_on_key the on-key index of each input to the node
/// \param[in] indices_of_by_key the by-key indices of each input to the node
/// :param input_schema: the schema of each input to the node
/// :param indices_of_on_key: the on-key index of each input to the node
/// :param indices_of_by_key: the by-key indices of each input to the node
static arrow::Result<std::shared_ptr<Schema>> MakeOutputSchema(
const std::vector<std::shared_ptr<Schema>> input_schema,
const std::vector<col_index_t>& indices_of_on_key,
6 changes: 3 additions & 3 deletions cpp/src/arrow/acero/asof_join_node.h
@@ -28,10 +28,10 @@ namespace asofjoin {

using AsofJoinKeys = AsofJoinNodeOptions::Keys;

/// \brief Make the output schema of an as-of-join node
/// Make the output schema of an as-of-join node
///
/// \param[in] input_schema the schema of each input to the node
/// \param[in] input_keys the key of each input to the node
/// :param input_schema: the schema of each input to the node
/// :param input_keys: the key of each input to the node
ARROW_ACERO_EXPORT Result<std::shared_ptr<Schema>> MakeOutputSchema(
const std::vector<std::shared_ptr<Schema>>& input_schema,
const std::vector<AsofJoinKeys>& input_keys);