Skip to content

Commit

Permalink
Merge pull request #730 from guijiql/master
Browse files Browse the repository at this point in the history
FIX: typos in doc for evaluator
  • Loading branch information
2017pxy authored Feb 21, 2021
2 parents e6607d4 + ba3e113 commit 1a5f661
Show file tree
Hide file tree
Showing 4 changed files with 9 additions and 10 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/python-package.yml
Original file line number Diff line number Diff line change
Expand Up @@ -44,4 +44,4 @@ jobs:
python -m pytest -v tests/config/test_config.py
export PYTHONPATH=.
python tests/config/test_command_line.py --use_gpu=False --valid_metric=Recall@10 --split_ratio=[0.7,0.2,0.1] --metrics=['Recall@10'] --epochs=200 --eval_setting='LO_RS' --learning_rate=0.3
6 changes: 2 additions & 4 deletions recbole/evaluator/evaluators.py
Original file line number Diff line number Diff line change
Expand Up @@ -189,7 +189,7 @@ def average_rank(self, scores):
torch.Tensor: average_rank
Example:
>>> average_rank(tensor([[1,2,2,2,3,3,6],[2,2,2,2,4,4,5]]))
>>> average_rank(tensor([[1,2,2,2,3,3,6],[2,2,2,2,4,5,5]]))
tensor([[1.0000, 3.0000, 3.0000, 3.0000, 5.5000, 5.5000, 7.0000],
[2.5000, 2.5000, 2.5000, 2.5000, 5.0000, 6.5000, 6.5000]])
Expand Down Expand Up @@ -243,7 +243,7 @@ def evaluate(self, batch_matrix_list, eval_data):
eval_data (Dataset): the class of test data
Returns:
dict: such as ``{'GAUC:0.9286}``
dict: such as ``{'GAUC': 0.9286}``
"""
pos_len_list = eval_data.get_pos_len_list()
Expand Down Expand Up @@ -282,8 +282,6 @@ def __str__(self):
msg = 'The Rank Evaluator Info:\n' + \
'\tMetrics:[' + \
', '.join([rank_metrics[metric.lower()] for metric in self.metrics]) + \
'], TopK:[' + \
', '.join(map(str, self.topk)) + \
']'
return msg

Expand Down
6 changes: 3 additions & 3 deletions recbole/evaluator/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,13 +120,13 @@ def ndcg_(pos_index, pos_len):
\mathrm {DCG@K}=\sum_{i=1}^{K} \frac{2^{rel_i}-1}{\log_{2}{(i+1)}}\\
\mathrm {IDCG@K}=\sum_{i=1}^{K}\frac{1}{\log_{2}{(i+1)}}\\
\mathrm {NDCG_u@K}=\frac{DCG_u@K}{IDCG_u@K}\\
\mathrm {NDCG@K}=\frac{\sum \nolimits_{u \in u^{te}NDCG_u@K}}{|u^{te}|}
\mathrm {NDCG@K}=\frac{\sum \nolimits_{u \in U^{te}} NDCG_u@K}{|U^{te}|}
\end{gather}
:math:`K` stands for recommending :math:`K` items.
And the :math:`rel_i` is the relevance of the item in position :math:`i` in the recommendation list.
:math:`2^{rel_i}` equals to 1 if the item hits otherwise 0.
:math:`U^{te}` is for all users in the test set.
:math:`{rel_i}` equals 1 if the item is in the ground truth, otherwise 0.
:math:`U^{te}` stands for all users in the test set.
"""
len_rank = np.full_like(pos_len, pos_index.shape[1])
Expand Down
5 changes: 3 additions & 2 deletions tests/model/test_model_auto.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
# UPDATE
# @Time : 2020/11/17
# @Author : Xingyu Pan
# @email : [email protected]
# @email : [email protected]

import os
import unittest
Expand Down Expand Up @@ -160,14 +160,15 @@ def test_MacridVAE(self):
'training_neg_sample_num': 0
}
quick_test(config_dict)

def test_CDAE(self):
config_dict = {
'model': 'CDAE',
'training_neg_sample_num': 0
}
quick_test(config_dict)


class TestContextRecommender(unittest.TestCase):
# todo: more complex context information should be test, such as criteo dataset

Expand Down

0 comments on commit 1a5f661

Please sign in to comment.