Fix few deprecations
PrimozGodec committed May 3, 2022
1 parent cb8e0a7 commit 755517d
Showing 4 changed files with 5 additions and 5 deletions.
2 changes: 1 addition & 1 deletion orangecontrib/text/ontology.py
@@ -185,7 +185,7 @@ def generate_ontology(
for i in range(population_size)
])
fitness = fitness / fitness.sum()
-new_generation = np.zeros(generation.shape, dtype=np.int32)
+new_generation = np.zeros(generation.shape, dtype=int)
parents_to_keep = np.argsort(fitness)[-keep_next_gen:]
new_generation[:keep_next_gen, :] = generation[parents_to_keep, :]
for i in range(keep_next_gen, population_size):
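For context, a minimal sketch (not part of the commit) of what the dtype swap changes: the builtin int resolves to NumPy's default integer type (np.int_, the platform's C long), so the array dtype can be int64 rather than the previously hard-coded int32 on most 64-bit platforms.

import numpy as np

# Illustrative only: `dtype=int` resolves to NumPy's default integer type
# (np.int_), typically int64 on 64-bit Linux/macOS and int32 on Windows.
a = np.zeros((2, 3), dtype=np.int32)
b = np.zeros((2, 3), dtype=int)
print(a.dtype)  # int32
print(b.dtype)  # usually int64 on 64-bit Linux/macOS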
2 changes: 1 addition & 1 deletion orangecontrib/text/tests/test_guardian.py
@@ -19,7 +19,7 @@ def test_valid(self, mock_get):

def test_equal(self):
credentials = guardian.TheGuardianCredentials(API_KEY)
-self.assertEquals(credentials, credentials)
+self.assertEqual(credentials, credentials)


def skip_limit_exceeded(fun):
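For context: unittest's assertEquals is a deprecated alias of assertEqual (it emits a DeprecationWarning and was removed in Python 3.12). A minimal, self-contained sketch of the canonical spelling, unrelated to the Guardian test itself:

import unittest


class AliasExample(unittest.TestCase):
    def test_equal(self):
        # Canonical name; `assertEquals` is only a deprecated alias of this.
        self.assertEqual(1 + 1, 2)


if __name__ == "__main__":
    unittest.main()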
4 changes: 2 additions & 2 deletions orangecontrib/text/vectorization/bagofwords.py
@@ -46,8 +46,8 @@ class BowVectorizer(BaseVectorizer):

wlocals = OrderedDict((
(COUNT, lambda tf: tf),
-(BINARY, lambda tf: np.greater(tf, 0, dtype=np.int) if tf.size
-else np.array([], dtype=np.int)),
+(BINARY, lambda tf: np.greater(tf, 0).astype(int) if tf.size
+else np.array([], dtype=int)),
(SUBLINEAR, lambda tf: 1 + np.log(tf)),
))

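For context: np.int was a deprecated alias of the builtin int (deprecated in NumPy 1.20, removed in 1.24), and np.greater returns a boolean array, so an explicit .astype(int) cast is the straightforward replacement. A minimal sketch with made-up term frequencies:

import numpy as np

# Illustrative term-frequency vector (not real data): the comparison yields
# booleans, which are cast to integers instead of passing dtype=np.int.
tf = np.array([0.0, 2.0, 0.0, 5.0])
binary = np.greater(tf, 0).astype(int) if tf.size else np.array([], dtype=int)
print(binary)  # [0 1 0 1]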
2 changes: 1 addition & 1 deletion orangecontrib/text/vectorization/simhash.py
@@ -47,7 +47,7 @@ def _transform(self, corpus, source_dict):
Corpus with `simhash` variable
"""

-X = np.array([self.int2binarray(self.compute_hash(doc)) for doc in corpus.tokens], dtype=np.float)
+X = np.array([self.int2binarray(self.compute_hash(doc)) for doc in corpus.tokens], dtype=float)
corpus = corpus.extend_attributes(
X,
feature_names=[
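For context: np.float was likewise a deprecated alias of the builtin float, removed in NumPy 1.24; dtype=float is equivalent to np.float64. A minimal sketch with made-up bit vectors standing in for the int2binarray output:

import numpy as np

# Illustrative bit vectors (not real simhash output): dtype=float gives
# float64, the same type the removed np.float alias resolved to.
bits = [[0, 1, 1, 0], [1, 0, 0, 1]]
X = np.array(bits, dtype=float)
print(X.dtype)  # float64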
