Typo / spelling fixes (#20186)
* [*] Typo / spelling fixes

* [*] Typo / spelling fixes

* saveable => savable

* Saveable => Savable
SamuelMarks authored Aug 30, 2024
1 parent 62472e7 commit e72135b
Showing 45 changed files with 90 additions and 90 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/scripts/labeler.js
@@ -23,16 +23,16 @@ You may obtain a copy of the License at

module.exports = async ({ github, context }) => {
const issue_title = context.payload.issue ? context.payload.issue.title : context.payload.pull_request.title
const issue_discription = context.payload.issue ? context.payload.issue.body : context.payload.pull_request.body
const issue_description = context.payload.issue ? context.payload.issue.body : context.payload.pull_request.body
const issue_number = context.payload.issue ? context.payload.issue.number : context.payload.pull_request.number
const keyword_label = {
gemma:'Gemma'
}
const labelsToAdd = []
console.log(issue_title,issue_discription,issue_number)
console.log(issue_title,issue_description,issue_number)

for(const [keyword, label] of Object.entries(keyword_label)){
if(issue_title.toLowerCase().indexOf(keyword) !=-1 || issue_discription.toLowerCase().indexOf(keyword) !=-1 ){
if(issue_title.toLowerCase().indexOf(keyword) !=-1 || issue_description.toLowerCase().indexOf(keyword) !=-1 ){
console.log(`'${keyword}'keyword is present inside the title or description. Pushing label '${label}' to row.`)
labelsToAdd.push(label)
}
2 changes: 1 addition & 1 deletion guides/training_with_built_in_methods.py
@@ -650,7 +650,7 @@ def __getitem__(self, idx):
`True` if your dataset can be safely pickled.
- `max_queue_size`: Maximum number of batches to keep in the queue
when iterating over the dataset in a multithreaded or
multipricessed setting.
multiprocessed setting.
You can reduce this value to reduce the CPU memory consumption of
your dataset. It defaults to 10.
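The `max_queue_size` knob documented above caps how many prefetched batches a `keras.utils.PyDataset` keeps in memory when workers are used. A minimal sketch of how it is wired up, assuming a made-up dataset (not code from the guide):

import numpy as np
import keras

class RandomDataset(keras.utils.PyDataset):
    # Hypothetical dataset: 100 batches of random data.
    def __init__(self, **kwargs):
        # Forwards workers, use_multiprocessing, and max_queue_size to PyDataset.
        super().__init__(**kwargs)

    def __len__(self):
        return 100

    def __getitem__(self, idx):
        return np.random.rand(32, 8), np.random.rand(32, 1)

# Keep at most 4 prefetched batches in the queue instead of the default 10.
ds = RandomDataset(workers=4, use_multiprocessing=False, max_queue_size=4)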
4 changes: 2 additions & 2 deletions keras/src/applications/efficientnet.py
@@ -325,9 +325,9 @@ def round_repeats(repeats):
x = layers.Rescaling(1.0 / 255.0)(x)
x = layers.Normalization(axis=bn_axis)(x)
if weights == "imagenet":
# Note that the normaliztion layer uses square value of STDDEV as the
# Note that the normalization layer uses square value of STDDEV as the
# variance for the layer: result = (input - mean) / sqrt(var)
# However, the original implemenetation uses (input - mean) / var to
# However, the original implementation uses (input - mean) / var to
# normalize the input, we need to divide another sqrt(var) to match the
# original implementation.
# See https://github.com/tensorflow/tensorflow/issues/49930 for more
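The comment in this hunk captures a subtle point: the reference EfficientNet divides by the variance, while the Keras Normalization layer divides by sqrt(var), so one extra division by sqrt(var) is needed to match. A small numpy sketch of that equivalence, using made-up statistics:

import numpy as np

x = np.array([100.0, 150.0, 200.0])
mean, var = 127.0, 64.0

reference = (x - mean) / var             # original implementation: divide by variance
normalized = (x - mean) / np.sqrt(var)   # what a Normalization layer produces
matched = normalized / np.sqrt(var)      # divide by sqrt(var) once more to match

assert np.allclose(reference, matched)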
8 changes: 4 additions & 4 deletions keras/src/backend/common/backend_utils.py
@@ -4,7 +4,7 @@
import warnings


def _convert_conv_tranpose_padding_args_from_keras_to_jax(
def _convert_conv_transpose_padding_args_from_keras_to_jax(
kernel_size, stride, dilation_rate, padding, output_padding
):
"""Convert the padding arguments from Keras to the ones used by JAX.
@@ -45,7 +45,7 @@ def _convert_conv_transpose_padding_args_from_keras_to_jax(
return left_pad, right_pad


def _convert_conv_tranpose_padding_args_from_keras_to_torch(
def _convert_conv_transpose_padding_args_from_keras_to_torch(
kernel_size, stride, dilation_rate, padding, output_padding
):
"""Convert the padding arguments from Keras to the ones used by Torch.
@@ -134,7 +134,7 @@ def compute_conv_transpose_padding_args_for_jax(
(
pad_left,
pad_right,
) = _convert_conv_tranpose_padding_args_from_keras_to_jax(
) = _convert_conv_transpose_padding_args_from_keras_to_jax(
kernel_size=kernel_spatial_shape[i],
stride=strides_i,
dilation_rate=dilation_rate_i,
@@ -174,7 +174,7 @@ def compute_conv_transpose_padding_args_for_torch(
(
torch_padding,
torch_output_padding,
) = _convert_conv_tranpose_padding_args_from_keras_to_torch(
) = _convert_conv_transpose_padding_args_from_keras_to_torch(
kernel_size=kernel_spatial_shape[i],
stride=strides_i,
dilation_rate=dilation_rate_i,
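Each renamed converter takes the per-dimension Keras arguments and returns backend-specific padding values, which the public compute_conv_transpose_padding_args_for_* helpers collect in a loop over the spatial dimensions, as shown in the hunks above. A hedged usage sketch for one dimension (the function is private and the exact returned values are backend-specific, so none are asserted here):

from keras.src.backend.common.backend_utils import (
    _convert_conv_transpose_padding_args_from_keras_to_torch,
)

# One spatial dimension of a transposed convolution with "same" padding.
torch_padding, torch_output_padding = (
    _convert_conv_transpose_padding_args_from_keras_to_torch(
        kernel_size=3,
        stride=2,
        dilation_rate=1,
        padding="same",
        output_padding=None,
    )
)
print(torch_padding, torch_output_padding)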
20 changes: 10 additions & 10 deletions keras/src/backend/common/backend_utils_test.py
@@ -1,8 +1,8 @@
from keras.src.backend.common.backend_utils import (
_convert_conv_tranpose_padding_args_from_keras_to_jax,
_convert_conv_transpose_padding_args_from_keras_to_jax,
)
from keras.src.backend.common.backend_utils import (
_convert_conv_tranpose_padding_args_from_keras_to_torch,
_convert_conv_transpose_padding_args_from_keras_to_torch,
)
from keras.src.backend.common.backend_utils import (
_get_output_shape_given_tf_padding,
@@ -22,7 +22,7 @@ def test_valid_padding_without_output_padding(self):
(
left_pad,
right_pad,
) = _convert_conv_tranpose_padding_args_from_keras_to_jax(
) = _convert_conv_transpose_padding_args_from_keras_to_jax(
kernel_size=3,
stride=2,
dilation_rate=1,
@@ -37,7 +37,7 @@ def test_same_padding_without_output_padding(self):
(
left_pad,
right_pad,
) = _convert_conv_tranpose_padding_args_from_keras_to_jax(
) = _convert_conv_transpose_padding_args_from_keras_to_jax(
kernel_size=3,
stride=2,
dilation_rate=1,
@@ -54,7 +54,7 @@ def test_valid_padding_without_output_padding(self):
(
torch_padding,
torch_output_padding,
) = _convert_conv_tranpose_padding_args_from_keras_to_torch(
) = _convert_conv_transpose_padding_args_from_keras_to_torch(
kernel_size=3,
stride=2,
dilation_rate=1,
@@ -69,7 +69,7 @@ def test_same_padding_without_output_padding(self):
(
torch_padding,
torch_output_padding,
) = _convert_conv_tranpose_padding_args_from_keras_to_torch(
) = _convert_conv_transpose_padding_args_from_keras_to_torch(
kernel_size=3,
stride=2,
dilation_rate=1,
@@ -145,7 +145,7 @@ def test_valid_padding_with_none_output_padding(self):
(
torch_padding,
torch_output_padding,
) = _convert_conv_tranpose_padding_args_from_keras_to_torch(
) = _convert_conv_transpose_padding_args_from_keras_to_torch(
kernel_size=3,
stride=2,
dilation_rate=1,
@@ -160,7 +160,7 @@ def test_valid_padding_with_output_padding(self):
(
torch_padding,
torch_output_padding,
) = _convert_conv_tranpose_padding_args_from_keras_to_torch(
) = _convert_conv_transpose_padding_args_from_keras_to_torch(
kernel_size=3,
stride=2,
dilation_rate=1,
@@ -211,7 +211,7 @@ def test_valid_padding_with_output_padding(self):
def test_warning_for_inconsistencies(self):
"""Test that a warning is raised for potential inconsistencies"""
with self.assertWarns(Warning):
_convert_conv_tranpose_padding_args_from_keras_to_torch(
_convert_conv_transpose_padding_args_from_keras_to_torch(
kernel_size=3,
stride=2,
dilation_rate=1,
@@ -224,7 +224,7 @@ def test_same_padding_without_output_padding_for_torch_(self):
(
torch_padding,
torch_output_padding,
) = _convert_conv_tranpose_padding_args_from_keras_to_torch(
) = _convert_conv_transpose_padding_args_from_keras_to_torch(
kernel_size=3,
stride=2,
dilation_rate=1,
2 changes: 1 addition & 1 deletion keras/src/backend/common/variables_test.py
@@ -397,7 +397,7 @@ def test_variable_array(self):
self.assertAllClose(v.__array__(), np.array([1, 2, 3]))


class VariableOpsCorrentnessTest(test_case.TestCase):
class VariableOpsCorrectnessTest(test_case.TestCase):
"""Tests for operations on KerasVariable."""

def test_int(self):
4 changes: 2 additions & 2 deletions keras/src/backend/jax/distribution_lib.py
@@ -58,7 +58,7 @@ def distribute_variable(value, layout):
if layout.is_fully_addressable:
return jax.device_put(value, layout)
else:
# Need to only distribute the value to local addressible devices, and
# Need to only distribute the value to local addressable devices, and
# repack them back into global format.
mapping = layout.addressable_devices_indices_map(value.shape)
local_values = jax.device_put(
@@ -94,7 +94,7 @@ def distribute_tensor(tensor, layout):
if layout.is_fully_addressable:
return jax.device_put(tensor, layout)
else:
# Need to only distribute the value to local addressible devices, and
# Need to only distribute the value to local addressable devices, and
# repack them back into global format.
mapping = layout.addressable_devices_indices_map(tensor.shape)
local_values = jax.device_put(
8 changes: 4 additions & 4 deletions keras/src/backend/jax/distribution_lib_test.py
@@ -47,9 +47,9 @@ def test_device_conversion(self):
self.assertEqual(jax_d, converted_jax_device)

@mock.patch.object(jax.distributed, "initialize", return_value=None)
def test_initialize_with_all_job_addresses(self, mock_jax_initialze):
def test_initialize_with_all_job_addresses(self, mock_jax_initialize):
backend_dlib.initialize("10.0.0.1:1234,10.0.0.2:2345", 2, 0)
mock_jax_initialze.assert_called_once_with(
mock_jax_initialize.assert_called_once_with(
coordinator_address="10.0.0.1:1234", num_processes=2, process_id=0
)

@@ -60,9 +60,9 @@ def test_initialize_validate_job_and_process(self):
backend_dlib.initialize("10.0.0.1:1234,10.0.0.2:2345", 3, 0)

@mock.patch.object(jax.distributed, "initialize", return_value=None)
def test_initialize_with_coordinater_address(self, mock_jax_initialze):
def test_initialize_with_coordinator_address(self, mock_jax_initialize):
backend_dlib.initialize("10.0.0.1:1234", 2, 0)
mock_jax_initialze.assert_called_once_with(
mock_jax_initialize.assert_called_once_with(
coordinator_address="10.0.0.1:1234", num_processes=2, process_id=0
)

2 changes: 1 addition & 1 deletion keras/src/backend/tensorflow/numpy.py
@@ -2131,7 +2131,7 @@ def round(x, decimals=0):
# int
if decimals > 0:
return x
# temporarilaly convert to floats
# temporarily convert to floats
factor = tf.cast(math.pow(10, decimals), config.floatx())
x = tf.cast(x, config.floatx())
else:
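The corrected comment refers to the usual trick for rounding to a given number of decimals: scale by 10**decimals, round to the nearest integer, then undo the scaling, which for integer tensors requires a temporary float round-trip. A small numpy sketch of that scheme (not the TensorFlow implementation itself):

import numpy as np

def round_to_decimals(x, decimals=0):
    # Scale, round to the nearest integer, then undo the scaling.
    factor = 10.0 ** decimals
    return np.round(x * factor) / factor

print(round_to_decimals(np.array([1234.0, 5678.0]), decimals=-2))  # [1200. 5700.]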
30 changes: 15 additions & 15 deletions keras/src/backend/tensorflow/random.py
@@ -47,15 +47,15 @@ def categorical(logits, num_samples, dtype="int64", seed=None):


def randint(shape, minval, maxval, dtype="int32", seed=None):
intemediate_dtype = dtype
intermediate_dtype = dtype
if standardize_dtype(dtype) not in ["int32", "int64"]:
intemediate_dtype = "int64"
intermediate_dtype = "int64"
seed = _cast_seed(draw_seed(seed))
output = tf.random.stateless_uniform(
shape=shape,
minval=minval,
maxval=maxval,
dtype=intemediate_dtype,
dtype=intermediate_dtype,
seed=seed,
)
return tf.cast(output, dtype)
@@ -109,14 +109,14 @@ def gamma(shape, alpha, dtype=None, seed=None):
dtype = dtype or floatx()
seed = _cast_seed(draw_seed(seed))
# TODO: `tf.random.stateless_gamma` doesn't support bfloat16
intemediate_dtype = dtype
intermediate_dtype = dtype
if standardize_dtype(dtype) == "bfloat16":
intemediate_dtype = "float32"
intermediate_dtype = "float32"
return tf.cast(
tf.random.stateless_gamma(
shape,
alpha=alpha,
dtype=intemediate_dtype,
dtype=intermediate_dtype,
seed=seed,
),
dtype,
@@ -127,16 +127,16 @@ def binomial(shape, counts, probabilities, dtype=None, seed=None):
dtype = dtype or floatx()
seed = _cast_seed(draw_seed(seed))
# TODO: `tf.random.stateless_binomial` doesn't support bfloat16
intemediate_dtype = dtype
intermediate_dtype = dtype
if standardize_dtype(dtype) == "bfloat16":
intemediate_dtype = "float32"
intermediate_dtype = "float32"
return tf.cast(
tf.random.stateless_binomial(
shape=shape,
seed=seed,
counts=counts,
probs=probabilities,
output_dtype=intemediate_dtype,
output_dtype=intermediate_dtype,
),
dtype,
)
@@ -161,11 +161,11 @@ def beta(shape, alpha, beta, dtype=None, seed=None):
seed_2 = seed_1 + 12

# TODO: `tf.random.stateless_gamma` doesn't support bfloat16
intemediate_dtype = dtype
intermediate_dtype = dtype
if standardize_dtype(dtype) == "bfloat16":
intemediate_dtype = "float32"
alpha = tf.convert_to_tensor(alpha, dtype=intemediate_dtype)
beta = tf.convert_to_tensor(beta, dtype=intemediate_dtype)
intermediate_dtype = "float32"
alpha = tf.convert_to_tensor(alpha, dtype=intermediate_dtype)
beta = tf.convert_to_tensor(beta, dtype=intermediate_dtype)

# tensorflow's tf.random.stateless_gamma has a bit of unconventional
# implementation of the stateless_gamma function where it checks the
@@ -180,13 +180,13 @@

gamma_a = tf.cast(
tf.random.stateless_gamma(
shape=shape, seed=seed_1, alpha=alpha, dtype=intemediate_dtype
shape=shape, seed=seed_1, alpha=alpha, dtype=intermediate_dtype
),
dtype,
)
gamma_b = tf.cast(
tf.random.stateless_gamma(
shape=shape, seed=seed_2, alpha=beta, dtype=intemediate_dtype
shape=shape, seed=seed_2, alpha=beta, dtype=intermediate_dtype
),
dtype,
)
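The two stateless gamma draws in this file feed the standard identity Beta(a, b) = X / (X + Y) with X ~ Gamma(a, 1) and Y ~ Gamma(b, 1); the intermediate dtype is only promoted to float32 because tf.random.stateless_gamma does not support bfloat16. A numpy sketch of the same construction, assuming that identity is what the omitted tail of the diff computes:

import numpy as np

rng = np.random.default_rng(0)
alpha, beta = 2.0, 5.0

gamma_a = rng.gamma(shape=alpha, scale=1.0, size=10_000)
gamma_b = rng.gamma(shape=beta, scale=1.0, size=10_000)
beta_samples = gamma_a / (gamma_a + gamma_b)

# The mean of Beta(a, b) is a / (a + b) = 2 / 7, roughly 0.286.
print(beta_samples.mean())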
2 changes: 1 addition & 1 deletion keras/src/backend/torch/linalg.py
@@ -29,7 +29,7 @@ def inv(x):

def lu_factor(x):
LU, pivots = torch.linalg.lu_factor(x)
# torch retuns pivots with 1-based indexing
# torch returns pivots with 1-based indexing
return LU, pivots - 1


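Subtracting one converts Torch's LAPACK-style 1-based pivot indices to the 0-based convention used elsewhere. A hedged comparison against SciPy's 0-based lu_factor illustrates the off-by-one (the two libraries typically pick the same pivots for a simple, well-conditioned matrix like this one):

import numpy as np
import torch
from scipy.linalg import lu_factor as scipy_lu_factor

x = np.array([[4.0, 3.0], [6.0, 3.0]])

lu_t, pivots_t = torch.linalg.lu_factor(torch.tensor(x))
lu_s, pivots_s = scipy_lu_factor(x)

print(pivots_t.numpy() - 1)  # shifted to 0-based, comparable to SciPy's piv
print(pivots_s)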
2 changes: 1 addition & 1 deletion keras/src/callbacks/model_checkpoint.py
@@ -341,7 +341,7 @@ def _get_most_recently_modified_file_matching_pattern(self, pattern):
later time of modification (for instance, when epoch/batch is used as
formatting option), but not necessarily (when accuracy or loss is used).
The tie-breaker is put in the logic as best effort to return the most
recent, and to avoid undeterministic result.
recent, and to avoid nondeterministic result.
Modified time of a file is obtained with `os.path.getmtime()`.
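The tie-breaking behavior described in that docstring amounts to: among files matching the checkpoint pattern, pick the one with the latest os.path.getmtime(), and break exact mtime ties deterministically rather than relying on filesystem order. A simplified, hypothetical sketch of that idea (not the callback's actual implementation, which also parses epoch/batch numbers from the filename):

import glob
import os

def most_recent_checkpoint(pattern):
    """Return the matching file with the latest mtime; ties broken by name."""
    candidates = glob.glob(pattern)
    if not candidates:
        return None
    return max(candidates, key=lambda path: (os.path.getmtime(path), path))

# e.g. most_recent_checkpoint("/tmp/ckpt/weights_epoch_*.weights.h5")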
2 changes: 1 addition & 1 deletion keras/src/distribution/distribution_lib.py
@@ -694,7 +694,7 @@ class LayoutMap(collections.abc.MutableMapping):
`TensorLayout` instance.
In the normal case, the key to query is usually the `variable.path`, which
is the idenifier of the variable.
is the identifier of the variable.
As shortcut, tuple or list of axis names are also allowed when inserting
as value, and will be converted to `TensorLayout`.
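Since LayoutMap keys are matched against variable.path and plain tuples of axis names are converted into TensorLayout objects, typical usage looks like the hedged sketch below; the regex keys and mesh axis names are illustrative, not taken from the diff:

from keras import distribution

# Keys are patterns matched against variable.path; values can be plain tuples
# of mesh axis names, which LayoutMap converts to TensorLayout instances.
layout_map = distribution.LayoutMap()
layout_map["dense.*kernel"] = (None, "model")
layout_map["dense.*bias"] = ("model",)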
2 changes: 1 addition & 1 deletion keras/src/export/export_lib.py
@@ -617,7 +617,7 @@ def _check_device_compatible(self):
"the TF runtime in the same environment will not work. "
"To use JAX-native serialization for high-performance export "
"and serving, please install `tensorflow-gpu` and ensure "
"CUDA version compatiblity between your JAX and TF "
"CUDA version compatibility between your JAX and TF "
"installations."
)
return False
2 changes: 1 addition & 1 deletion keras/src/layers/attention/grouped_query_attention_test.py
@@ -124,7 +124,7 @@ def test_initializer(self):
backend.backend() == "numpy",
reason="Numpy backend does not support masking.",
)
def test_query_mask_progagation(self):
def test_query_mask_propagation(self):
"""Test automatic propagation of the query's mask."""
layer = layers.GroupedQueryAttention(
num_query_heads=2, num_key_value_heads=2, head_dim=2
2 changes: 1 addition & 1 deletion keras/src/layers/attention/multi_head_attention_test.py
@@ -94,7 +94,7 @@ def test_high_dim_attention(
@parameterized.named_parameters(
("without_key_same_proj", (4, 8), (2, 8), None, None),
("with_key_same_proj", (4, 8), (2, 8), (2, 3), None),
("wihtout_key_different_proj", (4, 8), (2, 8), None, (3, 4)),
("without_key_different_proj", (4, 8), (2, 8), None, (3, 4)),
("with_key_different_proj", (4, 8), (2, 8), (2, 3), (1, 5)),
("high_dim_same_proj", (4, 2, 3, 8), (1, 1, 5, 8), (1, 1, 5, 2), None),
(