Merge remote-tracking branch 'upstream/master' into rkazants/poc_openvino_backend
rkazants committed Sep 23, 2024
2 parents 76d4c1a + d85036c commit af254e9
Showing 361 changed files with 17,575 additions and 5,617 deletions.
4 changes: 2 additions & 2 deletions .devcontainer/devcontainer.json
@@ -1,6 +1,6 @@
 {
   "image": "mcr.microsoft.com/vscode/devcontainers/python:3.10",
-  "postCreateCommand": "sh ./.devcontainer/setup.sh",
+  "postCreateCommand": "sh ./.devcontainer/setup.sh && pip install -r requirements.txt",
   "customizations": {
     "vscode": {
       "settings": {
@@ -27,4 +27,4 @@
   "features": {
     "ghcr.io/devcontainers/features/github-cli:1": {}
   }
-}
\ No newline at end of file
+}
6 changes: 3 additions & 3 deletions .github/workflows/scorecard.yml
@@ -30,7 +30,7 @@ jobs:
           persist-credentials: false

       - name: "Run analysis"
-        uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1
+        uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0
         with:
           results_file: results.sarif
           results_format: sarif
@@ -48,14 +48,14 @@ jobs:
       # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
       # format to the repository Actions tab.
       - name: "Upload artifact"
-        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
         with:
           name: SARIF file
           path: results.sarif
           retention-days: 5

       # Upload the results to GitHub's code scanning dashboard.
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@d39d31e687223d841ef683f52467bd88e9b21c14 # v3.25.3
+        uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
         with:
           sarif_file: results.sarif
2 changes: 1 addition & 1 deletion .github/workflows/scripts/auto-assignment.js
@@ -29,7 +29,7 @@ module.exports = async ({ github, context }) => {
   // Is this an issue? If so, assign the issue number. Otherwise, assign the PR number.
   if (context.payload.issue) {
     //assignee List for issues.
-    assigneesList = ["SuryanarayanaY", "sachinprasadhs"];
+    assigneesList = ["mehtamansi29", "sachinprasadhs"];
     issueNumber = context.payload.issue.number;
   } else {
     //assignee List for PRs.
6 changes: 3 additions & 3 deletions .github/workflows/scripts/labeler.js
@@ -23,16 +23,16 @@ You may obtain a copy of the License at

 module.exports = async ({ github, context }) => {
   const issue_title = context.payload.issue ? context.payload.issue.title : context.payload.pull_request.title
-  const issue_discription = context.payload.issue ? context.payload.issue.body : context.payload.pull_request.body
+  const issue_description = context.payload.issue ? context.payload.issue.body : context.payload.pull_request.body
   const issue_number = context.payload.issue ? context.payload.issue.number : context.payload.pull_request.number
   const keyword_label = {
     gemma:'Gemma'
   }
   const labelsToAdd = []
-  console.log(issue_title,issue_discription,issue_number)
+  console.log(issue_title,issue_description,issue_number)

   for(const [keyword, label] of Object.entries(keyword_label)){
-    if(issue_title.toLowerCase().indexOf(keyword) !=-1 || issue_discription.toLowerCase().indexOf(keyword) !=-1 ){
+    if(issue_title.toLowerCase().indexOf(keyword) !=-1 || issue_description.toLowerCase().indexOf(keyword) !=-1 ){
       console.log(`'${keyword}'keyword is present inside the title or description. Pushing label '${label}' to row.`)
       labelsToAdd.push(label)
     }
2 changes: 2 additions & 0 deletions .kokoro/github/ubuntu/gpu/build.sh
@@ -57,6 +57,8 @@ then
       --ignore keras/src/backend/jax/distribution_lib_test.py \
       --ignore keras/src/distribution/distribution_lib_test.py \
       --cov=keras
+
+   pytest keras/src/distribution/distribution_lib_test.py --cov=keras
 fi

 if [ "$KERAS_BACKEND" == "torch" ]
Empty file added FE
73 changes: 30 additions & 43 deletions api_gen.py
@@ -6,13 +6,15 @@
 It generates API and formats user and generated APIs.
 """

+import importlib
 import os
 import re
 import shutil

 import namex

-package = "keras"
+PACKAGE = "keras"
+BUILD_DIR_NAME = "tmp_build_dir"


 def ignore_files(_, filenames):
@@ -21,12 +23,12 @@ def ignore_files(_, filenames):

 def copy_source_to_build_directory(root_path):
     # Copy sources (`keras/` directory and setup files) to build dir
-    build_dir = os.path.join(root_path, "tmp_build_dir")
+    build_dir = os.path.join(root_path, BUILD_DIR_NAME)
     if os.path.exists(build_dir):
         shutil.rmtree(build_dir)
     os.mkdir(build_dir)
     shutil.copytree(
-        package, os.path.join(build_dir, package), ignore=ignore_files
+        PACKAGE, os.path.join(build_dir, PACKAGE), ignore=ignore_files
     )
     return build_dir
@@ -140,49 +142,33 @@ def export_version_string(api_init_fname):
         f.write(contents)


-def update_package_init(init_fname):
-    contents = """
-# Import everything from /api/ into keras.
-from keras.api import *  # noqa: F403
-from keras.api import __version__  # Import * ignores names start with "_".
-
-import os
-
-# Add everything in /api/ to the module search path.
-__path__.append(os.path.join(os.path.dirname(__file__), "api"))  # noqa: F405
-
-# Don't pollute namespace.
-del os
-
-# Never autocomplete `.src` or `.api` on an imported keras object.
-def __dir__():
-    keys = dict.fromkeys((globals().keys()))
-    keys.pop("src")
-    keys.pop("api")
-    return list(keys)
-
-# Don't import `.src` or `.api` during `from keras import *`.
-__all__ = [
-    name
-    for name in globals().keys()
-    if not (name.startswith("_") or name in ("src", "api"))
-]"""
-    with open(init_fname) as f:
-        init_contents = f.read()
-    with open(init_fname, "w") as f:
-        f.write(init_contents.replace("\nfrom keras import api", contents))
+def update_package_init(template_fname, dest_fname, api_module):
+    with open(template_fname) as template_file:
+        with open(dest_fname, "w") as dest_file:
+            for line in template_file:
+                if "# DO NOT EDIT." in line:
+                    dest_file.write(line)
+                    # Import all public symbols from `api/` and `__version__`.
+                    for symbol in api_module.__dict__.keys():
+                        if symbol.startswith("_") and symbol != "__version__":
+                            continue
+                        dest_file.write(f"from keras.api import {symbol}\n")
+                    # Skip the previous autogenerated block.
+                    for line in template_file:
+                        if "# END DO NOT EDIT." in line:
+                            break
+                dest_file.write(line)


 def build():
     # Backup the `keras/__init__.py` and restore it on error in api gen.
     root_path = os.path.dirname(os.path.abspath(__file__))
-    code_api_dir = os.path.join(root_path, package, "api")
-    code_init_fname = os.path.join(root_path, package, "__init__.py")
+    code_api_dir = os.path.join(root_path, PACKAGE, "api")
+    code_init_fname = os.path.join(root_path, PACKAGE, "__init__.py")
     # Create temp build dir
     build_dir = copy_source_to_build_directory(root_path)
-    build_api_dir = os.path.join(build_dir, package, "api")
-    build_init_fname = os.path.join(build_dir, package, "__init__.py")
+    build_api_dir = os.path.join(build_dir, PACKAGE, "api")
+    build_init_fname = os.path.join(build_dir, PACKAGE, "__init__.py")
     build_api_init_fname = os.path.join(build_api_dir, "__init__.py")
     try:
         os.chdir(build_dir)
@@ -195,12 +181,13 @@ def build():
         namex.generate_api_files(
             "keras", code_directory="src", target_directory="api"
         )
-        # Creates `keras/__init__.py` importing from `keras/api`
-        update_package_init(build_init_fname)
-        # Add __version__ to keras package
+        # Add __version__ to `api/`.
         export_version_string(build_api_init_fname)
         # Creates `_tf_keras` with full keras API
-        create_legacy_directory(package_dir=os.path.join(build_dir, package))
+        create_legacy_directory(package_dir=os.path.join(build_dir, PACKAGE))
+        # Update toplevel init with all `api/` imports.
+        api_module = importlib.import_module(f"{BUILD_DIR_NAME}.keras.api")
+        update_package_init(code_init_fname, build_init_fname, api_module)
         # Copy back the keras/api and keras/__init__.py from build directory
         if os.path.exists(code_api_dir):
             shutil.rmtree(code_api_dir)
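Note on the new mechanism: the rewritten `update_package_init` is template-driven. It copies the existing `keras/__init__.py` line by line and regenerates only the block between the `# DO NOT EDIT.` and `# END DO NOT EDIT.` markers, emitting one `from keras.api import <symbol>` line per public symbol of the passed `api_module`. A hedged sketch of what the regenerated region would look like (the marker lines match the code above; the symbol names are illustrative, not taken from this commit):

    # DO NOT EDIT.
    from keras.api import __version__
    from keras.api import Model
    from keras.api import layers
    from keras.api import ops
    # END DO NOT EDIT.

Everything outside the marker pair is copied from the template unchanged, so hand-written init code survives regeneration.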
5 changes: 3 additions & 2 deletions guides/training_with_built_in_methods.py
@@ -338,7 +338,7 @@ def result(self):

     def reset_state(self):
         # The state of the metric will be reset at the start of each epoch.
-        self.true_positives.assign(0.0)
+        self.true_positives.assign(0)


 model = get_uncompiled_model()
@@ -592,6 +592,7 @@ def call(self, targets, logits, sample_weights=None):
 The method `__getitem__` should return a complete batch.
 If you want to modify your dataset between epochs, you may implement `on_epoch_end`.
+You may also implement `on_epoch_begin` to be called at the start of each epoch.

 Here's a quick example:
 """
@@ -649,7 +650,7 @@ def __getitem__(self, idx):
   `True` if your dataset can be safely pickled.
 - `max_queue_size`: Maximum number of batches to keep in the queue
   when iterating over the dataset in a multithreaded or
-  multipricessed setting.
+  multiprocessed setting.
   You can reduce this value to reduce the CPU memory consumption of
   your dataset. It defaults to 10.
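The guide's own quick example is elided by the diff view above. As a minimal sketch of the `PyDataset` contract the surrounding prose describes (`__getitem__` returns one complete batch, `__len__` gives the number of batches per epoch, `on_epoch_end` is an optional hook), assuming NumPy-array inputs and a hypothetical class name:

    import numpy as np
    import keras

    class BatchedPyDataset(keras.utils.PyDataset):
        # Illustrative subclass; not the guide's own example.
        def __init__(self, x, y, batch_size=32, **kwargs):
            # **kwargs forwards `workers`, `use_multiprocessing`, `max_queue_size`.
            super().__init__(**kwargs)
            self.x, self.y, self.batch_size = x, y, batch_size

        def __len__(self):
            # Batches per epoch, counting the final partial batch.
            return int(np.ceil(len(self.x) / self.batch_size))

        def __getitem__(self, idx):
            # Return one complete batch.
            lo = idx * self.batch_size
            hi = min(lo + self.batch_size, len(self.x))
            return self.x[lo:hi], self.y[lo:hi]

        def on_epoch_end(self):
            # Optional hook: reshuffle between epochs.
            order = np.random.permutation(len(self.x))
            self.x, self.y = self.x[order], self.y[order]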
4 changes: 2 additions & 2 deletions integration_tests/dataset_tests/cifar100_test.py
@@ -26,9 +26,9 @@ def test_shapes_coarse_label_mode(self):
     def test_dtypes(self):
         (x_train, y_train), (x_test, y_test) = cifar100.load_data()
         self.assertEqual(x_train.dtype, np.uint8)
-        self.assertEqual(y_train.dtype, np.uint8)
+        self.assertEqual(y_train.dtype, np.int64)
         self.assertEqual(x_test.dtype, np.uint8)
-        self.assertEqual(y_test.dtype, np.uint8)
+        self.assertEqual(y_test.dtype, np.int64)

     def test_invalid_label_mode(self):
         with self.assertRaises(ValueError):
4 changes: 2 additions & 2 deletions integration_tests/dataset_tests/imdb_test.py
@@ -7,9 +7,9 @@
 class ImdbLoadDataTest(testing.TestCase):
     def test_load_data_default(self):
         (x_train, y_train), (x_test, y_test) = imdb.load_data()
-        self.assertIsInstance(x_train, list)
+        self.assertIsInstance(x_train, np.ndarray)
         self.assertIsInstance(y_train, np.ndarray)
-        self.assertIsInstance(x_test, list)
+        self.assertIsInstance(x_test, np.ndarray)
         self.assertIsInstance(y_test, np.ndarray)

         # Check lengths
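Taken together, these two test updates pin down the loader contract: CIFAR-100 returns uint8 image arrays with int64 labels, and IMDB returns NumPy arrays rather than Python lists. A quick standalone check of the same invariants, assuming an environment where `load_data` can download the archives on first use:

    import numpy as np
    from keras.datasets import cifar100, imdb

    (x_train, y_train), (x_test, y_test) = cifar100.load_data()
    assert x_train.dtype == np.uint8 and y_train.dtype == np.int64

    (x_train, y_train), (x_test, y_test) = imdb.load_data()
    assert isinstance(x_train, np.ndarray) and isinstance(y_train, np.ndarray)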