※ This page is English-only
※ Select "Qwiklabs" as the Training platform
※ The registration deadline on this page has been extended to 23:59 on May 31, 2020 (US Pacific Daylight Time)
※ Even if you do not register for ML Study Jams, you can still take the QWIKLABS courses free of charge by signing up for "Start your learning journey with Google Cloud today".
※ Instructions for joining the ML Study Jams Slack will be emailed to those who register.
Element.innerHTML
Content-Security-Policy: require-trusted-types-for 'script'; report-uri //my-csp-endpoint.example
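With the require-trusted-types-for 'script' directive above enforced, assigning a plain string to an injection sink such as Element.innerHTML throws; the value has to come from a Trusted Types policy. A minimal sketch, assuming DOMPurify is loaded for the sanitizing step (the policy name and userInput are illustrative):

// Create a policy that turns untrusted strings into TrustedHTML.
const policy = trustedTypes.createPolicy('myPolicy', {
  createHTML: (input) => DOMPurify.sanitize(input), // any sanitizer works here
});

// Assigning TrustedHTML no longer violates the CSP directive.
document.querySelector('#app').innerHTML = policy.createHTML(userInput);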
Cross-Origin-Embedder-Policy
Cross-Origin-Opener-Policy
Performance.measureMemory()
document.domain
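Sending Cross-Origin-Opener-Policy: same-origin together with Cross-Origin-Embedder-Policy: require-corp opts a page into cross-origin isolation, the state that gates Performance.measureMemory() (an origin trial at the time; note that pages which set document.domain cannot be isolated). A hedged sketch of the origin-trial shape; the result fields may have changed since:

// Only meaningful when the COOP/COEP headers above are in effect.
if (self.crossOriginIsolated && performance.measureMemory) {
  // Origin-trial API: resolves with an estimate of the page's memory use.
  const result = await performance.measureMemory();
  console.log('Estimated memory (bytes):', result.bytes);
}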
Scheduler.postTask()
user-blocking
user-visible
background
TaskController
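Scheduler.postTask() takes a callback and an optional priority: user-blocking, user-visible (the default), or background. A TaskController supplies a signal for cancelling or reprioritizing a pending task. A sketch against the origin-trial API (the constructor shape varied during the trial; renderLater and doWork are placeholder functions):

// Queue low-priority work; 'user-visible' is the default priority.
scheduler.postTask(() => renderLater(), { priority: 'background' });

// A TaskController lets you cancel (or reprioritize) a queued task.
const controller = new TaskController({ priority: 'user-blocking' });
scheduler.postTask(() => doWork(), { signal: controller.signal });

// Abort the task if it has not run yet.
controller.abort();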
MediaStreamTrack
RTCPeerConnection
<mark>
-webkit-appearance
auto
appearance
<image>
<video>
<canvas>
contain-intrinsic-size
contain: size
prefers-color-scheme
display
inline-grid
grid
inline-flex
flex
<button>
module
font-display
optional
font-display: optional
IDBDatabase.transaction()
durability
"default"
"strict"
"relaxed"
IDBTransaction.durability
const iDBTransaction = database.transaction(
  ["storeName"],
  "readwrite",
  { durability: "relaxed" }
);
type
time
<input>
@supports
@supports selector(::before) { div { background: green; } }
canTrickleIceCandidates
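canTrickleIceCandidates reports whether the remote peer supports trickled ICE candidates; it is null until a remote description has been set. A small sketch, assuming remoteOffer arrives over your signaling channel:

const pc = new RTCPeerConnection();
await pc.setRemoteDescription(remoteOffer); // property is null before this point

if (pc.canTrickleIceCandidates) {
  // Send each ICE candidate to the peer as it is gathered.
} else {
  // Wait for gathering to finish and send the complete SDP instead.
}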
RTCRtpSender.setParameters()
RTCRtpEncodingParameters.maxFramerate
maxFramerate
RTCRtpSendParameters
degradationPreference
"maintain-framerate"
"maintain-resolution"
"balanced"
Intl.DateTimeFormat
fractionalSecondDigits
DateTimeFormat
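fractionalSecondDigits tells the formatter how many digits of fractional seconds to emit (1 to 3). For example:

const formatter = new Intl.DateTimeFormat('en', {
  minute: 'numeric',
  second: 'numeric',
  fractionalSecondDigits: 3, // 1, 2, or 3 digits
});

// Logs something like "23:45.678".
console.log(formatter.format(new Date(2020, 4, 21, 10, 23, 45, 678)));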
'allow-downloads'
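The 'allow-downloads' token re-enables downloads inside a sandboxed iframe, which are otherwise blocked. Since an iframe's sandbox attribute is exposed as a DOMTokenList, the flag can be set from script as well as in markup; a sketch with an illustrative URL:

const frame = document.createElement('iframe');
frame.src = 'https://third-party.example/widget'; // illustrative URL
frame.sandbox.add('allow-scripts', 'allow-downloads');
document.body.appendChild(frame);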
import * as facemesh from '@tensorflow-models/facemesh';
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-core"></script> <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-converter"></script> <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/facemesh"></script>
// Load the MediaPipe facemesh model assets.
const model = await facemesh.load();

// Pass in a video stream to the model to obtain
// an array of detected faces from the MediaPipe graph.
const video = document.querySelector("video");
const faces = await model.estimateFaces(video);

// Each face object contains a `scaledMesh` property,
// which is an array of 468 landmarks.
faces.forEach(face => console.log(face.scaledMesh));
estimateFaces
{
  faceInViewConfidence: 1,
  boundingBox: {
    topLeft: [232.28, 145.26],    // [x, y]
    bottomRight: [449.75, 308.36],
  },
  mesh: [
    [92.07, 119.49, -17.54],      // [x, y, z]
    [91.97, 102.52, -30.54],
    ...
  ],
  scaledMesh: [
    [322.32, 297.58, -17.54],
    [322.18, 263.95, -30.54]
  ],
  annotations: {
    silhouette: [
      [326.19, 124.72, -3.82],
      [351.06, 126.30, -3.00],
      ...
    ],
    ...
  }
}
import * as handpose from '@tensorflow-models/handpose';
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-core"></script> <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-converter"></script> <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/handpose"></script>
// Load the MediaPipe handpose model assets.
const model = await handpose.load();

// Pass in a video stream to the model to obtain
// a prediction from the MediaPipe graph.
const video = document.querySelector("video");
const hands = await model.estimateHands(video);

// Each hand object contains a `landmarks` property,
// which is an array of 21 3-D landmarks.
hands.forEach(hand => console.log(hand.landmarks));
facemesh
{
  handInViewConfidence: 1,
  boundingBox: {
    topLeft: [162.91, -17.42],    // [x, y]
    bottomRight: [548.56, 368.23],
  },
  landmarks: [
    [472.52, 298.59, 0.00],       // [x, y, z]
    [412.80, 315.64, -6.18],
    ...
  ],
  annotations: {
    indexFinger: [
      [412.80, 315.64, -6.18],
      [350.02, 298.38, -7.14],
      ...
    ],
    ...
  }
}
Interpreter
let coreMLDelegate = CoreMLDelegate()
let interpreter = try Interpreter(modelPath: modelPath,
                                  delegates: [coreMLDelegate])
# Load your custom dataset
data = ImageClassifierDataLoader.from_folder(flower_path)
train_data, test_data = data.split(0.9)

# Customize the pre-trained TensorFlow model
model = image_classifier.create(train_data, model_spec=efficientnet_lite0_spec)

# Evaluate the model
loss, accuracy = model.evaluate(test_data)

# Export as TensorFlow Lite model.
model.export('image_classifier.tflite', 'image_labels.txt')
model_spec
assets
<!-- Load TensorFlow.js. This is required to use the qna model. --> <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"> </script> <!-- Load the qna model. --> <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/qna"> </script> <!-- Place your code in the script tag below. You can also use an external .js file --> <script> // Notice there is no 'import' statement. 'qna' and 'tf' is // available on the index-page because of the script tag above. // Load the model. qna.load().then(model => { model.findAnswers(question, passage).then(answers => { console.log('Answers: ', answers); }); }); </script>
[
  {
    text: string,
    score: number,
    startIndex: number,
    endIndex: number
  }
]