tfjs: Not able to Load Local Models of TensorFlow React Native Below is the code attached #3
I followed this tutorial and the official documentation. It was helpful and works perfectly with the bundled TensorFlow models, but when I try to load a local model in componentDidMount it fails with: ERROR 15:43 Unable to resolve "../assets/model.json" from "App.js" ERROR 15:43 Building JavaScript bundle: error
Even if I change the path like this
const modelJson = require('./assets/model.json');
const modelWeights = require('./assets/det.bin');
It again throws error with the updated path.
import React from 'react'
import {
  StyleSheet,
  Text,
  View,
  ActivityIndicator,
  StatusBar,
  Image,
  TouchableOpacity
} from 'react-native'
import * as tf from '@tensorflow/tfjs'
import { fetch, bundleResourceIO } from '@tensorflow/tfjs-react-native'
import * as mobilenet from '@tensorflow-models/mobilenet'
import * as jpeg from 'jpeg-js'
import * as ImagePicker from 'expo-image-picker'
import Constants from 'expo-constants'
import * as Permissions from 'expo-permissions'
class App extends React.Component {
state = {
isTfReady: false,
isModelReady: false,
predictions: null,
image: null
}
async componentDidMount() {
await tf.ready()
this.setState({
isTfReady: true
})
// Get reference to bundled model assets
const modelJson = require('../assets/model.json');
const modelWeights = require('../assets/det.bin');
this.model = await tf.loadLayersModel(
bundleResourceIO(modelJson, modelWeights));
//this.model = await mobilenet.load()
this.setState({ isModelReady: true })
this.getPermissionAsync()
}
getPermissionAsync = async () => {
if (Constants.platform.ios) {
const { status } = await Permissions.askAsync(Permissions.CAMERA_ROLL)
if (status !== 'granted') {
alert('Sorry, we need camera roll permissions to make this work!')
}
}
}
imageToTensor(rawImageData) {
const TO_UINT8ARRAY = true
const { width, height, data } = jpeg.decode(rawImageData, TO_UINT8ARRAY)
// Drop the alpha channel info for mobilenet
const buffer = new Uint8Array(width * height * 3)
let offset = 0 // offset into original data
for (let i = 0; i < buffer.length; i += 3) {
buffer[i] = data[offset]
buffer[i + 1] = data[offset + 1]
buffer[i + 2] = data[offset + 2]
offset += 4
}
return tf.tensor3d(buffer, [height, width, 3])
}
classifyImage = async () => {
try {
const imageAssetPath = Image.resolveAssetSource(this.state.image)
const response = await fetch(imageAssetPath.uri, {}, { isBinary: true })
const rawImageData = await response.arrayBuffer()
const imageTensor = this.imageToTensor(rawImageData)
const predictions = await this.model.classify(imageTensor)
this.setState({ predictions })
console.log(predictions)
} catch (error) {
console.log(error)
}
}
selectImage = async () => {
try {
let response = await ImagePicker.launchImageLibraryAsync({
mediaTypes: ImagePicker.MediaTypeOptions.All,
allowsEditing: true,
aspect: [4, 3]
})
if (!response.cancelled) {
const source = { uri: response.uri }
this.setState({ image: source })
this.classifyImage()
}
} catch (error) {
console.log(error)
}
}
renderPrediction = prediction => {
return (
<Text key={prediction.className} style={styles.text}>
{prediction.className}
</Text>
)
}
render() {
const { isTfReady, isModelReady, predictions, image } = this.state
return (
<View style={styles.container}>
<StatusBar barStyle='light-content' />
<View style={styles.loadingContainer}>
<Text style={styles.text}>
TFJS ready? {isTfReady ? <Text>✅</Text> : ''}
</Text>
<View style={styles.loadingModelContainer}>
<Text style={styles.text}>Model ready? </Text>
{isModelReady ? (
<Text style={styles.text}>✅</Text>
) : (
<ActivityIndicator size='small' />
)}
</View>
</View>
<TouchableOpacity
style={styles.imageWrapper}
onPress={isModelReady ? this.selectImage : undefined}>
{image && <Image source={image} style={styles.imageContainer} />}
{isModelReady && !image && (
<Text style={styles.transparentText}>Tap to choose image</Text>
)}
</TouchableOpacity>
<View style={styles.predictionWrapper}>
{isModelReady && image && (
<Text style={styles.text}>
Predictions: {predictions ? '' : 'Predicting...'}
</Text>
)}
{isModelReady &&
predictions &&
predictions.map(p => this.renderPrediction(p))}
</View>
<View style={styles.footer}>
<Text style={styles.poweredBy}>Powered by:</Text>
<Image source={require('./assets/tfjs.jpg')} style={styles.tfLogo} />
</View>
</View>
)
}
}
// Screen-wide style sheet. Grouped top-to-bottom in render order:
// loading indicators, image picker frame, predictions list, footer.
const styles = StyleSheet.create({
  // Dark full-screen background; children centered horizontally.
  container: {
    flex: 1,
    backgroundColor: '#171f24',
    alignItems: 'center'
  },
  // Wrapper for the "TFJS ready?" / "Model ready?" status lines.
  loadingContainer: {
    marginTop: 80,
    justifyContent: 'center'
  },
  // Default light text used throughout the screen.
  text: {
    color: '#ffffff',
    fontSize: 16
  },
  // Puts the "Model ready?" label and its spinner on one row.
  loadingModelContainer: {
    flexDirection: 'row',
    marginTop: 10
  },
  // Dashed tap target that frames the selected image.
  imageWrapper: {
    width: 280,
    height: 280,
    padding: 10,
    borderColor: '#cf667f',
    borderWidth: 5,
    borderStyle: 'dashed',
    marginTop: 40,
    marginBottom: 10,
    position: 'relative',
    justifyContent: 'center',
    alignItems: 'center'
  },
  // Absolutely positioned inside imageWrapper, inset by the 10px padding.
  imageContainer: {
    width: 250,
    height: 250,
    position: 'absolute',
    top: 10,
    left: 10,
    bottom: 10,
    right: 10
  },
  // Fixed-height area so the layout doesn't jump as predictions appear.
  predictionWrapper: {
    height: 100,
    width: '100%',
    flexDirection: 'column',
    alignItems: 'center'
  },
  // Dimmed hint text shown before an image is chosen.
  transparentText: {
    color: '#ffffff',
    opacity: 0.7
  },
  footer: {
    marginTop: 40
  },
  poweredBy: {
    fontSize: 20,
    color: '#e69e34',
    marginBottom: 6
  },
  // TF.js logo image in the footer.
  tfLogo: {
    width: 125,
    height: 70
  }
})
export default App
About this issue
- Original URL
- State: closed
- Created 4 years ago
- Comments: 15 (4 by maintainers)
I am facing a similar issue; my problem is that my converted model produced multiple .bin files.
Does anyone know how to pass multiple model weight files to bundleResourceIO and use loadLayersModel with multiple .bin files?
@alihassan711 @tafsiri Hello, I am fairly new to TensorFlow and have some questions — please help me out. How did you convert your pretrained models? Did you use tensorflowjs_converter? If not, what did you use? I am trying to convert a pretrained model, which is generally in .pb format (downloaded from here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md ), and I have seen many examples where model.json and .bin files are used. Can somebody provide a link to a tutorial, or some tips, for converting models into .bin files?