1 with g.device('/device:GPU:0'): 2 # All operations constructed in this context will be placed 3 # on GPU 0. 4 with g.device(None): 5 # All operations constructed in this context will have no 6 # assigned device. 7 8 # Defines a function from `Operation` to device string. 9 def matmul_on_gpu(n): 10 if n.type == "MatMul": 11 return "/device:GPU:0" 12 else: 13 return "/cpu:0" 14 15 with g.device(matmul_on_gpu): 16 # All operations of type "MatMul" constructed in this context 17 # will be placed on GPU 0; all other operations will be placed 18 # on CPU 0.
1 # Place the operations on device "GPU:0" in the "ps" job. 2 device_spec = DeviceSpec(job="ps", device_type="GPU", device_index=0) 3 with tf.device(device_spec): 4 # Both my_var and squared_var will be placed on /job:ps/device:GPU:0. 5 my_var = tf.Variable(..., name="my_variable") 6 squared_var = tf.square(my_var) 7 若是一個DeviceSpec被部分指定,將根據定義的範圍與其餘DeviceSpecs合併,在內部內定義的DeviceSpec組件優先於在外層內定義的組件。 8 with tf.device(DeviceSpec(job="train", )): 9 with tf.device(DeviceSpec(job="ps", device_type="GPU", device_index=0)): 10 # Nodes created here will be assigned to /job:ps/device:GPU:0. 11 with tf.device(DeviceSpec(device_type="GPU", device_index=1)): 12 # Nodes created here will be assigned to /job:train/device:GPU:1.