import tensorflow as tf

physical_devices = tf.config.list_physical_devices("GPU")  # List all available GPUs
if len(physical_devices) > 0:
    # If at least one GPU is present, enable memory growth so TensorFlow allocates
    # GPU memory as needed instead of claiming it all at once
    tf.config.experimental.set_memory_growth(physical_devices[0], enable=True)
# CPU usage distribution (tries to run 4 tasks concurrently)
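# A minimal sketch of what the CPU comment above likely refers to, assuming the
# tf.config.threading API; the thread count of 4 is taken from the comment, not
# from any code in the original.
tf.config.threading.set_inter_op_parallelism_threads(4)  # Up to 4 independent ops can run concurrently
tf.config.threading.set_intra_op_parallelism_threads(4)  # Each op can use up to 4 threads internally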