You signed in with another tab or window. Reload to refresh your session. You signed out in another tab or window. Reload to refresh your session. You switched accounts on another tab or window. Reload to refresh your session. Dismiss alert
{{ message }}
This repository has been archived by the owner on Apr 9, 2024. It is now read-only.
Traceback (most recent call last):
File "/shared/xinyang/slot-attention-video/savi/main.py", line 63, in <module>
app.run(main)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/absl/app.py", line 308, in run
_run_main(main, args)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/absl/app.py", line 254, in _run_main
sys.exit(main(argv))
File "/shared/xinyang/slot-attention-video/savi/main.py", line 59, in main
trainer.train_and_evaluate(FLAGS.config, FLAGS.workdir)
File "/shared/xinyang/slot-attention-video/savi/lib/trainer.py", line 202, in train_and_evaluate
state_vars, initial_params = init_model(rng)
File "/shared/xinyang/slot-attention-video/savi/lib/trainer.py", line 188, in init_model
initial_vars = model.init(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/traceback_util.py", line 162, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 1121, in init
_, v_out = self.init_with_output(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/traceback_util.py", line 162, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 1090, in init_with_output
return self.apply(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/traceback_util.py", line 162, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 1057, in apply
return apply(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/core/scope.py", line 691, in wrapper
y = fn(root, *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 1312, in scope_fn
return fn(module.clone(parent=scope), *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/transforms.py", line 602, in wrapped_fn
return prewrapped_fn(self, *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 318, in wrapped_module_method
return self._call_wrapped_method(fun, args, kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 602, in _call_wrapped_method
y = fun(self, *args, **kwargs)
File "/shared/xinyang/slot-attention-video/savi/modules/video.py", line 124, in __call__
encoded_inputs = self.encoder()(video, padding_mask, train) # pytype: disable=not-callable
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/transforms.py", line 602, in wrapped_fn
return prewrapped_fn(self, *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/transforms.py", line 296, in wrapped_fn
return trafo_fn(module_scopes, *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/core/lift.py", line 195, in wrapper
y, out_variable_groups_xs_t = fn(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/core/lift.py", line 426, in inner
return mapped(variable_groups, rng_groups, args)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/traceback_util.py", line 162, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/api.py", line 1587, in vmap_f
out_flat = batching.batch(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/linear_util.py", line 166, in call_wrapped
ans = self.f(*args, **dict(self.params, **kwargs))
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/core/lift.py", line 423, in mapped
y = fn(scope, *args)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/transforms.py", line 289, in core_fn
res = prewrapped_fn(cloned, *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 318, in wrapped_module_method
return self._call_wrapped_method(fun, args, kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 602, in _call_wrapped_method
y = fun(self, *args, **kwargs)
File "/shared/xinyang/slot-attention-video/savi/modules/video.py", line 172, in __call__
x = self.backbone()(inputs, train=train)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/transforms.py", line 602, in wrapped_fn
return prewrapped_fn(self, *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 318, in wrapped_module_method
return self._call_wrapped_method(fun, args, kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 602, in _call_wrapped_method
y = fun(self, *args, **kwargs)
File "/shared/xinyang/slot-attention-video/savi/modules/resnet.py", line 181, in __call__
x = nn.Conv(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/transforms.py", line 602, in wrapped_fn
return prewrapped_fn(self, *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 318, in wrapped_module_method
return self._call_wrapped_method(fun, args, kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 602, in _call_wrapped_method
y = fun(self, *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/linear.py", line 282, in __call__
y = lax.conv_general_dilated(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/lax/convolution.py", line 150, in conv_general_dilated
return conv_general_dilated_p.bind(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/core.py", line 286, in bind
return self.bind_with_trace(find_top_trace(args), args, params)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/core.py", line 289, in bind_with_trace
out = trace.process_primitive(self, map(trace.full_raise, args), params)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/interpreters/batching.py", line 198, in process_primitive
val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/lax/convolution.py", line 599, in _conv_general_dilated_batch_rule
out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/lax/convolution.py", line 150, in conv_general_dilated
return conv_general_dilated_p.bind(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/core.py", line 286, in bind
return self.bind_with_trace(find_top_trace(args), args, params)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/core.py", line 289, in bind_with_trace
out = trace.process_primitive(self, map(trace.full_raise, args), params)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/core.py", line 611, in process_primitive
return primitive.impl(*tracers, **params)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/dispatch.py", line 92, in apply_primitive
compiled_fun = xla_primitive_callable(prim, *unsafe_map(arg_spec, args),
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/util.py", line 210, in wrapper
return cached(config._trace_context(), *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/util.py", line 203, in cached
return f(*args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/dispatch.py", line 111, in xla_primitive_callable
compiled = _xla_callable_uncached(lu.wrap_init(prim_fun), device, None,
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/dispatch.py", line 169, in _xla_callable_uncached
return lower_xla_callable(fun, device, backend, name, donated_invars,
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/dispatch.py", line 528, in compile
self._executable = XlaCompiledComputation.from_xla_computation(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/dispatch.py", line 614, in from_xla_computation
compiled = compile_or_get_cached(backend, xla_computation, options)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/dispatch.py", line 583, in compile_or_get_cached
return backend_compile(backend, computation, compile_options)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/profiler.py", line 206, in wrapper
return func(*args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/dispatch.py", line 537, in backend_compile
return backend.compile(built_c, compile_options=options)
jax._src.traceback_util.UnfilteredStackTrace: RuntimeError: UNKNOWN: Failed to determine best cudnn convolution algorithm for:
%cudnn-conv = (f32[6,128,128,64]{2,1,3,0}, u8[0]{0}) custom-call(f32[6,128,128,3]{2,1,3,0} %copy, f32[3,3,3,64]{1,0,2,3} %copy.1), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward", metadata={op_name="jit(conv_general_dilated)/jit(main)/conv_general_dilated[window_strides=(1, 1) padding=((1, 1), (1, 1)) lhs_dilation=(1, 1) rhs_dilation=(1, 1) dimension_numbers=ConvDimensionNumbers(lhs_spec=(0, 3, 1, 2), rhs_spec=(3, 2, 0, 1), out_spec=(0, 3, 1, 2)) feature_group_count=1 batch_group_count=1 lhs_shape=(6, 128, 128, 3) rhs_shape=(3, 3, 3, 64) precision=None preferred_element_type=None]" source_file="/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/linear.py" source_line=282}, backend_config="{"conv_result_scale":1,"activation_mode":"0","side_input_scale":0}"
Original error: UNIMPLEMENTED: DNN library is not found.
To ignore this failure and try to use a fallback algorithm (which may have suboptimal performance), use XLA_FLAGS=--xla_gpu_strict_conv_algorithm_picker=false. Please also file a bug for the root cause of failing autotuning.
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/shared/xinyang/slot-attention-video/savi/main.py", line 63, in <module>
app.run(main)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/absl/app.py", line 308, in run
_run_main(main, args)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/absl/app.py", line 254, in _run_main
sys.exit(main(argv))
File "/shared/xinyang/slot-attention-video/savi/main.py", line 59, in main
trainer.train_and_evaluate(FLAGS.config, FLAGS.workdir)
File "/shared/xinyang/slot-attention-video/savi/lib/trainer.py", line 202, in train_and_evaluate
state_vars, initial_params = init_model(rng)
File "/shared/xinyang/slot-attention-video/savi/lib/trainer.py", line 188, in init_model
initial_vars = model.init(
File "/shared/xinyang/slot-attention-video/savi/modules/video.py", line 124, in __call__
encoded_inputs = self.encoder()(video, padding_mask, train) # pytype: disable=not-callable
File "/shared/xinyang/slot-attention-video/savi/modules/video.py", line 172, in __call__
x = self.backbone()(inputs, train=train)
File "/shared/xinyang/slot-attention-video/savi/modules/resnet.py", line 181, in __call__
x = nn.Conv(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/linear.py", line 282, in __call__
y = lax.conv_general_dilated(
RuntimeError: UNKNOWN: Failed to determine best cudnn convolution algorithm for:
%cudnn-conv = (f32[6,128,128,64]{2,1,3,0}, u8[0]{0}) custom-call(f32[6,128,128,3]{2,1,3,0} %copy, f32[3,3,3,64]{1,0,2,3} %copy.1), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward", metadata={op_name="jit(conv_general_dilated)/jit(main)/conv_general_dilated[window_strides=(1, 1) padding=((1, 1), (1, 1)) lhs_dilation=(1, 1) rhs_dilation=(1, 1) dimension_numbers=ConvDimensionNumbers(lhs_spec=(0, 3, 1, 2), rhs_spec=(3, 2, 0, 1), out_spec=(0, 3, 1, 2)) feature_group_count=1 batch_group_count=1 lhs_shape=(6, 128, 128, 3) rhs_shape=(3, 3, 3, 64) precision=None preferred_element_type=None]" source_file="/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/linear.py" source_line=282}, backend_config="{"conv_result_scale":1,"activation_mode":"0","side_input_scale":0}"
Original error: UNIMPLEMENTED: DNN library is not found.
To ignore this failure and try to use a fallback algorithm (which may have suboptimal performance), use XLA_FLAGS=--xla_gpu_strict_conv_algorithm_picker=false. Please also file a bug for the root cause of failing autotuning.
The text was updated successfully, but these errors were encountered:
Sign up for free to subscribe to this conversation on GitHub.
Already have an account?
Sign in.
Traceback (most recent call last):
File "/shared/xinyang/slot-attention-video/savi/main.py", line 63, in <module>
app.run(main)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/absl/app.py", line 308, in run
_run_main(main, args)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/absl/app.py", line 254, in _run_main
sys.exit(main(argv))
File "/shared/xinyang/slot-attention-video/savi/main.py", line 59, in main
trainer.train_and_evaluate(FLAGS.config, FLAGS.workdir)
File "/shared/xinyang/slot-attention-video/savi/lib/trainer.py", line 202, in train_and_evaluate
state_vars, initial_params = init_model(rng)
File "/shared/xinyang/slot-attention-video/savi/lib/trainer.py", line 188, in init_model
initial_vars = model.init(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/traceback_util.py", line 162, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 1121, in init
_, v_out = self.init_with_output(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/traceback_util.py", line 162, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 1090, in init_with_output
return self.apply(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/traceback_util.py", line 162, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 1057, in apply
return apply(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/core/scope.py", line 691, in wrapper
y = fn(root, *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 1312, in scope_fn
return fn(module.clone(parent=scope), *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/transforms.py", line 602, in wrapped_fn
return prewrapped_fn(self, *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 318, in wrapped_module_method
return self._call_wrapped_method(fun, args, kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 602, in _call_wrapped_method
y = fun(self, *args, **kwargs)
File "/shared/xinyang/slot-attention-video/savi/modules/video.py", line 124, in __call__
encoded_inputs = self.encoder()(video, padding_mask, train) # pytype: disable=not-callable
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/transforms.py", line 602, in wrapped_fn
return prewrapped_fn(self, *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/transforms.py", line 296, in wrapped_fn
return trafo_fn(module_scopes, *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/core/lift.py", line 195, in wrapper
y, out_variable_groups_xs_t = fn(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/core/lift.py", line 426, in inner
return mapped(variable_groups, rng_groups, args)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/traceback_util.py", line 162, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/api.py", line 1587, in vmap_f
out_flat = batching.batch(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/linear_util.py", line 166, in call_wrapped
ans = self.f(*args, **dict(self.params, **kwargs))
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/core/lift.py", line 423, in mapped
y = fn(scope, *args)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/transforms.py", line 289, in core_fn
res = prewrapped_fn(cloned, *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 318, in wrapped_module_method
return self._call_wrapped_method(fun, args, kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 602, in _call_wrapped_method
y = fun(self, *args, **kwargs)
File "/shared/xinyang/slot-attention-video/savi/modules/video.py", line 172, in __call__
x = self.backbone()(inputs, train=train)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/transforms.py", line 602, in wrapped_fn
return prewrapped_fn(self, *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 318, in wrapped_module_method
return self._call_wrapped_method(fun, args, kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 602, in _call_wrapped_method
y = fun(self, *args, **kwargs)
File "/shared/xinyang/slot-attention-video/savi/modules/resnet.py", line 181, in __call__
x = nn.Conv(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/transforms.py", line 602, in wrapped_fn
return prewrapped_fn(self, *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 318, in wrapped_module_method
return self._call_wrapped_method(fun, args, kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/module.py", line 602, in _call_wrapped_method
y = fun(self, *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/linear.py", line 282, in __call__
y = lax.conv_general_dilated(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/lax/convolution.py", line 150, in conv_general_dilated
return conv_general_dilated_p.bind(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/core.py", line 286, in bind
return self.bind_with_trace(find_top_trace(args), args, params)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/core.py", line 289, in bind_with_trace
out = trace.process_primitive(self, map(trace.full_raise, args), params)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/interpreters/batching.py", line 198, in process_primitive
val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/lax/convolution.py", line 599, in _conv_general_dilated_batch_rule
out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/lax/convolution.py", line 150, in conv_general_dilated
return conv_general_dilated_p.bind(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/core.py", line 286, in bind
return self.bind_with_trace(find_top_trace(args), args, params)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/core.py", line 289, in bind_with_trace
out = trace.process_primitive(self, map(trace.full_raise, args), params)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/core.py", line 611, in process_primitive
return primitive.impl(*tracers, **params)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/dispatch.py", line 92, in apply_primitive
compiled_fun = xla_primitive_callable(prim, *unsafe_map(arg_spec, args),
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/util.py", line 210, in wrapper
return cached(config._trace_context(), *args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/util.py", line 203, in cached
return f(*args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/dispatch.py", line 111, in xla_primitive_callable
compiled = _xla_callable_uncached(lu.wrap_init(prim_fun), device, None,
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/dispatch.py", line 169, in _xla_callable_uncached
return lower_xla_callable(fun, device, backend, name, donated_invars,
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/dispatch.py", line 528, in compile
self._executable = XlaCompiledComputation.from_xla_computation(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/dispatch.py", line 614, in from_xla_computation
compiled = compile_or_get_cached(backend, xla_computation, options)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/dispatch.py", line 583, in compile_or_get_cached
return backend_compile(backend, computation, compile_options)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/profiler.py", line 206, in wrapper
return func(*args, **kwargs)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/jax/_src/dispatch.py", line 537, in backend_compile
return backend.compile(built_c, compile_options=options)
jax._src.traceback_util.UnfilteredStackTrace: RuntimeError: UNKNOWN: Failed to determine best cudnn convolution algorithm for:
%cudnn-conv = (f32[6,128,128,64]{2,1,3,0}, u8[0]{0}) custom-call(f32[6,128,128,3]{2,1,3,0} %copy, f32[3,3,3,64]{1,0,2,3} %copy.1), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward", metadata={op_name="jit(conv_general_dilated)/jit(main)/conv_general_dilated[window_strides=(1, 1) padding=((1, 1), (1, 1)) lhs_dilation=(1, 1) rhs_dilation=(1, 1) dimension_numbers=ConvDimensionNumbers(lhs_spec=(0, 3, 1, 2), rhs_spec=(3, 2, 0, 1), out_spec=(0, 3, 1, 2)) feature_group_count=1 batch_group_count=1 lhs_shape=(6, 128, 128, 3) rhs_shape=(3, 3, 3, 64) precision=None preferred_element_type=None]" source_file="/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/linear.py" source_line=282}, backend_config="{"conv_result_scale":1,"activation_mode":"0","side_input_scale":0}"
Original error: UNIMPLEMENTED: DNN library is not found.
To ignore this failure and try to use a fallback algorithm (which may have suboptimal performance), use XLA_FLAGS=--xla_gpu_strict_conv_algorithm_picker=false. Please also file a bug for the root cause of failing autotuning.
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/shared/xinyang/slot-attention-video/savi/main.py", line 63, in <module>
app.run(main)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/absl/app.py", line 308, in run
_run_main(main, args)
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/absl/app.py", line 254, in _run_main
sys.exit(main(argv))
File "/shared/xinyang/slot-attention-video/savi/main.py", line 59, in main
trainer.train_and_evaluate(FLAGS.config, FLAGS.workdir)
File "/shared/xinyang/slot-attention-video/savi/lib/trainer.py", line 202, in train_and_evaluate
state_vars, initial_params = init_model(rng)
File "/shared/xinyang/slot-attention-video/savi/lib/trainer.py", line 188, in init_model
initial_vars = model.init(
File "/shared/xinyang/slot-attention-video/savi/modules/video.py", line 124, in __call__
encoded_inputs = self.encoder()(video, padding_mask, train) # pytype: disable=not-callable
File "/shared/xinyang/slot-attention-video/savi/modules/video.py", line 172, in __call__
x = self.backbone()(inputs, train=train)
File "/shared/xinyang/slot-attention-video/savi/modules/resnet.py", line 181, in __call__
x = nn.Conv(
File "/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/linear.py", line 282, in __call__
y = lax.conv_general_dilated(
RuntimeError: UNKNOWN: Failed to determine best cudnn convolution algorithm for:
%cudnn-conv = (f32[6,128,128,64]{2,1,3,0}, u8[0]{0}) custom-call(f32[6,128,128,3]{2,1,3,0} %copy, f32[3,3,3,64]{1,0,2,3} %copy.1), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward", metadata={op_name="jit(conv_general_dilated)/jit(main)/conv_general_dilated[window_strides=(1, 1) padding=((1, 1), (1, 1)) lhs_dilation=(1, 1) rhs_dilation=(1, 1) dimension_numbers=ConvDimensionNumbers(lhs_spec=(0, 3, 1, 2), rhs_spec=(3, 2, 0, 1), out_spec=(0, 3, 1, 2)) feature_group_count=1 batch_group_count=1 lhs_shape=(6, 128, 128, 3) rhs_shape=(3, 3, 3, 64) precision=None preferred_element_type=None]" source_file="/home/xinyang/miniconda/envs/savi/lib/python3.8/site-packages/flax/linen/linear.py" source_line=282}, backend_config="{"conv_result_scale":1,"activation_mode":"0","side_input_scale":0}"
Original error: UNIMPLEMENTED: DNN library is not found.
To ignore this failure and try to use a fallback algorithm (which may have suboptimal performance), use XLA_FLAGS=--xla_gpu_strict_conv_algorithm_picker=false. Please also file a bug for the root cause of failing autotuning.
The text was updated successfully, but these errors were encountered: