diff --git a/pytensor/link/c/cmodule.py b/pytensor/link/c/cmodule.py index f1f098edbf..13ed2b3d9f 100644 --- a/pytensor/link/c/cmodule.py +++ b/pytensor/link/c/cmodule.py @@ -2947,6 +2947,13 @@ def check_libs( except Exception as e: _logger.debug(e) _logger.debug("Failed to identify blas ldflags. Will leave them empty.") + warnings.warn( + "PyTensor could not link to a BLAS installation. Operations that might benefit from BLAS will be severely degraded.\n" + "This usually happens when PyTensor is installed via pip. We recommend it be installed via conda/mamba/pixi instead.\n" + "Alternatively, you can use an experimental backend such as Numba or JAX that perform their own BLAS optimizations, " + "by setting `pytensor.config.mode = 'NUMBA'` or passing `mode='NUMBA'` when compiling a PyTensor function.", + UserWarning, + ) return "" diff --git a/pytensor/tensor/blas_headers.py b/pytensor/tensor/blas_headers.py index 2806bfc41d..645f04bfb3 100644 --- a/pytensor/tensor/blas_headers.py +++ b/pytensor/tensor/blas_headers.py @@ -742,6 +742,11 @@ def blas_header_text(): blas_code = "" if not config.blas__ldflags: + # This code can only be reached by compiling a function with a manually specified GEMM Op. + # Normal PyTensor usage will end up with Dot22 or Dot22Scalar instead, + # which opt out of C-code completely if the blas flags are missing + _logger.warning("Using NumPy C-API based implementation for BLAS functions.") + # Include the Numpy version implementation of [sd]gemm_. current_filedir = Path(__file__).parent blas_common_filepath = current_filedir / "c_code/alt_blas_common.h" @@ -1003,10 +1008,6 @@ def blas_header_text(): return header + blas_code -if not config.blas__ldflags: - _logger.warning("Using NumPy C-API based implementation for BLAS functions.") - - def mkl_threads_text(): """C header for MKL threads interface""" header = """