whisper : disable CUDA mel + fix FFMPEG
ggerganov committed Jun 26, 2024
1 parent 3efedb9 commit dc8cc2d
Showing 6 changed files with 68 additions and 41 deletions.
35 changes: 30 additions & 5 deletions examples/CMakeLists.txt
@@ -11,7 +11,7 @@ if (WHISPER_SDL2)
string(STRIP "${SDL2_LIBRARIES}" SDL2_LIBRARIES)

message(STATUS "SDL2_INCLUDE_DIRS = ${SDL2_INCLUDE_DIRS}")
message(STATUS "SDL2_LIBRARIES = ${SDL2_LIBRARIES}")
message(STATUS "SDL2_LIBRARIES = ${SDL2_LIBRARIES}")
endif()

if (WHISPER_CLBLAST)
@@ -22,10 +22,35 @@ endif()

set(TARGET common)

unset(COMMON_EXTRA_LIBS)

if (WHISPER_FFMPEG)
# As of cmake 3.27, there is no official cmake support for FindFFmpeg.
# Consequently we added a FindFFmpeg.cmake script in the cmake subfolder:
# whisper.cpp does not need the full ffmpeg libs, just AVFORMAT AVCODEC AVUTIL SWRESAMPLE
# libswresample performs highly optimized audio resampling, rematrixing and sample format conversion operations
# libavcodec provides a generic encoding/decoding framework and contains multiple decoders and encoders for audio, video and subtitle streams, and several bitstream filters.
# libavformat provides a generic framework for multiplexing and demultiplexing (muxing and demuxing) audio, video and subtitle streams.
find_package(FFmpeg REQUIRED)

if (NOT ${FFMPEG_FOUND})
message(FATAL_ERROR "Cannot find ffmpeg libs/headers")
endif()

message(STATUS "Found ffmpeg libs: ${FFMPEG_LIBRARIES}")
message(STATUS "Found ffmpeg headers in: ${FFMPEG_INCLUDE_DIRS}")
message(STATUS "ffmpeg definitions: ${FFMPEG_DEFINITIONS}")
message(STATUS "Found avformat ${AVFORMAT_VERSION}")

include_directories(${FFMPEG_INCLUDE_DIRS})
add_compile_definitions(WHISPER_FFMPEG)

list(APPEND COMMON_EXTRA_LIBS ${FFMPEG_LIBRARIES})

set(COMMON_SOURCES_FFMPEG ffmpeg-transcode.cpp)
endif()


add_library(${TARGET} STATIC
common.h
common.cpp
@@ -38,7 +63,7 @@ add_library(${TARGET} STATIC

include(DefaultTargetOptions)

target_link_libraries(${TARGET} PRIVATE whisper)
target_link_libraries(${TARGET} PRIVATE whisper ${COMMON_EXTRA_LIBS})

set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
@@ -55,8 +80,8 @@ if (WHISPER_SDL2)

include(DefaultTargetOptions)

target_include_directories(${TARGET} PUBLIC ${SDL2_INCLUDE_DIRS})
target_link_libraries(${TARGET} PRIVATE ${SDL2_LIBRARIES})
target_include_directories(${TARGET} PUBLIC ${SDL2_INCLUDE_DIRS})
target_link_libraries (${TARGET} PRIVATE ${SDL2_LIBRARIES})

set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
@@ -108,7 +133,7 @@ if (WHISPER_SDL2)
set_target_properties(talk-llama PROPERTIES FOLDER "examples")
add_subdirectory(lsp)
set_target_properties(lsp PROPERTIES FOLDER "examples")
if (LLAMA_SYCL)
if (GGML_SYCL)
add_subdirectory(sycl)
set_target_properties(sycl PROPERTIES FOLDER "examples")
endif()
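
Note on the FFmpeg changes above: the WHISPER_FFMPEG branch calls find_package(FFmpeg REQUIRED), so it relies on a FindFFmpeg.cmake module (added under the repository's cmake/ folder) that defines FFMPEG_FOUND, FFMPEG_LIBRARIES, FFMPEG_INCLUDE_DIRS and FFMPEG_DEFINITIONS. The snippet below is a minimal pkg-config based sketch of such a module, shown only to illustrate the variables the diff consumes; it is not the module actually shipped in the repository.

# FindFFmpeg.cmake -- illustrative sketch only, not the repository's actual module.
# whisper.cpp needs only four FFmpeg components: avformat, avcodec, avutil, swresample.
find_package(PkgConfig REQUIRED)

pkg_check_modules(PC_FFMPEG REQUIRED
    libavformat
    libavcodec
    libavutil
    libswresample)

set(FFMPEG_FOUND        ${PC_FFMPEG_FOUND})
set(FFMPEG_LIBRARIES    ${PC_FFMPEG_LIBRARIES})
set(FFMPEG_INCLUDE_DIRS ${PC_FFMPEG_INCLUDE_DIRS})
set(FFMPEG_DEFINITIONS  ${PC_FFMPEG_CFLAGS_OTHER})
# The repository's module also reports AVFORMAT_VERSION (used in the status message above).

With a module like this on CMAKE_MODULE_PATH, the feature is enabled at configure time with -DWHISPER_FFMPEG=ON, which compiles ffmpeg-transcode.cpp into the common library and links the FFmpeg libraries through COMMON_EXTRA_LIBS.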
6 changes: 3 additions & 3 deletions examples/common.cpp
@@ -30,7 +30,7 @@ extern bool ffmpeg_decode_audio(const std::string & ifname, std::vector<uint8_t>
#endif

// Function to check if the next argument exists
std::string get_next_arg(int& i, int argc, char** argv, const std::string& flag, gpt_params& params) {
static std::string get_next_arg(int& i, int argc, char** argv, const std::string& flag, gpt_params& params) {
if (i + 1 < argc && argv[i + 1][0] != '-') {
return argv[++i];
} else {
@@ -346,7 +346,7 @@ std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::stri
return tokens;
}

std::vector<gpt_vocab::id> parse_tokens_from_string(const std::string& input, char delimiter) {
static std::vector<gpt_vocab::id> parse_tokens_from_string(const std::string& input, char delimiter) {
std::vector<gpt_vocab::id> output;
std::stringstream ss(input);
std::string token;
@@ -358,7 +358,7 @@ std::vector<gpt_vocab::id> parse_tokens_from_string(const std::string& input, ch
return output;
}

std::map<std::string, std::vector<gpt_vocab::id>> extract_tests_from_file(const std::string & fpath_test){
static std::map<std::string, std::vector<gpt_vocab::id>> extract_tests_from_file(const std::string & fpath_test){
if (fpath_test.empty()){
fprintf(stderr, "%s : No test file found.\n", __func__);
return std::map<std::string, std::vector<gpt_vocab::id>>();
8 changes: 4 additions & 4 deletions scripts/build-info.sh
@@ -24,7 +24,7 @@ if out=$($CC -dumpmachine); then
build_target=$out
fi

echo "int LLAMA_BUILD_NUMBER = ${build_number};"
echo "char const *LLAMA_COMMIT = \"${build_commit}\";"
echo "char const *LLAMA_COMPILER = \"${build_compiler}\";"
echo "char const *LLAMA_BUILD_TARGET = \"${build_target}\";"
echo "int WHISPER_BUILD_NUMBER = ${build_number};"
echo "char const *WHISPER_COMMIT = \"${build_commit}\";"
echo "char const *WHISPER_COMPILER = \"${build_compiler}\";"
echo "char const *WHISPER_BUILD_TARGET = \"${build_target}\";"
53 changes: 27 additions & 26 deletions src/CMakeLists.txt
@@ -77,27 +77,27 @@ if (WHISPER_OPENVINO)
set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
endif()

if (GGML_CUDA)
cmake_minimum_required(VERSION 3.18) # for CMAKE_CUDA_ARCHITECTURES

find_package(CUDAToolkit)
if (CUDAToolkit_FOUND)
message(STATUS "CUDA found")

if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
# 52 == lowest CUDA 12 standard
# 60 == f16 CUDA intrinsics
# 61 == integer CUDA intrinsics
# 70 == compute capability at which unrolling a loop in mul_mat_q kernels is faster
set(CMAKE_CUDA_ARCHITECTURES "52;61;70") # lowest CUDA 12 standard + lowest for integer intrinsics
endif()
message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")

enable_language(CUDA)
else()
message(WARNING "CUDA not found")
endif()
endif()
#if (GGML_CUDA)
# cmake_minimum_required(VERSION 3.18) # for CMAKE_CUDA_ARCHITECTURES
#
# find_package(CUDAToolkit)
# if (CUDAToolkit_FOUND)
# message(STATUS "CUDA found")
#
# if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
# # 52 == lowest CUDA 12 standard
# # 60 == f16 CUDA intrinsics
# # 61 == integer CUDA intrinsics
# # 70 == compute capability at which unrolling a loop in mul_mat_q kernels is faster
# set(CMAKE_CUDA_ARCHITECTURES "52;61;70") # lowest CUDA 12 standard + lowest for integer intrinsics
# endif()
# message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")
#
# enable_language(CUDA)
# else()
# message(WARNING "CUDA not found")
# endif()
#endif()

# whisper

@@ -107,11 +107,12 @@ add_library(whisper
whisper-mel.hpp
)

if (GGML_CUDA)
target_sources(whisper PRIVATE whisper-mel-cuda.cu)

target_link_libraries(whisper PRIVATE CUDA::cufft)
endif()
# TODO: disabled because it relies on ggml internals that are no longer accessible (ggml-backend-impl.h, ggml-cuda/common.cuh, ..)
#if (GGML_CUDA)
# target_sources(whisper PRIVATE whisper-mel-cuda.cu)
#
# target_link_libraries(whisper PRIVATE CUDA::cufft)
#endif()

# Set the version numbers
set_target_properties(whisper PROPERTIES
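
For reference, the block commented out above documents the meaning of each compute capability (52 = lowest CUDA 12 standard, 60 = f16 intrinsics, 61 = integer intrinsics, 70 = faster mul_mat_q loop unrolling) and defaulted to 52;61;70. With whisper's own CUDA detection disabled, detection is left to the ggml subproject; the sketch below shows one way a top-level build could still pin the architectures. It is an illustration only, assuming the usual pattern of pre-seeding cache variables ahead of configuring ggml, and is not part of this commit.

# Illustrative only: pre-seed the cache so the ggml subproject builds its CUDA backend
# with explicit architectures; equivalent to passing -DGGML_CUDA=1
# -DCMAKE_CUDA_ARCHITECTURES="52;61;70" at configure time.
set(GGML_CUDA ON CACHE BOOL "enable the ggml CUDA backend")
set(CMAKE_CUDA_ARCHITECTURES "52;61;70" CACHE STRING "target CUDA architectures")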
3 changes: 1 addition & 2 deletions src/whisper-mel-cuda.cu
@@ -2,8 +2,7 @@
#include "whisper-mel-cuda.hpp"
#include "whisper.h"

#include <ggml-cuda/common.cuh>
#include <ggml-backend-impl.h>
#include <ggml-backend.h>

#include <cuda.h>
#include <cuda_runtime.h>
4 changes: 3 additions & 1 deletion src/whisper.cpp
@@ -3215,7 +3215,9 @@ struct mel_calc_cpu : public whisper_mel_calc {
}

static whisper_mel_calc * whisper_mel_calc_create(ggml_backend_t backend, const whisper_filters & filters) {
#if defined(GGML_USE_CUDA) && !defined(GGML_USE_HIPBLAS)
// TODO: disabled because it relies on ggml internals that are no longer accessible (ggml-backend-impl.h, ggml-cuda/common.cuh, ..)
//#if defined(GGML_USE_CUDA) && !defined(GGML_USE_HIPBLAS)
#if 0
if (ggml_backend_is_cuda(backend)) {
auto ret = whisper_mel_calc_create_cuda(backend, filters);
if (ret) {
