技术标签: 环境配置方案
find:
cannot find -lcudart
解决:
sudo ln -s /usr/local/cuda/lib64/libcudart.so /usr/lib/libcudart.so
下载链接:https://developer.nvidia.com/tensorrt
https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html#installing
tar -xvzf TensorRT-7.1.3.4.Ubuntu-16.04.x86_64-gnu.cuda-10.2.cudnn8.0.tar.gz
cd data
python3 ./download_pgms.py
cd sample
pip install pillow
../bin/sample_mnist
export LD_LIBRARY_PATH=/home/cs/TensorRT-7.1.3.4/lib:$LD_LIBRARY_PATH
cd /TensorRT-7.1.3.4/python
python3.6 -m pip install tensorrt-7.1.3.4-cp36-none-linux_x86_64.whl
将TensorRT中的链接文件.so文件复制到/usr/lib/文件夹中,比如
1) ImportError: libnvinfer.so.6: cannot open shared object file: No such file or directory
2) ImportError: libnvonnxparser.so.6: cannot open shared object file: No such file or directory
解决办法:
1) sudo cp TensorRT-6.01/targets/x86_64-linux-gnu/lib/libnvinfer.so.6 /usr/lib/
2) sudo cp TensorRT-6.01/targets/x86_64-linux-gnu/lib/libnvonnxparser.so.6 /usr/lib/
出现pycuda问题
ModuleNotFoundError: No module named 'pycuda'
解决: sudo pip3.6 install pycuda==2019.1.2 -i
https://pypi.tuna.tsinghua.edu.cn/simple
// 不用sudo
pip3.6 install pycuda -i https://pypi.tuna.tsinghua.edu.cn/simple
出现 is not a symbolic link
sbin/ldconfig.real: /usr/local/cuda-11.1/targets/x86_64-linux/lib/libcudnn_ops_infer.so.8 is not a symbolic link
sudo ln -sf libcudnn.so.8.0.2 libcudnn.so.8
出现
CMakeFiles/traffic_det_reg_caffe_trt.dir/src/TrafficDetection.cpp.o:
In function `onnxToTRTModel(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, nvinfer1::ICudaEngine*&, int const&)':
TrafficDetection.cpp:(.text+0x1357): undefined reference to `createNvOnnxParser_INTERNAL'
cmakelist添加:
/home/name/TensorRT-7.1.3.4/lib/libnvinfer.so
/home/name/TensorRT-7.1.3.4/lib/libnvinfer_plugin.so
/home/name/TensorRT-7.1.3.4/lib/libnvparsers.so
/home/name/TensorRT-7.1.3.4/lib/libnvonnxparser.so
ICudaEngine类即为Engine,可通过IBuilder类方法buildCudaEngine()/buildEngineWithConfig()返回其指针。
注意,可通过导入模型生成Engine和通过反序列化来加载Engine两种Engine生成方式。
// Three alternative ways to obtain an ICudaEngine from an IBuilder.
// NOTE(review): buildCudaEngine() is deprecated in TensorRT 7+; prefer
// buildEngineWithConfig() (method 3).
// method 1: raw pointer; caller is responsible for releasing it.
ICudaEngine *engine = builder->buildCudaEngine(*network);
// method 2: shared_ptr with a custom deleter (InferDeleter calls destroy()).
nvinfer1::IPluginFactory *mPlugin;  // plugin factory for custom layers (not used in this snippet)
mEngine = shared_ptr<nvinfer1::ICudaEngine>(
    builder->buildCudaEngine(*network), InferDeleter());
// method 3: unique_ptr wrapper built from an IBuilderConfig.
nvinfer1::IPluginFactory *mPlugin;
mEngine = SampleUniquePtr<nvinfer1::ICudaEngine>(
    builder->buildEngineWithConfig(*network, *config));
例如,输入输出通道的转换:
分割输入通道转换:
// Read an image and repack it from interleaved HWC/BGR bytes into the
// planar CHW/RGB float layout TensorRT expects, normalized to [0,1].
// NOTE(review): cv::imread() needs a filename argument here — placeholder in the article.
cv::Mat img = cv::imread();
if (img.empty()) continue;
// BGR to RGB
cv::Mat pr_img = preprocess_img(img);
int i = 0;  // linear pixel index within one channel plane
for (int row = 0; row < INPUT_H; ++row) {
// pr_img.step is the row stride in bytes; walk the row's interleaved BGR pixels.
uchar* uc_pixel = pr_img.data + row * pr_img.step;
for (int col = 0; col < INPUT_W; ++col) {
// b selects the batch slot; planes are laid out R, G, B (uc_pixel[2] is the B->R swap).
data[b * 3 * INPUT_H * INPUT_W + i] = (float)uc_pixel[2] / 255.0;
data[b * 3 * INPUT_H * INPUT_W + i + INPUT_H * INPUT_W] = (float)uc_pixel[1] / 255.0;
data[b * 3 * INPUT_H * INPUT_W + i + 2 * INPUT_H * INPUT_W] = (float)uc_pixel[0] / 255.0;
uc_pixel += 3;  // advance one interleaved BGR pixel
++i;
}
}
分割输出通道转换:
// Run inference; 'prob' receives the raw network output in CHW layout.
doInference(*context, data, prob, 1); //chw
// Reorder CHW -> HWC so the buffer can be handled as an OpenCV image.
out = chw2hwc(prob, out);
//hwc
cv::resize(out, real_out, real_out.size());
// map2three: presumably maps class indices to a 3-channel visualization — TODO confirm.
real_out_ = map2three(real_out, real_out_);
// libtroch版本可能会引起ros未定义
# Point CMake at the local libtorch install and require it.
set(Torch_DIR /home/libtorch/share/cmake/Torch)
# NO_DEFAULT_PATH restricts the search to Torch_DIR. (The original used the
# invalid keyword NO_DEFAULT and split ${Torch_DIR} across two lines, and
# issued a redundant second find_package call.)
find_package(Torch REQUIRED PATHS ${Torch_DIR} NO_DEFAULT_PATH)
if (Torch_FOUND)
    message(STATUS "Torch library found!")
    message(STATUS "    include path: ${TORCH_INCLUDE_DIRS}\n")
else ()
    message(FATAL_ERROR "Could not locate Torch\n")
endif()
选择版本下载
https://pytorch.org/
https://download.pytorch.org/libtorch/cu102/libtorch-shared-with-deps-1.7.1.zip
unzip libtorch-shared-with-deps-1.7.1+cu101.zip
解压后
实例代码
https://pytorch.org/cppdocs/installing.html
#include <torch/torch.h>
#include <iostream>

// Minimal libtorch smoke test: build a random 2x3 tensor and print it.
int main() {
  const torch::Tensor t = torch::rand({2, 3});
  std::cout << t << std::endl;
  return 0;
}
cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
project(example-app)

find_package(Torch REQUIRED)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")

add_executable(example-app example-app.cpp)
target_link_libraries(example-app "${TORCH_LIBRARIES}")
set_property(TARGET example-app PROPERTY CXX_STANDARD 14)

# The following code block is suggested to be used on Windows.
# According to https://github.com/pytorch/pytorch/issues/25457,
# the DLLs need to be copied to avoid memory errors.
# (Rejoined ${CMAKE_COMMAND} and ${TORCH_DLLS}, which the original split
# across lines — invalid CMake variable references.)
if (MSVC)
  file(GLOB TORCH_DLLS "${TORCH_INSTALL_PREFIX}/lib/*.dll")
  add_custom_command(TARGET example-app
                     POST_BUILD
                     COMMAND ${CMAKE_COMMAND} -E copy_if_different
                             ${TORCH_DLLS}
                             $<TARGET_FILE_DIR:example-app>)
endif (MSVC)
mkdir example
cd example/
vim CMakeLists.txt
vim example-app.cpp
mkdir build
cd build/
cmake -DCMAKE_PREFIX_PATH=/home/libtorch ..
cmake --build . --config Release
./example-app
project(test_pytorch)

set(CMAKE_CXX_STANDARD 14)
set(Torch_DIR /home/zyt/libtorch/share/cmake/Torch)
set(CMAKE_BUILD_TYPE "RELEASE")

#include_directories(${OpenCV_INCLUDE_DIRS})
find_package(OpenCV REQUIRED)
find_package(Torch REQUIRED)

add_executable(test_pytorch test_pytorch.cpp)
# Rejoined ${OpenCV_LIBS}/${TORCH_LIBRARIES}, which the original split across lines.
target_link_libraries(test_pytorch ${OpenCV_LIBS} ${TORCH_LIBRARIES})
mkdir build
cd build/
cmake ..
make
./test_pytorch
load .pt模型
import torch
import torchvision
from unet import UNet
model = UNet(3, 2)  # user-defined network (3 input channels, 2 output classes)
model.load_state_dict(torch.load("weights.pth"))  # load the trained weights
model.eval()  # switch to inference mode (freezes dropout/batchnorm behavior)
example = torch.rand(1, 3, 320, 480)  # dummy input with the model's expected (N, C, H, W) shape
traced_script_module = torch.jit.trace(model, example)  # trace the model into TorchScript
traced_script_module.save("model.pt")  # serialized model, loadable from C++ via torch::jit::load
// Loads a TorchScript model from model_path and moves it to the requested
// device. On any non-CPU device the module is also converted to half
// precision (FP16); a load failure aborts the process.
Detector::Detector(const std::string& model_path, const torch::DeviceType& device_type) : device_(device_type) {
try {
module_ = torch::jit::load(model_path);
}
catch (const c10::Error& e) {
// Model file missing/corrupt: nothing sensible to do, so exit.
std::cerr << "Error loading the model!\n";
std::exit(EXIT_FAILURE);
}
// half_ is true for every device except CPU (e.g. CUDA) — FP16 assumed supported there.
half_ = (device_ != torch::kCPU);
module_.to(device_);
if (half_) {
module_.to(torch::kHalf);
}
module_.eval();  // inference mode
}
infer
t ensor_img = tensor_img.permute({
0, 3, 1, 2}).contiguous(); // BHWC -> BCHW (Batch, Channel, Height, Width)
if (half_) {
tensor_img = tensor_img.to(torch::kHalf);
}
std::vector<torch::jit::IValue> inputs;
inputs.emplace_back(tensor_img);
/*** Inference ***/
// inference
torch::jit::IValue output = module_.forward(inputs);
auto detections = output.toTuple()->elements()[0].toTensor();
auto result = PostProcessing(detections, pad_w, pad_h, scale, img.size(), conf_threshold, iou_threshold);
set(CMAKE_BUILD_TYPE "RELEASE")
set(Torch_DIR /home/libtorch/share/cmake/Torch)
# NO_DEFAULT_PATH restricts the search to Torch_DIR. (The original used the
# invalid keyword NO_DEFAULT, split ${Torch_DIR} across lines, and issued a
# redundant second find_package call.)
find_package(Torch REQUIRED PATHS ${Torch_DIR} NO_DEFAULT_PATH)
if (Torch_FOUND)
    message(STATUS "Torch library found!")
    message(STATUS "    include path: ${TORCH_INCLUDE_DIRS}\n")
else ()
    message(FATAL_ERROR "Could not locate Torch\n")
endif()
set(CMAKE_CXX_STANDARD 14)
# NOTE(review): appending -std=c++11 overrides CXX_STANDARD 14 above — the
# article discusses switching between 14 and 11; keep the two consistent.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
catkin_package(
  CATKIN_DEPENDS
    roslib
    pcl_ros
)
# Rejoined ${catkin_INCLUDE_DIRS}, which the original split across lines.
include_directories(
  include
  ${catkin_INCLUDE_DIRS}
)
# TensorRT headers (adjust the path to the local TensorRT install).
INCLUDE_DIRECTORIES(
  /home/TensorRT-7.0.0.11/include
  /home/TensorRT-7.0.0.11/samples/)
错误一:
catkin_make无法通过,一直在报错如下:
CMakeFiles/detect_node.dir/src/detect_node.cpp.o: In function main': detect_node.cpp:(.text+0x118): undefined reference to ros::init(int&, char**, std::string const&, unsigned int)'
detect_node.cpp:(.text+0x1aa): undefined reference to ros::NodeHandle::NodeHandle(std::string const&, std::map<std::string, std::string, std::less<std::string>, std::allocator<std::pair<std::string const, std::string> > > const&)' detect_node.cpp:(.text+0x28d): undefined reference to
。。。。。。。。。
一般:
这个问题一般都是在cmakelists.txt中没有加入相应的链接库导致的,但是明明加了!
libtorch版本的问题!可能最新版本的libtorch库跟ROS的库一起链接的时候会不兼容吧
错误二:
pcl::io::loadPCDFile<pcl::PointXYZI> //无法通过,
undefined reference to `pcl::PCDReader::read(std::string const&, pcl::PCLPointCloud2&, Eigen::Matrix<float, 4, 1, 0, 4, 1>&, Eigen::Quaternion<float, 0>&, int&, int)
一直在报错,可能也是libtorch版本导致的,换版本能解决
调整14 11
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
使用多线程
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -pthread" )
1、在指定的目录下,下载需要编译的caffe
创建目录XXX,在该目录下右键打开终端,clone 需要编译的caffe
git clone https://github.com/BVLC/caffe.git
2、进入caffe目录,复制Makefile.config.example 到 Makefile.config 中,并编辑修改Makefile.config中内容
进入 caffe ,将 Makefile.config.example 文件复制一份并更名为 Makefile.config ,也可以在 caffe 目录下直接调用以下命令完成复制操作 :
cd caffe
ls
sudo cp Makefile.config.example Makefile.config
sudo gedit Makefile.config
打开Makefile.config文件,进行如下修改:
1.应用 cudnn
将
#USE_CUDNN := 1
修改成:
USE_CUDNN := 1
2.应用 opencv 版本
将
#OPENCV_VERSION := 3
修改为:
OPENCV_VERSION := 3
3.使用 python 接口
将
#WITH_PYTHON_LAYER := 1
修改为
WITH_PYTHON_LAYER := 1
4.修改 python 路径
INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include
LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib
修改为:
INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include /usr/include/hdf5/serial
LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib /usr/lib/x86_64-linux-gnu /usr/lib/x86_64-linux-gnu/hdf5/serial
5.CUDA_ARCH 。 因为我安装的cuda版本较高,compute_20和compute_21已经过时,所以注释掉这两行
将
CUDA_ARCH := -gencode arch=compute_20,code=sm_20 \
-gencode arch=compute_20,code=sm_21 \
修改为:
CUDA_ARCH := #-gencode arch=compute_20,code=sm_20 \
#-gencode arch=compute_20,code=sm_21 \
这里给出我修改后的Makefile.config文件内容,我的配置文件详细内容见文末:
3、然后修改 caffe 目录下的 Makefile 文件
sudo gedit Makefile
修改如下两项:
将:
NVCCFLAGS +=-ccbin=$(CXX) -Xcompiler-fPIC $(COMMON_FLAGS)
替换为:
NVCCFLAGS += -D_FORCE_INLINES -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS)
将:
LIBRARIES += glog gflags protobuf boost_system boost_filesystem m hdf5_hl hdf5
改为:
LIBRARIES += glog gflags protobuf boost_system boost_filesystem m hdf5_serial_hl hdf5_serial
出现:
/usr/include/c++/5/bits/c++0x_warning.h:32:2: error: #error This file requires compiler and library support for the ISO C++ 2011 standard. This support must be enabled with the -std=c++11 or -std=gnu++11 compiler options.
解决方法
解决:修改Makefile文件
CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) -std=c++11
NVCCFLAGS += -D_FORCE_INLINES -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) -std=c++11
LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) -std=c++11
出现
fatal error: opencv2/core/core.hpp: No such file or directory
解决makefile添加
#LIBRARIES += glog gflags protobuf boost_system boost_filesystem m
LIBRARIES += glog gflags protobuf boost_system boost_filesystem m hdf5_serial_hl hdf5_serial opencv_core opencv_highgui opencv_imgproc opencv_imgcodecs
源码安装opencv
https://blog.csdn.net/fightingboom/article/details/88732537
unzip opencv-3.4.3
unzip opencv_contrib-3.4.3
sudo cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D OPENCV_EXTRA_MODULES_PATH=/home/opencv_contrib-3.4.3/modules -D INSTALL_PYTHON_EXAMPLES=ON -D BUILD_EXAMPLES=ON ..
sudo cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D WITH_TBB=ON -D WITH_V4L=ON -D WITH_LIBV4L=ON -D WITH_CUDA=ON -D ENABLE_FAST_MATH=1 -D OPENCV_ENABLE_NONFREE=ON -D CUDA_FAST_MATH=1 -D CUDA_NVCC_FLAGS="-D_FORCE_INLINES" -D WITH_CUBLAS=1 -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-3.4.3/modules -D BUILD_opencv_dnn=ON ..
sudo make -j4
sudo make install
sudo make
sudo make install
sudo ldconfig
sudo cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D OPENCV_EXTRA_MODULES_PATH=/opencv_contrib-3.4.3/modules -D INSTALL_PYTHON_EXAMPLES=ON -D BUILD_EXAMPLES=ON -D BUILD_TIFF=ON -D WITH_VTK=OFF ..
/usr/bin/ld: cannot find -lcblas
/usr/bin/ld: cannot find -latlas
collect2: error: ld returned 1 exit status
Makefile:590: recipe for target '.build_release/lib/libcaffe.so.1.0.0' failed
make: *** [.build_release/lib/libcaffe.so.1.0.0] Error 1
// install
sudo apt-get install libcblas-dev
sudo ln -s libatlas.so.3 libatlas.so
碰到Undefined reference to ‘__cudaPopCallConfiguration’问题
可以在caffe 文件夹下/usr/local/cuda/lib64
ldd build/lib/libcaffe.so | grep cudart
查看cuda 对应so版本
如果不对
需要修改
/usr/lib/x86_64-linux-gnu
中所有 cudart
替换为
/usr/local/cuda/lib64
中所有 cudart
sudo cp libcudart.so.10.0 /usr/lib/x86_64-linux-gnu/
ting@cidi:/usr/local/cuda/lib64$ sudo cp libcudart.so.10.0.130 /usr/lib/x86_64-linux-gnu/
Caffe添加自定义层,如yolov3使用的层
报错:
Traceback (most recent call last):
File "/opt/ros/kinetic/bin/roscore", line 69, in <module>
import roslaunch
File "/opt/ros/kinetic/lib/python2.7/dist-packages/roslaunch/__init__.py", line 51, in <module>
from . import param_dump as roslaunch_param_dump
File "/opt/ros/kinetic/lib/python2.7/dist-packages/roslaunch/param_dump.py", line 40, in <module>
import roslaunch.config
File "/opt/ros/kinetic/lib/python2.7/dist-packages/roslaunch/config.py", line 45, in <module>
import rospkg.distro
File "/usr/lib/python2.7/dist-packages/rospkg/distro.py", line 44, in <module>
import yaml
File "/usr/local/lib/python3.5/dist-packages/yaml/__init__.py", line 284
class YAMLObject(metaclass=YAMLObjectMetaclass):
^
SyntaxError: invalid syntax
你一定要搞清楚的是PYTHONPATH这个环境变量。遇到的很多导入包的错误都是因为PYTHONPATH没有设置好。例如,要运行roscore会需要导入yaml这个python包,如果你PYTHONPATH没有设置Python2的路径,就会导致roscore引入Python3的yaml的包,造成错误,如下。总而言之,一定要搞清楚PYTHONPATH怎么设置,需要用到哪些路径,不需要用到哪些路径,哪些路径放在前面,哪些放在后面。PS:因为貌似只要一搜索到名字相同的包,系统就会直接引入,不管是版本2还是3;所以如果一个软件包既有2也有3的版本,那么一定要考虑好路径的排列顺序。
解决办法:
export PYTHONPATH="/opt/ros/kinetic/lib/python2.7/dist-packages"
# TensorRT
# Locate TensorRT headers and libraries under TENSORRT_ROOT.
# (Rejoined every ${TENSORRT_ROOT}/${TENSORRT_LIBRARY_*} reference that the
# original split across lines — invalid CMake variable references.)
find_path(TENSORRT_INCLUDE_DIR NvInfer.h
          HINTS ${TENSORRT_ROOT}
          PATH_SUFFIXES include)
MESSAGE(STATUS "Found TensorRT headers at ${TENSORRT_INCLUDE_DIR}")
find_library(TENSORRT_LIBRARY_INFER nvinfer
             HINTS ${TENSORRT_ROOT}
             PATH_SUFFIXES lib lib64 lib/x64)
find_library(TENSORRT_LIBRARY_INFER_PLUGIN nvinfer_plugin
             HINTS ${TENSORRT_ROOT}
             PATH_SUFFIXES lib lib64 lib/x64)
find_library(TENSORRT_LIBRARY_PARSER nvparsers
             HINTS ${TENSORRT_ROOT}
             PATH_SUFFIXES lib lib64 lib/x64)
# NOTE(review): TENSORRT_LIBRARY_ONNXPARSER is referenced below but its
# find_library call is commented out — uncomment it if the ONNX parser is needed.
#find_library(TENSORRT_LIBRARY_ONNXPARSER nvonnxparser
#             HINTS ${TENSORRT_ROOT}
#             PATH_SUFFIXES lib lib64 lib/x64)
set(TENSORRT_LIBRARY
    ${TENSORRT_LIBRARY_INFER}
    ${TENSORRT_LIBRARY_INFER_PLUGIN}
    ${TENSORRT_LIBRARY_ONNXPARSER}
    ${TENSORRT_LIBRARY_PARSER})
MESSAGE(STATUS "Find TensorRT libs at ${TENSORRT_LIBRARY}")
find_package_handle_standard_args(
    TENSORRT DEFAULT_MSG TENSORRT_INCLUDE_DIR TENSORRT_LIBRARY)
if(NOT TENSORRT_FOUND)
    # FATAL_ERROR aborts configuration; plain "ERROR" is not a valid message mode.
    message(FATAL_ERROR "Cannot find TensorRT library.")
endif()
需要添加这些
# Aggregate all found TensorRT libraries into one variable for linking.
# (Rejoined the ${...} references that the original split across lines.)
set(TENSORRT_LIBRARY
    ${TENSORRT_LIBRARY_INFER}
    ${TENSORRT_LIBRARY_INFER_PLUGIN}
    ${TENSORRT_LIBRARY_ONNXPARSER}
    ${TENSORRT_LIBRARY_PARSER})
//出现
undefined reference to `nvcaffeparser1::createCaffeParser()
//可能缺少
${TENSORRT_LIBRARY_PARSER})
文章浏览阅读2w次,点赞7次,收藏51次。四个步骤1.创建C++ Win32项目动态库dll 2.在Win32项目动态库中添加 外部依赖项 lib头文件和lib库3.导出C接口4.c#调用c++动态库开始你的表演...①创建一个空白的解决方案,在解决方案中添加 Visual C++ , Win32 项目空白解决方案的创建:添加Visual C++ , Win32 项目这......_c#调用lib
文章浏览阅读4.6k次。苹方字体是苹果系统上的黑体,挺好看的。注重颜值的网站都会使用,例如知乎:font-family: -apple-system, BlinkMacSystemFont, Helvetica Neue, PingFang SC, Microsoft YaHei, Source Han Sans SC, Noto Sans CJK SC, W..._ubuntu pingfang
文章浏览阅读159次。表单表单概述表单标签表单域按钮控件demo表单标签表单标签基本语法结构<form action="处理数据程序的url地址“ method=”get|post“ name="表单名称”></form><!--action,当提交表单时,向何处发送表单中的数据,地址可以是相对地址也可以是绝对地址--><!--method将表单中的数据传送给服务器处理,get方式直接显示在url地址中,数据可以被缓存,且长度有限制;而post方式数据隐藏传输,_html表单的处理程序有那些
文章浏览阅读1.2k次。使用说明:开启Google的登陆二步验证(即Google Authenticator服务)后用户登陆时需要输入额外由手机客户端生成的一次性密码。实现Google Authenticator功能需要服务器端和客户端的支持。服务器端负责密钥的生成、验证一次性密码是否正确。客户端记录密钥后生成一次性密码。下载谷歌验证类库文件放到项目合适位置(我这边放在项目Vender下面)https://github.com/PHPGangsta/GoogleAuthenticatorPHP代码示例://引入谷_php otp 验证器
文章浏览阅读4.3k次,点赞5次,收藏11次。matplotlib.plot画图横坐标混乱及间隔处理_matplotlib更改横轴间距
文章浏览阅读2.2k次。①Storage driver 处理各镜像层及容器层的处理细节,实现了多层数据的堆叠,为用户 提供了多层数据合并后的统一视图②所有 Storage driver 都使用可堆叠图像层和写时复制(CoW)策略③docker info 命令可查看当系统上的 storage driver主要用于测试目的,不建议用于生成环境。_docker 保存容器
文章浏览阅读834次,点赞27次,收藏13次。网络拓扑结构是指计算机网络中各组件(如计算机、服务器、打印机、路由器、交换机等设备)及其连接线路在物理布局或逻辑构型上的排列形式。这种布局不仅描述了设备间的实际物理连接方式,也决定了数据在网络中流动的路径和方式。不同的网络拓扑结构影响着网络的性能、可靠性、可扩展性及管理维护的难易程度。_网络拓扑csdn
文章浏览阅读1.8k次,点赞5次,收藏8次。IOS系统Date的坑要创建一个指定时间的new Date对象时,通常的做法是:new Date("2020-09-21 11:11:00")这行代码在 PC 端和安卓端都是正常的,而在 iOS 端则会提示 Invalid Date 无效日期。在IOS年月日中间的横岗许换成斜杠,也就是new Date("2020/09/21 11:11:00")通常为了兼容IOS的这个坑,需要做一些额外的特殊处理,笔者在开发的时候经常会忘了兼容IOS系统。所以就想试着重写Date函数,一劳永逸,避免每次ne_date.prototype 将所有 ios
文章浏览阅读5.3k次。方法一:用PLSQL Developer工具。 1 在PLSQL Developer的sql window里输入select * from test for update; 2 按F8执行 3 打开锁, 再按一下加号. 鼠标点到第一列的列头,使全列成选中状态,然后粘贴,最后commit提交即可。(前提..._excel导入pl/sql
文章浏览阅读83次。Git常用命令速查手册1、初始化仓库git init2、将文件添加到仓库git add 文件名 # 将工作区的某个文件添加到暂存区 git add -u # 添加所有被tracked文件中被修改或删除的文件信息到暂存区,不处理untracked的文件git add -A # 添加所有被tracked文件中被修改或删除的文件信息到暂存区,包括untracked的文件...
文章浏览阅读202次。分享119个ASP.NET源码总有一个是你想要的_千博二手车源码v2023 build 1120
文章浏览阅读1.8k次。版权声明:转载请注明出处 http://blog.csdn.net/irean_lau。目录(?)[+]1、缺省构造函数。2、缺省拷贝构造函数。3、 缺省析构函数。4、缺省赋值运算符。5、缺省取址运算符。6、 缺省取址运算符 const。[cpp] view plain copy_空类默认产生哪些类成员函数