mirror of https://github.com/krahets/hello-algo.git
synced 2025-02-02 22:43:50 +08:00

commit 2b5bd017b9 (parent ceef41be19): build
@@ -924,7 +924,8 @@ comments: true
         _count += nums[i];
     }
     // Traverse the array elements directly
-    for num in nums {
+    _count = 0;
+    for &num in nums {
         _count += num;
     }
 }
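The change resets `_count` before the second traversal and switches to the `&num` binding pattern. A standalone sketch (mine, not part of this commit) showing both traversal forms against an array; iterating a slice yields `&i32` items, which the `&num` pattern destructures:

```rust
fn main() {
    let nums = [1, 2, 3, 4, 5];
    let mut _count = 0;

    // Indexed traversal, as in the context lines of the hunk
    for i in 0..nums.len() {
        _count += nums[i];
    }

    // Direct traversal: `&num` destructures the `&i32` yielded by
    // slice iteration, so `num` is a plain i32
    _count = 0;
    for &num in &nums {
        _count += num;
    }
    assert_eq!(_count, 15);
}
```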
@@ -1359,9 +1360,8 @@ comments: true
     // Initialize an array with the extended length
     let mut res: Vec<i32> = vec![0; nums.len() + enlarge];
     // Copy all elements from the original array to the new array
-    for i in 0..nums.len() {
-        res[i] = nums[i];
-    }
+    res[0..nums.len()].copy_from_slice(nums);
+
     // Return the new, extended array
     res
 }
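The manual element-by-element loop becomes a single `copy_from_slice`, which requires source and destination slices of equal length. A runnable sketch of the whole helper as it reads after the change (the `extend` signature is inferred from the surrounding diff context):

```rust
fn extend(nums: &[i32], enlarge: usize) -> Vec<i32> {
    // Initialize an array with the extended length
    let mut res: Vec<i32> = vec![0; nums.len() + enlarge];
    // copy_from_slice panics unless both slices have equal length,
    // which the res[0..nums.len()] sub-slice guarantees here
    res[0..nums.len()].copy_from_slice(nums);
    // Return the new, extended array
    res
}

fn main() {
    assert_eq!(extend(&[1, 2, 3], 2), vec![1, 2, 3, 0, 0]);
}
```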
@@ -777,9 +777,6 @@ comments: true
 /* Remove the first node after node n0 in the linked list */
 #[allow(non_snake_case)]
 pub fn remove<T>(n0: &Rc<RefCell<ListNode<T>>>) {
-    if n0.borrow().next.is_none() {
-        return;
-    };
     // n0 -> P -> n1
     let P = n0.borrow_mut().next.take();
     if let Some(node) = P {
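The hunk cuts off inside the `if let`. The deleted guard is redundant because `take()` on an empty `next` yields `None`, so the `if let` body simply never runs. A self-contained sketch of the function after the change; the lines past the cutoff and the `ListNode` definition are my reconstruction, not part of this diff:

```rust
use std::cell::RefCell;
use std::rc::Rc;

struct ListNode<T> {
    val: T,
    next: Option<Rc<RefCell<ListNode<T>>>>,
}

/* Remove the first node after node n0 in the linked list */
#[allow(non_snake_case)]
pub fn remove<T>(n0: &Rc<RefCell<ListNode<T>>>) {
    // n0 -> P -> n1: with the guard gone, an empty `next` makes
    // take() yield None and the `if let` body is skipped
    let P = n0.borrow_mut().next.take();
    // n0 -> n1 (reconstructed past the hunk cutoff)
    if let Some(node) = P {
        n0.borrow_mut().next = node.borrow_mut().next.take();
    }
}

fn main() {
    let n1 = Rc::new(RefCell::new(ListNode { val: 1, next: None }));
    let n0 = Rc::new(RefCell::new(ListNode { val: 0, next: Some(n1) }));
    remove(&n0);
    assert!(n0.borrow().next.is_none());
    remove(&n0); // second call hits the None path and does nothing
}
```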
@@ -988,15 +985,23 @@ comments: true

 ```rust title="linked_list.rs"
 /* Access the node at index in the linked list */
-pub fn access<T>(head: Rc<RefCell<ListNode<T>>>, index: i32) -> Rc<RefCell<ListNode<T>>> {
-    if index <= 0 {
-        return head;
-    };
-    if let Some(node) = &head.borrow().next {
-        return access(node.clone(), index - 1);
+pub fn access<T>(head: Rc<RefCell<ListNode<T>>>, index: i32) -> Option<Rc<RefCell<ListNode<T>>>> {
+    fn dfs<T>(
+        head: Option<&Rc<RefCell<ListNode<T>>>>,
+        index: i32,
+    ) -> Option<Rc<RefCell<ListNode<T>>>> {
+        if index <= 0 {
+            return head.cloned();
+        }
+
+        if let Some(node) = head {
+            dfs(node.borrow().next.as_ref(), index - 1)
+        } else {
+            None
+        }
     }
-    return head;
+
+    dfs(Some(head).as_ref(), index)
 }
 ```
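The rewritten `access` returns `Option` instead of silently handing back the last reachable node when `index` runs past the end of the list. A self-contained usage sketch; the `ListNode` definition and the `main` driver are illustrative, not from the diff:

```rust
use std::cell::RefCell;
use std::rc::Rc;

struct ListNode<T> {
    val: T,
    next: Option<Rc<RefCell<ListNode<T>>>>,
}

/* Access the node at index in the linked list (new version) */
pub fn access<T>(head: Rc<RefCell<ListNode<T>>>, index: i32) -> Option<Rc<RefCell<ListNode<T>>>> {
    fn dfs<T>(
        head: Option<&Rc<RefCell<ListNode<T>>>>,
        index: i32,
    ) -> Option<Rc<RefCell<ListNode<T>>>> {
        if index <= 0 {
            // cloned() turns Option<&Rc<..>> into Option<Rc<..>>
            return head.cloned();
        }
        if let Some(node) = head {
            dfs(node.borrow().next.as_ref(), index - 1)
        } else {
            None
        }
    }
    dfs(Some(head).as_ref(), index)
}

fn main() {
    // Build 0 -> 1 -> 2; out-of-range access now yields None
    let n2 = Rc::new(RefCell::new(ListNode { val: 2, next: None }));
    let n1 = Rc::new(RefCell::new(ListNode { val: 1, next: Some(n2) }));
    let n0 = Rc::new(RefCell::new(ListNode { val: 0, next: Some(n1) }));
    assert_eq!(access(n0.clone(), 2).unwrap().borrow().val, 2);
    assert!(access(n0, 9).is_none());
}
```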
@@ -1219,14 +1224,19 @@ comments: true

 ```rust title="linked_list.rs"
 /* Search for the first node with value target in the linked list */
-pub fn find<T: PartialEq>(head: Rc<RefCell<ListNode<T>>>, target: T, index: i32) -> i32 {
-    if head.borrow().val == target {
-        return index;
-    };
-    if let Some(node) = &head.borrow_mut().next {
-        return find(node.clone(), target, index + 1);
+pub fn find<T: PartialEq>(head: Rc<RefCell<ListNode<T>>>, target: T) -> i32 {
+    fn find<T: PartialEq>(head: Option<&Rc<RefCell<ListNode<T>>>>, target: T, idx: i32) -> i32 {
+        if let Some(node) = head {
+            if node.borrow().val == target {
+                return idx;
+            }
+            return find(node.borrow().next.as_ref(), target, idx + 1);
+        } else {
+            -1
+        }
     }
-    return -1;
+
+    find(Some(head).as_ref(), target, 0)
 }
 ```
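The new two-argument `find` hides the running index inside an inner helper, so callers no longer thread a starting index through the call. A self-contained usage sketch, again with an illustrative `ListNode` and driver:

```rust
use std::cell::RefCell;
use std::rc::Rc;

struct ListNode<T> {
    val: T,
    next: Option<Rc<RefCell<ListNode<T>>>>,
}

/* Search for the first node with value target (new two-argument API) */
pub fn find<T: PartialEq>(head: Rc<RefCell<ListNode<T>>>, target: T) -> i32 {
    // The inner helper reuses the name `find`; the recursive calls
    // resolve to the innermost item, so the index stays internal
    fn find<T: PartialEq>(head: Option<&Rc<RefCell<ListNode<T>>>>, target: T, idx: i32) -> i32 {
        if let Some(node) = head {
            if node.borrow().val == target {
                return idx;
            }
            return find(node.borrow().next.as_ref(), target, idx + 1);
        } else {
            -1
        }
    }
    find(Some(head).as_ref(), target, 0)
}

fn main() {
    let n1 = Rc::new(RefCell::new(ListNode { val: 7, next: None }));
    let n0 = Rc::new(RefCell::new(ListNode { val: 3, next: Some(n1) }));
    assert_eq!(find(n0.clone(), 7), 1);
    assert_eq!(find(n0, 42), -1); // absent values report -1
}
```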
@@ -28,7 +28,7 @@ comments: true

 ## 2.1.2 Theoretical estimation

-Since actual testing has significant limitations, we can therefore consider evaluating algorithm efficiency through calculation alone. This estimation method is called <u>asymptotic complexity analysis</u>, or <u>complexity analysis</u> for short.
+Since actual testing has significant limitations, we can consider evaluating algorithm efficiency through calculation alone. This estimation method is called <u>asymptotic complexity analysis</u>, or <u>complexity analysis</u> for short.

 Complexity analysis reflects the relationship between the time and space resources an algorithm requires and the size of its input data. **It describes the growth trend of the time and space needed for algorithm execution as the input data size increases.** This definition is a bit convoluted, so we can break it into three key points to understand it.
@@ -661,7 +661,7 @@ index = hash(key) % capacity

 Observe that the final step of every hash algorithm is to take the result modulo the large prime $1000000007$, which ensures that the hash value stays within a suitable range. It is worth pondering why we emphasize taking the modulus with a prime, or in other words, what the drawbacks of a composite modulus are. This is an interesting question.

-To throw out the conclusion first: **using a large prime as the modulus maximizes the even distribution of hash values**. Since a prime shares no common divisors with other numbers, it reduces the periodic patterns introduced by the modulo operation, thereby avoiding hash collisions.
+To state the conclusion first: **using a large prime as the modulus maximizes the even distribution of hash values**. Since a prime shares no common divisors with other numbers, it reduces the periodic patterns introduced by the modulo operation, thereby avoiding hash collisions.

 For example, suppose we choose the composite number $9$ as the modulus. Since $9$ is divisible by $3$, all `key` values divisible by $3$ will be mapped to only the three hash values $0$, $3$, and $6$.
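The clustering effect described in this hunk is easy to reproduce. A small illustrative sketch (not from the book's code; the prime $13$ stands in for $1000000007$) showing keys divisible by $3$ collapsing onto three buckets under the composite modulus $9$, while a prime modulus spreads the same keys across all residues:

```rust
fn main() {
    let composite = 9u64;
    let prime = 13u64; // small prime stand-in for 1000000007
    let keys: Vec<u64> = (0..30).map(|k| k * 3).collect(); // all divisible by 3

    let comp_buckets: Vec<u64> = keys.iter().map(|k| k % composite).collect();
    let prime_buckets: Vec<u64> = keys.iter().map(|k| k % prime).collect();

    // Modulo 9, every key lands on 0, 3, or 6 ...
    assert!(comp_buckets.iter().all(|b| [0, 3, 6].contains(b)));
    // ... while modulo a prime the same keys cycle through all residues,
    // since gcd(3, 13) = 1
    println!("mod 9:  {:?}", &comp_buckets[..10]);
    println!("mod 13: {:?}", &prime_buckets[..14]);
}
```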
@@ -4,13 +4,13 @@ comments: true

 # 4.4 Memory and cache *

-In the first two sections of this chapter, we explored arrays and linked lists, two fundamental and important data structures, representing "continuous storage" and "dispersed storage" respectively.
+In the first two sections of this chapter, we explored arrays and linked lists, two fundamental data structures that represent "continuous storage" and "dispersed storage," respectively.

-In fact, **the physical structure largely determines the efficiency of a program's use of memory and cache**, which in turn affects the overall performance of the algorithm.
+In fact, **the physical structure largely determines how efficiently a program utilizes memory and cache**, which in turn affects the overall performance of the algorithm.

 ## 4.4.1 Computer storage devices

-There are three types of storage devices in computers: <u>hard disk</u>, <u>random-access memory (RAM)</u>, and <u>cache memory</u>. The following table shows their different roles and performance characteristics in computer systems.
+There are three types of storage devices in computers: <u>hard disk</u>, <u>random-access memory (RAM)</u>, and <u>cache memory</u>. The following table shows their respective roles and performance characteristics in computer systems.

 <p align="center"> Table 4-2 Computer storage devices </p>
@@ -22,14 +22,14 @@ There are three types of storage devices in computers: <u>hard disk</u>, <u>rand
 | Volatility | Data is not lost after power off | Data is lost after power off | Data is lost after power off |
 | Capacity | Larger, TB level | Smaller, GB level | Very small, MB level |
 | Speed | Slower, several hundred to thousands MB/s | Faster, several tens of GB/s | Very fast, several tens to hundreds of GB/s |
-| Price | Cheaper, several cents to yuan / GB | More expensive, tens to hundreds of yuan / GB | Very expensive, priced with CPU |
+| Price | Cheaper, a few cents to a few dollars / GB | More expensive, tens to hundreds of dollars / GB | Very expensive, priced with CPU |

 </div>

-We can imagine the computer storage system as a pyramid structure shown in Figure 4-9. The storage devices closer to the top of the pyramid are faster, have smaller capacity, and are more costly. This multi-level design is not accidental, but the result of careful consideration by computer scientists and engineers.
+The computer storage system can be visualized as a pyramid, as shown in Figure 4-9. The storage devices at the top of the pyramid are faster, have smaller capacities, and are more expensive. This multi-level design is not accidental, but a deliberate outcome of careful consideration by computer scientists and engineers.

-- **Hard disks are difficult to replace with memory**. Firstly, data in memory is lost after power off, making it unsuitable for long-term data storage; secondly, the cost of memory is dozens of times that of hard disks, making it difficult to popularize in the consumer market.
-- **It is difficult for caches to have both large capacity and high speed**. As the capacity of L1, L2, L3 caches gradually increases, their physical size becomes larger, increasing the physical distance from the CPU core, leading to increased data transfer time and higher element access latency. Under current technology, a multi-level cache structure is the best balance between capacity, speed, and cost.
+- **Replacing hard disks with memory is challenging**. Firstly, data in memory is lost after power off, making it unsuitable for long-term data storage; secondly, memory is significantly more expensive than hard disks, limiting its feasibility for widespread use in the consumer market.
+- **Caches face a trade-off between large capacity and high speed**. As the capacity of L1, L2, and L3 caches increases, their physical size grows, increasing the distance from the CPU core. This results in longer data transfer times and higher access latency. With current technology, a multi-level cache structure provides the optimal balance between capacity, speed, and cost.

 ![Computer storage system](ram_and_cache.assets/storage_pyramid.png){ class="animation-figure" }
@@ -37,11 +37,11 @@ We can imagine the computer storage system as a pyramid structure shown in Figur

 !!! tip

-    The storage hierarchy of computers reflects a delicate balance between speed, capacity, and cost. In fact, this kind of trade-off is common in all industrial fields, requiring us to find the best balance between different advantages and limitations.
+    The storage hierarchy in computers reflects a careful balance between speed, capacity, and cost. This type of trade-off is common across various industries, where finding the optimal balance between benefits and limitations is essential.

-Overall, **hard disks are used for long-term storage of large amounts of data, memory is used for temporary storage of data being processed during program execution, and cache is used to store frequently accessed data and instructions** to improve program execution efficiency. Together, they ensure the efficient operation of computer systems.
+Overall, **hard disks provide long-term storage for large volumes of data, memory serves as temporary storage for data being processed during program execution, and cache stores frequently accessed data and instructions to enhance execution efficiency**. Together, they ensure the efficient operation of computer systems.

-As shown in Figure 4-10, during program execution, data is read from the hard disk into memory for CPU computation. The cache can be considered a part of the CPU, **smartly loading data from memory** to provide fast data access to the CPU, significantly enhancing program execution efficiency and reducing reliance on slower memory.
+As shown in Figure 4-10, during program execution, data is read from the hard disk into memory for CPU computation. The cache, acting as an extension of the CPU, **intelligently preloads data from memory**, enabling faster data access for the CPU. This greatly improves program execution efficiency while reducing reliance on slower memory.

 ![Data flow between hard disk, memory, and cache](ram_and_cache.assets/computer_storage_devices.png){ class="animation-figure" }
@@ -51,33 +51,33 @@ As shown in Figure 4-10, during program execution, data is read from the hard di

 In terms of memory space utilization, arrays and linked lists have their advantages and limitations.

-On one hand, **memory is limited and cannot be shared by multiple programs**, so we hope that data structures can use space as efficiently as possible. The elements of an array are tightly packed without extra space for storing references (pointers) between linked list nodes, making them more space-efficient. However, arrays require allocating sufficient continuous memory space at once, which may lead to memory waste, and array expansion also requires additional time and space costs. In contrast, linked lists allocate and reclaim memory dynamically on a per-node basis, providing greater flexibility.
+On one hand, **memory is limited and cannot be shared by multiple programs**, so optimizing space usage in data structures is crucial. Arrays are space-efficient because their elements are tightly packed, without requiring extra memory for references (pointers) as in linked lists. However, arrays require pre-allocating a contiguous block of memory, which can lead to waste if the allocated space exceeds the actual need. Expanding an array also incurs additional time and space overhead. In contrast, linked lists allocate and free memory dynamically for each node, offering greater flexibility at the cost of additional memory for pointers.

-On the other hand, during program execution, **as memory is repeatedly allocated and released, the degree of fragmentation of free memory becomes higher**, leading to reduced memory utilization efficiency. Arrays, due to their continuous storage method, are relatively less likely to cause memory fragmentation. In contrast, the elements of a linked list are dispersedly stored, and frequent insertion and deletion operations make memory fragmentation more likely.
+On the other hand, during program execution, **repeated memory allocation and deallocation increase memory fragmentation**, reducing memory utilization efficiency. Arrays, due to their continuous storage method, are relatively less likely to cause memory fragmentation. In contrast, linked lists store elements in non-contiguous locations, and frequent insertions and deletions can exacerbate memory fragmentation.

 ## 4.4.3 Cache efficiency of data structures

-Although caches are much smaller in space capacity than memory, they are much faster and play a crucial role in program execution speed. Since the cache's capacity is limited and can only store a small part of frequently accessed data, when the CPU tries to access data not in the cache, a <u>cache miss</u> occurs, forcing the CPU to load the needed data from slower memory.
+Although caches are much smaller in space capacity than memory, they are much faster and play a crucial role in program execution speed. Due to their limited capacity, caches can only store a subset of frequently accessed data. When the CPU attempts to access data not present in the cache, a <u>cache miss</u> occurs, requiring the CPU to retrieve the needed data from slower memory, which can impact performance.

 Clearly, **the fewer the cache misses, the higher the CPU's data read-write efficiency**, and the better the program performance. The proportion of successful data retrieval from the cache by the CPU is called the <u>cache hit rate</u>, a metric often used to measure cache efficiency.

 To achieve higher efficiency, caches adopt the following data loading mechanisms.

-- **Cache lines**: Caches don't store and load data byte by byte but in units of cache lines. Compared to byte-by-byte transfer, the transmission of cache lines is more efficient.
-- **Prefetch mechanism**: Processors try to predict data access patterns (such as sequential access, fixed stride jumping access, etc.) and load data into the cache according to specific patterns to improve the hit rate.
-- **Spatial locality**: If data is accessed, data nearby is likely to be accessed in the near future. Therefore, when loading certain data, the cache also loads nearby data to improve the hit rate.
+- **Cache lines**: Caches operate by storing and loading data in units called cache lines, rather than individual bytes. This approach improves efficiency by transferring larger blocks of data at once.
+- **Prefetch mechanism**: Processors predict data access patterns (e.g., sequential or fixed-stride access) and preload data into the cache based on these patterns to increase the cache hit rate.
+- **Spatial locality**: When a specific piece of data is accessed, nearby data is likely to be accessed soon. To leverage this, caches load adjacent data along with the requested data, improving hit rates.
 - **Temporal locality**: If data is accessed, it's likely to be accessed again in the near future. Caches use this principle to retain recently accessed data to improve the hit rate.

-In fact, **arrays and linked lists have different cache utilization efficiencies**, mainly reflected in the following aspects.
+In fact, **arrays and linked lists have different cache utilization efficiencies**, which is mainly reflected in the following aspects.

-- **Occupied space**: Linked list elements occupy more space than array elements, resulting in less effective data volume in the cache.
-- **Cache lines**: Linked list data is scattered throughout memory, and since caches load "by line," the proportion of loading invalid data is higher.
-- **Prefetch mechanism**: The data access pattern of arrays is more "predictable" than that of linked lists, meaning the system is more likely to guess which data will be loaded next.
-- **Spatial locality**: Arrays are stored in concentrated memory spaces, so the data near the loaded data is more likely to be accessed next.
+- **Occupied space**: Linked list elements take up more space than array elements, resulting in less effective data being held in the cache.
+- **Cache lines**: Linked list data is scattered throughout the memory, and cache is "loaded by row", so the proportion of invalid data loaded is higher.
+- **Prefetch mechanism**: The data access pattern of arrays is more "predictable" than that of linked lists, that is, it is easier for the system to guess the data that is about to be loaded.
+- **Spatial locality**: Arrays are stored in a continuous memory space, so data near the data being loaded is more likely to be accessed soon.

 Overall, **arrays have a higher cache hit rate and are generally more efficient in operation than linked lists**. This makes data structures based on arrays more popular in solving algorithmic problems.

-It should be noted that **high cache efficiency does not mean that arrays are always better than linked lists**. Which data structure to choose in actual applications should be based on specific requirements. For example, both arrays and linked lists can implement the "stack" data structure (which will be detailed in the next chapter), but they are suitable for different scenarios.
+It should be noted that **high cache efficiency does not mean that arrays are always better than linked lists**. The choice of data structure should depend on specific application requirements. For example, both arrays and linked lists can implement the "stack" data structure (which will be detailed in the next chapter), but they are suitable for different scenarios.

 - In algorithm problems, we tend to choose stacks based on arrays because they provide higher operational efficiency and random access capabilities, with the only cost being the need to pre-allocate a certain amount of memory space for the array.
-- If the data volume is very large, highly dynamic, and the expected size of the stack is difficult to estimate, then a stack based on a linked list is more appropriate. Linked lists can disperse a large amount of data in different parts of the memory and avoid the additional overhead of array expansion.
+- If the data volume is very large, highly dynamic, and the expected size of the stack is difficult to estimate, then a stack based on a linked list is a better choice. Linked lists can distribute a large amount of data in different parts of the memory and avoid the additional overhead of array expansion.
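The cache effects discussed in this hunk can be observed directly. A rough, machine-dependent sketch (illustrative only, not a rigorous benchmark) that sums a contiguous `Vec` and then chases pointers through a `Box`-linked list of the same values:

```rust
use std::time::Instant;

struct Node {
    val: i64,
    next: Option<Box<Node>>,
}

fn main() {
    const N: i64 = 1_000_000;

    // Contiguous storage
    let vec: Vec<i64> = (0..N).collect();
    // Dispersed storage: one heap allocation per node
    let mut list: Option<Box<Node>> = None;
    for v in (0..N).rev() {
        list = Some(Box::new(Node { val: v, next: list.take() }));
    }

    let t = Instant::now();
    let s1: i64 = vec.iter().sum();
    let t_vec = t.elapsed();

    let t = Instant::now();
    let mut s2 = 0i64;
    let mut cur = list.as_deref();
    while let Some(node) = cur {
        s2 += node.val;
        cur = node.next.as_deref();
    }
    let t_list = t.elapsed();

    assert_eq!(s1, s2);
    // On typical hardware the pointer-chasing walk is noticeably slower,
    // largely due to cache misses; exact numbers vary by machine
    println!("vec: {:?}, list: {:?}", t_vec, t_list);

    // Drop the list iteratively; the default recursive drop would
    // overflow the stack on a million-node list
    let mut cur = list;
    while let Some(mut node) = cur {
        cur = node.next.take();
    }
}
```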
@@ -4,9 +4,9 @@ comments: true

 # 11.1 Sorting algorithms

-<u>Sorting algorithms (sorting algorithm)</u> are used to arrange a set of data in a specific order. Sorting algorithms have a wide range of applications because ordered data can usually be searched, analyzed, and processed more efficiently.
+<u>Sorting algorithms</u> are used to arrange a set of data in a specific order. Sorting algorithms have a wide range of applications because ordered data can usually be searched, analyzed, and processed more efficiently.

-As shown in Figure 11-1, the data types in sorting algorithms can be integers, floating point numbers, characters, or strings, etc. Sorting rules can be set according to needs, such as numerical size, character ASCII order, or custom rules.
+As shown in Figure 11-1, the data types in sorting algorithms can be integers, floating point numbers, characters, or strings, etc. Sorting criterion can be set according to needs, such as numerical size, character ASCII order, or custom criterion.

 ![Data types and comparator examples](sorting_algorithm.assets/sorting_examples.png){ class="animation-figure" }

@@ -14,13 +14,13 @@ As shown in Figure 11-1, the data types in sorting algorithms can be integers, f

 ## 11.1.1 Evaluation dimensions

-**Execution efficiency**: We expect the time complexity of sorting algorithms to be as low as possible, with a lower number of overall operations (reduction in the constant factor of time complexity). For large data volumes, execution efficiency is particularly important.
+**Execution efficiency**: We expect the time complexity of sorting algorithms to be as low as possible, as well as a lower number of overall operations (lowering the constant term of time complexity). For large data volumes, execution efficiency is particularly important.

-**In-place property**: As the name implies, <u>in-place sorting</u> is achieved by directly manipulating the original array, without the need for additional auxiliary arrays, thus saving memory. Generally, in-place sorting involves fewer data movement operations and is faster.
+**In-place property**: As the name implies, <u>in-place sorting</u> is achieved by directly manipulating the original array, without the need for additional helper arrays, thus saving memory. Generally, in-place sorting involves fewer data moving operations and is faster.

 **Stability**: <u>Stable sorting</u> ensures that the relative order of equal elements in the array does not change after sorting.

-Stable sorting is a necessary condition for multi-level sorting scenarios. Suppose we have a table storing student information, with the first and second columns being name and age, respectively. In this case, <u>unstable sorting</u> might lead to a loss of orderedness in the input data:
+Stable sorting is a necessary condition for multi-key sorting scenarios. Suppose we have a table storing student information, with the first and second columns being name and age, respectively. In this case, <u>unstable sorting</u> might lead to a loss of order in the input data:

 ```shell
 # Input data is sorted by name

@@ -43,7 +43,7 @@ Stable sorting is a necessary condition for multi-level sorting scenarios. Suppo

 **Adaptability**: <u>Adaptive sorting</u> leverages existing order information within the input data to reduce computational effort, achieving more optimal time efficiency. The best-case time complexity of adaptive sorting algorithms is typically better than their average-case time complexity.

-**Comparison-based**: <u>Comparison-based sorting</u> relies on comparison operators ($<$, $=$, $>$) to determine the relative order of elements and thus sort the entire array, with the theoretical optimal time complexity being $O(n \log n)$. Meanwhile, <u>non-comparison sorting</u> does not use comparison operators and can achieve a time complexity of $O(n)$, but its versatility is relatively poor.
+**Comparison or non-comparison-based**: <u>Comparison-based sorting</u> relies on comparison operators ($<$, $=$, $>$) to determine the relative order of elements and thus sort the entire array, with the theoretical optimal time complexity being $O(n \log n)$. Meanwhile, <u>non-comparison sorting</u> does not use comparison operators and can achieve a time complexity of $O(n)$, but its versatility is relatively poor.

 ## 11.1.2 Ideal sorting algorithm
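The stability requirement discussed in these hunks maps directly onto Rust's standard library, where `sort_by_key` is documented as stable while `sort_unstable_by_key` makes no ordering guarantee for equal keys. An illustrative sketch of the multi-key scenario described above (the example records are made up):

```rust
fn main() {
    // Records already in name order, as in the shell example above
    let records = vec![("A", 19), ("B", 18), ("C", 21), ("D", 19), ("E", 23)];

    let mut stable = records.clone();
    stable.sort_by_key(|&(_, age)| age); // Vec::sort_by_key is stable
    // ("A", 19) still precedes ("D", 19): name order survives among equal ages
    assert_eq!(stable[1], ("A", 19));
    assert_eq!(stable[2], ("D", 19));

    let mut unstable = records;
    unstable.sort_unstable_by_key(|&(_, age)| age);
    // Equal-age records may appear in either order here
    println!("{:?}", unstable);
}
```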