Off-by-one on range boundaries
Wrong move: Loop endpoints miss first/last candidate.
Usually fails on: Fails on minimal arrays and exact-boundary answers.
Fix: Re-derive loops from inclusive/exclusive ranges before coding.
Move from brute-force thinking to an efficient approach using a graph shortest-path strategy (Dijkstra's algorithm adapted to maximize a product of probabilities).
You are given an undirected weighted graph of n nodes (0-indexed), represented by an edge list where edges[i] = [a, b] is an undirected edge connecting the nodes a and b with a probability of success of traversing that edge succProb[i].
Given two nodes start and end, find the path with the maximum probability of success to go from start to end and return its success probability.
If there is no path from start to end, return 0. Your answer will be accepted if it differs from the correct answer by at most 1e-5.
Example 1:
Input: n = 3, edges = [[0,1],[1,2],[0,2]], succProb = [0.5,0.5,0.2], start = 0, end = 2 Output: 0.25000 Explanation: There are two paths from start to end, one having a probability of success = 0.2 and the other has 0.5 * 0.5 = 0.25.
Example 2:
Input: n = 3, edges = [[0,1],[1,2],[0,2]], succProb = [0.5,0.5,0.3], start = 0, end = 2 Output: 0.30000
Example 3:
Input: n = 3, edges = [[0,1]], succProb = [0.5], start = 0, end = 2 Output: 0.00000 Explanation: There is no path between 0 and 2.
Constraints:
Constraints: 2 <= n <= 10^4; 0 <= start, end < n; start != end; 0 <= a, b < n; a != b; 0 <= succProb.length == edges.length <= 2*10^4; 0 <= succProb[i] <= 1. Problem summary: You are given an undirected weighted graph of n nodes (0-indexed), represented by an edge list where edges[i] = [a, b] is an undirected edge connecting the nodes a and b with a probability of success of traversing that edge succProb[i]. Given two nodes start and end, find the path with the maximum probability of success to go from start to end and return its success probability. If there is no path from start to end, return 0. Your answer will be accepted if it differs from the correct answer by at most 1e-5.
Start with the most direct exhaustive search. That gives a correctness anchor before optimizing.
Pattern signal: Graph — single-source best-path (Dijkstra with a max-heap over probabilities)
3 [[0,1],[1,2],[0,2]] [0.5,0.5,0.2] 0 2
3 [[0,1],[1,2],[0,2]] [0.5,0.5,0.3] 0 2
3 [[0,1]] [0.5] 0 2
Related problem: number-of-ways-to-arrive-at-destination. Source-backed implementations are provided below for direct study and interview prep.
// Accepted solution for LeetCode #1514: Path with Maximum Probability
class Solution {
    /**
     * Returns the maximum product of edge success probabilities over any path
     * from start_node to end_node, or 0 if the two nodes are disconnected.
     *
     * Dijkstra's algorithm with a max-heap: probabilities multiply along a
     * path and every factor lies in [0, 1], so extending a path can never
     * increase its probability — the greedy best-first expansion that works
     * for shortest paths is therefore valid here as well.
     */
    public double maxProbability(
        int n, int[][] edges, double[] succProb, int start_node, int end_node) {
        // adj[u] holds {v, p} entries: edge u-v succeeds with probability p.
        List<double[]>[] adj = new List[n];
        Arrays.setAll(adj, i -> new ArrayList<>());
        for (int i = 0; i < edges.length; ++i) {
            int u = edges[i][0];
            int v = edges[i][1];
            adj[u].add(new double[] {v, succProb[i]});
            adj[v].add(new double[] {u, succProb[i]});
        }
        // best[u] = highest path probability from start_node to u found so far.
        double[] best = new double[n];
        best[start_node] = 1;
        // Max-heap of {probability, node}: most probable frontier node first.
        PriorityQueue<double[]> heap =
            new PriorityQueue<>((x, y) -> Double.compare(y[0], x[0]));
        heap.offer(new double[] {1.0, start_node});
        while (!heap.isEmpty()) {
            double[] top = heap.poll();
            double prob = top[0];
            int u = (int) top[1];
            if (best[u] > prob) {
                continue; // stale queue entry; a better path was already found
            }
            for (double[] next : adj[u]) {
                int v = (int) next[0];
                double cand = prob * next[1];
                if (cand > best[v]) {
                    best[v] = cand;
                    heap.offer(new double[] {cand, v});
                }
            }
        }
        return best[end_node];
    }
}
// Accepted solution for LeetCode #1514: Path with Maximum Probability
//
// maxProbability runs Dijkstra's algorithm with a max-heap, maximizing the
// product of edge success probabilities along a path from start_node.
// It returns 0 when end_node is unreachable.
func maxProbability(n int, edges [][]int, succProb []float64, start_node int, end_node int) float64 {
	// adj[u] lists (probability, neighbor) pairs for every edge at u.
	adj := make([][]pair, n)
	for i, e := range edges {
		u, v := e[0], e[1]
		adj[u] = append(adj[u], pair{succProb[i], v})
		adj[v] = append(adj[v], pair{succProb[i], u})
	}
	// best[u] = highest path probability from start_node to u found so far.
	best := make([]float64, n)
	best[start_node] = 1
	pq := hp{{1, start_node}}
	for len(pq) > 0 {
		top := heap.Pop(&pq).(pair)
		if best[top.a] > top.p {
			continue // stale entry; a better path already reached this node
		}
		for _, e := range adj[top.a] {
			if cand := top.p * e.p; cand > best[e.a] {
				best[e.a] = cand
				heap.Push(&pq, pair{cand, e.a})
			}
		}
	}
	return best[end_node]
}
// pair is a max-heap entry: p is the best-known path probability to node a.
// Field order matters — maxProbability constructs entries positionally as
// pair{p, b}, so p must stay first.
type pair struct {
p float64 // path success probability (heap ordering key; see hp.Less)
a int     // node index
}
// hp is a max-heap of pairs keyed on probability, implementing heap.Interface.
type hp []pair

func (h hp) Len() int { return len(h) }

// Less inverts the usual min-heap order so the highest probability pops first.
func (h hp) Less(i, j int) bool {
	return h[i].p > h[j].p
}

func (h hp) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
}

func (h *hp) Push(x any) {
	*h = append(*h, x.(pair))
}

func (h *hp) Pop() any {
	old := *h
	last := old[len(old)-1]
	*h = old[:len(old)-1]
	return last
}
# Accepted solution for LeetCode #1514: Path with Maximum Probability
class Solution:
    def maxProbability(
        self,
        n: int,
        edges: List[List[int]],
        succProb: List[float],
        start_node: int,
        end_node: int,
    ) -> float:
        """Return the highest success probability of any start->end path.

        Dijkstra's algorithm on probabilities: a path's weight is the product
        of its edge probabilities, and since every factor lies in [0, 1] the
        greedy max-first expansion is valid. heapq is a min-heap, so
        probabilities are negated on the way in. Returns 0 when end_node is
        unreachable from start_node.
        """
        adj: List[List[Tuple[int, float]]] = [[] for _ in range(n)]
        for (u, v), p in zip(edges, succProb):
            adj[u].append((v, p))
            adj[v].append((u, p))
        # best[v] = highest path probability from start_node to v so far.
        best = [0.0] * n
        best[start_node] = 1.0
        heap = [(-1.0, start_node)]
        while heap:
            neg_prob, node = heappop(heap)
            prob = -neg_prob
            if best[node] > prob:
                continue  # stale entry: a better path already reached node
            for nxt, edge_p in adj[node]:
                cand = prob * edge_p
                if cand > best[nxt]:
                    best[nxt] = cand
                    heappush(heap, (-cand, nxt))
        return best[end_node]
// Accepted solution for LeetCode #1514: Path with Maximum Probability
/// Zero-sized carrier type for the LeetCode-style associated function `max_probability`.
struct Solution;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::collections::HashMap;
/// Max-heap entry for Dijkstra: a node `id` plus the probability of the best
/// known path reaching it. All orderings below compare by `prob` alone, so a
/// `BinaryHeap<State>` pops the most probable frontier node first.
struct State {
    id: usize,
    prob: f64,
}

impl State {
    /// Builds a heap entry for node `id` reached with probability `prob`.
    fn new(id: usize, prob: f64) -> Self {
        State { id, prob }
    }
}

// Equality must agree with `Ord` (`a == b` iff `cmp` is `Equal`), so compare
// `prob` only. The previous derived impl also compared `id`, which violated
// that contract for two states with equal probability but different nodes.
impl PartialEq for State {
    fn eq(&self, other: &Self) -> bool {
        self.cmp(other) == Ordering::Equal
    }
}

// Probabilities stay in [0, 1] per the problem constraints, so no NaN can
// appear and the ordering on `prob` is total in practice.
impl Eq for State {}

impl PartialOrd for State {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Canonical form: delegate to the total order below.
        Some(self.cmp(other))
    }
}

impl Ord for State {
    fn cmp(&self, other: &Self) -> Ordering {
        // `total_cmp` removes the panic path of the previous
        // `partial_cmp(...).unwrap()` should a NaN ever slip in; it matches
        // the ordinary IEEE order on the valid range [0, 1].
        self.prob.total_cmp(&other.prob)
    }
}
impl Solution {
    /// Returns the maximum product of edge success probabilities over any
    /// path from `start` to `end`, or 0.0 when no path exists.
    ///
    /// Dijkstra's algorithm with a max-heap: every edge probability lies in
    /// [0, 1], so appending an edge can never increase a path's probability,
    /// which makes the greedy best-first expansion valid.
    fn max_probability(
        n: i32,
        edges: Vec<Vec<i32>>,
        succ_prob: Vec<f64>,
        start: i32,
        end: i32,
    ) -> f64 {
        let n = n as usize;
        let start = start as usize;
        let end = end as usize;
        // Adjacency map: adj[u][v] = success probability of edge u-v.
        let mut adj: Vec<HashMap<usize, f64>> = vec![HashMap::new(); n];
        for (i, edge) in edges.into_iter().enumerate() {
            let u = edge[0] as usize;
            let v = edge[1] as usize;
            let p = succ_prob[i];
            adj[u].insert(v, p);
            adj[v].insert(u, p);
        }
        // probs[v] = best path probability to v discovered so far.
        let mut probs: Vec<f64> = vec![0.0; n];
        let mut queue: BinaryHeap<State> = BinaryHeap::new();
        queue.push(State::new(start, 1.0));
        probs[start] = 1.0;
        while let Some(parent) = queue.pop() {
            // The first extraction of `end` carries its optimal probability.
            if parent.id == end {
                return parent.prob;
            }
            // Skip stale heap entries superseded by a better path; without
            // this check superseded entries are re-expanded needlessly.
            if parent.prob < probs[parent.id] {
                continue;
            }
            for (&child_id, &prob) in &adj[parent.id] {
                let new_prob = parent.prob * prob;
                if new_prob > probs[child_id] {
                    probs[child_id] = new_prob;
                    queue.push(State::new(child_id, new_prob));
                }
            }
        }
        0.0
    }
}
#[test]
fn test() {
    use assert_approx_eq::assert_approx_eq;
    // (edges, succProb, expected) triples; every case uses n = 3, start = 0, end = 2.
    let cases = vec![
        (vec_vec_i32![[0, 1], [1, 2], [0, 2]], vec![0.5, 0.5, 0.2], 0.25),
        (vec_vec_i32![[0, 1], [1, 2], [0, 2]], vec![0.5, 0.5, 0.3], 0.3),
        (vec_vec_i32![[0, 1]], vec![0.5], 0.0),
    ];
    for (edges, succ_prob, expected) in cases {
        assert_approx_eq!(Solution::max_probability(3, edges, succ_prob, 0, 2), expected);
    }
}
// Accepted solution for LeetCode #1514: Path with Maximum Probability
/**
 * Dijkstra's algorithm maximizing the product of edge probabilities.
 * Returns the best success probability from start_node to end_node,
 * or 0 when the two nodes are disconnected.
 */
function maxProbability(
    n: number,
    edges: number[][],
    succProb: number[],
    start_node: number,
    end_node: number,
): number {
    // adj[u] lists [v, p] pairs: edge u-v succeeds with probability p.
    const adj: [number, number][][] = Array.from({ length: n }, () => []);
    edges.forEach(([u, v], i) => {
        adj[u].push([v, succProb[i]]);
        adj[v].push([u, succProb[i]]);
    });
    // best[u] = highest path probability from start_node to u found so far.
    const best: number[] = Array.from({ length: n }, () => 0);
    best[start_node] = 1;
    // Max-heap keyed on the probability in the first tuple slot.
    const heap = new PriorityQueue<number[]>((a, b) => b[0] - a[0]);
    heap.enqueue([1, start_node]);
    while (!heap.isEmpty()) {
        const [prob, u] = heap.dequeue();
        if (best[u] > prob) {
            continue; // stale entry: a better path already reached u
        }
        for (const [v, p] of adj[u]) {
            const cand = prob * p;
            if (cand > best[v]) {
                best[v] = cand;
                heap.enqueue([cand, v]);
            }
        }
    }
    return best[end_node];
}
Use this to step through a reusable interview workflow for this problem.
The brute-force approach enumerates every simple path from start to end, multiplying edge probabilities along each one and keeping the best product. The number of simple paths can grow exponentially with n, so this serves only as a correctness anchor, not a viable solution for large graphs.
Single-source best-path problems on weighted graphs yield to Dijkstra's algorithm: maintain the best-known value per node and repeatedly expand the most promising frontier node via a priority queue. Here the path weight is a product of probabilities in [0, 1] rather than a sum of distances, so the heap is keyed on maximum probability instead of minimum distance, giving O((V + E) log V) time.
Review these before coding to avoid predictable interview regressions.
Wrong move: Loop endpoints miss first/last candidate.
Usually fails on: Fails on minimal arrays and exact-boundary answers.
Fix: Re-derive loops from inclusive/exclusive ranges before coding.