@@ -1,934 +1,936 @@
|
1 | 1 | // status.rs |
|
2 | 2 | // |
|
3 | 3 | // Copyright 2019 Raphaël Gomès <rgomes@octobus.net> |
|
4 | 4 | // |
|
5 | 5 | // This software may be used and distributed according to the terms of the |
|
6 | 6 | // GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | //! Rust implementation of dirstate.status (dirstate.py). |
|
9 | 9 | //! It is currently missing a lot of functionality compared to the Python one |
|
10 | 10 | //! and will only be triggered in narrow cases. |
|
11 | 11 | |
|
12 | 12 | use crate::utils::path_auditor::PathAuditor; |
|
13 | 13 | use crate::{ |
|
14 | 14 | dirstate::SIZE_FROM_OTHER_PARENT, |
|
15 | 15 | filepatterns::PatternFileWarning, |
|
16 | 16 | matchers::{get_ignore_function, Matcher, VisitChildrenSet}, |
|
17 | 17 | utils::{ |
|
18 | 18 | files::{find_dirs, HgMetadata}, |
|
19 | 19 | hg_path::{ |
|
20 | 20 | hg_path_to_path_buf, os_string_to_hg_path_buf, HgPath, HgPathBuf, |
|
21 | 21 | HgPathError, |
|
22 | 22 | }, |
|
23 | 23 | }, |
|
24 | 24 | CopyMap, DirstateEntry, DirstateMap, EntryState, FastHashMap, |
|
25 | 25 | PatternError, |
|
26 | 26 | }; |
|
27 | 27 | use lazy_static::lazy_static; |
|
28 | 28 | use micro_timer::timed; |
|
29 | 29 | use rayon::prelude::*; |
|
30 | 30 | use std::{ |
|
31 | 31 | borrow::Cow, |
|
32 | 32 | collections::HashSet, |
|
33 | 33 | fmt, |
|
34 | 34 | fs::{read_dir, DirEntry}, |
|
35 | 35 | io::ErrorKind, |
|
36 | 36 | ops::Deref, |
|
37 | 37 | path::{Path, PathBuf}, |
|
38 | 38 | }; |
|
39 | 39 | |
|
40 | 40 | /// Wrong type of file from a `BadMatch` |
|
41 | 41 | /// Note: a lot of those don't exist on all platforms. |
|
42 | 42 | #[derive(Debug, Copy, Clone)] |
|
43 | 43 | pub enum BadType { |
|
44 | 44 | CharacterDevice, |
|
45 | 45 | BlockDevice, |
|
46 | 46 | FIFO, |
|
47 | 47 | Socket, |
|
48 | 48 | Directory, |
|
49 | 49 | Unknown, |
|
50 | 50 | } |
|
51 | 51 | |
|
52 | 52 | impl fmt::Display for BadType { |
|
53 | 53 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
|
54 | 54 | f.write_str(match self { |
|
55 | 55 | BadType::CharacterDevice => "character device", |
|
56 | 56 | BadType::BlockDevice => "block device", |
|
57 | 57 | BadType::FIFO => "fifo", |
|
58 | 58 | BadType::Socket => "socket", |
|
59 | 59 | BadType::Directory => "directory", |
|
60 | 60 | BadType::Unknown => "unknown", |
|
61 | 61 | }) |
|
62 | 62 | } |
|
63 | 63 | } |
|
64 | 64 | |
|
65 | 65 | /// Was explicitly matched but cannot be found/accessed |
|
66 | 66 | #[derive(Debug, Copy, Clone)] |
|
67 | 67 | pub enum BadMatch { |
|
68 | 68 | OsError(i32), |
|
69 | 69 | BadType(BadType), |
|
70 | 70 | } |
|
71 | 71 | |
|
72 | 72 | /// Enum used to dispatch new status entries into the right collections. |
|
73 | 73 | /// Is similar to `crate::EntryState`, but represents the transient state of |
|
74 | 74 | /// entries during the lifetime of a command. |
|
75 | 75 | #[derive(Debug, Copy, Clone)] |
|
76 | 76 | pub enum Dispatch { |
|
77 | 77 | Unsure, |
|
78 | 78 | Modified, |
|
79 | 79 | Added, |
|
80 | 80 | Removed, |
|
81 | 81 | Deleted, |
|
82 | 82 | Clean, |
|
83 | 83 | Unknown, |
|
84 | 84 | Ignored, |
|
85 | 85 | /// Empty dispatch, the file is not worth listing |
|
86 | 86 | None, |
|
87 | 87 | /// Was explicitly matched but cannot be found/accessed |
|
88 | 88 | Bad(BadMatch), |
|
89 | 89 | Directory { |
|
90 | 90 | /// True if the directory used to be a file in the dmap so we can say |
|
91 | 91 | /// that it's been removed. |
|
92 | 92 | was_file: bool, |
|
93 | 93 | }, |
|
94 | 94 | } |
|
95 | 95 | |
|
96 | 96 | type IoResult<T> = std::io::Result<T>; |
|
97 | 97 | |
|
98 | 98 | /// `Box<dyn Trait>` is syntactic sugar for `Box<dyn Trait + 'static>`, so add |
|
99 | 99 | /// an explicit lifetime here to not fight `'static` bounds "out of nowhere". |
|
100 | 100 | pub type IgnoreFnType<'a> = |
|
101 | 101 | Box<dyn for<'r> Fn(&'r HgPath) -> bool + Sync + 'a>; |
|
102 | 102 | |
|
103 | 103 | /// We have a good mix of owned (from directory traversal) and borrowed (from |
|
104 | 104 | /// the dirstate/explicit) paths, this comes up a lot. |
|
105 | 105 | pub type HgPathCow<'a> = Cow<'a, HgPath>; |
|
106 | 106 | |
|
107 | 107 | /// A path with its computed ``Dispatch`` information |
|
108 | 108 | type DispatchedPath<'a> = (HgPathCow<'a>, Dispatch); |
|
109 | 109 | |
|
110 | 110 | /// The conversion from `HgPath` to a real fs path failed. |
|
111 | 111 | /// `22` is the error code for "Invalid argument" |
|
112 | 112 | const INVALID_PATH_DISPATCH: Dispatch = Dispatch::Bad(BadMatch::OsError(22)); |
|
113 | 113 | |
|
114 | 114 | /// Dates and times that are outside the 31-bit signed range are compared |
|
115 | 115 | /// modulo 2^31. This should prevent hg from behaving badly with very large |
|
116 | 116 | /// files or corrupt dates while still having a high probability of detecting |
|
117 | 117 | /// changes. (issue2608) |
|
118 | 118 | /// TODO I haven't found a way of having `b` be `Into<i32>`, since `From<u64>` |
|
119 | 119 | /// is not defined for `i32`, and there is no `As` trait. This forces the |
|
120 | 120 | /// caller to cast `b` as `i32`. |
|
121 | 121 | fn mod_compare(a: i32, b: i32) -> bool { |
|
122 | 122 | a & i32::max_value() != b & i32::max_value() |
|
123 | 123 | } |
|
124 | 124 | |
|
125 | 125 | /// Return a sorted list containing information about the entries |
|
126 | 126 | /// in the directory. |
|
127 | 127 | /// |
|
128 | 128 | /// * `skip_dot_hg` - Return an empty vec if `path` contains a `.hg` directory |
|
129 | 129 | fn list_directory( |
|
130 | 130 | path: impl AsRef<Path>, |
|
131 | 131 | skip_dot_hg: bool, |
|
132 | 132 | ) -> std::io::Result<Vec<(HgPathBuf, DirEntry)>> { |
|
133 | 133 | let mut results = vec![]; |
|
134 | 134 | let entries = read_dir(path.as_ref())?; |
|
135 | 135 | |
|
136 | 136 | for entry in entries { |
|
137 | 137 | let entry = entry?; |
|
138 | 138 | let filename = os_string_to_hg_path_buf(entry.file_name())?; |
|
139 | 139 | let file_type = entry.file_type()?; |
|
140 | 140 | if skip_dot_hg && filename.as_bytes() == b".hg" && file_type.is_dir() { |
|
141 | 141 | return Ok(vec![]); |
|
142 | 142 | } else { |
|
143 | 143 | results.push((filename, entry)) |
|
144 | 144 | } |
|
145 | 145 | } |
|
146 | 146 | |
|
147 | 147 | results.sort_unstable_by_key(|e| e.0.clone()); |
|
148 | 148 | Ok(results) |
|
149 | 149 | } |
|
150 | 150 | |
|
151 | 151 | /// The file corresponding to the dirstate entry was found on the filesystem. |
|
152 | 152 | fn dispatch_found( |
|
153 | 153 | filename: impl AsRef<HgPath>, |
|
154 | 154 | entry: DirstateEntry, |
|
155 | 155 | metadata: HgMetadata, |
|
156 | 156 | copy_map: &CopyMap, |
|
157 | 157 | options: StatusOptions, |
|
158 | 158 | ) -> Dispatch { |
|
159 | 159 | let DirstateEntry { |
|
160 | 160 | state, |
|
161 | 161 | mode, |
|
162 | 162 | mtime, |
|
163 | 163 | size, |
|
164 | 164 | } = entry; |
|
165 | 165 | |
|
166 | 166 | let HgMetadata { |
|
167 | 167 | st_mode, |
|
168 | 168 | st_size, |
|
169 | 169 | st_mtime, |
|
170 | 170 | .. |
|
171 | 171 | } = metadata; |
|
172 | 172 | |
|
173 | 173 | match state { |
|
174 | 174 | EntryState::Normal => { |
|
175 | 175 | let size_changed = mod_compare(size, st_size as i32); |
|
176 | 176 | let mode_changed = |
|
177 | 177 | (mode ^ st_mode as i32) & 0o100 != 0o000 && options.check_exec; |
|
178 | 178 | let metadata_changed = size >= 0 && (size_changed || mode_changed); |
|
179 | 179 | let other_parent = size == SIZE_FROM_OTHER_PARENT; |
|
180 | 180 | |
|
181 | 181 | if metadata_changed |
|
182 | 182 | || other_parent |
|
183 | 183 | || copy_map.contains_key(filename.as_ref()) |
|
184 | 184 | { |
|
185 | 185 | if metadata.is_symlink() && size_changed { |
|
186 | 186 | // issue6456: Size returned may be longer due to encryption |
|
187 | 187 | // on EXT-4 fscrypt. TODO maybe only do it on EXT4? |
|
188 | 188 | Dispatch::Unsure |
|
189 | 189 | } else { |
|
190 | 190 | Dispatch::Modified |
|
191 | 191 | } |
|
192 | 192 | } else if mod_compare(mtime, st_mtime as i32) |
|
193 | 193 | || st_mtime == options.last_normal_time |
|
194 | 194 | { |
|
195 | 195 | // the file may have just been marked as normal and |
|
196 | 196 | // it may have changed in the same second without |
|
197 | 197 | // changing its size. This can happen if we quickly |
|
198 | 198 | // do multiple commits. Force lookup, so we don't |
|
199 | 199 | // miss such a racy file change. |
|
200 | 200 | Dispatch::Unsure |
|
201 | 201 | } else if options.list_clean { |
|
202 | 202 | Dispatch::Clean |
|
203 | 203 | } else { |
|
204 | 204 | Dispatch::None |
|
205 | 205 | } |
|
206 | 206 | } |
|
207 | 207 | EntryState::Merged => Dispatch::Modified, |
|
208 | 208 | EntryState::Added => Dispatch::Added, |
|
209 | 209 | EntryState::Removed => Dispatch::Removed, |
|
210 | 210 | EntryState::Unknown => Dispatch::Unknown, |
|
211 | 211 | } |
|
212 | 212 | } |
|
213 | 213 | |
|
214 | 214 | /// The file corresponding to this Dirstate entry is missing. |
|
215 | 215 | fn dispatch_missing(state: EntryState) -> Dispatch { |
|
216 | 216 | match state { |
|
217 | 217 | // File was removed from the filesystem during commands |
|
218 | 218 | EntryState::Normal | EntryState::Merged | EntryState::Added => { |
|
219 | 219 | Dispatch::Deleted |
|
220 | 220 | } |
|
221 | 221 | // File was removed, everything is normal |
|
222 | 222 | EntryState::Removed => Dispatch::Removed, |
|
223 | 223 | // File is unknown to Mercurial, everything is normal |
|
224 | 224 | EntryState::Unknown => Dispatch::Unknown, |
|
225 | 225 | } |
|
226 | 226 | } |
|
227 | 227 | |
|
228 | 228 | fn dispatch_os_error(e: &std::io::Error) -> Dispatch { |
|
229 | 229 | Dispatch::Bad(BadMatch::OsError( |
|
230 | 230 | e.raw_os_error().expect("expected real OS error"), |
|
231 | 231 | )) |
|
232 | 232 | } |
|
233 | 233 | |
|
234 | 234 | lazy_static! { |
|
235 | 235 | static ref DEFAULT_WORK: HashSet<&'static HgPath> = { |
|
236 | 236 | let mut h = HashSet::new(); |
|
237 | 237 | h.insert(HgPath::new(b"")); |
|
238 | 238 | h |
|
239 | 239 | }; |
|
240 | 240 | } |
|
241 | 241 | |
|
242 | 242 | #[derive(Debug, Copy, Clone)] |
|
243 | 243 | pub struct StatusOptions { |
|
244 | 244 | /// Remember the most recent modification timeslot for status, to make |
|
245 | 245 | /// sure we won't miss future size-preserving file content modifications |
|
246 | 246 | /// that happen within the same timeslot. |
|
247 | 247 | pub last_normal_time: i64, |
|
248 | 248 | /// Whether we are on a filesystem with UNIX-like exec flags |
|
249 | 249 | pub check_exec: bool, |
|
250 | 250 | pub list_clean: bool, |
|
251 | 251 | pub list_unknown: bool, |
|
252 | 252 | pub list_ignored: bool, |
|
253 | 253 | /// Whether to collect traversed dirs for applying a callback later. |
|
254 | 254 | /// Used by `hg purge` for example. |
|
255 | 255 | pub collect_traversed_dirs: bool, |
|
256 | 256 | } |
|
257 | 257 | |
|
258 | 258 | #[derive(Debug, Default)] |
|
259 | 259 | pub struct DirstateStatus<'a> { |
|
260 | 260 | /// Tracked files whose contents have changed since the parent revision |
|
261 | 261 | pub modified: Vec<HgPathCow<'a>>, |
|
262 | 262 | |
|
263 | 263 | /// Newly-tracked files that were not present in the parent |
|
264 | 264 | pub added: Vec<HgPathCow<'a>>, |
|
265 | 265 | |
|
266 | 266 | /// Previously-tracked files that have been (re)moved with an hg command |
|
267 | 267 | pub removed: Vec<HgPathCow<'a>>, |
|
268 | 268 | |
|
269 | 269 | /// (Still) tracked files that are missing, (re)moved with a non-hg |
|
270 | 270 | /// command |
|
271 | 271 | pub deleted: Vec<HgPathCow<'a>>, |
|
272 | 272 | |
|
273 | 273 | /// Tracked files that are up to date with the parent. |
|
274 | 274 | /// Only populated if `StatusOptions::list_clean` is true. |
|
275 | 275 | pub clean: Vec<HgPathCow<'a>>, |
|
276 | 276 | |
|
277 | 277 | /// Files in the working directory that are ignored with `.hgignore`. |
|
278 | 278 | /// Only populated if `StatusOptions::list_ignored` is true. |
|
279 | 279 | pub ignored: Vec<HgPathCow<'a>>, |
|
280 | 280 | |
|
281 | 281 | /// Files in the working directory that are neither tracked nor ignored. |
|
282 | 282 | /// Only populated if `StatusOptions::list_unknown` is true. |
|
283 | 283 | pub unknown: Vec<HgPathCow<'a>>, |
|
284 | 284 | |
|
285 | 285 | /// Was explicitly matched but cannot be found/accessed |
|
286 | 286 | pub bad: Vec<(HgPathCow<'a>, BadMatch)>, |
|
287 | 287 | |
|
288 | 288 | /// Either clean or modified, but we can’t tell from filesystem metadata |
|
289 | 289 | /// alone. The file contents need to be read and compared with that in |
|
290 | 290 | /// the parent. |
|
291 | 291 | pub unsure: Vec<HgPathCow<'a>>, |
|
292 | 292 | |
|
293 | 293 | /// Only filled if `collect_traversed_dirs` is `true` |
|
294 | 294 | pub traversed: Vec<HgPathBuf>, |
|
295 | 295 | } |
|
296 | 296 | |
|
297 | 297 | #[derive(Debug, derive_more::From)] |
|
298 | 298 | pub enum StatusError { |
|
299 | 299 | /// Generic IO error |
|
300 | 300 | IO(std::io::Error), |
|
301 | 301 | /// An invalid path that cannot be represented in Mercurial was found |
|
302 | 302 | Path(HgPathError), |
|
303 | 303 | /// An invalid "ignore" pattern was found |
|
304 | 304 | Pattern(PatternError), |
|
305 | 305 | } |
|
306 | 306 | |
|
307 | 307 | pub type StatusResult<T> = Result<T, StatusError>; |
|
308 | 308 | |
|
309 | 309 | impl fmt::Display for StatusError { |
|
310 | 310 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
|
311 | 311 | match self { |
|
312 | 312 | StatusError::IO(error) => error.fmt(f), |
|
313 | 313 | StatusError::Path(error) => error.fmt(f), |
|
314 | 314 | StatusError::Pattern(error) => error.fmt(f), |
|
315 | 315 | } |
|
316 | 316 | } |
|
317 | 317 | } |
|
318 | 318 | |
|
319 | 319 | /// Gives information about which files are changed in the working directory |
|
320 | 320 | /// and how, compared to the revision we're based on |
|
321 | 321 | pub struct Status<'a, M: ?Sized + Matcher + Sync> { |
|
322 | 322 | dmap: &'a DirstateMap, |
|
323 | 323 | pub(crate) matcher: &'a M, |
|
324 | 324 | root_dir: PathBuf, |
|
325 | 325 | pub(crate) options: StatusOptions, |
|
326 | 326 | ignore_fn: IgnoreFnType<'a>, |
|
327 | 327 | } |
|
328 | 328 | |
|
329 | 329 | impl<'a, M> Status<'a, M> |
|
330 | 330 | where |
|
331 | 331 | M: ?Sized + Matcher + Sync, |
|
332 | 332 | { |
|
333 | 333 | pub fn new( |
|
334 | 334 | dmap: &'a DirstateMap, |
|
335 | 335 | matcher: &'a M, |
|
336 | 336 | root_dir: PathBuf, |
|
337 | 337 | ignore_files: Vec<PathBuf>, |
|
338 | 338 | options: StatusOptions, |
|
339 | 339 | ) -> StatusResult<(Self, Vec<PatternFileWarning>)> { |
|
340 | 340 | // Needs to outlive `dir_ignore_fn` since it's captured. |
|
341 | 341 | |
|
342 | 342 | let (ignore_fn, warnings): (IgnoreFnType, _) = |
|
343 | 343 | if options.list_ignored || options.list_unknown { |
|
344 | 344 | get_ignore_function(ignore_files, &root_dir)? |
|
345 | 345 | } else { |
|
346 | 346 | (Box::new(|&_| true), vec![]) |
|
347 | 347 | }; |
|
348 | 348 | |
|
349 | 349 | Ok(( |
|
350 | 350 | Self { |
|
351 | 351 | dmap, |
|
352 | 352 | matcher, |
|
353 | 353 | root_dir, |
|
354 | 354 | options, |
|
355 | 355 | ignore_fn, |
|
356 | 356 | }, |
|
357 | 357 | warnings, |
|
358 | 358 | )) |
|
359 | 359 | } |
|
360 | 360 | |
|
361 | 361 | /// Is the path ignored? |
|
362 | 362 | pub fn is_ignored(&self, path: impl AsRef<HgPath>) -> bool { |
|
363 | 363 | (self.ignore_fn)(path.as_ref()) |
|
364 | 364 | } |
|
365 | 365 | |
|
366 | 366 | /// Is the path or one of its ancestors ignored? |
|
367 | 367 | pub fn dir_ignore(&self, dir: impl AsRef<HgPath>) -> bool { |
|
368 | 368 | // Only involve ignore mechanism if we're listing unknowns or ignored. |
|
369 | 369 | if self.options.list_ignored || self.options.list_unknown { |
|
370 | 370 | if self.is_ignored(&dir) { |
|
371 | 371 | true |
|
372 | 372 | } else { |
|
373 | 373 | for p in find_dirs(dir.as_ref()) { |
|
374 | 374 | if self.is_ignored(p) { |
|
375 | 375 | return true; |
|
376 | 376 | } |
|
377 | 377 | } |
|
378 | 378 | false |
|
379 | 379 | } |
|
380 | 380 | } else { |
|
381 | 381 | true |
|
382 | 382 | } |
|
383 | 383 | } |
|
384 | 384 | |
|
385 | 385 | /// Get stat data about the files explicitly specified by the matcher. |
|
386 | 386 | /// Returns a tuple of the directories that need to be traversed and the |
|
387 | 387 | /// files with their corresponding `Dispatch`. |
|
388 | 388 | /// TODO subrepos |
|
389 | 389 | #[timed] |
|
390 | 390 | pub fn walk_explicit( |
|
391 | 391 | &self, |
|
392 | 392 | traversed_sender: crossbeam_channel::Sender<HgPathBuf>, |
|
393 | 393 | ) -> (Vec<DispatchedPath<'a>>, Vec<DispatchedPath<'a>>) { |
|
394 | 394 | self.matcher |
|
395 | 395 | .file_set() |
|
396 | 396 | .unwrap_or(&DEFAULT_WORK) |
|
397 | 397 | .par_iter() |
|
398 | 398 | .flat_map(|&filename| -> Option<_> { |
|
399 | 399 | // TODO normalization |
|
400 | 400 | let normalized = filename; |
|
401 | 401 | |
|
402 | 402 | let buf = match hg_path_to_path_buf(normalized) { |
|
403 | 403 | Ok(x) => x, |
|
404 | 404 | Err(_) => { |
|
405 | 405 | return Some(( |
|
406 | 406 | Cow::Borrowed(normalized), |
|
407 | 407 | INVALID_PATH_DISPATCH, |
|
408 | 408 | )) |
|
409 | 409 | } |
|
410 | 410 | }; |
|
411 | 411 | let target = self.root_dir.join(buf); |
|
412 | 412 | let st = target.symlink_metadata(); |
|
413 | 413 | let in_dmap = self.dmap.get(normalized); |
|
414 | 414 | match st { |
|
415 | 415 | Ok(meta) => { |
|
416 | 416 | let file_type = meta.file_type(); |
|
417 | 417 | return if file_type.is_file() || file_type.is_symlink() |
|
418 | 418 | { |
|
419 | 419 | if let Some(entry) = in_dmap { |
|
420 | 420 | return Some(( |
|
421 | 421 | Cow::Borrowed(normalized), |
|
422 | 422 | dispatch_found( |
|
423 | 423 | &normalized, |
|
424 | 424 | *entry, |
|
425 | 425 | HgMetadata::from_metadata(meta), |
|
426 | 426 | &self.dmap.copy_map, |
|
427 | 427 | self.options, |
|
428 | 428 | ), |
|
429 | 429 | )); |
|
430 | 430 | } |
|
431 | 431 | Some(( |
|
432 | 432 | Cow::Borrowed(normalized), |
|
433 | 433 | Dispatch::Unknown, |
|
434 | 434 | )) |
|
435 | 435 | } else if file_type.is_dir() { |
|
436 | 436 | if self.options.collect_traversed_dirs { |
|
437 | 437 | traversed_sender |
|
438 | 438 | .send(normalized.to_owned()) |
|
439 | 439 | .expect("receiver should outlive sender"); |
|
440 | 440 | } |
|
441 | 441 | Some(( |
|
442 | 442 | Cow::Borrowed(normalized), |
|
443 | 443 | Dispatch::Directory { |
|
444 | 444 | was_file: in_dmap.is_some(), |
|
445 | 445 | }, |
|
446 | 446 | )) |
|
447 | 447 | } else { |
|
448 | 448 | Some(( |
|
449 | 449 | Cow::Borrowed(normalized), |
|
450 | 450 | Dispatch::Bad(BadMatch::BadType( |
|
451 | 451 | // TODO do more than unknown |
|
452 | 452 | // Support for all `BadType` variants |
|
453 | 453 | // varies greatly between platforms. |
|
454 | 454 | // So far, no tests check the type and |
|
455 | 455 | // this should be good enough for most |
|
456 | 456 | // users. |
|
457 | 457 | BadType::Unknown, |
|
458 | 458 | )), |
|
459 | 459 | )) |
|
460 | 460 | }; |
|
461 | 461 | } |
|
462 | 462 | Err(_) => { |
|
463 | 463 | if let Some(entry) = in_dmap { |
|
464 | 464 | return Some(( |
|
465 | 465 | Cow::Borrowed(normalized), |
|
466 | 466 | dispatch_missing(entry.state), |
|
467 | 467 | )); |
|
468 | 468 | } |
|
469 | 469 | } |
|
470 | 470 | }; |
|
471 | 471 | None |
|
472 | 472 | }) |
|
473 | 473 | .partition(|(_, dispatch)| match dispatch { |
|
474 | 474 | Dispatch::Directory { .. } => true, |
|
475 | 475 | _ => false, |
|
476 | 476 | }) |
|
477 | 477 | } |
|
478 | 478 | |
|
479 | 479 | /// Walk the working directory recursively to look for changes compared to |
|
480 | 480 | /// the current `DirstateMap`. |
|
481 | 481 | /// |
|
482 | 482 | /// This takes a mutable reference to the results to account for the |
|
483 | 483 | /// `extend` in timings |
|
484 | 484 | #[timed] |
|
485 | 485 | pub fn traverse( |
|
486 | 486 | &self, |
|
487 | 487 | path: impl AsRef<HgPath>, |
|
488 | 488 | old_results: &FastHashMap<HgPathCow<'a>, Dispatch>, |
|
489 | 489 | results: &mut Vec<DispatchedPath<'a>>, |
|
490 | 490 | traversed_sender: crossbeam_channel::Sender<HgPathBuf>, |
|
491 | 491 | ) { |
|
492 | 492 | // The traversal is done in parallel, so use a channel to gather |
|
493 | 493 | // entries. `crossbeam_channel::Sender` is `Sync`, while `mpsc::Sender` |
|
494 | 494 | // is not. |
|
495 | 495 | let (files_transmitter, files_receiver) = |
|
496 | 496 | crossbeam_channel::unbounded(); |
|
497 | 497 | |
|
498 | 498 | self.traverse_dir( |
|
499 | 499 | &files_transmitter, |
|
500 | 500 | path, |
|
501 | 501 | &old_results, |
|
502 | 502 | traversed_sender, |
|
503 | 503 | ); |
|
504 | 504 | |
|
505 | 505 | // Disconnect the channel so the receiver stops waiting |
|
506 | 506 | drop(files_transmitter); |
|
507 | 507 | |
|
508 | 508 | let new_results = files_receiver |
|
509 | 509 | .into_iter() |
|
510 | 510 | .par_bridge() |
|
511 | 511 | .map(|(f, d)| (Cow::Owned(f), d)); |
|
512 | 512 | |
|
513 | 513 | results.par_extend(new_results); |
|
514 | 514 | } |
|
515 | 515 | |
|
516 | 516 | /// Dispatch a single entry (file, folder, symlink...) found during |
|
517 | 517 | /// `traverse`. If the entry is a folder that needs to be traversed, it |
|
518 | 518 | /// will be handled in a separate thread. |
|
519 | 519 | fn handle_traversed_entry<'b>( |
|
520 | 520 | &'a self, |
|
521 | 521 | scope: &rayon::Scope<'b>, |
|
522 | 522 | files_sender: &'b crossbeam_channel::Sender<(HgPathBuf, Dispatch)>, |
|
523 | 523 | old_results: &'a FastHashMap<Cow<HgPath>, Dispatch>, |
|
524 | 524 | filename: HgPathBuf, |
|
525 | 525 | dir_entry: DirEntry, |
|
526 | 526 | traversed_sender: crossbeam_channel::Sender<HgPathBuf>, |
|
527 | 527 | ) -> IoResult<()> |
|
528 | 528 | where |
|
529 | 529 | 'a: 'b, |
|
530 | 530 | { |
|
531 | 531 | let file_type = dir_entry.file_type()?; |
|
532 | 532 | let entry_option = self.dmap.get(&filename); |
|
533 | 533 | |
|
534 | 534 | if filename.as_bytes() == b".hg" { |
|
535 | 535 | // Could be a directory or a symlink |
|
536 | 536 | return Ok(()); |
|
537 | 537 | } |
|
538 | 538 | |
|
539 | 539 | if file_type.is_dir() { |
|
540 | 540 | self.handle_traversed_dir( |
|
541 | 541 | scope, |
|
542 | 542 | files_sender, |
|
543 | 543 | old_results, |
|
544 | 544 | entry_option, |
|
545 | 545 | filename, |
|
546 | 546 | traversed_sender, |
|
547 | 547 | ); |
|
548 | 548 | } else if file_type.is_file() || file_type.is_symlink() { |
|
549 | 549 | if let Some(entry) = entry_option { |
|
550 | 550 | if self.matcher.matches_everything() |
|
551 | 551 | || self.matcher.matches(&filename) |
|
552 | 552 | { |
|
553 | 553 | let metadata = dir_entry.metadata()?; |
|
554 | 554 | files_sender |
|
555 | 555 | .send(( |
|
556 | 556 | filename.to_owned(), |
|
557 | 557 | dispatch_found( |
|
558 | 558 | &filename, |
|
559 | 559 | *entry, |
|
560 | 560 | HgMetadata::from_metadata(metadata), |
|
561 | 561 | &self.dmap.copy_map, |
|
562 | 562 | self.options, |
|
563 | 563 | ), |
|
564 | 564 | )) |
|
565 | 565 | .unwrap(); |
|
566 | 566 | } |
|
567 | 567 | } else if (self.matcher.matches_everything() |
|
568 | 568 | || self.matcher.matches(&filename)) |
|
569 | 569 | && !self.is_ignored(&filename) |
|
570 | 570 | { |
|
571 | 571 | if (self.options.list_ignored |
|
572 | 572 | || self.matcher.exact_match(&filename)) |
|
573 | 573 | && self.dir_ignore(&filename) |
|
574 | 574 | { |
|
575 | 575 | if self.options.list_ignored { |
|
576 | 576 | files_sender |
|
577 | 577 | .send((filename.to_owned(), Dispatch::Ignored)) |
|
578 | 578 | .unwrap(); |
|
579 | 579 | } |
|
580 | 580 | } else if self.options.list_unknown { |
|
581 | 581 | files_sender |
|
582 | 582 | .send((filename.to_owned(), Dispatch::Unknown)) |
|
583 | 583 | .unwrap(); |
|
584 | 584 | } |
|
585 | 585 | } else if self.is_ignored(&filename) && self.options.list_ignored { |
|
586 | files_sender | |
|
587 | .send((filename.to_owned(), Dispatch::Ignored)) | |
|
588 | .unwrap(); | |
|
586 | if self.matcher.matches(&filename) { | |
|
587 | files_sender | |
|
588 | .send((filename.to_owned(), Dispatch::Ignored)) | |
|
589 | .unwrap(); | |
|
590 | } | |
|
589 | 591 | } |
|
590 | 592 | } else if let Some(entry) = entry_option { |
|
591 | 593 | // Used to be a file or a folder, now something else. |
|
592 | 594 | if self.matcher.matches_everything() |
|
593 | 595 | || self.matcher.matches(&filename) |
|
594 | 596 | { |
|
595 | 597 | files_sender |
|
596 | 598 | .send((filename.to_owned(), dispatch_missing(entry.state))) |
|
597 | 599 | .unwrap(); |
|
598 | 600 | } |
|
599 | 601 | } |
|
600 | 602 | |
|
601 | 603 | Ok(()) |
|
602 | 604 | } |
|
603 | 605 | |
|
604 | 606 | /// A directory was found in the filesystem and needs to be traversed |
|
605 | 607 | fn handle_traversed_dir<'b>( |
|
606 | 608 | &'a self, |
|
607 | 609 | scope: &rayon::Scope<'b>, |
|
608 | 610 | files_sender: &'b crossbeam_channel::Sender<(HgPathBuf, Dispatch)>, |
|
609 | 611 | old_results: &'a FastHashMap<Cow<HgPath>, Dispatch>, |
|
610 | 612 | entry_option: Option<&'a DirstateEntry>, |
|
611 | 613 | directory: HgPathBuf, |
|
612 | 614 | traversed_sender: crossbeam_channel::Sender<HgPathBuf>, |
|
613 | 615 | ) where |
|
614 | 616 | 'a: 'b, |
|
615 | 617 | { |
|
616 | 618 | scope.spawn(move |_| { |
|
617 | 619 | // Nested `if` until `rust-lang/rust#53668` is stable |
|
618 | 620 | if let Some(entry) = entry_option { |
|
619 | 621 | // Used to be a file, is now a folder |
|
620 | 622 | if self.matcher.matches_everything() |
|
621 | 623 | || self.matcher.matches(&directory) |
|
622 | 624 | { |
|
623 | 625 | files_sender |
|
624 | 626 | .send(( |
|
625 | 627 | directory.to_owned(), |
|
626 | 628 | dispatch_missing(entry.state), |
|
627 | 629 | )) |
|
628 | 630 | .unwrap(); |
|
629 | 631 | } |
|
630 | 632 | } |
|
631 | 633 | // Do we need to traverse it? |
|
632 | 634 | if !self.is_ignored(&directory) || self.options.list_ignored { |
|
633 | 635 | self.traverse_dir( |
|
634 | 636 | files_sender, |
|
635 | 637 | directory, |
|
636 | 638 | &old_results, |
|
637 | 639 | traversed_sender, |
|
638 | 640 | ) |
|
639 | 641 | } |
|
640 | 642 | }); |
|
641 | 643 | } |
|
642 | 644 | |
|
643 | 645 | /// Decides whether the directory needs to be listed, and if so handles the |
|
644 | 646 | /// entries in a separate thread. |
|
645 | 647 | fn traverse_dir( |
|
646 | 648 | &self, |
|
647 | 649 | files_sender: &crossbeam_channel::Sender<(HgPathBuf, Dispatch)>, |
|
648 | 650 | directory: impl AsRef<HgPath>, |
|
649 | 651 | old_results: &FastHashMap<Cow<HgPath>, Dispatch>, |
|
650 | 652 | traversed_sender: crossbeam_channel::Sender<HgPathBuf>, |
|
651 | 653 | ) { |
|
652 | 654 | let directory = directory.as_ref(); |
|
653 | 655 | |
|
654 | 656 | if self.options.collect_traversed_dirs { |
|
655 | 657 | traversed_sender |
|
656 | 658 | .send(directory.to_owned()) |
|
657 | 659 | .expect("receiver should outlive sender"); |
|
658 | 660 | } |
|
659 | 661 | |
|
660 | 662 | let visit_entries = match self.matcher.visit_children_set(directory) { |
|
661 | 663 | VisitChildrenSet::Empty => return, |
|
662 | 664 | VisitChildrenSet::This | VisitChildrenSet::Recursive => None, |
|
663 | 665 | VisitChildrenSet::Set(set) => Some(set), |
|
664 | 666 | }; |
|
665 | 667 | let buf = match hg_path_to_path_buf(directory) { |
|
666 | 668 | Ok(b) => b, |
|
667 | 669 | Err(_) => { |
|
668 | 670 | files_sender |
|
669 | 671 | .send((directory.to_owned(), INVALID_PATH_DISPATCH)) |
|
670 | 672 | .expect("receiver should outlive sender"); |
|
671 | 673 | return; |
|
672 | 674 | } |
|
673 | 675 | }; |
|
674 | 676 | let dir_path = self.root_dir.join(buf); |
|
675 | 677 | |
|
676 | 678 | let skip_dot_hg = !directory.as_bytes().is_empty(); |
|
677 | 679 | let entries = match list_directory(dir_path, skip_dot_hg) { |
|
678 | 680 | Err(e) => { |
|
679 | 681 | files_sender |
|
680 | 682 | .send((directory.to_owned(), dispatch_os_error(&e))) |
|
681 | 683 | .expect("receiver should outlive sender"); |
|
682 | 684 | return; |
|
683 | 685 | } |
|
684 | 686 | Ok(entries) => entries, |
|
685 | 687 | }; |
|
686 | 688 | |
|
687 | 689 | rayon::scope(|scope| { |
|
688 | 690 | for (filename, dir_entry) in entries { |
|
689 | 691 | if let Some(ref set) = visit_entries { |
|
690 | 692 | if !set.contains(filename.deref()) { |
|
691 | 693 | continue; |
|
692 | 694 | } |
|
693 | 695 | } |
|
694 | 696 | // TODO normalize |
|
695 | 697 | let filename = if directory.is_empty() { |
|
696 | 698 | filename.to_owned() |
|
697 | 699 | } else { |
|
698 | 700 | directory.join(&filename) |
|
699 | 701 | }; |
|
700 | 702 | |
|
701 | 703 | if !old_results.contains_key(filename.deref()) { |
|
702 | 704 | match self.handle_traversed_entry( |
|
703 | 705 | scope, |
|
704 | 706 | files_sender, |
|
705 | 707 | old_results, |
|
706 | 708 | filename, |
|
707 | 709 | dir_entry, |
|
708 | 710 | traversed_sender.clone(), |
|
709 | 711 | ) { |
|
710 | 712 | Err(e) => { |
|
711 | 713 | files_sender |
|
712 | 714 | .send(( |
|
713 | 715 | directory.to_owned(), |
|
714 | 716 | dispatch_os_error(&e), |
|
715 | 717 | )) |
|
716 | 718 | .expect("receiver should outlive sender"); |
|
717 | 719 | } |
|
718 | 720 | Ok(_) => {} |
|
719 | 721 | } |
|
720 | 722 | } |
|
721 | 723 | } |
|
722 | 724 | }) |
|
723 | 725 | } |
|
724 | 726 | |
|
725 | 727 | /// Add the files in the dirstate to the results. |
|
726 | 728 | /// |
|
727 | 729 | /// This takes a mutable reference to the results to account for the |
|
728 | 730 | /// `extend` in timings |
|
729 | 731 | #[timed] |
|
730 | 732 | pub fn extend_from_dmap(&self, results: &mut Vec<DispatchedPath<'a>>) { |
|
731 | 733 | results.par_extend( |
|
732 | 734 | self.dmap |
|
733 | 735 | .par_iter() |
|
734 | 736 | .filter(|(path, _)| self.matcher.matches(path)) |
|
735 | 737 | .map(move |(filename, entry)| { |
|
736 | 738 | let filename: &HgPath = filename; |
|
737 | 739 | let filename_as_path = match hg_path_to_path_buf(filename) |
|
738 | 740 | { |
|
739 | 741 | Ok(f) => f, |
|
740 | 742 | Err(_) => { |
|
741 | 743 | return ( |
|
742 | 744 | Cow::Borrowed(filename), |
|
743 | 745 | INVALID_PATH_DISPATCH, |
|
744 | 746 | ) |
|
745 | 747 | } |
|
746 | 748 | }; |
|
747 | 749 | let meta = self |
|
748 | 750 | .root_dir |
|
749 | 751 | .join(filename_as_path) |
|
750 | 752 | .symlink_metadata(); |
|
751 | 753 | match meta { |
|
752 | 754 | Ok(m) |
|
753 | 755 | if !(m.file_type().is_file() |
|
754 | 756 | || m.file_type().is_symlink()) => |
|
755 | 757 | { |
|
756 | 758 | ( |
|
757 | 759 | Cow::Borrowed(filename), |
|
758 | 760 | dispatch_missing(entry.state), |
|
759 | 761 | ) |
|
760 | 762 | } |
|
761 | 763 | Ok(m) => ( |
|
762 | 764 | Cow::Borrowed(filename), |
|
763 | 765 | dispatch_found( |
|
764 | 766 | filename, |
|
765 | 767 | *entry, |
|
766 | 768 | HgMetadata::from_metadata(m), |
|
767 | 769 | &self.dmap.copy_map, |
|
768 | 770 | self.options, |
|
769 | 771 | ), |
|
770 | 772 | ), |
|
771 | 773 | Err(e) |
|
772 | 774 | if e.kind() == ErrorKind::NotFound |
|
773 | 775 | || e.raw_os_error() == Some(20) => |
|
774 | 776 | { |
|
775 | 777 | // Rust does not yet have an `ErrorKind` for |
|
776 | 778 | // `NotADirectory` (errno 20) |
|
777 | 779 | // It happens if the dirstate contains `foo/bar` |
|
778 | 780 | // and foo is not a |
|
779 | 781 | // directory |
|
780 | 782 | ( |
|
781 | 783 | Cow::Borrowed(filename), |
|
782 | 784 | dispatch_missing(entry.state), |
|
783 | 785 | ) |
|
784 | 786 | } |
|
785 | 787 | Err(e) => { |
|
786 | 788 | (Cow::Borrowed(filename), dispatch_os_error(&e)) |
|
787 | 789 | } |
|
788 | 790 | } |
|
789 | 791 | }), |
|
790 | 792 | ); |
|
791 | 793 | } |
|
792 | 794 | |
|
793 | 795 | /// Checks all files that are in the dirstate but were not found during the |
|
794 | 796 | /// working directory traversal. This means that the rest must |
|
795 | 797 | /// be either ignored, under a symlink or under a new nested repo. |
|
796 | 798 | /// |
|
797 | 799 | /// This takes a mutable reference to the results to account for the |
|
798 | 800 | /// `extend` in timings |
|
799 | 801 | #[timed] |
|
800 | 802 | pub fn handle_unknowns(&self, results: &mut Vec<DispatchedPath<'a>>) { |
|
801 | 803 | let to_visit: Vec<(&HgPath, &DirstateEntry)> = |
|
802 | 804 | if results.is_empty() && self.matcher.matches_everything() { |
|
803 | 805 | self.dmap.iter().map(|(f, e)| (f.deref(), e)).collect() |
|
804 | 806 | } else { |
|
805 | 807 | // Only convert to a hashmap if needed. |
|
806 | 808 | let old_results: FastHashMap<_, _> = |
|
807 | 809 | results.iter().cloned().collect(); |
|
808 | 810 | self.dmap |
|
809 | 811 | .iter() |
|
810 | 812 | .filter_map(move |(f, e)| { |
|
811 | 813 | if !old_results.contains_key(f.deref()) |
|
812 | 814 | && self.matcher.matches(f) |
|
813 | 815 | { |
|
814 | 816 | Some((f.deref(), e)) |
|
815 | 817 | } else { |
|
816 | 818 | None |
|
817 | 819 | } |
|
818 | 820 | }) |
|
819 | 821 | .collect() |
|
820 | 822 | }; |
|
821 | 823 | |
|
822 | 824 | let path_auditor = PathAuditor::new(&self.root_dir); |
|
823 | 825 | |
|
824 | 826 | let new_results = to_visit.into_par_iter().filter_map( |
|
825 | 827 | |(filename, entry)| -> Option<_> { |
|
826 | 828 | // Report ignored items in the dmap as long as they are not |
|
827 | 829 | // under a symlink directory. |
|
828 | 830 | if path_auditor.check(filename) { |
|
829 | 831 | // TODO normalize for case-insensitive filesystems |
|
830 | 832 | let buf = match hg_path_to_path_buf(filename) { |
|
831 | 833 | Ok(x) => x, |
|
832 | 834 | Err(_) => { |
|
833 | 835 | return Some(( |
|
834 | 836 | Cow::Owned(filename.to_owned()), |
|
835 | 837 | INVALID_PATH_DISPATCH, |
|
836 | 838 | )); |
|
837 | 839 | } |
|
838 | 840 | }; |
|
839 | 841 | Some(( |
|
840 | 842 | Cow::Owned(filename.to_owned()), |
|
841 | 843 | match self.root_dir.join(&buf).symlink_metadata() { |
|
842 | 844 | // File was just ignored, no links, and exists |
|
843 | 845 | Ok(meta) => { |
|
844 | 846 | let metadata = HgMetadata::from_metadata(meta); |
|
845 | 847 | dispatch_found( |
|
846 | 848 | filename, |
|
847 | 849 | *entry, |
|
848 | 850 | metadata, |
|
849 | 851 | &self.dmap.copy_map, |
|
850 | 852 | self.options, |
|
851 | 853 | ) |
|
852 | 854 | } |
|
853 | 855 | // File doesn't exist |
|
854 | 856 | Err(_) => dispatch_missing(entry.state), |
|
855 | 857 | }, |
|
856 | 858 | )) |
|
857 | 859 | } else { |
|
858 | 860 | // It's either missing or under a symlink directory which |
|
859 | 861 | // we, in this case, report as missing. |
|
860 | 862 | Some(( |
|
861 | 863 | Cow::Owned(filename.to_owned()), |
|
862 | 864 | dispatch_missing(entry.state), |
|
863 | 865 | )) |
|
864 | 866 | } |
|
865 | 867 | }, |
|
866 | 868 | ); |
|
867 | 869 | |
|
868 | 870 | results.par_extend(new_results); |
|
869 | 871 | } |
|
870 | 872 | } |
|
871 | 873 | |
|
872 | 874 | #[timed] |
|
873 | 875 | pub fn build_response<'a>( |
|
874 | 876 | results: impl IntoIterator<Item = DispatchedPath<'a>>, |
|
875 | 877 | traversed: Vec<HgPathBuf>, |
|
876 | 878 | ) -> DirstateStatus<'a> { |
|
877 | 879 | let mut unsure = vec![]; |
|
878 | 880 | let mut modified = vec![]; |
|
879 | 881 | let mut added = vec![]; |
|
880 | 882 | let mut removed = vec![]; |
|
881 | 883 | let mut deleted = vec![]; |
|
882 | 884 | let mut clean = vec![]; |
|
883 | 885 | let mut ignored = vec![]; |
|
884 | 886 | let mut unknown = vec![]; |
|
885 | 887 | let mut bad = vec![]; |
|
886 | 888 | |
|
887 | 889 | for (filename, dispatch) in results.into_iter() { |
|
888 | 890 | match dispatch { |
|
889 | 891 | Dispatch::Unknown => unknown.push(filename), |
|
890 | 892 | Dispatch::Unsure => unsure.push(filename), |
|
891 | 893 | Dispatch::Modified => modified.push(filename), |
|
892 | 894 | Dispatch::Added => added.push(filename), |
|
893 | 895 | Dispatch::Removed => removed.push(filename), |
|
894 | 896 | Dispatch::Deleted => deleted.push(filename), |
|
895 | 897 | Dispatch::Clean => clean.push(filename), |
|
896 | 898 | Dispatch::Ignored => ignored.push(filename), |
|
897 | 899 | Dispatch::None => {} |
|
898 | 900 | Dispatch::Bad(reason) => bad.push((filename, reason)), |
|
899 | 901 | Dispatch::Directory { .. } => {} |
|
900 | 902 | } |
|
901 | 903 | } |
|
902 | 904 | |
|
903 | 905 | DirstateStatus { |
|
904 | 906 | modified, |
|
905 | 907 | added, |
|
906 | 908 | removed, |
|
907 | 909 | deleted, |
|
908 | 910 | clean, |
|
909 | 911 | ignored, |
|
910 | 912 | unknown, |
|
911 | 913 | bad, |
|
912 | 914 | unsure, |
|
913 | 915 | traversed, |
|
914 | 916 | } |
|
915 | 917 | } |
|
916 | 918 | |
|
917 | 919 | /// Get the status of files in the working directory. |
|
918 | 920 | /// |
|
919 | 921 | /// This is the current entry-point for `hg-core` and is realistically unusable |
|
920 | 922 | /// outside of a Python context because its arguments need to provide a lot of |
|
921 | 923 | /// information that will not be necessary in the future. |
|
922 | 924 | #[timed] |
|
923 | 925 | pub fn status<'a>( |
|
924 | 926 | dmap: &'a DirstateMap, |
|
925 | 927 | matcher: &'a (dyn Matcher + Sync), |
|
926 | 928 | root_dir: PathBuf, |
|
927 | 929 | ignore_files: Vec<PathBuf>, |
|
928 | 930 | options: StatusOptions, |
|
929 | 931 | ) -> StatusResult<(DirstateStatus<'a>, Vec<PatternFileWarning>)> { |
|
930 | 932 | let (status, warnings) = |
|
931 | 933 | Status::new(dmap, matcher, root_dir, ignore_files, options)?; |
|
932 | 934 | |
|
933 | 935 | Ok((status.run()?, warnings)) |
|
934 | 936 | } |
@@ -1,412 +1,412 @@
|
1 | 1 | #require serve |
|
2 | 2 | |
|
3 | 3 | This test is a duplicate of 'test-http.t', feel free to factor out |
|
4 | 4 | parts that are not bundle1/bundle2 specific. |
|
5 | 5 | |
|
6 | 6 | $ cat << EOF >> $HGRCPATH |
|
7 | 7 | > [devel] |
|
8 | 8 | > # This test is dedicated to interaction through old bundle |
|
9 | 9 | > legacy.exchange = bundle1 |
|
10 | 10 | > EOF |
|
11 | 11 | |
|
12 | 12 | $ hg init test |
|
13 | 13 | $ cd test |
|
14 | 14 | $ echo foo>foo |
|
15 | 15 | $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg |
|
16 | 16 | $ echo foo>foo.d/foo |
|
17 | 17 | $ echo bar>foo.d/bAr.hg.d/BaR |
|
18 | 18 | $ echo bar>foo.d/baR.d.hg/bAR |
|
19 | 19 | $ hg commit -A -m 1 |
|
20 | 20 | adding foo |
|
21 | 21 | adding foo.d/bAr.hg.d/BaR |
|
22 | 22 | adding foo.d/baR.d.hg/bAR |
|
23 | 23 | adding foo.d/foo |
|
24 | 24 | $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log |
|
25 | 25 | $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid |
|
26 | 26 | |
|
27 | 27 | Test server address cannot be reused |
|
28 | 28 | |
|
29 | 29 | $ hg serve -p $HGPORT1 2>&1 |
|
30 | 30 | abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$ |
|
31 | 31 | [255] |
|
32 | 32 | |
|
33 | 33 | $ cd .. |
|
34 | 34 | $ cat hg1.pid hg2.pid >> $DAEMON_PIDS |
|
35 | 35 | |
|
36 | 36 | clone via stream |
|
37 | 37 | |
|
38 | 38 | #if no-reposimplestore |
|
39 | 39 | $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1 |
|
40 | 40 | streaming all changes |
|
41 | 41 | 6 files to transfer, 606 bytes of data (no-zstd !) |
|
42 | 42 | 6 files to transfer, 608 bytes of data (zstd !) |
|
43 | 43 | transferred * bytes in * seconds (*/sec) (glob) |
|
44 | 44 | searching for changes |
|
45 | 45 | no changes found |
|
46 | 46 | updating to branch default |
|
47 | 47 | 4 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
48 | 48 | $ hg verify -R copy |
|
49 | 49 | checking changesets |
|
50 | 50 | checking manifests |
|
51 | 51 | crosschecking files in changesets and manifests |
|
52 | 52 | checking files |
|
53 | 53 | checked 1 changesets with 4 changes to 4 files |
|
54 | 54 | #endif |
|
55 | 55 | |
|
56 | 56 | try to clone via stream, should use pull instead |
|
57 | 57 | |
|
58 | 58 | $ hg clone --stream http://localhost:$HGPORT1/ copy2 |
|
59 | 59 | warning: stream clone requested but server has them disabled |
|
60 | 60 | requesting all changes |
|
61 | 61 | adding changesets |
|
62 | 62 | adding manifests |
|
63 | 63 | adding file changes |
|
64 | 64 | added 1 changesets with 4 changes to 4 files |
|
65 | 65 | new changesets 8b6053c928fe |
|
66 | 66 | updating to branch default |
|
67 | 67 | 4 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
68 | 68 | |
|
69 | 69 | try to clone via stream but missing requirements, so should use pull instead |
|
70 | 70 | |
|
71 | 71 | $ cat > $TESTTMP/removesupportedformat.py << EOF |
|
72 | 72 | > from mercurial import localrepo |
|
73 | 73 | > def extsetup(ui): |
|
74 | 74 | > localrepo.localrepository.supportedformats.remove(b'generaldelta') |
|
75 | 75 | > EOF |
|
76 | 76 | |
|
77 | 77 | $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3 |
|
78 | 78 | warning: stream clone requested but client is missing requirements: generaldelta |
|
79 | 79 | (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information) |
|
80 | 80 | requesting all changes |
|
81 | 81 | adding changesets |
|
82 | 82 | adding manifests |
|
83 | 83 | adding file changes |
|
84 | 84 | added 1 changesets with 4 changes to 4 files |
|
85 | 85 | new changesets 8b6053c928fe |
|
86 | 86 | updating to branch default |
|
87 | 87 | 4 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
88 | 88 | |
|
89 | 89 | clone via pull |
|
90 | 90 | |
|
91 | 91 | $ hg clone http://localhost:$HGPORT1/ copy-pull |
|
92 | 92 | requesting all changes |
|
93 | 93 | adding changesets |
|
94 | 94 | adding manifests |
|
95 | 95 | adding file changes |
|
96 | 96 | added 1 changesets with 4 changes to 4 files |
|
97 | 97 | new changesets 8b6053c928fe |
|
98 | 98 | updating to branch default |
|
99 | 99 | 4 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
100 | 100 | $ hg verify -R copy-pull |
|
101 | 101 | checking changesets |
|
102 | 102 | checking manifests |
|
103 | 103 | crosschecking files in changesets and manifests |
|
104 | 104 | checking files |
|
105 | 105 | checked 1 changesets with 4 changes to 4 files |
|
106 | 106 | $ cd test |
|
107 | 107 | $ echo bar > bar |
|
108 | 108 | $ hg commit -A -d '1 0' -m 2 |
|
109 | 109 | adding bar |
|
110 | 110 | $ cd .. |
|
111 | 111 | |
|
112 | 112 | clone over http with --update |
|
113 | 113 | |
|
114 | 114 | $ hg clone http://localhost:$HGPORT1/ updated --update 0 |
|
115 | 115 | requesting all changes |
|
116 | 116 | adding changesets |
|
117 | 117 | adding manifests |
|
118 | 118 | adding file changes |
|
119 | 119 | added 2 changesets with 5 changes to 5 files |
|
120 | 120 | new changesets 8b6053c928fe:5fed3813f7f5 |
|
121 | 121 | updating to branch default |
|
122 | 122 | 4 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
123 | 123 | $ hg log -r . -R updated |
|
124 | 124 | changeset: 0:8b6053c928fe |
|
125 | 125 | user: test |
|
126 | 126 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
127 | 127 | summary: 1 |
|
128 | 128 | |
|
129 | 129 | $ rm -rf updated |
|
130 | 130 | |
|
131 | 131 | incoming via HTTP |
|
132 | 132 | |
|
133 | 133 | $ hg clone http://localhost:$HGPORT1/ --rev 0 partial |
|
134 | 134 | adding changesets |
|
135 | 135 | adding manifests |
|
136 | 136 | adding file changes |
|
137 | 137 | added 1 changesets with 4 changes to 4 files |
|
138 | 138 | new changesets 8b6053c928fe |
|
139 | 139 | updating to branch default |
|
140 | 140 | 4 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
141 | 141 | $ cd partial |
|
142 | 142 | $ touch LOCAL |
|
143 | 143 | $ hg ci -qAm LOCAL |
|
144 | 144 | $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n' |
|
145 | 145 | comparing with http://localhost:$HGPORT1/ |
|
146 | 146 | searching for changes |
|
147 | 147 | 2 |
|
148 | 148 | $ cd .. |
|
149 | 149 | |
|
150 | 150 | pull |
|
151 | 151 | |
|
152 | 152 | $ cd copy-pull |
|
153 | 153 | $ cat >> .hg/hgrc <<EOF |
|
154 | 154 | > [hooks] |
|
155 | 155 | > changegroup = sh -c "printenv.py --line changegroup" |
|
156 | 156 | > EOF |
|
157 | 157 | $ hg pull |
|
158 | 158 | pulling from http://localhost:$HGPORT1/ |
|
159 | 159 | searching for changes |
|
160 | 160 | adding changesets |
|
161 | 161 | adding manifests |
|
162 | 162 | adding file changes |
|
163 | 163 | added 1 changesets with 1 changes to 1 files |
|
164 | 164 | new changesets 5fed3813f7f5 |
|
165 | 165 | changegroup hook: HG_HOOKNAME=changegroup |
|
166 | 166 | HG_HOOKTYPE=changegroup |
|
167 | 167 | HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d |
|
168 | 168 | HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d |
|
169 | 169 | HG_SOURCE=pull |
|
170 | 170 | HG_TXNID=TXN:$ID$ |
|
171 | 171 | HG_TXNNAME=pull |
|
172 | 172 | http://localhost:$HGPORT1/ |
|
173 | 173 | HG_URL=http://localhost:$HGPORT1/ |
|
174 | 174 | |
|
175 | 175 | (run 'hg update' to get a working copy) |
|
176 | 176 | $ cd .. |
|
177 | 177 | |
|
178 | 178 | clone from invalid URL |
|
179 | 179 | |
|
180 | 180 | $ hg clone http://localhost:$HGPORT/bad |
|
181 | 181 | abort: HTTP Error 404: Not Found |
|
182 | 182 | [100] |
|
183 | 183 | |
|
184 | 184 | test http authentication |
|
185 | 185 | + use the same server to test server side streaming preference |
|
186 | 186 | |
|
187 | 187 | $ cd test |
|
188 | 188 | |
|
189 | 189 | $ hg serve --config extensions.x=$TESTDIR/httpserverauth.py -p $HGPORT2 -d \ |
|
190 | 190 | > --pid-file=pid --config server.preferuncompressed=True \ |
|
191 | 191 | > --config web.push_ssl=False --config web.allow_push=* -A ../access.log |
|
192 | 192 | $ cat pid >> $DAEMON_PIDS |
|
193 | 193 | |
|
194 | 194 | $ cat << EOF > get_pass.py |
|
195 | > import getpass | |
|
196 | > def newgetpass( |
|
195 | > from mercurial import util | |
|
196 | > def newgetpass(): | |
|
197 | 197 | > return "pass" |
|
198 | > |
|
198 | > util.get_password = newgetpass | |
|
199 | 199 | > EOF |
|
200 | 200 | |
|
201 | 201 | $ hg id http://localhost:$HGPORT2/ |
|
202 | 202 | abort: http authorization required for http://localhost:$HGPORT2/ |
|
203 | 203 | [255] |
|
204 | 204 | $ hg id http://localhost:$HGPORT2/ |
|
205 | 205 | abort: http authorization required for http://localhost:$HGPORT2/ |
|
206 | 206 | [255] |
|
207 | 207 | $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/ |
|
208 | 208 | http authorization required for http://localhost:$HGPORT2/ |
|
209 | 209 | realm: mercurial |
|
210 | 210 | user: user |
|
211 | 211 | password: 5fed3813f7f5 |
|
212 | 212 | $ hg id http://user:pass@localhost:$HGPORT2/ |
|
213 | 213 | 5fed3813f7f5 |
|
214 | 214 | $ echo '[auth]' >> .hg/hgrc |
|
215 | 215 | $ echo 'l.schemes=http' >> .hg/hgrc |
|
216 | 216 | $ echo 'l.prefix=lo' >> .hg/hgrc |
|
217 | 217 | $ echo 'l.username=user' >> .hg/hgrc |
|
218 | 218 | $ echo 'l.password=pass' >> .hg/hgrc |
|
219 | 219 | $ hg id http://localhost:$HGPORT2/ |
|
220 | 220 | 5fed3813f7f5 |
|
221 | 221 | $ hg id http://localhost:$HGPORT2/ |
|
222 | 222 | 5fed3813f7f5 |
|
223 | 223 | $ hg id http://user@localhost:$HGPORT2/ |
|
224 | 224 | 5fed3813f7f5 |
|
225 | 225 | |
|
226 | 226 | #if no-reposimplestore |
|
227 | 227 | $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1 |
|
228 | 228 | streaming all changes |
|
229 | 229 | 7 files to transfer, 916 bytes of data (no-zstd !) |
|
230 | 230 | 7 files to transfer, 919 bytes of data (zstd !) |
|
231 | 231 | transferred * bytes in * seconds (*/sec) (glob) |
|
232 | 232 | searching for changes |
|
233 | 233 | no changes found |
|
234 | 234 | updating to branch default |
|
235 | 235 | 5 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
236 | 236 | #endif |
|
237 | 237 | |
|
238 | 238 | --pull should override server's preferuncompressed |
|
239 | 239 | |
|
240 | 240 | $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1 |
|
241 | 241 | requesting all changes |
|
242 | 242 | adding changesets |
|
243 | 243 | adding manifests |
|
244 | 244 | adding file changes |
|
245 | 245 | added 2 changesets with 5 changes to 5 files |
|
246 | 246 | new changesets 8b6053c928fe:5fed3813f7f5 |
|
247 | 247 | updating to branch default |
|
248 | 248 | 5 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
249 | 249 | |
|
250 | 250 | $ hg id http://user2@localhost:$HGPORT2/ |
|
251 | 251 | abort: http authorization required for http://localhost:$HGPORT2/ |
|
252 | 252 | [255] |
|
253 | 253 | $ hg id http://user:pass2@localhost:$HGPORT2/ |
|
254 | 254 | abort: HTTP Error 403: no |
|
255 | 255 | [100] |
|
256 | 256 | |
|
257 | 257 | $ hg -R dest-pull tag -r tip top |
|
258 | 258 | $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/ |
|
259 | 259 | pushing to http://user:***@localhost:$HGPORT2/ |
|
260 | 260 | searching for changes |
|
261 | 261 | remote: adding changesets |
|
262 | 262 | remote: adding manifests |
|
263 | 263 | remote: adding file changes |
|
264 | 264 | remote: added 1 changesets with 1 changes to 1 files |
|
265 | 265 | $ hg rollback -q |
|
266 | 266 | |
|
267 | 267 | $ sed 's/.*] "/"/' < ../access.log |
|
268 | 268 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
269 | 269 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
270 | 270 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
271 | 271 | "GET /?cmd=capabilities HTTP/1.1" 200 - |
|
272 | 272 | "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
273 | 273 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
274 | 274 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
275 | 275 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
276 | 276 | "GET /?cmd=capabilities HTTP/1.1" 200 - |
|
277 | 277 | "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
278 | 278 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
279 | 279 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
280 | 280 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
281 | 281 | "GET /?cmd=capabilities HTTP/1.1" 200 - |
|
282 | 282 | "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
283 | 283 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
284 | 284 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
285 | 285 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
286 | 286 | "GET /?cmd=capabilities HTTP/1.1" 200 - |
|
287 | 287 | "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
288 | 288 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
289 | 289 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
290 | 290 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
291 | 291 | "GET /?cmd=capabilities HTTP/1.1" 200 - |
|
292 | 292 | "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
293 | 293 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
294 | 294 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
295 | 295 | "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !) |
|
296 | 296 | "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !) |
|
297 | 297 | "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !) |
|
298 | 298 | "GET /?cmd=stream_out HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !) |
|
299 | 299 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !) |
|
300 | 300 | "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !) |
|
301 | 301 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !) |
|
302 | 302 | "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !) |
|
303 | 303 | "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !) |
|
304 | 304 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !) |
|
305 | 305 | "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
306 | 306 | "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
307 | 307 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
308 | 308 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
309 | 309 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
310 | 310 | "GET /?cmd=capabilities HTTP/1.1" 403 - |
|
311 | 311 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
312 | 312 | "GET /?cmd=capabilities HTTP/1.1" 200 - |
|
313 | 313 | "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
314 | 314 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
315 | 315 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
316 | 316 | "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
317 | 317 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
318 | 318 | "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524* (glob) |
|
319 | 319 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
320 | 320 | |
|
321 | 321 | $ cd .. |
|
322 | 322 | |
|
323 | 323 | clone of serve with repo in root and unserved subrepo (issue2970) |
|
324 | 324 | |
|
325 | 325 | $ hg --cwd test init sub |
|
326 | 326 | $ echo empty > test/sub/empty |
|
327 | 327 | $ hg --cwd test/sub add empty |
|
328 | 328 | $ hg --cwd test/sub commit -qm 'add empty' |
|
329 | 329 | $ hg --cwd test/sub tag -r 0 something |
|
330 | 330 | $ echo sub = sub > test/.hgsub |
|
331 | 331 | $ hg --cwd test add .hgsub |
|
332 | 332 | $ hg --cwd test commit -qm 'add subrepo' |
|
333 | 333 | $ hg clone http://localhost:$HGPORT noslash-clone |
|
334 | 334 | requesting all changes |
|
335 | 335 | adding changesets |
|
336 | 336 | adding manifests |
|
337 | 337 | adding file changes |
|
338 | 338 | added 3 changesets with 7 changes to 7 files |
|
339 | 339 | new changesets 8b6053c928fe:56f9bc90cce6 |
|
340 | 340 | updating to branch default |
|
341 | 341 | cloning subrepo sub from http://localhost:$HGPORT/sub |
|
342 | 342 | abort: HTTP Error 404: Not Found |
|
343 | 343 | [100] |
|
344 | 344 | $ hg clone http://localhost:$HGPORT/ slash-clone |
|
345 | 345 | requesting all changes |
|
346 | 346 | adding changesets |
|
347 | 347 | adding manifests |
|
348 | 348 | adding file changes |
|
349 | 349 | added 3 changesets with 7 changes to 7 files |
|
350 | 350 | new changesets 8b6053c928fe:56f9bc90cce6 |
|
351 | 351 | updating to branch default |
|
352 | 352 | cloning subrepo sub from http://localhost:$HGPORT/sub |
|
353 | 353 | abort: HTTP Error 404: Not Found |
|
354 | 354 | [100] |
|
355 | 355 | |
|
356 | 356 | check error log |
|
357 | 357 | |
|
358 | 358 | $ cat error.log |
|
359 | 359 | |
|
360 | 360 | Check error reporting while pulling/cloning |
|
361 | 361 | |
|
362 | 362 | $ $RUNTESTDIR/killdaemons.py |
|
363 | 363 | $ hg serve -R test -p $HGPORT -d --pid-file=hg3.pid -E error.log --config extensions.crash=${TESTDIR}/crashgetbundler.py |
|
364 | 364 | $ cat hg3.pid >> $DAEMON_PIDS |
|
365 | 365 | $ hg clone http://localhost:$HGPORT/ abort-clone |
|
366 | 366 | requesting all changes |
|
367 | 367 | abort: remote error: |
|
368 | 368 | this is an exercise |
|
369 | 369 | [100] |
|
370 | 370 | $ cat error.log |
|
371 | 371 | |
|
372 | 372 | disable pull-based clones |
|
373 | 373 | |
|
374 | 374 | $ hg serve -R test -p $HGPORT1 -d --pid-file=hg4.pid -E error.log --config server.disablefullbundle=True |
|
375 | 375 | $ cat hg4.pid >> $DAEMON_PIDS |
|
376 | 376 | $ hg clone http://localhost:$HGPORT1/ disable-pull-clone |
|
377 | 377 | requesting all changes |
|
378 | 378 | abort: remote error: |
|
379 | 379 | server has pull-based clones disabled |
|
380 | 380 | [100] |
|
381 | 381 | |
|
382 | 382 | #if no-reposimplestore |
|
383 | 383 | ... but keep stream clones working |
|
384 | 384 | |
|
385 | 385 | $ hg clone --stream --noupdate http://localhost:$HGPORT1/ test-stream-clone |
|
386 | 386 | streaming all changes |
|
387 | 387 | * files to transfer, * of data (glob) |
|
388 | 388 | transferred * in * seconds (* KB/sec) (glob) |
|
389 | 389 | searching for changes |
|
390 | 390 | no changes found |
|
391 | 391 | #endif |
|
392 | 392 | |
|
393 | 393 | ... and also keep partial clones and pulls working |
|
394 | 394 | $ hg clone http://localhost:$HGPORT1 --rev 0 test-partial-clone |
|
395 | 395 | adding changesets |
|
396 | 396 | adding manifests |
|
397 | 397 | adding file changes |
|
398 | 398 | added 1 changesets with 4 changes to 4 files |
|
399 | 399 | new changesets 8b6053c928fe |
|
400 | 400 | updating to branch default |
|
401 | 401 | 4 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
402 | 402 | $ hg pull -R test-partial-clone |
|
403 | 403 | pulling from http://localhost:$HGPORT1/ |
|
404 | 404 | searching for changes |
|
405 | 405 | adding changesets |
|
406 | 406 | adding manifests |
|
407 | 407 | adding file changes |
|
408 | 408 | added 2 changesets with 3 changes to 3 files |
|
409 | 409 | new changesets 5fed3813f7f5:56f9bc90cce6 |
|
410 | 410 | (run 'hg update' to get a working copy) |
|
411 | 411 | |
|
412 | 412 | $ cat error.log |
@@ -1,617 +1,617
|
1 | 1 | #require serve |
|
2 | 2 | |
|
3 | 3 | $ hg init test |
|
4 | 4 | $ cd test |
|
5 | 5 | $ echo foo>foo |
|
6 | 6 | $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg |
|
7 | 7 | $ echo foo>foo.d/foo |
|
8 | 8 | $ echo bar>foo.d/bAr.hg.d/BaR |
|
9 | 9 | $ echo bar>foo.d/baR.d.hg/bAR |
|
10 | 10 | $ hg commit -A -m 1 |
|
11 | 11 | adding foo |
|
12 | 12 | adding foo.d/bAr.hg.d/BaR |
|
13 | 13 | adding foo.d/baR.d.hg/bAR |
|
14 | 14 | adding foo.d/foo |
|
15 | 15 | $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log |
|
16 | 16 | $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid |
|
17 | 17 | |
|
18 | 18 | Test server address cannot be reused |
|
19 | 19 | |
|
20 | 20 | $ hg serve -p $HGPORT1 2>&1 |
|
21 | 21 | abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$ |
|
22 | 22 | [255] |
|
23 | 23 | |
|
24 | 24 | $ cd .. |
|
25 | 25 | $ cat hg1.pid hg2.pid >> $DAEMON_PIDS |
|
26 | 26 | |
|
27 | 27 | clone via stream |
|
28 | 28 | |
|
29 | 29 | #if no-reposimplestore |
|
30 | 30 | $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1 |
|
31 | 31 | streaming all changes |
|
32 | 32 | 9 files to transfer, 715 bytes of data (no-zstd !) |
|
33 | 33 | 9 files to transfer, 717 bytes of data (zstd !) |
|
34 | 34 | transferred * bytes in * seconds (*/sec) (glob) |
|
35 | 35 | updating to branch default |
|
36 | 36 | 4 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
37 | 37 | $ hg verify -R copy |
|
38 | 38 | checking changesets |
|
39 | 39 | checking manifests |
|
40 | 40 | crosschecking files in changesets and manifests |
|
41 | 41 | checking files |
|
42 | 42 | checked 1 changesets with 4 changes to 4 files |
|
43 | 43 | #endif |
|
44 | 44 | |
|
45 | 45 | try to clone via stream, should use pull instead |
|
46 | 46 | |
|
47 | 47 | $ hg clone --stream http://localhost:$HGPORT1/ copy2 |
|
48 | 48 | warning: stream clone requested but server has them disabled |
|
49 | 49 | requesting all changes |
|
50 | 50 | adding changesets |
|
51 | 51 | adding manifests |
|
52 | 52 | adding file changes |
|
53 | 53 | added 1 changesets with 4 changes to 4 files |
|
54 | 54 | new changesets 8b6053c928fe |
|
55 | 55 | updating to branch default |
|
56 | 56 | 4 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
57 | 57 | |
|
58 | 58 | try to clone via stream but missing requirements, so should use pull instead |
|
59 | 59 | |
|
60 | 60 | $ cat > $TESTTMP/removesupportedformat.py << EOF |
|
61 | 61 | > from mercurial import localrepo |
|
62 | 62 | > def extsetup(ui): |
|
63 | 63 | > localrepo.localrepository.supportedformats.remove(b'generaldelta') |
|
64 | 64 | > EOF |
|
65 | 65 | |
|
66 | 66 | $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3 |
|
67 | 67 | warning: stream clone requested but client is missing requirements: generaldelta |
|
68 | 68 | (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information) |
|
69 | 69 | requesting all changes |
|
70 | 70 | adding changesets |
|
71 | 71 | adding manifests |
|
72 | 72 | adding file changes |
|
73 | 73 | added 1 changesets with 4 changes to 4 files |
|
74 | 74 | new changesets 8b6053c928fe |
|
75 | 75 | updating to branch default |
|
76 | 76 | 4 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
77 | 77 | |
|
78 | 78 | clone via pull |
|
79 | 79 | |
|
80 | 80 | $ hg clone http://localhost:$HGPORT1/ copy-pull |
|
81 | 81 | requesting all changes |
|
82 | 82 | adding changesets |
|
83 | 83 | adding manifests |
|
84 | 84 | adding file changes |
|
85 | 85 | added 1 changesets with 4 changes to 4 files |
|
86 | 86 | new changesets 8b6053c928fe |
|
87 | 87 | updating to branch default |
|
88 | 88 | 4 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
89 | 89 | $ hg verify -R copy-pull |
|
90 | 90 | checking changesets |
|
91 | 91 | checking manifests |
|
92 | 92 | crosschecking files in changesets and manifests |
|
93 | 93 | checking files |
|
94 | 94 | checked 1 changesets with 4 changes to 4 files |
|
95 | 95 | $ cd test |
|
96 | 96 | $ echo bar > bar |
|
97 | 97 | $ hg commit -A -d '1 0' -m 2 |
|
98 | 98 | adding bar |
|
99 | 99 | $ cd .. |
|
100 | 100 | |
|
101 | 101 | clone over http with --update |
|
102 | 102 | |
|
103 | 103 | $ hg clone http://localhost:$HGPORT1/ updated --update 0 |
|
104 | 104 | requesting all changes |
|
105 | 105 | adding changesets |
|
106 | 106 | adding manifests |
|
107 | 107 | adding file changes |
|
108 | 108 | added 2 changesets with 5 changes to 5 files |
|
109 | 109 | new changesets 8b6053c928fe:5fed3813f7f5 |
|
110 | 110 | updating to branch default |
|
111 | 111 | 4 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
112 | 112 | $ hg log -r . -R updated |
|
113 | 113 | changeset: 0:8b6053c928fe |
|
114 | 114 | user: test |
|
115 | 115 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
116 | 116 | summary: 1 |
|
117 | 117 | |
|
118 | 118 | $ rm -rf updated |
|
119 | 119 | |
|
120 | 120 | incoming via HTTP |
|
121 | 121 | |
|
122 | 122 | $ hg clone http://localhost:$HGPORT1/ --rev 0 partial |
|
123 | 123 | adding changesets |
|
124 | 124 | adding manifests |
|
125 | 125 | adding file changes |
|
126 | 126 | added 1 changesets with 4 changes to 4 files |
|
127 | 127 | new changesets 8b6053c928fe |
|
128 | 128 | updating to branch default |
|
129 | 129 | 4 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
130 | 130 | $ cd partial |
|
131 | 131 | $ touch LOCAL |
|
132 | 132 | $ hg ci -qAm LOCAL |
|
133 | 133 | $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n' |
|
134 | 134 | comparing with http://localhost:$HGPORT1/ |
|
135 | 135 | searching for changes |
|
136 | 136 | 2 |
|
137 | 137 | $ cd .. |
|
138 | 138 | |
|
139 | 139 | pull |
|
140 | 140 | |
|
141 | 141 | $ cd copy-pull |
|
142 | 142 | $ cat >> .hg/hgrc <<EOF |
|
143 | 143 | > [hooks] |
|
144 | 144 | > changegroup = sh -c "printenv.py --line changegroup" |
|
145 | 145 | > EOF |
|
146 | 146 | $ hg pull |
|
147 | 147 | pulling from http://localhost:$HGPORT1/ |
|
148 | 148 | searching for changes |
|
149 | 149 | adding changesets |
|
150 | 150 | adding manifests |
|
151 | 151 | adding file changes |
|
152 | 152 | added 1 changesets with 1 changes to 1 files |
|
153 | 153 | new changesets 5fed3813f7f5 |
|
154 | 154 | changegroup hook: HG_HOOKNAME=changegroup |
|
155 | 155 | HG_HOOKTYPE=changegroup |
|
156 | 156 | HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d |
|
157 | 157 | HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d |
|
158 | 158 | HG_SOURCE=pull |
|
159 | 159 | HG_TXNID=TXN:$ID$ |
|
160 | 160 | HG_TXNNAME=pull |
|
161 | 161 | http://localhost:$HGPORT1/ |
|
162 | 162 | HG_URL=http://localhost:$HGPORT1/ |
|
163 | 163 | |
|
164 | 164 | (run 'hg update' to get a working copy) |
|
165 | 165 | $ cd .. |
|
166 | 166 | |
|
167 | 167 | clone from invalid URL |
|
168 | 168 | |
|
169 | 169 | $ hg clone http://localhost:$HGPORT/bad |
|
170 | 170 | abort: HTTP Error 404: Not Found |
|
171 | 171 | [100] |
|
172 | 172 | |
|
173 | 173 | test http authentication |
|
174 | 174 | + use the same server to test server side streaming preference |
|
175 | 175 | |
|
176 | 176 | $ cd test |
|
177 | 177 | |
|
178 | 178 | $ hg serve --config extensions.x=$TESTDIR/httpserverauth.py -p $HGPORT2 -d \ |
|
179 | 179 | > --pid-file=pid --config server.preferuncompressed=True -E ../errors2.log \ |
|
180 | 180 | > --config web.push_ssl=False --config web.allow_push=* -A ../access.log |
|
181 | 181 | $ cat pid >> $DAEMON_PIDS |
|
182 | 182 | |
|
183 | 183 | $ cat << EOF > get_pass.py |
|
184 |     | > import getpass

185 |     | > def newgetpass(

    | 184 | > from mercurial import util

    | 185 | > def newgetpass():

186 | 186 | > return "pass"

187 |     | >

    | 187 | > util.get_password = newgetpass
|
188 | 188 | > EOF |
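
(Aside, not part of the test file: the hunk above is easier to read consolidated. A minimal sketch of what the rewritten get_pass.py installs, assuming — as the new test lines themselves imply — that Mercurial's password prompt goes through a module-level util.get_password hook called with no arguments.)

    # Minimal sketch (assumption: util.get_password is the prompt hook and
    # takes no arguments, mirroring the new test code above).
    from mercurial import util

    def newgetpass():
        # Answer every password prompt with a fixed string instead of
        # reading from the terminal.
        return "pass"

    util.get_password = newgetpass
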
|
189 | 189 | |
|
190 | 190 | $ hg id http://localhost:$HGPORT2/ |
|
191 | 191 | abort: http authorization required for http://localhost:$HGPORT2/ |
|
192 | 192 | [255] |
|
193 | 193 | $ hg id http://localhost:$HGPORT2/ |
|
194 | 194 | abort: http authorization required for http://localhost:$HGPORT2/ |
|
195 | 195 | [255] |
|
196 | 196 | $ hg id --config ui.interactive=true --debug http://localhost:$HGPORT2/ |
|
197 | 197 | using http://localhost:$HGPORT2/ |
|
198 | 198 | sending capabilities command |
|
199 | 199 | http authorization required for http://localhost:$HGPORT2/ |
|
200 | 200 | realm: mercurial |
|
201 | 201 | user: abort: response expected |
|
202 | 202 | [255] |
|
203 | 203 | $ cat <<'EOF' | hg id --config ui.interactive=true --config ui.nontty=true --debug http://localhost:$HGPORT2/ |
|
204 | 204 | > |
|
205 | 205 | > EOF |
|
206 | 206 | using http://localhost:$HGPORT2/ |
|
207 | 207 | sending capabilities command |
|
208 | 208 | http authorization required for http://localhost:$HGPORT2/ |
|
209 | 209 | realm: mercurial |
|
210 | 210 | user: |
|
211 | 211 | password: abort: response expected |
|
212 | 212 | [255] |
|
213 | 213 | $ cat <<'EOF' | hg id --config ui.interactive=true --config ui.nontty=true --debug http://localhost:$HGPORT2/ |
|
214 | 214 | > |
|
215 | 215 | > |
|
216 | 216 | > EOF |
|
217 | 217 | using http://localhost:$HGPORT2/ |
|
218 | 218 | sending capabilities command |
|
219 | 219 | http authorization required for http://localhost:$HGPORT2/ |
|
220 | 220 | realm: mercurial |
|
221 | 221 | user: |
|
222 | 222 | password: abort: authorization failed |
|
223 | 223 | [255] |
|
224 | 224 | $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/ |
|
225 | 225 | http authorization required for http://localhost:$HGPORT2/ |
|
226 | 226 | realm: mercurial |
|
227 | 227 | user: user |
|
228 | 228 | password: 5fed3813f7f5 |
|
229 | 229 | $ hg id http://user:pass@localhost:$HGPORT2/ |
|
230 | 230 | 5fed3813f7f5 |
|
231 | 231 | $ echo '[auth]' >> .hg/hgrc |
|
232 | 232 | $ echo 'l.schemes=http' >> .hg/hgrc |
|
233 | 233 | $ echo 'l.prefix=lo' >> .hg/hgrc |
|
234 | 234 | $ echo 'l.username=user' >> .hg/hgrc |
|
235 | 235 | $ echo 'l.password=pass' >> .hg/hgrc |
|
236 | 236 | $ hg id http://localhost:$HGPORT2/ |
|
237 | 237 | 5fed3813f7f5 |
|
238 | 238 | $ hg id http://localhost:$HGPORT2/ |
|
239 | 239 | 5fed3813f7f5 |
|
240 | 240 | $ hg id http://user@localhost:$HGPORT2/ |
|
241 | 241 | 5fed3813f7f5 |
|
242 | 242 | |
|
243 | 243 | $ cat > use_digests.py << EOF |
|
244 | 244 | > from mercurial import ( |
|
245 | 245 | > exthelper, |
|
246 | 246 | > url, |
|
247 | 247 | > ) |
|
248 | 248 | > |
|
249 | 249 | > eh = exthelper.exthelper() |
|
250 | 250 | > uisetup = eh.finaluisetup |
|
251 | 251 | > |
|
252 | 252 | > @eh.wrapfunction(url, 'opener') |
|
253 | 253 | > def urlopener(orig, *args, **kwargs): |
|
254 | 254 | > opener = orig(*args, **kwargs) |
|
255 | 255 | > opener.addheaders.append((r'X-HgTest-AuthType', r'Digest')) |
|
256 | 256 | > return opener |
|
257 | 257 | > EOF |
|
258 | 258 | |
|
259 | 259 | $ hg id http://localhost:$HGPORT2/ --config extensions.x=use_digests.py |
|
260 | 260 | 5fed3813f7f5 |
|
261 | 261 | |
|
262 | 262 | #if no-reposimplestore |
|
263 | 263 | $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1 |
|
264 | 264 | streaming all changes |
|
265 | 265 | 10 files to transfer, 1.01 KB of data |
|
266 | 266 | transferred * KB in * seconds (*/sec) (glob) |
|
267 | 267 | updating to branch default |
|
268 | 268 | 5 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
269 | 269 | #endif |
|
270 | 270 | |
|
271 | 271 | --pull should override server's preferuncompressed |
|
272 | 272 | $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1 |
|
273 | 273 | requesting all changes |
|
274 | 274 | adding changesets |
|
275 | 275 | adding manifests |
|
276 | 276 | adding file changes |
|
277 | 277 | added 2 changesets with 5 changes to 5 files |
|
278 | 278 | new changesets 8b6053c928fe:5fed3813f7f5 |
|
279 | 279 | updating to branch default |
|
280 | 280 | 5 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
281 | 281 | |
|
282 | 282 | $ hg id http://user2@localhost:$HGPORT2/ |
|
283 | 283 | abort: http authorization required for http://localhost:$HGPORT2/ |
|
284 | 284 | [255] |
|
285 | 285 | $ hg id http://user:pass2@localhost:$HGPORT2/ |
|
286 | 286 | abort: HTTP Error 403: no |
|
287 | 287 | [100] |
|
288 | 288 | |
|
289 | 289 | $ hg -R dest-pull tag -r tip top |
|
290 | 290 | $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/ |
|
291 | 291 | pushing to http://user:***@localhost:$HGPORT2/ |
|
292 | 292 | searching for changes |
|
293 | 293 | remote: adding changesets |
|
294 | 294 | remote: adding manifests |
|
295 | 295 | remote: adding file changes |
|
296 | 296 | remote: added 1 changesets with 1 changes to 1 files |
|
297 | 297 | $ hg rollback -q |
|
298 | 298 | $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/ --debug --config devel.debug.peer-request=yes |
|
299 | 299 | pushing to http://user:***@localhost:$HGPORT2/ |
|
300 | 300 | using http://localhost:$HGPORT2/ |
|
301 | 301 | http auth: user user, password **** |
|
302 | 302 | sending capabilities command |
|
303 | 303 | devel-peer-request: GET http://localhost:$HGPORT2/?cmd=capabilities |
|
304 | 304 | http auth: user user, password **** |
|
305 | 305 | devel-peer-request: finished in *.???? seconds (200) (glob) |
|
306 | 306 | query 1; heads |
|
307 | 307 | devel-peer-request: batched-content |
|
308 | 308 | devel-peer-request: - heads (0 arguments) |
|
309 | 309 | devel-peer-request: - known (1 arguments) |
|
310 | 310 | sending batch command |
|
311 | 311 | devel-peer-request: GET http://localhost:$HGPORT2/?cmd=batch |
|
312 | 312 | devel-peer-request: Vary X-HgArg-1,X-HgProto-1 |
|
313 | 313 | devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
314 | 314 | devel-peer-request: 68 bytes of commands arguments in headers |
|
315 | 315 | devel-peer-request: finished in *.???? seconds (200) (glob) |
|
316 | 316 | searching for changes |
|
317 | 317 | all remote heads known locally |
|
318 | 318 | preparing listkeys for "phases" |
|
319 | 319 | sending listkeys command |
|
320 | 320 | devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys |
|
321 | 321 | devel-peer-request: Vary X-HgArg-1,X-HgProto-1 |
|
322 | 322 | devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
323 | 323 | devel-peer-request: 16 bytes of commands arguments in headers |
|
324 | 324 | devel-peer-request: finished in *.???? seconds (200) (glob) |
|
325 | 325 | received listkey for "phases": 58 bytes |
|
326 | 326 | checking for updated bookmarks |
|
327 | 327 | preparing listkeys for "bookmarks" |
|
328 | 328 | sending listkeys command |
|
329 | 329 | devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys |
|
330 | 330 | devel-peer-request: Vary X-HgArg-1,X-HgProto-1 |
|
331 | 331 | devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
332 | 332 | devel-peer-request: 19 bytes of commands arguments in headers |
|
333 | 333 | devel-peer-request: finished in *.???? seconds (200) (glob) |
|
334 | 334 | received listkey for "bookmarks": 0 bytes |
|
335 | 335 | sending branchmap command |
|
336 | 336 | devel-peer-request: GET http://localhost:$HGPORT2/?cmd=branchmap |
|
337 | 337 | devel-peer-request: Vary X-HgProto-1 |
|
338 | 338 | devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
339 | 339 | devel-peer-request: finished in *.???? seconds (200) (glob) |
|
340 | 340 | preparing listkeys for "bookmarks" |
|
341 | 341 | sending listkeys command |
|
342 | 342 | devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys |
|
343 | 343 | devel-peer-request: Vary X-HgArg-1,X-HgProto-1 |
|
344 | 344 | devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
345 | 345 | devel-peer-request: 19 bytes of commands arguments in headers |
|
346 | 346 | devel-peer-request: finished in *.???? seconds (200) (glob) |
|
347 | 347 | received listkey for "bookmarks": 0 bytes |
|
348 | 348 | 1 changesets found |
|
349 | 349 | list of changesets: |
|
350 | 350 | 7f4e523d01f2cc3765ac8934da3d14db775ff872 |
|
351 | 351 | bundle2-output-bundle: "HG20", 5 parts total |
|
352 | 352 | bundle2-output-part: "replycaps" 207 bytes payload |
|
353 | 353 | bundle2-output-part: "check:phases" 24 bytes payload |
|
354 | 354 | bundle2-output-part: "check:updated-heads" streamed payload |
|
355 | 355 | bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload |
|
356 | 356 | bundle2-output-part: "phase-heads" 24 bytes payload |
|
357 | 357 | sending unbundle command |
|
358 | 358 | sending 1023 bytes |
|
359 | 359 | devel-peer-request: POST http://localhost:$HGPORT2/?cmd=unbundle |
|
360 | 360 | devel-peer-request: Content-length 1023 |
|
361 | 361 | devel-peer-request: Content-type application/mercurial-0.1 |
|
362 | 362 | devel-peer-request: Vary X-HgArg-1,X-HgProto-1 |
|
363 | 363 | devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
364 | 364 | devel-peer-request: 16 bytes of commands arguments in headers |
|
365 | 365 | devel-peer-request: 1023 bytes of data |
|
366 | 366 | devel-peer-request: finished in *.???? seconds (200) (glob) |
|
367 | 367 | bundle2-input-bundle: no-transaction |
|
368 | 368 | bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported |
|
369 | 369 | bundle2-input-part: "output" (advisory) (params: 0 advisory) supported |
|
370 | 370 | bundle2-input-part: total payload size 55 |
|
371 | 371 | remote: adding changesets |
|
372 | 372 | remote: adding manifests |
|
373 | 373 | remote: adding file changes |
|
374 | 374 | bundle2-input-part: "output" (advisory) supported |
|
375 | 375 | bundle2-input-part: total payload size 45 |
|
376 | 376 | remote: added 1 changesets with 1 changes to 1 files |
|
377 | 377 | bundle2-input-bundle: 3 parts total |
|
378 | 378 | preparing listkeys for "phases" |
|
379 | 379 | sending listkeys command |
|
380 | 380 | devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys |
|
381 | 381 | devel-peer-request: Vary X-HgArg-1,X-HgProto-1 |
|
382 | 382 | devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
383 | 383 | devel-peer-request: 16 bytes of commands arguments in headers |
|
384 | 384 | devel-peer-request: finished in *.???? seconds (200) (glob) |
|
385 | 385 | received listkey for "phases": 15 bytes |
|
386 | 386 | (sent 9 HTTP requests and * bytes; received * bytes in responses) (glob) (?) |
|
387 | 387 | $ hg rollback -q |
|
388 | 388 | |
|
389 | 389 | $ sed 's/.*] "/"/' < ../access.log |
|
390 | 390 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
391 | 391 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
392 | 392 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
393 | 393 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
394 | 394 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
395 | 395 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
396 | 396 | "GET /?cmd=capabilities HTTP/1.1" 200 - |
|
397 | 397 | "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
398 | 398 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
399 | 399 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
400 | 400 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
401 | 401 | "GET /?cmd=capabilities HTTP/1.1" 200 - |
|
402 | 402 | "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
403 | 403 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
404 | 404 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
405 | 405 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
406 | 406 | "GET /?cmd=capabilities HTTP/1.1" 200 - |
|
407 | 407 | "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
408 | 408 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
409 | 409 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
410 | 410 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
411 | 411 | "GET /?cmd=capabilities HTTP/1.1" 200 - |
|
412 | 412 | "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
413 | 413 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
414 | 414 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
415 | 415 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
416 | 416 | "GET /?cmd=capabilities HTTP/1.1" 200 - |
|
417 | 417 | "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
418 | 418 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
419 | 419 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
420 | 420 | "GET /?cmd=capabilities HTTP/1.1" 401 - x-hgtest-authtype:Digest |
|
421 | 421 | "GET /?cmd=capabilities HTTP/1.1" 200 - x-hgtest-authtype:Digest |
|
422 | 422 | "GET /?cmd=lookup HTTP/1.1" 401 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest |
|
423 | 423 | "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest |
|
424 | 424 | "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest |
|
425 | 425 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest |
|
426 | 426 | "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest |
|
427 | 427 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest |
|
428 | 428 | "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !) |
|
429 | 429 | "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !) |
|
430 | 430 | "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !) |
|
431 | 431 | "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&stream=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !) |
|
432 | 432 | "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !) |
|
433 | 433 | "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !) |
|
434 | 434 | "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
435 | 435 | "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
436 | 436 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
437 | 437 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
438 | 438 | "GET /?cmd=capabilities HTTP/1.1" 403 - |
|
439 | 439 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
440 | 440 | "GET /?cmd=capabilities HTTP/1.1" 200 - |
|
441 | 441 | "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
442 | 442 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
443 | 443 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
444 | 444 | "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
445 | 445 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
446 | 446 | "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365* (glob) |
|
447 | 447 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
448 | 448 | "GET /?cmd=capabilities HTTP/1.1" 401 - |
|
449 | 449 | "GET /?cmd=capabilities HTTP/1.1" 200 - |
|
450 | 450 | "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
451 | 451 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
452 | 452 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
453 | 453 | "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
454 | 454 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
455 | 455 | "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
456 | 456 | "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull |
|
457 | 457 | |
|
458 | 458 | $ cd .. |
|
459 | 459 | |
|
460 | 460 | clone of serve with repo in root and unserved subrepo (issue2970) |
|
461 | 461 | |
|
462 | 462 | $ hg --cwd test init sub |
|
463 | 463 | $ echo empty > test/sub/empty |
|
464 | 464 | $ hg --cwd test/sub add empty |
|
465 | 465 | $ hg --cwd test/sub commit -qm 'add empty' |
|
466 | 466 | $ hg --cwd test/sub tag -r 0 something |
|
467 | 467 | $ echo sub = sub > test/.hgsub |
|
468 | 468 | $ hg --cwd test add .hgsub |
|
469 | 469 | $ hg --cwd test commit -qm 'add subrepo' |
|
470 | 470 | $ hg clone http://localhost:$HGPORT noslash-clone |
|
471 | 471 | requesting all changes |
|
472 | 472 | adding changesets |
|
473 | 473 | adding manifests |
|
474 | 474 | adding file changes |
|
475 | 475 | added 3 changesets with 7 changes to 7 files |
|
476 | 476 | new changesets 8b6053c928fe:56f9bc90cce6 |
|
477 | 477 | updating to branch default |
|
478 | 478 | cloning subrepo sub from http://localhost:$HGPORT/sub |
|
479 | 479 | abort: HTTP Error 404: Not Found |
|
480 | 480 | [100] |
|
481 | 481 | $ hg clone http://localhost:$HGPORT/ slash-clone |
|
482 | 482 | requesting all changes |
|
483 | 483 | adding changesets |
|
484 | 484 | adding manifests |
|
485 | 485 | adding file changes |
|
486 | 486 | added 3 changesets with 7 changes to 7 files |
|
487 | 487 | new changesets 8b6053c928fe:56f9bc90cce6 |
|
488 | 488 | updating to branch default |
|
489 | 489 | cloning subrepo sub from http://localhost:$HGPORT/sub |
|
490 | 490 | abort: HTTP Error 404: Not Found |
|
491 | 491 | [100] |
|
492 | 492 | |
|
493 | 493 | check error log |
|
494 | 494 | |
|
495 | 495 | $ cat error.log |
|
496 | 496 | |
|
497 | 497 | $ cat errors2.log |
|
498 | 498 | |
|
499 | 499 | check abort error reporting while pulling/cloning |
|
500 | 500 | |
|
501 | 501 | $ $RUNTESTDIR/killdaemons.py |
|
502 | 502 | $ hg serve -R test -p $HGPORT -d --pid-file=hg3.pid -E error.log --config extensions.crash=${TESTDIR}/crashgetbundler.py |
|
503 | 503 | $ cat hg3.pid >> $DAEMON_PIDS |
|
504 | 504 | $ hg clone http://localhost:$HGPORT/ abort-clone |
|
505 | 505 | requesting all changes |
|
506 | 506 | remote: abort: this is an exercise |
|
507 | 507 | abort: pull failed on remote |
|
508 | 508 | [100] |
|
509 | 509 | $ cat error.log |
|
510 | 510 | |
|
511 | 511 | disable pull-based clones |
|
512 | 512 | |
|
513 | 513 | $ hg serve -R test -p $HGPORT1 -d --pid-file=hg4.pid -E error.log --config server.disablefullbundle=True |
|
514 | 514 | $ cat hg4.pid >> $DAEMON_PIDS |
|
515 | 515 | $ hg clone http://localhost:$HGPORT1/ disable-pull-clone |
|
516 | 516 | requesting all changes |
|
517 | 517 | remote: abort: server has pull-based clones disabled |
|
518 | 518 | abort: pull failed on remote |
|
519 | 519 | (remove --pull if specified or upgrade Mercurial) |
|
520 | 520 | [100] |
|
521 | 521 | |
|
522 | 522 | #if no-reposimplestore |
|
523 | 523 | ... but keep stream clones working |
|
524 | 524 | |
|
525 | 525 | $ hg clone --stream --noupdate http://localhost:$HGPORT1/ test-stream-clone |
|
526 | 526 | streaming all changes |
|
527 | 527 | * files to transfer, * of data (glob) |
|
528 | 528 | transferred * in * seconds (*/sec) (glob) |
|
529 | 529 | $ cat error.log |
|
530 | 530 | #endif |
|
531 | 531 | |
|
532 | 532 | ... and also keep partial clones and pulls working |
|
533 | 533 | $ hg clone http://localhost:$HGPORT1 --rev 0 test/partial/clone |
|
534 | 534 | adding changesets |
|
535 | 535 | adding manifests |
|
536 | 536 | adding file changes |
|
537 | 537 | added 1 changesets with 4 changes to 4 files |
|
538 | 538 | new changesets 8b6053c928fe |
|
539 | 539 | updating to branch default |
|
540 | 540 | 4 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
541 | 541 | $ hg pull -R test/partial/clone |
|
542 | 542 | pulling from http://localhost:$HGPORT1/ |
|
543 | 543 | searching for changes |
|
544 | 544 | adding changesets |
|
545 | 545 | adding manifests |
|
546 | 546 | adding file changes |
|
547 | 547 | added 2 changesets with 3 changes to 3 files |
|
548 | 548 | new changesets 5fed3813f7f5:56f9bc90cce6 |
|
549 | 549 | (run 'hg update' to get a working copy) |
|
550 | 550 | |
|
551 | 551 | $ hg clone -U -r 0 test/partial/clone test/another/clone |
|
552 | 552 | adding changesets |
|
553 | 553 | adding manifests |
|
554 | 554 | adding file changes |
|
555 | 555 | added 1 changesets with 4 changes to 4 files |
|
556 | 556 | new changesets 8b6053c928fe |
|
557 | 557 | |
|
558 | 558 | corrupt cookies file should yield a warning |
|
559 | 559 | |
|
560 | 560 | $ cat > $TESTTMP/cookies.txt << EOF |
|
561 | 561 | > bad format |
|
562 | 562 | > EOF |
|
563 | 563 | |
|
564 | 564 | $ hg --config auth.cookiefile=$TESTTMP/cookies.txt id http://localhost:$HGPORT/ |
|
565 | 565 | (error loading cookie file $TESTTMP/cookies.txt: '*/cookies.txt' does not look like a Netscape format cookies file; continuing without cookies) (glob) |
|
566 | 566 | 56f9bc90cce6 |
|
567 | 567 | |
|
568 | 568 | $ killdaemons.py |
|
569 | 569 | |
|
570 | 570 | Create dummy authentication handler that looks for cookies. It doesn't do anything |
|
571 | 571 | useful. It just raises an HTTP 500 with details about the Cookie request header. |
|
572 | 572 | We raise HTTP 500 because its message is printed in the abort message. |
|
573 | 573 | |
|
574 | 574 | $ cat > cookieauth.py << EOF |
|
575 | 575 | > from mercurial import util |
|
576 | 576 | > from mercurial.hgweb import common |
|
577 | 577 | > def perform_authentication(hgweb, req, op): |
|
578 | 578 | > cookie = req.headers.get(b'Cookie') |
|
579 | 579 | > if not cookie: |
|
580 | 580 | > raise common.ErrorResponse(common.HTTP_SERVER_ERROR, b'no-cookie') |
|
581 | 581 | > raise common.ErrorResponse(common.HTTP_SERVER_ERROR, b'Cookie: %s' % cookie) |
|
582 | 582 | > def extsetup(ui): |
|
583 | 583 | > common.permhooks.insert(0, perform_authentication) |
|
584 | 584 | > EOF |
|
585 | 585 | |
|
586 | 586 | $ hg serve --config extensions.cookieauth=cookieauth.py -R test -p $HGPORT -d --pid-file=pid |
|
587 | 587 | $ cat pid > $DAEMON_PIDS |
|
588 | 588 | |
|
589 | 589 | Request without cookie sent should fail due to lack of cookie |
|
590 | 590 | |
|
591 | 591 | $ hg id http://localhost:$HGPORT |
|
592 | 592 | abort: HTTP Error 500: no-cookie |
|
593 | 593 | [100] |
|
594 | 594 | |
|
595 | 595 | Populate a cookies file |
|
596 | 596 | |
|
597 | 597 | $ cat > cookies.txt << EOF |
|
598 | 598 | > # HTTP Cookie File |
|
599 | 599 | > # Expiration is 2030-01-01 at midnight |
|
600 | 600 | > .example.com TRUE / FALSE 1893456000 hgkey examplevalue |
|
601 | 601 | > EOF |
|
602 | 602 | |
|
603 | 603 | Should not send a cookie for another domain |
|
604 | 604 | |
|
605 | 605 | $ hg --config auth.cookiefile=cookies.txt id http://localhost:$HGPORT/ |
|
606 | 606 | abort: HTTP Error 500: no-cookie |
|
607 | 607 | [100] |
|
608 | 608 | |
|
609 | 609 | Add a cookie entry for our test server and verify it is sent |
|
610 | 610 | |
|
611 | 611 | $ cat >> cookies.txt << EOF |
|
612 | 612 | > localhost.local FALSE / FALSE 1893456000 hgkey localhostvalue |
|
613 | 613 | > EOF |
|
614 | 614 | |
|
615 | 615 | $ hg --config auth.cookiefile=cookies.txt id http://localhost:$HGPORT/ |
|
616 | 616 | abort: HTTP Error 500: Cookie: hgkey=localhostvalue |
|
617 | 617 | [100] |
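
(Aside, not part of the test file: a minimal sketch of how a Netscape-format cookie file like cookies.txt above can be loaded with Python's standard http.cookiejar. It is illustrative only, but it matches the two behaviours the tests above exercise: the "does not look like a Netscape format cookies file" warning for a malformed file, and per-domain filtering of which cookies get sent.)

    # Minimal sketch: load a Netscape-format cookie file and list its entries.
    # A file without the expected "# HTTP Cookie File" style header raises
    # LoadError, which corresponds to the warning shown in the test above.
    from http.cookiejar import LoadError, MozillaCookieJar

    jar = MozillaCookieJar("cookies.txt")
    try:
        jar.load(ignore_discard=True, ignore_expires=True)
    except LoadError as err:
        print("bad cookie file:", err)
    else:
        # Only cookies whose domain matches the request host are sent, which
        # is why the ".example.com" entry is never sent to localhost.
        for cookie in jar:
            print(cookie.domain, cookie.name, cookie.value)
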
@@ -1,452 +1,452
|
1 | 1 | #testcases sshv1 sshv2 |
|
2 | 2 | |
|
3 | 3 | #if sshv2 |
|
4 | 4 | $ cat >> $HGRCPATH << EOF |
|
5 | 5 | > [experimental] |
|
6 | 6 | > sshpeer.advertise-v2 = true |
|
7 | 7 | > sshserver.support-v2 = true |
|
8 | 8 | > EOF |
|
9 | 9 | #endif |
|
10 | 10 | |
|
11 | 11 | This file contains testcases that tend to be related to the wire protocol part |
|
12 | 12 | of largefiles. |
|
13 | 13 | |
|
14 | 14 | $ USERCACHE="$TESTTMP/cache"; export USERCACHE |
|
15 | 15 | $ mkdir "${USERCACHE}" |
|
16 | 16 | $ cat >> $HGRCPATH <<EOF |
|
17 | 17 | > [extensions] |
|
18 | 18 | > largefiles= |
|
19 | 19 | > purge= |
|
20 | 20 | > rebase= |
|
21 | 21 | > transplant= |
|
22 | 22 | > [phases] |
|
23 | 23 | > publish=False |
|
24 | 24 | > [largefiles] |
|
25 | 25 | > minsize=2 |
|
26 | 26 | > patterns=glob:**.dat |
|
27 | 27 | > usercache=${USERCACHE} |
|
28 | 28 | > [web] |
|
29 | 29 | > allow-archive = zip |
|
30 | 30 | > [hooks] |
|
31 | 31 | > precommit=sh -c "echo \\"Invoking status precommit hook\\"; hg status" |
|
32 | 32 | > EOF |
|
33 | 33 | |
|
34 | 34 | |
|
35 | 35 | #if serve |
|
36 | 36 | vanilla clients not locked out from largefiles servers on vanilla repos |
|
37 | 37 | $ mkdir r1 |
|
38 | 38 | $ cd r1 |
|
39 | 39 | $ hg init |
|
40 | 40 | $ echo c1 > f1 |
|
41 | 41 | $ hg add f1 |
|
42 | 42 | $ hg commit -m "m1" |
|
43 | 43 | Invoking status precommit hook |
|
44 | 44 | A f1 |
|
45 | 45 | $ cd .. |
|
46 | 46 | $ hg serve -R r1 -d -p $HGPORT --pid-file hg.pid |
|
47 | 47 | $ cat hg.pid >> $DAEMON_PIDS |
|
48 | 48 | $ hg --config extensions.largefiles=! clone http://localhost:$HGPORT r2 |
|
49 | 49 | requesting all changes |
|
50 | 50 | adding changesets |
|
51 | 51 | adding manifests |
|
52 | 52 | adding file changes |
|
53 | 53 | added 1 changesets with 1 changes to 1 files |
|
54 | 54 | new changesets b6eb3a2e2efe (1 drafts) |
|
55 | 55 | updating to branch default |
|
56 | 56 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
57 | 57 | |
|
58 | 58 | largefiles clients still work with vanilla servers |
|
59 | 59 | $ hg serve --config extensions.largefiles=! -R r1 -d -p $HGPORT1 --pid-file hg.pid |
|
60 | 60 | $ cat hg.pid >> $DAEMON_PIDS |
|
61 | 61 | $ hg clone http://localhost:$HGPORT1 r3 |
|
62 | 62 | requesting all changes |
|
63 | 63 | adding changesets |
|
64 | 64 | adding manifests |
|
65 | 65 | adding file changes |
|
66 | 66 | added 1 changesets with 1 changes to 1 files |
|
67 | 67 | new changesets b6eb3a2e2efe (1 drafts) |
|
68 | 68 | updating to branch default |
|
69 | 69 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
70 | 70 | #endif |
|
71 | 71 | |
|
72 | 72 | vanilla clients locked out from largefiles http repos |
|
73 | 73 | $ mkdir r4 |
|
74 | 74 | $ cd r4 |
|
75 | 75 | $ hg init |
|
76 | 76 | $ echo c1 > f1 |
|
77 | 77 | $ hg add --large f1 |
|
78 | 78 | $ hg commit -m "m1" |
|
79 | 79 | Invoking status precommit hook |
|
80 | 80 | A f1 |
|
81 | 81 | $ cd .. |
|
82 | 82 | |
|
83 | 83 | largefiles can be pushed locally (issue3583) |
|
84 | 84 | $ hg init dest |
|
85 | 85 | $ cd r4 |
|
86 | 86 | $ hg outgoing ../dest |
|
87 | 87 | comparing with ../dest |
|
88 | 88 | searching for changes |
|
89 | 89 | changeset: 0:639881c12b4c |
|
90 | 90 | tag: tip |
|
91 | 91 | user: test |
|
92 | 92 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
93 | 93 | summary: m1 |
|
94 | 94 | |
|
95 | 95 | $ hg push ../dest |
|
96 | 96 | pushing to ../dest |
|
97 | 97 | searching for changes |
|
98 | 98 | adding changesets |
|
99 | 99 | adding manifests |
|
100 | 100 | adding file changes |
|
101 | 101 | added 1 changesets with 1 changes to 1 files |
|
102 | 102 | |
|
103 | 103 | exit code with nothing outgoing (issue3611) |
|
104 | 104 | $ hg outgoing ../dest |
|
105 | 105 | comparing with ../dest |
|
106 | 106 | searching for changes |
|
107 | 107 | no changes found |
|
108 | 108 | [1] |
|
109 | 109 | $ cd .. |
|
110 | 110 | |
|
111 | 111 | #if serve |
|
112 | 112 | $ hg serve -R r4 -d -p $HGPORT2 --pid-file hg.pid |
|
113 | 113 | $ cat hg.pid >> $DAEMON_PIDS |
|
114 | 114 | $ hg --config extensions.largefiles=! clone http://localhost:$HGPORT2 r5 |
|
115 | 115 | abort: remote error: |
|
116 | 116 | |
|
117 | 117 | This repository uses the largefiles extension. |
|
118 | 118 | |
|
119 | 119 | Please enable it in your Mercurial config file. |
|
120 | 120 | [100] |
|
121 | 121 | |
|
122 | 122 | used all HGPORTs, kill all daemons |
|
123 | 123 | $ killdaemons.py |
|
124 | 124 | #endif |
|
125 | 125 | |
|
126 | 126 | vanilla clients locked out from largefiles ssh repos |
|
127 | 127 | $ hg --config extensions.largefiles=! clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/r4 r5 |
|
128 | 128 | remote: |
|
129 | 129 | remote: This repository uses the largefiles extension. |
|
130 | 130 | remote: |
|
131 | 131 | remote: Please enable it in your Mercurial config file. |
|
132 | 132 | remote: |
|
133 | 133 | remote: - |
|
134 | 134 | abort: remote error |
|
135 | 135 | (check previous remote output) |
|
136 | 136 | [100] |
|
137 | 137 | |
|
138 | 138 | #if serve |
|
139 | 139 | |
|
140 | 140 | largefiles clients refuse to push largefiles repos to vanilla servers |
|
141 | 141 | $ mkdir r6 |
|
142 | 142 | $ cd r6 |
|
143 | 143 | $ hg init |
|
144 | 144 | $ echo c1 > f1 |
|
145 | 145 | $ hg add f1 |
|
146 | 146 | $ hg commit -m "m1" |
|
147 | 147 | Invoking status precommit hook |
|
148 | 148 | A f1 |
|
149 | 149 | $ cat >> .hg/hgrc <<! |
|
150 | 150 | > [web] |
|
151 | 151 | > push_ssl = false |
|
152 | 152 | > allow_push = * |
|
153 | 153 | > ! |
|
154 | 154 | $ cd .. |
|
155 | 155 | $ hg clone r6 r7 |
|
156 | 156 | updating to branch default |
|
157 | 157 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
158 | 158 | $ cd r7 |
|
159 | 159 | $ echo c2 > f2 |
|
160 | 160 | $ hg add --large f2 |
|
161 | 161 | $ hg commit -m "m2" |
|
162 | 162 | Invoking status precommit hook |
|
163 | 163 | A f2 |
|
164 | 164 | $ hg verify --large |
|
165 | 165 | checking changesets |
|
166 | 166 | checking manifests |
|
167 | 167 | crosschecking files in changesets and manifests |
|
168 | 168 | checking files |
|
169 | 169 | checked 2 changesets with 2 changes to 2 files |
|
170 | 170 | searching 1 changesets for largefiles |
|
171 | 171 | verified existence of 1 revisions of 1 largefiles |
|
172 | 172 | $ hg serve --config extensions.largefiles=! -R ../r6 -d -p $HGPORT --pid-file ../hg.pid |
|
173 | 173 | $ cat ../hg.pid >> $DAEMON_PIDS |
|
174 | 174 | $ hg push http://localhost:$HGPORT |
|
175 | 175 | pushing to http://localhost:$HGPORT/ |
|
176 | 176 | searching for changes |
|
177 | 177 | abort: http://localhost:$HGPORT/ does not appear to be a largefile store |
|
178 | 178 | [255] |
|
179 | 179 | $ cd .. |
|
180 | 180 | |
|
181 | 181 | putlfile errors are shown (issue3123) |
|
182 | 182 | Corrupt the cached largefile in r7 and move it out of the servers usercache |
|
183 | 183 | $ mv r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 . |
|
184 | 184 | $ echo 'client side corruption' > r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 |
|
185 | 185 | $ rm "$USERCACHE/4cdac4d8b084d0b599525cf732437fb337d422a8" |
|
186 | 186 | $ hg init empty |
|
187 | 187 | $ hg serve -R empty -d -p $HGPORT1 --pid-file hg.pid \ |
|
188 | 188 | > --config 'web.allow_push=*' --config web.push_ssl=False |
|
189 | 189 | $ cat hg.pid >> $DAEMON_PIDS |
|
190 | 190 | $ hg push -R r7 http://localhost:$HGPORT1 |
|
191 | 191 | pushing to http://localhost:$HGPORT1/ |
|
192 | 192 | searching for changes |
|
193 | 193 | remote: largefiles: failed to put 4cdac4d8b084d0b599525cf732437fb337d422a8 into store: largefile contents do not match hash |
|
194 | 194 | abort: remotestore: could not put $TESTTMP/r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 to remote store http://localhost:$HGPORT1/ |
|
195 | 195 | [255] |
|
196 | 196 | $ mv 4cdac4d8b084d0b599525cf732437fb337d422a8 r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 |
|
197 | 197 | Push of file that exists on server but is corrupted - magic healing would be nice ... but too magic |
|
198 | 198 | $ echo "server side corruption" > empty/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 |
|
199 | 199 | $ hg push -R r7 http://localhost:$HGPORT1 |
|
200 | 200 | pushing to http://localhost:$HGPORT1/ |
|
201 | 201 | searching for changes |
|
202 | 202 | remote: adding changesets |
|
203 | 203 | remote: adding manifests |
|
204 | 204 | remote: adding file changes |
|
205 | 205 | remote: added 2 changesets with 2 changes to 2 files |
|
206 | 206 | $ cat empty/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 |
|
207 | 207 | server side corruption |
|
208 | 208 | $ rm -rf empty |
|
209 | 209 | |
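
(Aside, not part of the test file: the putlfile failure above comes from largefiles being content-addressed. A minimal sketch of the idea, assuming the store name is the SHA-1 hex digest of the file contents, which the 40-character names in the test suggest.)

    # Minimal sketch (assumption: a largefile's store name is the SHA-1 hex
    # digest of its contents).  Overwriting the cached copy with other bytes
    # therefore triggers "largefile contents do not match hash" on push.
    import hashlib

    def largefile_key(data: bytes) -> str:
        return hashlib.sha1(data).hexdigest()

    corrupted = b"client side corruption\n"
    print(largefile_key(corrupted))  # no longer 4cdac4d8b084d0b599525cf732437fb337d422a8
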
|
210 | 210 | Push a largefiles repository to a served empty repository |
|
211 | 211 | $ hg init r8 |
|
212 | 212 | $ echo c3 > r8/f1 |
|
213 | 213 | $ hg add --large r8/f1 -R r8 |
|
214 | 214 | $ hg commit -m "m1" -R r8 |
|
215 | 215 | Invoking status precommit hook |
|
216 | 216 | A f1 |
|
217 | 217 | $ hg init empty |
|
218 | 218 | $ hg serve -R empty -d -p $HGPORT2 --pid-file hg.pid \ |
|
219 | 219 | > --config 'web.allow_push=*' --config web.push_ssl=False |
|
220 | 220 | $ cat hg.pid >> $DAEMON_PIDS |
|
221 | 221 | $ rm "${USERCACHE}"/* |
|
222 | 222 | $ hg push -R r8 http://localhost:$HGPORT2/#default |
|
223 | 223 | pushing to http://localhost:$HGPORT2/ |
|
224 | 224 | searching for changes |
|
225 | 225 | remote: adding changesets |
|
226 | 226 | remote: adding manifests |
|
227 | 227 | remote: adding file changes |
|
228 | 228 | remote: added 1 changesets with 1 changes to 1 files |
|
229 | 229 | $ [ -f "${USERCACHE}"/02a439e5c31c526465ab1a0ca1f431f76b827b90 ] |
|
230 | 230 | $ [ -f empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ] |
|
231 | 231 | |
|
232 | 232 | Clone over http, no largefiles pulled on clone. |
|
233 | 233 | |
|
234 | 234 | $ hg clone http://localhost:$HGPORT2/#default http-clone -U |
|
235 | 235 | adding changesets |
|
236 | 236 | adding manifests |
|
237 | 237 | adding file changes |
|
238 | 238 | added 1 changesets with 1 changes to 1 files |
|
239 | 239 | new changesets cf03e5bb9936 (1 drafts) |
|
240 | 240 | |
|
241 | 241 | Archive contains largefiles |
|
242 | 242 | >>> import os |
|
243 | 243 | >>> from mercurial import urllibcompat |
|
244 | 244 | >>> u = 'http://localhost:%s/archive/default.zip' % os.environ['HGPORT2'] |
|
245 | 245 | >>> with open('archive.zip', 'wb') as f: |
|
246 | 246 | ... f.write(urllibcompat.urlreq.urlopen(u).read()) and None |
|
247 | 247 | $ unzip -t archive.zip |
|
248 | 248 | Archive: archive.zip |
|
249 | 249 | testing: empty-default/.hg_archival.txt*OK (glob) |
|
250 | 250 | testing: empty-default/f1*OK (glob) |
|
251 | 251 | No errors detected in compressed data of archive.zip. |
|
252 | 252 | |
|
253 | 253 | test 'verify' with remotestore: |
|
254 | 254 | |
|
255 | 255 | $ rm "${USERCACHE}"/02a439e5c31c526465ab1a0ca1f431f76b827b90 |
|
256 | 256 | $ mv empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 . |
|
257 | 257 | $ hg -R http-clone verify --large --lfa |
|
258 | 258 | checking changesets |
|
259 | 259 | checking manifests |
|
260 | 260 | crosschecking files in changesets and manifests |
|
261 | 261 | checking files |
|
262 | 262 | checked 1 changesets with 1 changes to 1 files |
|
263 | 263 | searching 1 changesets for largefiles |
|
264 | 264 | changeset 0:cf03e5bb9936: f1 missing |
|
265 | 265 | verified existence of 1 revisions of 1 largefiles |
|
266 | 266 | [1] |
|
267 | 267 | $ mv 02a439e5c31c526465ab1a0ca1f431f76b827b90 empty/.hg/largefiles/ |
|
268 | 268 | $ hg -R http-clone -q verify --large --lfa |
|
269 | 269 | |
|
270 | 270 | largefiles pulled on update - a largefile missing on the server: |
|
271 | 271 | $ mv empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 . |
|
272 | 272 | $ hg -R http-clone up --config largefiles.usercache=http-clone-usercache |
|
273 | 273 | getting changed largefiles |
|
274 | 274 | f1: largefile 02a439e5c31c526465ab1a0ca1f431f76b827b90 not available from http://localhost:$HGPORT2/ |
|
275 | 275 | 0 largefiles updated, 0 removed |
|
276 | 276 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
277 | 277 | $ hg -R http-clone st |
|
278 | 278 | ! f1 |
|
279 | 279 | $ hg -R http-clone up -Cqr null |
|
280 | 280 | |
|
281 | 281 | largefiles pulled on update - a largefile corrupted on the server: |
|
282 | 282 | $ echo corruption > empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 |
|
283 | 283 | $ hg -R http-clone up --config largefiles.usercache=http-clone-usercache |
|
284 | 284 | getting changed largefiles |
|
285 | 285 | f1: data corruption (expected 02a439e5c31c526465ab1a0ca1f431f76b827b90, got 6a7bb2556144babe3899b25e5428123735bb1e27) |
|
286 | 286 | 0 largefiles updated, 0 removed |
|
287 | 287 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
288 | 288 | $ hg -R http-clone st |
|
289 | 289 | ! f1 |
|
290 | 290 | $ [ ! -f http-clone/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ] |
|
291 | 291 | $ [ ! -f http-clone/f1 ] |
|
292 | 292 | $ [ ! -f http-clone-usercache ] |
|
293 | 293 | $ hg -R http-clone verify --large --lfc |
|
294 | 294 | checking changesets |
|
295 | 295 | checking manifests |
|
296 | 296 | crosschecking files in changesets and manifests |
|
297 | 297 | checking files |
|
298 | 298 | checked 1 changesets with 1 changes to 1 files |
|
299 | 299 | searching 1 changesets for largefiles |
|
300 | 300 | verified contents of 1 revisions of 1 largefiles |
|
301 | 301 | $ hg -R http-clone up -Cqr null |
|
302 | 302 | |
|
303 | 303 | largefiles pulled on update - no server side problems: |
|
304 | 304 | $ mv 02a439e5c31c526465ab1a0ca1f431f76b827b90 empty/.hg/largefiles/ |
|
305 | 305 | $ hg -R http-clone --debug up --config largefiles.usercache=http-clone-usercache --config progress.debug=true |
|
306 | 306 | resolving manifests |
|
307 | 307 | branchmerge: False, force: False, partial: False |
|
308 | 308 | ancestor: 000000000000, local: 000000000000+, remote: cf03e5bb9936 |
|
309 | 309 | .hglf/f1: remote created -> g |
|
310 | 310 | getting .hglf/f1 |
|
311 | 311 | updating: .hglf/f1 1/1 files (100.00%) |
|
312 | 312 | getting changed largefiles |
|
313 | 313 | using http://localhost:$HGPORT2/ |
|
314 | 314 | sending capabilities command |
|
315 | 315 | sending statlfile command |
|
316 | 316 | getting largefiles: 0/1 files (0.00%) |
|
317 | 317 | getting f1:02a439e5c31c526465ab1a0ca1f431f76b827b90 |
|
318 | 318 | sending getlfile command |
|
319 | 319 | found 02a439e5c31c526465ab1a0ca1f431f76b827b90 in store |
|
320 | 320 | 1 largefiles updated, 0 removed |
|
321 | 321 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
322 | 322 | |
|
323 | 323 | $ ls http-clone-usercache/* |
|
324 | 324 | http-clone-usercache/02a439e5c31c526465ab1a0ca1f431f76b827b90 |
|
325 | 325 | |
|
326 | 326 | $ rm -rf empty http-clone* |
|
327 | 327 | |
|
328 | 328 | used all HGPORTs, kill all daemons |
|
329 | 329 | $ killdaemons.py |
|
330 | 330 | |
|
331 | 331 | largefiles should batch verify remote calls |
|
332 | 332 | |
|
333 | 333 | $ hg init batchverifymain |
|
334 | 334 | $ cd batchverifymain |
|
335 | 335 | $ echo "aaa" >> a |
|
336 | 336 | $ hg add --large a |
|
337 | 337 | $ hg commit -m "a" |
|
338 | 338 | Invoking status precommit hook |
|
339 | 339 | A a |
|
340 | 340 | $ echo "bbb" >> b |
|
341 | 341 | $ hg add --large b |
|
342 | 342 | $ hg commit -m "b" |
|
343 | 343 | Invoking status precommit hook |
|
344 | 344 | A b |
|
345 | 345 | $ cd .. |
|
346 | 346 | $ hg serve -R batchverifymain -d -p $HGPORT --pid-file hg.pid \ |
|
347 | 347 | > -A access.log |
|
348 | 348 | $ cat hg.pid >> $DAEMON_PIDS |
|
349 | 349 | $ hg clone --noupdate http://localhost:$HGPORT batchverifyclone |
|
350 | 350 | requesting all changes |
|
351 | 351 | adding changesets |
|
352 | 352 | adding manifests |
|
353 | 353 | adding file changes |
|
354 | 354 | added 2 changesets with 2 changes to 2 files |
|
355 | 355 | new changesets 567253b0f523:04d19c27a332 (2 drafts) |
|
356 | 356 | $ hg -R batchverifyclone verify --large --lfa |
|
357 | 357 | checking changesets |
|
358 | 358 | checking manifests |
|
359 | 359 | crosschecking files in changesets and manifests |
|
360 | 360 | checking files |
|
361 | 361 | checked 2 changesets with 2 changes to 2 files |
|
362 | 362 | searching 2 changesets for largefiles |
|
363 | 363 | verified existence of 2 revisions of 2 largefiles |
|
364 | 364 | $ tail -1 access.log |
|
365 | 365 | $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3D972a1a11f19934401291cc99117ec614933374ce%3Bstatlfile+sha%3Dc801c9cfe94400963fcb683246217d5db77f9a9a x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
366 | 366 | $ hg -R batchverifyclone update |
|
367 | 367 | getting changed largefiles |
|
368 | 368 | 2 largefiles updated, 0 removed |
|
369 | 369 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
370 | 370 | |
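
(Aside, not part of the test file: decoding the batched request captured in access.log above makes the "batch verify" claim concrete. Assumption: the wire-protocol batch command packs its sub-commands into one URL-encoded cmds argument separated by ";", which is what the logged line shows.)

    # Minimal sketch: URL-decode the x-hgarg-1 "cmds" value from the
    # access.log line above and split it into the individual statlfile
    # calls that were batched into a single HTTP request.
    from urllib.parse import unquote_plus

    raw = ("statlfile+sha%3D972a1a11f19934401291cc99117ec614933374ce%3B"
           "statlfile+sha%3Dc801c9cfe94400963fcb683246217d5db77f9a9a")
    for cmd in unquote_plus(raw).split(";"):
        print(cmd)
    # statlfile sha=972a1a11f19934401291cc99117ec614933374ce
    # statlfile sha=c801c9cfe94400963fcb683246217d5db77f9a9a
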
|
371 | 371 | Clear log file before next test |
|
372 | 372 | |
|
373 | 373 | $ printf "" > access.log |
|
374 | 374 | |
|
375 | 375 | Verify should check file on remote server only when file is not |
|
376 | 376 | available locally. |
|
377 | 377 | |
|
378 | 378 | $ echo "ccc" >> batchverifymain/c |
|
379 | 379 | $ hg -R batchverifymain status |
|
380 | 380 | ? c |
|
381 | 381 | $ hg -R batchverifymain add --large batchverifymain/c |
|
382 | 382 | $ hg -R batchverifymain commit -m "c" |
|
383 | 383 | Invoking status precommit hook |
|
384 | 384 | A c |
|
385 | 385 | $ hg -R batchverifyclone pull |
|
386 | 386 | pulling from http://localhost:$HGPORT/ |
|
387 | 387 | searching for changes |
|
388 | 388 | adding changesets |
|
389 | 389 | adding manifests |
|
390 | 390 | adding file changes |
|
391 | 391 | added 1 changesets with 1 changes to 1 files |
|
392 | 392 | new changesets 6bba8cb6935d (1 drafts) |
|
393 | 393 | (run 'hg update' to get a working copy) |
|
394 | 394 | $ hg -R batchverifyclone verify --lfa |
|
395 | 395 | checking changesets |
|
396 | 396 | checking manifests |
|
397 | 397 | crosschecking files in changesets and manifests |
|
398 | 398 | checking files |
|
399 | 399 | checked 3 changesets with 3 changes to 3 files |
|
400 | 400 | searching 3 changesets for largefiles |
|
401 | 401 | verified existence of 3 revisions of 3 largefiles |
|
402 | 402 | $ tail -1 access.log |
|
403 | 403 | $LOCALIP - - [$LOGDATE$] "GET /?cmd=statlfile HTTP/1.1" 200 - x-hgarg-1:sha=c8559c3c9cfb42131794b7d8009230403b9b454c x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
404 | 404 | |
|
405 | 405 | $ killdaemons.py |
|
406 | 406 | |
|
407 | 407 | largefiles should not ask for password again after successful authorization |
|
408 | 408 | |
|
409 | 409 | $ hg init credentialmain |
|
410 | 410 | $ cd credentialmain |
|
411 | 411 | $ echo "aaa" >> a |
|
412 | 412 | $ hg add --large a |
|
413 | 413 | $ hg commit -m "a" |
|
414 | 414 | Invoking status precommit hook |
|
415 | 415 | A a |
|
416 | 416 | |
|
417 | 417 | Before running server clear the user cache to force clone to download |
|
418 | 418 | a large file from the server rather than to get it from the cache |
|
419 | 419 | |
|
420 | 420 | $ rm "${USERCACHE}"/* |
|
421 | 421 | |
|
422 | 422 | $ cd .. |
|
423 | 423 | |
|
424 | 424 | $ hg serve --config extensions.x=$TESTDIR/httpserverauth.py -R credentialmain \ |
|
425 | 425 | > -d -p $HGPORT --pid-file hg.pid -A access.log |
|
426 | 426 | $ cat hg.pid >> $DAEMON_PIDS |
|
427 | 427 | $ cat << EOF > get_pass.py |
|
428 |     | > import getpass

429 |     | > def newgetpass(

    | 428 | > from mercurial import util

    | 429 | > def newgetpass():

430 | 430 | > return "pass"

431 |     | >

    | 431 | > util.get_password = newgetpass
|
432 | 432 | > EOF |
|
433 | 433 | $ hg clone --config ui.interactive=true --config extensions.getpass=get_pass.py \ |
|
434 | 434 | > http://user@localhost:$HGPORT credentialclone |
|
435 | 435 | http authorization required for http://localhost:$HGPORT/ |
|
436 | 436 | realm: mercurial |
|
437 | 437 | user: user |
|
438 | 438 | password: requesting all changes |
|
439 | 439 | adding changesets |
|
440 | 440 | adding manifests |
|
441 | 441 | adding file changes |
|
442 | 442 | added 1 changesets with 1 changes to 1 files |
|
443 | 443 | new changesets 567253b0f523 (1 drafts) |
|
444 | 444 | updating to branch default |
|
445 | 445 | getting changed largefiles |
|
446 | 446 | 1 largefiles updated, 0 removed |
|
447 | 447 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
448 | 448 | |
|
449 | 449 | $ killdaemons.py |
|
450 | 450 | $ rm hg.pid access.log |
|
451 | 451 | |
|
452 | 452 | #endif |
@@ -1,883 +1,900
|
1 | 1 | #testcases dirstate-v1 dirstate-v1-tree dirstate-v2 |
|
2 | 2 | |
|
3 | 3 | #if no-rust |
|
4 | 4 | $ hg init repo0 --config format.exp-dirstate-v2=1 |
|
5 | 5 | abort: dirstate v2 format requested by config but not supported (requires Rust extensions) |
|
6 | 6 | [255] |
|
7 | 7 | #endif |
|
8 | 8 | |
|
9 | 9 | #if dirstate-v1-tree |
|
10 | 10 | #require rust |
|
11 | 11 | $ echo '[experimental]' >> $HGRCPATH |
|
12 | 12 | $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH |
|
13 | 13 | #endif |
|
14 | 14 | |
|
15 | 15 | #if dirstate-v2 |
|
16 | 16 | #require rust |
|
17 | 17 | $ echo '[format]' >> $HGRCPATH |
|
18 | 18 | $ echo 'exp-dirstate-v2=1' >> $HGRCPATH |
|
19 | 19 | #endif |
|
20 | 20 | |
|
21 | 21 | $ hg init repo1 |
|
22 | 22 | $ cd repo1 |
|
23 | 23 | $ mkdir a b a/1 b/1 b/2 |
|
24 | 24 | $ touch in_root a/in_a b/in_b a/1/in_a_1 b/1/in_b_1 b/2/in_b_2 |
|
25 | 25 | |
|
26 | 26 | hg status in repo root: |
|
27 | 27 | |
|
28 | 28 | $ hg status |
|
29 | 29 | ? a/1/in_a_1 |
|
30 | 30 | ? a/in_a |
|
31 | 31 | ? b/1/in_b_1 |
|
32 | 32 | ? b/2/in_b_2 |
|
33 | 33 | ? b/in_b |
|
34 | 34 | ? in_root |
|
35 | 35 | |
|
36 | 36 | hg status . in repo root: |
|
37 | 37 | |
|
38 | 38 | $ hg status . |
|
39 | 39 | ? a/1/in_a_1 |
|
40 | 40 | ? a/in_a |
|
41 | 41 | ? b/1/in_b_1 |
|
42 | 42 | ? b/2/in_b_2 |
|
43 | 43 | ? b/in_b |
|
44 | 44 | ? in_root |
|
45 | 45 | |
|
46 | 46 | $ hg status --cwd a |
|
47 | 47 | ? a/1/in_a_1 |
|
48 | 48 | ? a/in_a |
|
49 | 49 | ? b/1/in_b_1 |
|
50 | 50 | ? b/2/in_b_2 |
|
51 | 51 | ? b/in_b |
|
52 | 52 | ? in_root |
|
53 | 53 | $ hg status --cwd a . |
|
54 | 54 | ? 1/in_a_1 |
|
55 | 55 | ? in_a |
|
56 | 56 | $ hg status --cwd a .. |
|
57 | 57 | ? 1/in_a_1 |
|
58 | 58 | ? in_a |
|
59 | 59 | ? ../b/1/in_b_1 |
|
60 | 60 | ? ../b/2/in_b_2 |
|
61 | 61 | ? ../b/in_b |
|
62 | 62 | ? ../in_root |
|
63 | 63 | |
|
64 | 64 | $ hg status --cwd b |
|
65 | 65 | ? a/1/in_a_1 |
|
66 | 66 | ? a/in_a |
|
67 | 67 | ? b/1/in_b_1 |
|
68 | 68 | ? b/2/in_b_2 |
|
69 | 69 | ? b/in_b |
|
70 | 70 | ? in_root |
|
71 | 71 | $ hg status --cwd b . |
|
72 | 72 | ? 1/in_b_1 |
|
73 | 73 | ? 2/in_b_2 |
|
74 | 74 | ? in_b |
|
75 | 75 | $ hg status --cwd b .. |
|
76 | 76 | ? ../a/1/in_a_1 |
|
77 | 77 | ? ../a/in_a |
|
78 | 78 | ? 1/in_b_1 |
|
79 | 79 | ? 2/in_b_2 |
|
80 | 80 | ? in_b |
|
81 | 81 | ? ../in_root |
|
82 | 82 | |
|
83 | 83 | $ hg status --cwd a/1 |
|
84 | 84 | ? a/1/in_a_1 |
|
85 | 85 | ? a/in_a |
|
86 | 86 | ? b/1/in_b_1 |
|
87 | 87 | ? b/2/in_b_2 |
|
88 | 88 | ? b/in_b |
|
89 | 89 | ? in_root |
|
90 | 90 | $ hg status --cwd a/1 . |
|
91 | 91 | ? in_a_1 |
|
92 | 92 | $ hg status --cwd a/1 .. |
|
93 | 93 | ? in_a_1 |
|
94 | 94 | ? ../in_a |
|
95 | 95 | |
|
96 | 96 | $ hg status --cwd b/1 |
|
97 | 97 | ? a/1/in_a_1 |
|
98 | 98 | ? a/in_a |
|
99 | 99 | ? b/1/in_b_1 |
|
100 | 100 | ? b/2/in_b_2 |
|
101 | 101 | ? b/in_b |
|
102 | 102 | ? in_root |
|
103 | 103 | $ hg status --cwd b/1 . |
|
104 | 104 | ? in_b_1 |
|
105 | 105 | $ hg status --cwd b/1 .. |
|
106 | 106 | ? in_b_1 |
|
107 | 107 | ? ../2/in_b_2 |
|
108 | 108 | ? ../in_b |
|
109 | 109 | |
|
110 | 110 | $ hg status --cwd b/2 |
|
111 | 111 | ? a/1/in_a_1 |
|
112 | 112 | ? a/in_a |
|
113 | 113 | ? b/1/in_b_1 |
|
114 | 114 | ? b/2/in_b_2 |
|
115 | 115 | ? b/in_b |
|
116 | 116 | ? in_root |
|
117 | 117 | $ hg status --cwd b/2 . |
|
118 | 118 | ? in_b_2 |
|
119 | 119 | $ hg status --cwd b/2 .. |
|
120 | 120 | ? ../1/in_b_1 |
|
121 | 121 | ? in_b_2 |
|
122 | 122 | ? ../in_b |
|
123 | 123 | |
|
124 | 124 | combining patterns with root and patterns without a root works |
|
125 | 125 | |
|
126 | 126 | $ hg st a/in_a re:.*b$ |
|
127 | 127 | ? a/in_a |
|
128 | 128 | ? b/in_b |
|
129 | 129 | |
|
130 | 130 | tweaking defaults works |
|
131 | 131 | $ hg status --cwd a --config ui.tweakdefaults=yes |
|
132 | 132 | ? 1/in_a_1 |
|
133 | 133 | ? in_a |
|
134 | 134 | ? ../b/1/in_b_1 |
|
135 | 135 | ? ../b/2/in_b_2 |
|
136 | 136 | ? ../b/in_b |
|
137 | 137 | ? ../in_root |
|
138 | 138 | $ HGPLAIN=1 hg status --cwd a --config ui.tweakdefaults=yes |
|
139 | 139 | ? a/1/in_a_1 (glob) |
|
140 | 140 | ? a/in_a (glob) |
|
141 | 141 | ? b/1/in_b_1 (glob) |
|
142 | 142 | ? b/2/in_b_2 (glob) |
|
143 | 143 | ? b/in_b (glob) |
|
144 | 144 | ? in_root |
|
145 | 145 | $ HGPLAINEXCEPT=tweakdefaults hg status --cwd a --config ui.tweakdefaults=yes |
|
146 | 146 | ? 1/in_a_1 |
|
147 | 147 | ? in_a |
|
148 | 148 | ? ../b/1/in_b_1 |
|
149 | 149 | ? ../b/2/in_b_2 |
|
150 | 150 | ? ../b/in_b |
|
151 | 151 | ? ../in_root (glob) |
|
152 | 152 | |
|
153 | 153 | relative paths can be requested |
|
154 | 154 | |
|
155 | 155 | $ hg status --cwd a --config ui.relative-paths=yes |
|
156 | 156 | ? 1/in_a_1 |
|
157 | 157 | ? in_a |
|
158 | 158 | ? ../b/1/in_b_1 |
|
159 | 159 | ? ../b/2/in_b_2 |
|
160 | 160 | ? ../b/in_b |
|
161 | 161 | ? ../in_root |
|
162 | 162 | |
|
163 | 163 | $ hg status --cwd a . --config ui.relative-paths=legacy |
|
164 | 164 | ? 1/in_a_1 |
|
165 | 165 | ? in_a |
|
166 | 166 | $ hg status --cwd a . --config ui.relative-paths=no |
|
167 | 167 | ? a/1/in_a_1 |
|
168 | 168 | ? a/in_a |
|
169 | 169 | |
|
170 | 170 | commands.status.relative overrides ui.relative-paths |
|
171 | 171 | |
|
172 | 172 | $ cat >> $HGRCPATH <<EOF |
|
173 | 173 | > [ui] |
|
174 | 174 | > relative-paths = False |
|
175 | 175 | > [commands] |
|
176 | 176 | > status.relative = True |
|
177 | 177 | > EOF |
|
178 | 178 | $ hg status --cwd a |
|
179 | 179 | ? 1/in_a_1 |
|
180 | 180 | ? in_a |
|
181 | 181 | ? ../b/1/in_b_1 |
|
182 | 182 | ? ../b/2/in_b_2 |
|
183 | 183 | ? ../b/in_b |
|
184 | 184 | ? ../in_root |
|
185 | 185 | $ HGPLAIN=1 hg status --cwd a |
|
186 | 186 | ? a/1/in_a_1 (glob) |
|
187 | 187 | ? a/in_a (glob) |
|
188 | 188 | ? b/1/in_b_1 (glob) |
|
189 | 189 | ? b/2/in_b_2 (glob) |
|
190 | 190 | ? b/in_b (glob) |
|
191 | 191 | ? in_root |
|
192 | 192 | |
|
193 | 193 | if relative paths are explicitly off, tweakdefaults doesn't change it |
|
194 | 194 | $ cat >> $HGRCPATH <<EOF |
|
195 | 195 | > [commands] |
|
196 | 196 | > status.relative = False |
|
197 | 197 | > EOF |
|
198 | 198 | $ hg status --cwd a --config ui.tweakdefaults=yes |
|
199 | 199 | ? a/1/in_a_1 |
|
200 | 200 | ? a/in_a |
|
201 | 201 | ? b/1/in_b_1 |
|
202 | 202 | ? b/2/in_b_2 |
|
203 | 203 | ? b/in_b |
|
204 | 204 | ? in_root |
|
205 | 205 | |
|
206 | 206 | $ cd .. |
|
207 | 207 | |
|
208 | 208 | $ hg init repo2 |
|
209 | 209 | $ cd repo2 |
|
210 | 210 | $ touch modified removed deleted ignored |
|
211 | 211 | $ echo "^ignored$" > .hgignore |
|
212 | 212 | $ hg ci -A -m 'initial checkin' |
|
213 | 213 | adding .hgignore |
|
214 | 214 | adding deleted |
|
215 | 215 | adding modified |
|
216 | 216 | adding removed |
|
217 | 217 | $ touch modified added unknown ignored |
|
218 | 218 | $ hg add added |
|
219 | 219 | $ hg remove removed |
|
220 | 220 | $ rm deleted |
|
221 | 221 | |
|
222 | 222 | hg status: |
|
223 | 223 | |
|
224 | 224 | $ hg status |
|
225 | 225 | A added |
|
226 | 226 | R removed |
|
227 | 227 | ! deleted |
|
228 | 228 | ? unknown |
|
229 | 229 | |
|
230 | 230 | hg status modified added removed deleted unknown never-existed ignored: |
|
231 | 231 | |
|
232 | 232 | $ hg status modified added removed deleted unknown never-existed ignored |
|
233 | 233 | never-existed: * (glob) |
|
234 | 234 | A added |
|
235 | 235 | R removed |
|
236 | 236 | ! deleted |
|
237 | 237 | ? unknown |
|
238 | 238 | |
|
239 | 239 | $ hg copy modified copied |
|
240 | 240 | |
|
241 | 241 | hg status -C: |
|
242 | 242 | |
|
243 | 243 | $ hg status -C |
|
244 | 244 | A added |
|
245 | 245 | A copied |
|
246 | 246 | modified |
|
247 | 247 | R removed |
|
248 | 248 | ! deleted |
|
249 | 249 | ? unknown |
|
250 | 250 | |
|
251 | 251 | hg status -A: |
|
252 | 252 | |
|
253 | 253 | $ hg status -A |
|
254 | 254 | A added |
|
255 | 255 | A copied |
|
256 | 256 | modified |
|
257 | 257 | R removed |
|
258 | 258 | ! deleted |
|
259 | 259 | ? unknown |
|
260 | 260 | I ignored |
|
261 | 261 | C .hgignore |
|
262 | 262 | C modified |
|
263 | 263 | |
|
264 | 264 | $ hg status -A -T '{status} {path} {node|shortest}\n' |
|
265 | 265 | A added ffff |
|
266 | 266 | A copied ffff |
|
267 | 267 | R removed ffff |
|
268 | 268 | ! deleted ffff |
|
269 | 269 | ? unknown ffff |
|
270 | 270 | I ignored ffff |
|
271 | 271 | C .hgignore ffff |
|
272 | 272 | C modified ffff |
|
273 | 273 | |
|
274 | 274 | $ hg status -A -Tjson |
|
275 | 275 | [ |
|
276 | 276 | { |
|
277 | 277 | "itemtype": "file", |
|
278 | 278 | "path": "added", |
|
279 | 279 | "status": "A" |
|
280 | 280 | }, |
|
281 | 281 | { |
|
282 | 282 | "itemtype": "file", |
|
283 | 283 | "path": "copied", |
|
284 | 284 | "source": "modified", |
|
285 | 285 | "status": "A" |
|
286 | 286 | }, |
|
287 | 287 | { |
|
288 | 288 | "itemtype": "file", |
|
289 | 289 | "path": "removed", |
|
290 | 290 | "status": "R" |
|
291 | 291 | }, |
|
292 | 292 | { |
|
293 | 293 | "itemtype": "file", |
|
294 | 294 | "path": "deleted", |
|
295 | 295 | "status": "!" |
|
296 | 296 | }, |
|
297 | 297 | { |
|
298 | 298 | "itemtype": "file", |
|
299 | 299 | "path": "unknown", |
|
300 | 300 | "status": "?" |
|
301 | 301 | }, |
|
302 | 302 | { |
|
303 | 303 | "itemtype": "file", |
|
304 | 304 | "path": "ignored", |
|
305 | 305 | "status": "I" |
|
306 | 306 | }, |
|
307 | 307 | { |
|
308 | 308 | "itemtype": "file", |
|
309 | 309 | "path": ".hgignore", |
|
310 | 310 | "status": "C" |
|
311 | 311 | }, |
|
312 | 312 | { |
|
313 | 313 | "itemtype": "file", |
|
314 | 314 | "path": "modified", |
|
315 | 315 | "status": "C" |
|
316 | 316 | } |
|
317 | 317 | ] |
|
318 | 318 | |
|
319 | 319 | $ hg status -A -Tpickle > pickle |
|
320 | 320 | >>> from __future__ import print_function |
|
321 | 321 | >>> from mercurial import util |
|
322 | 322 | >>> pickle = util.pickle |
|
323 | 323 | >>> data = sorted((x[b'status'].decode(), x[b'path'].decode()) for x in pickle.load(open("pickle", r"rb"))) |
|
324 | 324 | >>> for s, p in data: print("%s %s" % (s, p)) |
|
325 | 325 | ! deleted |
|
326 | 326 | ? pickle |
|
327 | 327 | ? unknown |
|
328 | 328 | A added |
|
329 | 329 | A copied |
|
330 | 330 | C .hgignore |
|
331 | 331 | C modified |
|
332 | 332 | I ignored |
|
333 | 333 | R removed |
|
334 | 334 | $ rm pickle |
|
335 | 335 | |
|
336 | 336 | $ echo "^ignoreddir$" > .hgignore |
|
337 | 337 | $ mkdir ignoreddir |
|
338 | 338 | $ touch ignoreddir/file |
|
339 | 339 | |
|
340 | 340 | Test templater support: |
|
341 | 341 | |
|
342 | 342 | $ hg status -AT "[{status}]\t{if(source, '{source} -> ')}{path}\n" |
|
343 | 343 | [M] .hgignore |
|
344 | 344 | [A] added |
|
345 | 345 | [A] modified -> copied |
|
346 | 346 | [R] removed |
|
347 | 347 | [!] deleted |
|
348 | 348 | [?] ignored |
|
349 | 349 | [?] unknown |
|
350 | 350 | [I] ignoreddir/file |
|
351 | 351 | [C] modified |
|
352 | 352 | $ hg status -AT default |
|
353 | 353 | M .hgignore |
|
354 | 354 | A added |
|
355 | 355 | A copied |
|
356 | 356 | modified |
|
357 | 357 | R removed |
|
358 | 358 | ! deleted |
|
359 | 359 | ? ignored |
|
360 | 360 | ? unknown |
|
361 | 361 | I ignoreddir/file |
|
362 | 362 | C modified |
|
363 | 363 | $ hg status -T compact |
|
364 | 364 | abort: "status" not in template map |
|
365 | 365 | [255] |
|
366 | 366 | |
|
367 | 367 | hg status ignoreddir/file: |
|
368 | 368 | |
|
369 | 369 | $ hg status ignoreddir/file |
|
370 | 370 | |
|
371 | 371 | hg status -i ignoreddir/file: |
|
372 | 372 | |
|
373 | 373 | $ hg status -i ignoreddir/file |
|
374 | 374 | I ignoreddir/file |
|
375 | 375 | $ cd .. |
|
376 | 376 | |
|
377 | 377 | Check 'status -q' and some combinations |
|
378 | 378 | |
|
379 | 379 | $ hg init repo3 |
|
380 | 380 | $ cd repo3 |
|
381 | 381 | $ touch modified removed deleted ignored |
|
382 | 382 | $ echo "^ignored$" > .hgignore |
|
383 | 383 | $ hg commit -A -m 'initial checkin' |
|
384 | 384 | adding .hgignore |
|
385 | 385 | adding deleted |
|
386 | 386 | adding modified |
|
387 | 387 | adding removed |
|
388 | 388 | $ touch added unknown ignored |
|
389 | 389 | $ hg add added |
|
390 | 390 | $ echo "test" >> modified |
|
391 | 391 | $ hg remove removed |
|
392 | 392 | $ rm deleted |
|
393 | 393 | $ hg copy modified copied |
|
394 | 394 | |
|
395 | 395 | Specify the working directory revision explicitly; that should be the same as
|
396 | 396 | "hg status" |
|
397 | 397 | |
|
398 | 398 | $ hg status --change "wdir()" |
|
399 | 399 | M modified |
|
400 | 400 | A added |
|
401 | 401 | A copied |
|
402 | 402 | R removed |
|
403 | 403 | ! deleted |
|
404 | 404 | ? unknown |
|
405 | 405 | |
|
406 | 406 | Run status with 2 different flags. |
|
407 | 407 | Check if result is the same or different. |
|
408 | 408 | If the result is not as expected, raise an error
|
409 | 409 | |
|
410 | 410 | $ assert() { |
|
411 | 411 | > hg status $1 > ../a |
|
412 | 412 | > hg status $2 > ../b |
|
413 | 413 | > if diff ../a ../b > /dev/null; then |
|
414 | 414 | > out=0 |
|
415 | 415 | > else |
|
416 | 416 | > out=1 |
|
417 | 417 | > fi |
|
418 | 418 | > if [ $3 -eq 0 ]; then |
|
419 | 419 | > df="same" |
|
420 | 420 | > else |
|
421 | 421 | > df="different" |
|
422 | 422 | > fi |
|
423 | 423 | > if [ $out -ne $3 ]; then |
|
424 | 424 | > echo "Error on $1 and $2, should be $df." |
|
425 | 425 | > fi |
|
426 | 426 | > } |
|
427 | 427 | |
|
428 | 428 | Assert flag1 flag2 [0-same | 1-different] |
|
429 | 429 | |
|
430 | 430 | $ assert "-q" "-mard" 0 |
|
431 | 431 | $ assert "-A" "-marduicC" 0 |
|
432 | 432 | $ assert "-qA" "-mardcC" 0 |
|
433 | 433 | $ assert "-qAui" "-A" 0 |
|
434 | 434 | $ assert "-qAu" "-marducC" 0 |
|
435 | 435 | $ assert "-qAi" "-mardicC" 0 |
|
436 | 436 | $ assert "-qu" "-u" 0 |
|
437 | 437 | $ assert "-q" "-u" 1 |
|
438 | 438 | $ assert "-m" "-a" 1 |
|
439 | 439 | $ assert "-r" "-d" 1 |
|
440 | 440 | $ cd .. |
|
441 | 441 | |
|
442 | 442 | $ hg init repo4 |
|
443 | 443 | $ cd repo4 |
|
444 | 444 | $ touch modified removed deleted |
|
445 | 445 | $ hg ci -q -A -m 'initial checkin' |
|
446 | 446 | $ touch added unknown |
|
447 | 447 | $ hg add added |
|
448 | 448 | $ hg remove removed |
|
449 | 449 | $ rm deleted |
|
450 | 450 | $ echo x > modified |
|
451 | 451 | $ hg copy modified copied |
|
452 | 452 | $ hg ci -m 'test checkin' -d "1000001 0" |
|
453 | 453 | $ rm * |
|
454 | 454 | $ touch unrelated |
|
455 | 455 | $ hg ci -q -A -m 'unrelated checkin' -d "1000002 0" |
|
456 | 456 | |
|
457 | 457 | hg status --change 1: |
|
458 | 458 | |
|
459 | 459 | $ hg status --change 1 |
|
460 | 460 | M modified |
|
461 | 461 | A added |
|
462 | 462 | A copied |
|
463 | 463 | R removed |
|
464 | 464 | |
|
465 | 465 | hg status --change 1 unrelated: |
|
466 | 466 | |
|
467 | 467 | $ hg status --change 1 unrelated |
|
468 | 468 | |
|
469 | 469 | hg status -C --change 1 added modified copied removed deleted: |
|
470 | 470 | |
|
471 | 471 | $ hg status -C --change 1 added modified copied removed deleted |
|
472 | 472 | M modified |
|
473 | 473 | A added |
|
474 | 474 | A copied |
|
475 | 475 | modified |
|
476 | 476 | R removed |
|
477 | 477 | |
|
478 | 478 | hg status -A --change 1 and revset: |
|
479 | 479 | |
|
480 | 480 | $ hg status -A --change '1|1' |
|
481 | 481 | M modified |
|
482 | 482 | A added |
|
483 | 483 | A copied |
|
484 | 484 | modified |
|
485 | 485 | R removed |
|
486 | 486 | C deleted |
|
487 | 487 | |
|
488 | 488 | $ cd .. |
|
489 | 489 | |
|
490 | 490 | hg status with --rev and reverted changes: |
|
491 | 491 | |
|
492 | 492 | $ hg init reverted-changes-repo |
|
493 | 493 | $ cd reverted-changes-repo |
|
494 | 494 | $ echo a > file |
|
495 | 495 | $ hg add file |
|
496 | 496 | $ hg ci -m a |
|
497 | 497 | $ echo b > file |
|
498 | 498 | $ hg ci -m b |
|
499 | 499 | |
|
500 | 500 | reverted file should appear clean |
|
501 | 501 | |
|
502 | 502 | $ hg revert -r 0 . |
|
503 | 503 | reverting file |
|
504 | 504 | $ hg status -A --rev 0 |
|
505 | 505 | C file |
|
506 | 506 | |
|
507 | 507 | #if execbit |
|
508 | 508 | reverted file with changed flag should appear modified |
|
509 | 509 | |
|
510 | 510 | $ chmod +x file |
|
511 | 511 | $ hg status -A --rev 0 |
|
512 | 512 | M file |
|
513 | 513 | |
|
514 | 514 | $ hg revert -r 0 . |
|
515 | 515 | reverting file |
|
516 | 516 | |
|
517 | 517 | reverted and committed file with changed flag should appear modified |
|
518 | 518 | |
|
519 | 519 | $ hg co -C . |
|
520 | 520 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
521 | 521 | $ chmod +x file |
|
522 | 522 | $ hg ci -m 'change flag' |
|
523 | 523 | $ hg status -A --rev 1 --rev 2 |
|
524 | 524 | M file |
|
525 | 525 | $ hg diff -r 1 -r 2 |
|
526 | 526 | |
|
527 | 527 | #endif |
|
528 | 528 | |
|
529 | 529 | $ cd .. |
|
530 | 530 | |
|
531 | 531 | hg status of binary file starting with '\1\n', a separator for metadata: |
|
532 | 532 | |
|
533 | 533 | $ hg init repo5 |
|
534 | 534 | $ cd repo5 |
|
535 | 535 | >>> open("010a", r"wb").write(b"\1\nfoo") and None |
|
536 | 536 | $ hg ci -q -A -m 'initial checkin' |
|
537 | 537 | $ hg status -A |
|
538 | 538 | C 010a |
|
539 | 539 | |
|
540 | 540 | >>> open("010a", r"wb").write(b"\1\nbar") and None |
|
541 | 541 | $ hg status -A |
|
542 | 542 | M 010a |
|
543 | 543 | $ hg ci -q -m 'modify 010a' |
|
544 | 544 | $ hg status -A --rev 0:1 |
|
545 | 545 | M 010a |
|
546 | 546 | |
|
547 | 547 | $ touch empty |
|
548 | 548 | $ hg ci -q -A -m 'add another file' |
|
549 | 549 | $ hg status -A --rev 1:2 010a |
|
550 | 550 | C 010a |
|
551 | 551 | |
|
552 | 552 | $ cd .. |
|
553 | 553 | |
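
The '\1\n' prefix in the test above matters because filelog revisions use that byte sequence to frame copy metadata, so a content comparison has to account for the framing. A rough Rust sketch of stripping that framing (an illustration of the storage format only, not Mercurial's actual code):

    // Strip the filelog metadata header, which is framed by two "\x01\n"
    // markers. If a file's own contents begin with "\x01\n", the stored
    // revision carries an (empty) metadata block so this stays unambiguous.
    fn strip_metadata(data: &[u8]) -> &[u8] {
        if data.starts_with(b"\x01\n") {
            if let Some(end) = data[2..].windows(2).position(|w| w == b"\x01\n") {
                // Skip the opening marker, the metadata lines, and the
                // closing marker; what remains is the real file content.
                return &data[2 + end + 2..];
            }
        }
        data
    }
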
|
554 | 554 | test "hg status" with a "directory pattern" which matches against files

555 | 555 | only known in the target revision.
|
556 | 556 | |
|
557 | 557 | $ hg init repo6 |
|
558 | 558 | $ cd repo6 |
|
559 | 559 | |
|
560 | 560 | $ echo a > a.txt |
|
561 | 561 | $ hg add a.txt |
|
562 | 562 | $ hg commit -m '#0' |
|
563 | 563 | $ mkdir -p 1/2/3/4/5 |
|
564 | 564 | $ echo b > 1/2/3/4/5/b.txt |
|
565 | 565 | $ hg add 1/2/3/4/5/b.txt |
|
566 | 566 | $ hg commit -m '#1' |
|
567 | 567 | |
|
568 | 568 | $ hg update -C 0 > /dev/null |
|
569 | 569 | $ hg status -A |
|
570 | 570 | C a.txt |
|
571 | 571 | |
|
572 | 572 | the directory matching the specified pattern should be removed,

573 | 573 | because the directory's existence prevents 'dirstate.walk()' from showing a

574 | 574 | warning message about such a pattern.
|
575 | 575 | |
|
576 | 576 | $ test ! -d 1 |
|
577 | 577 | $ hg status -A --rev 1 1/2/3/4/5/b.txt |
|
578 | 578 | R 1/2/3/4/5/b.txt |
|
579 | 579 | $ hg status -A --rev 1 1/2/3/4/5 |
|
580 | 580 | R 1/2/3/4/5/b.txt |
|
581 | 581 | $ hg status -A --rev 1 1/2/3 |
|
582 | 582 | R 1/2/3/4/5/b.txt |
|
583 | 583 | $ hg status -A --rev 1 1 |
|
584 | 584 | R 1/2/3/4/5/b.txt |
|
585 | 585 | |
|
586 | 586 | $ hg status --config ui.formatdebug=True --rev 1 1 |
|
587 | 587 | status = [ |
|
588 | 588 | { |
|
589 | 589 | 'itemtype': 'file', |
|
590 | 590 | 'path': '1/2/3/4/5/b.txt', |
|
591 | 591 | 'status': 'R' |
|
592 | 592 | }, |
|
593 | 593 | ] |
|
594 | 594 | |
|
595 | 595 | #if windows |
|
596 | 596 | $ hg --config ui.slash=false status -A --rev 1 1 |
|
597 | 597 | R 1\2\3\4\5\b.txt |
|
598 | 598 | #endif |
|
599 | 599 | |
|
600 | 600 | $ cd .. |
|
601 | 601 | |
|
602 | 602 | Status after move overwriting a file (issue4458) |
|
603 | 603 | ================================================= |
|
604 | 604 | |
|
605 | 605 | |
|
606 | 606 | $ hg init issue4458 |
|
607 | 607 | $ cd issue4458 |
|
608 | 608 | $ echo a > a |
|
609 | 609 | $ echo b > b |
|
610 | 610 | $ hg commit -Am base |
|
611 | 611 | adding a |
|
612 | 612 | adding b |
|
613 | 613 | |
|
614 | 614 | |
|
615 | 615 | with --force |
|
616 | 616 | |
|
617 | 617 | $ hg mv b --force a |
|
618 | 618 | $ hg st --copies |
|
619 | 619 | M a |
|
620 | 620 | b |
|
621 | 621 | R b |
|
622 | 622 | $ hg revert --all |
|
623 | 623 | reverting a |
|
624 | 624 | undeleting b |
|
625 | 625 | $ rm *.orig |
|
626 | 626 | |
|
627 | 627 | without force |
|
628 | 628 | |
|
629 | 629 | $ hg rm a |
|
630 | 630 | $ hg st --copies |
|
631 | 631 | R a |
|
632 | 632 | $ hg mv b a |
|
633 | 633 | $ hg st --copies |
|
634 | 634 | M a |
|
635 | 635 | b |
|
636 | 636 | R b |
|
637 | 637 | |
|
638 | 638 | using ui.statuscopies setting |
|
639 | 639 | $ hg st --config ui.statuscopies=true |
|
640 | 640 | M a |
|
641 | 641 | b |
|
642 | 642 | R b |
|
643 | 643 | $ hg st --config ui.statuscopies=false |
|
644 | 644 | M a |
|
645 | 645 | R b |
|
646 | 646 | $ hg st --config ui.tweakdefaults=yes |
|
647 | 647 | M a |
|
648 | 648 | b |
|
649 | 649 | R b |
|
650 | 650 | |
|
651 | 651 | using log status template (issue5155) |
|
652 | 652 | $ hg log -Tstatus -r 'wdir()' -C |
|
653 | 653 | changeset: 2147483647:ffffffffffff |
|
654 | 654 | parent: 0:8c55c58b4c0e |
|
655 | 655 | user: test |
|
656 | 656 | date: * (glob) |
|
657 | 657 | files: |
|
658 | 658 | M a |
|
659 | 659 | b |
|
660 | 660 | R b |
|
661 | 661 | |
|
662 | 662 | $ hg log -GTstatus -r 'wdir()' -C |
|
663 | 663 | o changeset: 2147483647:ffffffffffff |
|
664 | 664 | | parent: 0:8c55c58b4c0e |
|
665 | 665 | ~ user: test |
|
666 | 666 | date: * (glob) |
|
667 | 667 | files: |
|
668 | 668 | M a |
|
669 | 669 | b |
|
670 | 670 | R b |
|
671 | 671 | |
|
672 | 672 | |
|
673 | 673 | Another "bug" to highlight: the revision status does not report the copy information.
|
674 | 674 | This is buggy behavior. |
|
675 | 675 | |
|
676 | 676 | $ hg commit -m 'blah' |
|
677 | 677 | $ hg st --copies --change . |
|
678 | 678 | M a |
|
679 | 679 | R b |
|
680 | 680 | |
|
681 | 681 | using log status template, the copy information is displayed correctly. |
|
682 | 682 | $ hg log -Tstatus -r. -C |
|
683 | 683 | changeset: 1:6685fde43d21 |
|
684 | 684 | tag: tip |
|
685 | 685 | user: test |
|
686 | 686 | date: * (glob) |
|
687 | 687 | summary: blah |
|
688 | 688 | files: |
|
689 | 689 | M a |
|
690 | 690 | b |
|
691 | 691 | R b |
|
692 | 692 | |
|
693 | 693 | |
|
694 | 694 | $ cd .. |
|
695 | 695 | |
|
696 | 696 | Make sure .hg doesn't show up even as a symlink |
|
697 | 697 | |
|
698 | 698 | $ hg init repo0 |
|
699 | 699 | $ mkdir symlink-repo0 |
|
700 | 700 | $ cd symlink-repo0 |
|
701 | 701 | $ ln -s ../repo0/.hg |
|
702 | 702 | $ hg status |
|
703 | 703 | |
|
704 | 704 | If the size hasn’t changed but mtime has, status needs to read the contents |
|
705 | 705 | of the file to check whether it has changed |
|
706 | 706 | |
|
707 | 707 | $ echo 1 > a |
|
708 | 708 | $ echo 1 > b |
|
709 | 709 | $ touch -t 200102030000 a b |
|
710 | 710 | $ hg commit -Aqm '#0' |
|
711 | 711 | $ echo 2 > a |
|
712 | 712 | $ touch -t 200102040000 a b |
|
713 | 713 | $ hg status |
|
714 | 714 | M a |
|
715 | 715 | |
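
The test above exercises exactly the case the comment describes: same size, newer mtime. As a rough illustration of that decision (not Mercurial's dirstate code; the CacheEntry type and its fields are invented for this sketch), a status check might look like this in Rust:

    use std::fs;
    use std::io;
    use std::path::Path;
    use std::time::SystemTime;

    // Simplified stand-in for a dirstate-style cache entry (hypothetical).
    struct CacheEntry {
        size: u64,
        mtime: SystemTime,
        contents: Vec<u8>,
    }

    // Returns Ok(true) if `path` should be reported as modified.
    fn is_modified(path: &Path, entry: &CacheEntry) -> io::Result<bool> {
        let meta = fs::metadata(path)?;
        if meta.len() != entry.size {
            // Different size: modified, no need to read the file.
            return Ok(true);
        }
        if meta.modified()? == entry.mtime {
            // Same size and same mtime: assume clean.
            return Ok(false);
        }
        // Same size but different mtime: only the contents can tell.
        Ok(fs::read(path)? != entry.contents)
    }

Reading the file only in the ambiguous case keeps the common paths (size changed, or nothing changed) cheap.
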
|
716 | 716 | Asking specifically for the status of a deleted/removed file |
|
717 | 717 | |
|
718 | 718 | $ rm a |
|
719 | 719 | $ rm b |
|
720 | 720 | $ hg status a |
|
721 | 721 | ! a |
|
722 | 722 | $ hg rm a |
|
723 | 723 | $ hg rm b |
|
724 | 724 | $ hg status a |
|
725 | 725 | R a |
|
726 | 726 | $ hg commit -qm '#1' |
|
727 | 727 | $ hg status a |
|
728 | 728 | a: $ENOENT$ |
|
729 | 729 | |
|
730 | 730 | Check using the include flag with a pattern when status does not need to traverse
|
731 | 731 | the working directory (issue6483) |
|
732 | 732 | |
|
733 | 733 | $ cd .. |
|
734 | 734 | $ hg init issue6483 |
|
735 | 735 | $ cd issue6483 |
|
736 | 736 | $ touch a.py b.rs |
|
737 | 737 | $ hg add a.py b.rs |
|
738 | 738 | $ hg st -aI "*.py" |
|
739 | 739 | A a.py |
|
740 | 740 | |
|
741 | 741 | Also check exclude pattern |
|
742 | 742 | |
|
743 | 743 | $ hg st -aX "*.rs" |
|
744 | 744 | A a.py |
|
745 | 745 | |
|
746 | 746 | issue6335 |
|
747 | 747 | When a directory containing a tracked file gets symlinked, as of 5.8 |
|
748 | 748 | `hg st` only gives the correct answer about clean (or deleted) files |
|
749 | 749 | if also listing unknowns. |
|
750 | 750 | The tree-based dirstate and status algorithms fix this:
|
751 | 751 | |
|
752 | 752 | #if symlink no-dirstate-v1 |
|
753 | 753 | |
|
754 | 754 | $ cd .. |
|
755 | 755 | $ hg init issue6335 |
|
756 | 756 | $ cd issue6335 |
|
757 | 757 | $ mkdir foo |
|
758 | 758 | $ touch foo/a |
|
759 | 759 | $ hg ci -Ama |
|
760 | 760 | adding foo/a |
|
761 | 761 | $ mv foo bar |
|
762 | 762 | $ ln -s bar foo |
|
763 | 763 | $ hg status |
|
764 | 764 | ! foo/a |
|
765 | 765 | ? bar/a |
|
766 | 766 | ? foo |
|
767 | 767 | |
|
768 | 768 | $ hg status -c # incorrect output with `dirstate-v1` |
|
769 | 769 | $ hg status -cu |
|
770 | 770 | ? bar/a |
|
771 | 771 | ? foo |
|
772 | 772 | $ hg status -d # incorrect output with `dirstate-v1` |
|
773 | 773 | ! foo/a |
|
774 | 774 | $ hg status -du |
|
775 | 775 | ! foo/a |
|
776 | 776 | ? bar/a |
|
777 | 777 | ? foo |
|
778 | 778 | |
|
779 | 779 | #endif |
|
780 | 780 | |
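
For the symlink scenario above, the key observation is that a tracked file can only be clean if every ancestor component is still a real directory. A minimal Rust sketch of that ancestor check (an illustration only, not the tree-based dirstate implementation the test refers to):

    use std::fs;
    use std::path::{Path, PathBuf};

    // Returns true if any ancestor directory of `tracked` (relative to
    // `root`) is no longer a real directory, e.g. because it was replaced
    // by a symlink. A tracked file under such a path cannot be clean.
    fn ancestor_replaced(root: &Path, tracked: &Path) -> bool {
        let mut dir: PathBuf = root.to_path_buf();
        let parent = tracked.parent().unwrap_or_else(|| Path::new(""));
        for comp in parent.components() {
            dir.push(comp);
            match fs::symlink_metadata(&dir) {
                // Still a genuine directory: keep walking down.
                Ok(meta) if meta.file_type().is_dir() => {}
                // Missing, a symlink, or a regular file: ancestor changed.
                _ => return true,
            }
        }
        false
    }

This is consistent with the dirstate-v1 behaviour documented above: the clean and deleted answers only come out right when unknowns are also being collected.
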
|
781 | 781 | |
|
782 | 782 | Create a repo with files in each possible status |
|
783 | 783 | |
|
784 | 784 | $ cd .. |
|
785 | 785 | $ hg init repo7 |
|
786 | 786 | $ cd repo7 |
|
787 | 787 | $ mkdir subdir |
|
788 | 788 | $ touch clean modified deleted removed |
|
789 | 789 | $ touch subdir/clean subdir/modified subdir/deleted subdir/removed |
|
790 | 790 | $ echo ignored > .hgignore |
|
791 | 791 | $ hg ci -Aqm '#0' |
|
792 | 792 | $ echo 1 > modified |
|
793 | 793 | $ echo 1 > subdir/modified |
|
794 | 794 | $ rm deleted |
|
795 | 795 | $ rm subdir/deleted |
|
796 | 796 | $ hg rm removed |
|
797 | 797 | $ hg rm subdir/removed |
|
798 | 798 | $ touch unknown ignored |
|
799 | 799 | $ touch subdir/unknown subdir/ignored |
|
800 | 800 | |
|
801 | 801 | Check the output |
|
802 | 802 | |
|
803 | 803 | $ hg status |
|
804 | 804 | M modified |
|
805 | 805 | M subdir/modified |
|
806 | 806 | R removed |
|
807 | 807 | R subdir/removed |
|
808 | 808 | ! deleted |
|
809 | 809 | ! subdir/deleted |
|
810 | 810 | ? subdir/unknown |
|
811 | 811 | ? unknown |
|
812 | 812 | |
|
813 | 813 | $ hg status -mard |
|
814 | 814 | M modified |
|
815 | 815 | M subdir/modified |
|
816 | 816 | R removed |
|
817 | 817 | R subdir/removed |
|
818 | 818 | ! deleted |
|
819 | 819 | ! subdir/deleted |
|
820 | 820 | |
|
821 | 821 | $ hg status -A |
|
822 | 822 | M modified |
|
823 | 823 | M subdir/modified |
|
824 | 824 | R removed |
|
825 | 825 | R subdir/removed |
|
826 | 826 | ! deleted |
|
827 | 827 | ! subdir/deleted |
|
828 | 828 | ? subdir/unknown |
|
829 | 829 | ? unknown |
|
830 | 830 | I ignored |
|
831 | 831 | I subdir/ignored |
|
832 | 832 | C .hgignore |
|
833 | 833 | C clean |
|
834 | 834 | C subdir/clean |
|
835 | 835 | |
|
836 | 836 | Note: `hg status some-name` creates a patternmatcher which is not supported |
|
837 | 837 | yet by the Rust implementation of status, but includematcher is supported. |
|
838 | 838 | --include is used below for that reason |
|
839 | 839 | |
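
Since the note above is about which matcher kinds the fast path accepts, here is a schematic sketch of that dispatch; the enum and its names are invented for illustration and do not mirror the real matcher types:

    // Hypothetical matcher kinds, named only for illustration.
    enum MatcherKind {
        Always,  // no patterns at all
        Include, // built from --include / --exclude patterns
        Pattern, // built from bare file/pattern arguments
    }

    // Decide whether a hypothetical fast path may handle the request;
    // bare patterns force a fall back to the fully featured code path.
    fn supported_by_fast_path(kind: &MatcherKind) -> bool {
        match kind {
            MatcherKind::Always | MatcherKind::Include => true,
            MatcherKind::Pattern => false,
        }
    }

Using --include in the tests below keeps them on the supported side of this split.
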
|
840 | 840 | Remove a directory that contains tracked files |
|
841 | 841 | |
|
842 | 842 | $ rm -r subdir |
|
843 | 843 | $ hg status --include subdir |
|
844 | 844 | R subdir/removed |
|
845 | 845 | ! subdir/clean |
|
846 | 846 | ! subdir/deleted |
|
847 | 847 | ! subdir/modified |
|
848 | 848 | |
|
849 | 849 | … and replace it with a file
|
850 | 850 | |
|
851 | 851 | $ touch subdir |
|
852 | 852 | $ hg status --include subdir |
|
853 | 853 | R subdir/removed |
|
854 | 854 | ! subdir/clean |
|
855 | 855 | ! subdir/deleted |
|
856 | 856 | ! subdir/modified |
|
857 | 857 | ? subdir |
|
858 | 858 | |
|
859 | 859 | Replace a deleted or removed file with a directory
|
860 | 860 | |
|
861 | 861 | $ mkdir deleted removed |
|
862 | 862 | $ touch deleted/1 removed/1 |
|
863 | 863 | $ hg status --include deleted --include removed |
|
864 | 864 | R removed |
|
865 | 865 | ! deleted |
|
866 | 866 | ? deleted/1 |
|
867 | 867 | ? removed/1 |
|
868 | 868 | $ hg add removed/1 |
|
869 | 869 | $ hg status --include deleted --include removed |
|
870 | 870 | A removed/1 |
|
871 | 871 | R removed |
|
872 | 872 | ! deleted |
|
873 | 873 | ? deleted/1 |
|
874 | 874 | |
|
875 | 875 | Deeply nested files in an ignored directory are still listed on request |
|
876 | 876 | |
|
877 | 877 | $ echo ignored-dir >> .hgignore |
|
878 | 878 | $ mkdir ignored-dir |
|
879 | 879 | $ mkdir ignored-dir/subdir |
|
880 | 880 | $ touch ignored-dir/subdir/1 |
|
881 | 881 | $ hg status --ignored |
|
882 | 882 | I ignored |
|
883 | 883 | I ignored-dir/subdir/1 |
|
884 | ||
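
The "deeply nested" case above depends on the walk descending into ignored directories when --ignored is given instead of pruning them. A simplified Rust sketch of that choice (the `is_ignored` callback stands in for the real ignore matcher; this is not the actual implementation):

    use std::fs;
    use std::io;
    use std::path::{Path, PathBuf};

    // Walk `dir`, collecting ignored files only when `list_ignored` is set.
    fn walk_ignored(
        dir: &Path,
        dir_ignored: bool,
        list_ignored: bool,
        is_ignored: &dyn Fn(&Path) -> bool,
        out: &mut Vec<PathBuf>,
    ) -> io::Result<()> {
        for entry in fs::read_dir(dir)? {
            let path = entry?.path();
            // Anything inside an ignored directory is ignored as well.
            let ignored = dir_ignored || is_ignored(&path);
            if path.is_dir() {
                // Pruning ignored directories is the usual shortcut;
                // descending when --ignored was requested is what keeps
                // deeply nested entries such as ignored-dir/subdir/1 visible.
                if !ignored || list_ignored {
                    walk_ignored(&path, ignored, list_ignored, is_ignored, out)?;
                }
            } else if ignored && list_ignored {
                out.push(path);
            }
        }
        Ok(())
    }
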
|
885 | Check that using the include flag while listing ignored files composes correctly (issue6514) |
|
886 | ||
|
887 | $ cd .. | |
|
888 | $ hg init issue6514 | |
|
889 | $ cd issue6514 | |
|
890 | $ mkdir ignored-folder | |
|
891 | $ touch A.hs B.hs C.hs ignored-folder/other.txt ignored-folder/ctest.hs | |
|
892 | $ cat >.hgignore <<EOF | |
|
893 | > A.hs | |
|
894 | > B.hs | |
|
895 | > ignored-folder/ | |
|
896 | > EOF | |
|
897 | $ hg st -i -I 're:.*\.hs$' | |
|
898 | I A.hs | |
|
899 | I B.hs | |
|
900 | I ignored-folder/ctest.hs |