@@ -1,1599 +1,1590 @@
|
1 | 1 | # hg.py - repository classes for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> |
|
4 | 4 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | from __future__ import absolute_import |
|
10 | 10 | |
|
11 | 11 | import errno |
|
12 | 12 | import os |
|
13 | 13 | import shutil |
|
14 | 14 | import stat |
|
15 | 15 | |
|
16 | 16 | from .i18n import _ |
|
17 | 17 | from .node import ( |
|
18 | 18 | hex, |
|
19 | 19 | sha1nodeconstants, |
|
20 | 20 | short, |
|
21 | 21 | ) |
|
22 | 22 | from .pycompat import getattr |
|
23 | 23 | |
|
24 | 24 | from . import ( |
|
25 | 25 | bookmarks, |
|
26 | 26 | bundlerepo, |
|
27 | cacheutil, | |
|
28 | 27 | cmdutil, |
|
29 | 28 | destutil, |
|
30 | 29 | discovery, |
|
31 | 30 | error, |
|
32 | 31 | exchange, |
|
33 | 32 | extensions, |
|
34 | 33 | graphmod, |
|
35 | 34 | httppeer, |
|
36 | 35 | localrepo, |
|
37 | 36 | lock, |
|
38 | 37 | logcmdutil, |
|
39 | 38 | logexchange, |
|
40 | 39 | merge as mergemod, |
|
41 | 40 | mergestate as mergestatemod, |
|
42 | 41 | narrowspec, |
|
43 | 42 | phases, |
|
44 | 43 | requirements, |
|
45 | 44 | scmutil, |
|
46 | 45 | sshpeer, |
|
47 | 46 | statichttprepo, |
|
48 | 47 | ui as uimod, |
|
49 | 48 | unionrepo, |
|
50 | 49 | url, |
|
51 | 50 | util, |
|
52 | 51 | verify as verifymod, |
|
53 | 52 | vfs as vfsmod, |
|
54 | 53 | ) |
|
55 | 54 | from .interfaces import repository as repositorymod |
|
56 | 55 | from .utils import ( |
|
57 | 56 | hashutil, |
|
58 | 57 | stringutil, |
|
59 | 58 | urlutil, |
|
60 | 59 | ) |
|
61 | 60 | |
|
62 | 61 | |
|
63 | 62 | release = lock.release |
|
64 | 63 | |
|
65 | 64 | # shared features |
|
66 | 65 | sharedbookmarks = b'bookmarks' |
|
67 | 66 | |
|
68 | 67 | |
|
69 | 68 | def _local(path): |
|
70 | 69 | path = util.expandpath(urlutil.urllocalpath(path)) |
|
71 | 70 | |
|
72 | 71 | try: |
|
73 | 72 | # we use os.stat() directly here instead of os.path.isfile() |
|
74 | 73 | # because the latter started returning `False` on invalid path |
|
75 | 74 | # exceptions starting in 3.8 and we care about handling |
|
76 | 75 | # invalid paths specially here. |
|
77 | 76 | st = os.stat(path) |
|
78 | 77 | isfile = stat.S_ISREG(st.st_mode) |
|
79 | 78 | # Python 2 raises TypeError, Python 3 ValueError. |
|
80 | 79 | except (TypeError, ValueError) as e: |
|
81 | 80 | raise error.Abort( |
|
82 | 81 | _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e)) |
|
83 | 82 | ) |
|
84 | 83 | except OSError: |
|
85 | 84 | isfile = False |
|
86 | 85 | |
|
87 | 86 | return isfile and bundlerepo or localrepo |
|
88 | 87 | |
|
89 | 88 | |
|
90 | 89 | def addbranchrevs(lrepo, other, branches, revs): |
|
91 | 90 | peer = other.peer() # a courtesy to callers using a localrepo for other |
|
92 | 91 | hashbranch, branches = branches |
|
93 | 92 | if not hashbranch and not branches: |
|
94 | 93 | x = revs or None |
|
95 | 94 | if revs: |
|
96 | 95 | y = revs[0] |
|
97 | 96 | else: |
|
98 | 97 | y = None |
|
99 | 98 | return x, y |
|
100 | 99 | if revs: |
|
101 | 100 | revs = list(revs) |
|
102 | 101 | else: |
|
103 | 102 | revs = [] |
|
104 | 103 | |
|
105 | 104 | if not peer.capable(b'branchmap'): |
|
106 | 105 | if branches: |
|
107 | 106 | raise error.Abort(_(b"remote branch lookup not supported")) |
|
108 | 107 | revs.append(hashbranch) |
|
109 | 108 | return revs, revs[0] |
|
110 | 109 | |
|
111 | 110 | with peer.commandexecutor() as e: |
|
112 | 111 | branchmap = e.callcommand(b'branchmap', {}).result() |
|
113 | 112 | |
|
114 | 113 | def primary(branch): |
|
115 | 114 | if branch == b'.': |
|
116 | 115 | if not lrepo: |
|
117 | 116 | raise error.Abort(_(b"dirstate branch not accessible")) |
|
118 | 117 | branch = lrepo.dirstate.branch() |
|
119 | 118 | if branch in branchmap: |
|
120 | 119 | revs.extend(hex(r) for r in reversed(branchmap[branch])) |
|
121 | 120 | return True |
|
122 | 121 | else: |
|
123 | 122 | return False |
|
124 | 123 | |
|
125 | 124 | for branch in branches: |
|
126 | 125 | if not primary(branch): |
|
127 | 126 | raise error.RepoLookupError(_(b"unknown branch '%s'") % branch) |
|
128 | 127 | if hashbranch: |
|
129 | 128 | if not primary(hashbranch): |
|
130 | 129 | revs.append(hashbranch) |
|
131 | 130 | return revs, revs[0] |
|
132 | 131 | |
|
133 | 132 | |
|
134 | 133 | def parseurl(path, branches=None): |
|
135 | 134 | '''parse url#branch, returning (url, (branch, branches))''' |
|
136 | 135 | msg = b'parseurl(...) moved to mercurial.utils.urlutil' |
|
137 | 136 | util.nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
138 | 137 | return urlutil.parseurl(path, branches=branches) |
|
139 | 138 | |
|
140 | 139 | |
|
141 | 140 | schemes = { |
|
142 | 141 | b'bundle': bundlerepo, |
|
143 | 142 | b'union': unionrepo, |
|
144 | 143 | b'file': _local, |
|
145 | 144 | b'http': httppeer, |
|
146 | 145 | b'https': httppeer, |
|
147 | 146 | b'ssh': sshpeer, |
|
148 | 147 | b'static-http': statichttprepo, |
|
149 | 148 | } |
|
150 | 149 | |
|
151 | 150 | |
|
152 | 151 | def _peerlookup(path): |
|
153 | 152 | u = urlutil.url(path) |
|
154 | 153 | scheme = u.scheme or b'file' |
|
155 | 154 | thing = schemes.get(scheme) or schemes[b'file'] |
|
156 | 155 | try: |
|
157 | 156 | return thing(path) |
|
158 | 157 | except TypeError: |
|
159 | 158 | # we can't test callable(thing) because 'thing' can be an unloaded |
|
160 | 159 | # module that implements __call__ |
|
161 | 160 | if not util.safehasattr(thing, b'instance'): |
|
162 | 161 | raise |
|
163 | 162 | return thing |
|
164 | 163 | |
|
165 | 164 | |
|
166 | 165 | def islocal(repo): |
|
167 | 166 | '''return true if repo (or path pointing to repo) is local''' |
|
168 | 167 | if isinstance(repo, bytes): |
|
169 | 168 | try: |
|
170 | 169 | return _peerlookup(repo).islocal(repo) |
|
171 | 170 | except AttributeError: |
|
172 | 171 | return False |
|
173 | 172 | return repo.local() |
|
174 | 173 | |
|
175 | 174 | |
|
176 | 175 | def openpath(ui, path, sendaccept=True): |
|
177 | 176 | '''open path with open if local, url.open if remote''' |
|
178 | 177 | pathurl = urlutil.url(path, parsequery=False, parsefragment=False) |
|
179 | 178 | if pathurl.islocal(): |
|
180 | 179 | return util.posixfile(pathurl.localpath(), b'rb') |
|
181 | 180 | else: |
|
182 | 181 | return url.open(ui, path, sendaccept=sendaccept) |
|
183 | 182 | |
|
184 | 183 | |
|
185 | 184 | # a list of (ui, repo) functions called for wire peer initialization |
|
186 | 185 | wirepeersetupfuncs = [] |
|
187 | 186 | |
|
188 | 187 | |
|
189 | 188 | def _peerorrepo( |
|
190 | 189 | ui, path, create=False, presetupfuncs=None, intents=None, createopts=None |
|
191 | 190 | ): |
|
192 | 191 | """return a repository object for the specified path""" |
|
193 | 192 | obj = _peerlookup(path).instance( |
|
194 | 193 | ui, path, create, intents=intents, createopts=createopts |
|
195 | 194 | ) |
|
196 | 195 | ui = getattr(obj, "ui", ui) |
|
197 | 196 | for f in presetupfuncs or []: |
|
198 | 197 | f(ui, obj) |
|
199 | 198 | ui.log(b'extension', b'- executing reposetup hooks\n') |
|
200 | 199 | with util.timedcm('all reposetup') as allreposetupstats: |
|
201 | 200 | for name, module in extensions.extensions(ui): |
|
202 | 201 | ui.log(b'extension', b' - running reposetup for %s\n', name) |
|
203 | 202 | hook = getattr(module, 'reposetup', None) |
|
204 | 203 | if hook: |
|
205 | 204 | with util.timedcm('reposetup %r', name) as stats: |
|
206 | 205 | hook(ui, obj) |
|
207 | 206 | ui.log( |
|
208 | 207 | b'extension', b' > reposetup for %s took %s\n', name, stats |
|
209 | 208 | ) |
|
210 | 209 | ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats) |
|
211 | 210 | if not obj.local(): |
|
212 | 211 | for f in wirepeersetupfuncs: |
|
213 | 212 | f(ui, obj) |
|
214 | 213 | return obj |
|
215 | 214 | |
|
216 | 215 | |
|
217 | 216 | def repository( |
|
218 | 217 | ui, |
|
219 | 218 | path=b'', |
|
220 | 219 | create=False, |
|
221 | 220 | presetupfuncs=None, |
|
222 | 221 | intents=None, |
|
223 | 222 | createopts=None, |
|
224 | 223 | ): |
|
225 | 224 | """return a repository object for the specified path""" |
|
226 | 225 | peer = _peerorrepo( |
|
227 | 226 | ui, |
|
228 | 227 | path, |
|
229 | 228 | create, |
|
230 | 229 | presetupfuncs=presetupfuncs, |
|
231 | 230 | intents=intents, |
|
232 | 231 | createopts=createopts, |
|
233 | 232 | ) |
|
234 | 233 | repo = peer.local() |
|
235 | 234 | if not repo: |
|
236 | 235 | raise error.Abort( |
|
237 | 236 | _(b"repository '%s' is not local") % (path or peer.url()) |
|
238 | 237 | ) |
|
239 | 238 | return repo.filtered(b'visible') |
|
240 | 239 | |
|
241 | 240 | |
|
242 | 241 | def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None): |
|
243 | 242 | '''return a repository peer for the specified path''' |
|
244 | 243 | rui = remoteui(uiorrepo, opts) |
|
245 | 244 | return _peerorrepo( |
|
246 | 245 | rui, path, create, intents=intents, createopts=createopts |
|
247 | 246 | ).peer() |
|
248 | 247 | |
|
249 | 248 | |
|
250 | 249 | def defaultdest(source): |
|
251 | 250 | """return default destination of clone if none is given |
|
252 | 251 | |
|
253 | 252 | >>> defaultdest(b'foo') |
|
254 | 253 | 'foo' |
|
255 | 254 | >>> defaultdest(b'/foo/bar') |
|
256 | 255 | 'bar' |
|
257 | 256 | >>> defaultdest(b'/') |
|
258 | 257 | '' |
|
259 | 258 | >>> defaultdest(b'') |
|
260 | 259 | '' |
|
261 | 260 | >>> defaultdest(b'http://example.org/') |
|
262 | 261 | '' |
|
263 | 262 | >>> defaultdest(b'http://example.org/foo/') |
|
264 | 263 | 'foo' |
|
265 | 264 | """ |
|
266 | 265 | path = urlutil.url(source).path |
|
267 | 266 | if not path: |
|
268 | 267 | return b'' |
|
269 | 268 | return os.path.basename(os.path.normpath(path)) |
|
270 | 269 | |
|
271 | 270 | |
|
272 | 271 | def sharedreposource(repo): |
|
273 | 272 | """Returns repository object for source repository of a shared repo. |
|
274 | 273 | |
|
275 | 274 | If repo is not a shared repository, returns None. |
|
276 | 275 | """ |
|
277 | 276 | if repo.sharedpath == repo.path: |
|
278 | 277 | return None |
|
279 | 278 | |
|
280 | 279 | if util.safehasattr(repo, b'srcrepo') and repo.srcrepo: |
|
281 | 280 | return repo.srcrepo |
|
282 | 281 | |
|
283 | 282 | # the sharedpath always ends in the .hg; we want the path to the repo |
|
284 | 283 | source = repo.vfs.split(repo.sharedpath)[0] |
|
285 | 284 | srcurl, branches = urlutil.parseurl(source) |
|
286 | 285 | srcrepo = repository(repo.ui, srcurl) |
|
287 | 286 | repo.srcrepo = srcrepo |
|
288 | 287 | return srcrepo |
|
289 | 288 | |
|
290 | 289 | |
|
291 | 290 | def share( |
|
292 | 291 | ui, |
|
293 | 292 | source, |
|
294 | 293 | dest=None, |
|
295 | 294 | update=True, |
|
296 | 295 | bookmarks=True, |
|
297 | 296 | defaultpath=None, |
|
298 | 297 | relative=False, |
|
299 | 298 | ): |
|
300 | 299 | '''create a shared repository''' |
|
301 | 300 | |
|
302 | 301 | if not islocal(source): |
|
303 | 302 | raise error.Abort(_(b'can only share local repositories')) |
|
304 | 303 | |
|
305 | 304 | if not dest: |
|
306 | 305 | dest = defaultdest(source) |
|
307 | 306 | else: |
|
308 | 307 | dest = urlutil.get_clone_path(ui, dest)[1] |
|
309 | 308 | |
|
310 | 309 | if isinstance(source, bytes): |
|
311 | 310 | origsource, source, branches = urlutil.get_clone_path(ui, source) |
|
312 | 311 | srcrepo = repository(ui, source) |
|
313 | 312 | rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None) |
|
314 | 313 | else: |
|
315 | 314 | srcrepo = source.local() |
|
316 | 315 | checkout = None |
|
317 | 316 | |
|
318 | 317 | shareditems = set() |
|
319 | 318 | if bookmarks: |
|
320 | 319 | shareditems.add(sharedbookmarks) |
|
321 | 320 | |
|
322 | 321 | r = repository( |
|
323 | 322 | ui, |
|
324 | 323 | dest, |
|
325 | 324 | create=True, |
|
326 | 325 | createopts={ |
|
327 | 326 | b'sharedrepo': srcrepo, |
|
328 | 327 | b'sharedrelative': relative, |
|
329 | 328 | b'shareditems': shareditems, |
|
330 | 329 | }, |
|
331 | 330 | ) |
|
332 | 331 | |
|
333 | 332 | postshare(srcrepo, r, defaultpath=defaultpath) |
|
334 | 333 | r = repository(ui, dest) |
|
335 | 334 | _postshareupdate(r, update, checkout=checkout) |
|
336 | 335 | return r |
|
337 | 336 | |
|
338 | 337 | |
|
339 | 338 | def _prependsourcehgrc(repo): |
|
340 | 339 | """copies the source repo config and prepend it in current repo .hg/hgrc |
|
341 | 340 | on unshare. This is only done if the share was perfomed using share safe |
|
342 | 341 | method where we share config of source in shares""" |
|
343 | 342 | srcvfs = vfsmod.vfs(repo.sharedpath) |
|
344 | 343 | dstvfs = vfsmod.vfs(repo.path) |
|
345 | 344 | |
|
346 | 345 | if not srcvfs.exists(b'hgrc'): |
|
347 | 346 | return |
|
348 | 347 | |
|
349 | 348 | currentconfig = b'' |
|
350 | 349 | if dstvfs.exists(b'hgrc'): |
|
351 | 350 | currentconfig = dstvfs.read(b'hgrc') |
|
352 | 351 | |
|
353 | 352 | with dstvfs(b'hgrc', b'wb') as fp: |
|
354 | 353 | sourceconfig = srcvfs.read(b'hgrc') |
|
355 | 354 | fp.write(b"# Config copied from shared source\n") |
|
356 | 355 | fp.write(sourceconfig) |
|
357 | 356 | fp.write(b'\n') |
|
358 | 357 | fp.write(currentconfig) |
|
359 | 358 | |
|
360 | 359 | |
|
361 | 360 | def unshare(ui, repo): |
|
362 | 361 | """convert a shared repository to a normal one |
|
363 | 362 | |
|
364 | 363 | Copy the store data to the repo and remove the sharedpath data. |
|
365 | 364 | |
|
366 | 365 | Returns a new repository object representing the unshared repository. |
|
367 | 366 | |
|
368 | 367 | The passed repository object is not usable after this function is |
|
369 | 368 | called. |
|
370 | 369 | """ |
|
371 | 370 | |
|
372 | 371 | with repo.lock(): |
|
373 | 372 | # we use locks here because if we race with commit, we |
|
374 | 373 | # can end up with extra data in the cloned revlogs that's |
|
375 | 374 | # not pointed to by changesets, thus causing verify to |
|
376 | 375 | # fail |
|
377 | 376 | destlock = copystore(ui, repo, repo.path) |
|
378 | 377 | with destlock or util.nullcontextmanager(): |
|
379 | 378 | if requirements.SHARESAFE_REQUIREMENT in repo.requirements: |
|
380 | 379 | # we were sharing .hg/hgrc of the share source with the current |
|
381 | 380 | # repo. We need to copy that while unsharing otherwise it can |
|
382 | 381 | # disable hooks and other checks |
|
383 | 382 | _prependsourcehgrc(repo) |
|
384 | 383 | |
|
385 | 384 | sharefile = repo.vfs.join(b'sharedpath') |
|
386 | 385 | util.rename(sharefile, sharefile + b'.old') |
|
387 | 386 | |
|
388 | 387 | repo.requirements.discard(requirements.SHARED_REQUIREMENT) |
|
389 | 388 | repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT) |
|
390 | 389 | scmutil.writereporequirements(repo) |
|
391 | 390 | |
|
392 | 391 | # Removing share changes some fundamental properties of the repo instance. |
|
393 | 392 | # So we instantiate a new repo object and operate on it rather than |
|
394 | 393 | # try to keep the existing repo usable. |
|
395 | 394 | newrepo = repository(repo.baseui, repo.root, create=False) |
|
396 | 395 | |
|
397 | 396 | # TODO: figure out how to access subrepos that exist, but were previously |
|
398 | 397 | # removed from .hgsub |
|
399 | 398 | c = newrepo[b'.'] |
|
400 | 399 | subs = c.substate |
|
401 | 400 | for s in sorted(subs): |
|
402 | 401 | c.sub(s).unshare() |
|
403 | 402 | |
|
404 | 403 | localrepo.poisonrepository(repo) |
|
405 | 404 | |
|
406 | 405 | return newrepo |
|
407 | 406 | |
|
408 | 407 | |
|
409 | 408 | def postshare(sourcerepo, destrepo, defaultpath=None): |
|
410 | 409 | """Called after a new shared repo is created. |
|
411 | 410 | |
|
412 | 411 | The new repo only has a requirements file and pointer to the source. |
|
413 | 412 | This function configures additional shared data. |
|
414 | 413 | |
|
415 | 414 | Extensions can wrap this function and write additional entries to |
|
416 | 415 | destrepo/.hg/shared to indicate additional pieces of data to be shared. |
|
417 | 416 | """ |
|
418 | 417 | default = defaultpath or sourcerepo.ui.config(b'paths', b'default') |
|
419 | 418 | if default: |
|
420 | 419 | template = b'[paths]\ndefault = %s\n' |
|
421 | 420 | destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default)) |
|
422 | 421 | if requirements.NARROW_REQUIREMENT in sourcerepo.requirements: |
|
423 | 422 | with destrepo.wlock(): |
|
424 | 423 | narrowspec.copytoworkingcopy(destrepo) |
|
425 | 424 | |
|
426 | 425 | |
|
427 | 426 | def _postshareupdate(repo, update, checkout=None): |
|
428 | 427 | """Maybe perform a working directory update after a shared repo is created. |
|
429 | 428 | |
|
430 | 429 | ``update`` can be a boolean or a revision to update to. |
|
431 | 430 | """ |
|
432 | 431 | if not update: |
|
433 | 432 | return |
|
434 | 433 | |
|
435 | 434 | repo.ui.status(_(b"updating working directory\n")) |
|
436 | 435 | if update is not True: |
|
437 | 436 | checkout = update |
|
438 | 437 | for test in (checkout, b'default', b'tip'): |
|
439 | 438 | if test is None: |
|
440 | 439 | continue |
|
441 | 440 | try: |
|
442 | 441 | uprev = repo.lookup(test) |
|
443 | 442 | break |
|
444 | 443 | except error.RepoLookupError: |
|
445 | 444 | continue |
|
446 | 445 | _update(repo, uprev) |
|
447 | 446 | |
|
448 | 447 | |
|
449 | 448 | def copystore(ui, srcrepo, destpath): |
|
450 | 449 | """copy files from store of srcrepo in destpath |
|
451 | 450 | |
|
452 | 451 | returns destlock |
|
453 | 452 | """ |
|
454 | 453 | destlock = None |
|
455 | 454 | try: |
|
456 | 455 | hardlink = None |
|
457 | 456 | topic = _(b'linking') if hardlink else _(b'copying') |
|
458 | 457 | with ui.makeprogress(topic, unit=_(b'files')) as progress: |
|
459 | 458 | num = 0 |
|
460 | 459 | srcpublishing = srcrepo.publishing() |
|
461 | 460 | srcvfs = vfsmod.vfs(srcrepo.sharedpath) |
|
462 | 461 | dstvfs = vfsmod.vfs(destpath) |
|
463 | 462 | for f in srcrepo.store.copylist(): |
|
464 | 463 | if srcpublishing and f.endswith(b'phaseroots'): |
|
465 | 464 | continue |
|
466 | 465 | dstbase = os.path.dirname(f) |
|
467 | 466 | if dstbase and not dstvfs.exists(dstbase): |
|
468 | 467 | dstvfs.mkdir(dstbase) |
|
469 | 468 | if srcvfs.exists(f): |
|
470 | 469 | if f.endswith(b'data'): |
|
471 | 470 | # 'dstbase' may be empty (e.g. revlog format 0) |
|
472 | 471 | lockfile = os.path.join(dstbase, b"lock") |
|
473 | 472 | # lock to avoid premature writing to the target |
|
474 | 473 | destlock = lock.lock(dstvfs, lockfile) |
|
475 | 474 | hardlink, n = util.copyfiles( |
|
476 | 475 | srcvfs.join(f), dstvfs.join(f), hardlink, progress |
|
477 | 476 | ) |
|
478 | 477 | num += n |
|
479 | 478 | if hardlink: |
|
480 | 479 | ui.debug(b"linked %d files\n" % num) |
|
481 | 480 | else: |
|
482 | 481 | ui.debug(b"copied %d files\n" % num) |
|
483 | 482 | return destlock |
|
484 | 483 | except: # re-raises |
|
485 | 484 | release(destlock) |
|
486 | 485 | raise |
|
487 | 486 | |
|
488 | 487 | |
|
489 | 488 | def clonewithshare( |
|
490 | 489 | ui, |
|
491 | 490 | peeropts, |
|
492 | 491 | sharepath, |
|
493 | 492 | source, |
|
494 | 493 | srcpeer, |
|
495 | 494 | dest, |
|
496 | 495 | pull=False, |
|
497 | 496 | rev=None, |
|
498 | 497 | update=True, |
|
499 | 498 | stream=False, |
|
500 | 499 | ): |
|
501 | 500 | """Perform a clone using a shared repo. |
|
502 | 501 | |
|
503 | 502 | The store for the repository will be located at <sharepath>/.hg. The |
|
504 | 503 | specified revisions will be cloned or pulled from "source". A shared repo |
|
505 | 504 | will be created at "dest" and a working copy will be created if "update" is |
|
506 | 505 | True. |
|
507 | 506 | """ |
|
508 | 507 | revs = None |
|
509 | 508 | if rev: |
|
510 | 509 | if not srcpeer.capable(b'lookup'): |
|
511 | 510 | raise error.Abort( |
|
512 | 511 | _( |
|
513 | 512 | b"src repository does not support " |
|
514 | 513 | b"revision lookup and so doesn't " |
|
515 | 514 | b"support clone by revision" |
|
516 | 515 | ) |
|
517 | 516 | ) |
|
518 | 517 | |
|
519 | 518 | # TODO this is batchable. |
|
520 | 519 | remoterevs = [] |
|
521 | 520 | for r in rev: |
|
522 | 521 | with srcpeer.commandexecutor() as e: |
|
523 | 522 | remoterevs.append( |
|
524 | 523 | e.callcommand( |
|
525 | 524 | b'lookup', |
|
526 | 525 | { |
|
527 | 526 | b'key': r, |
|
528 | 527 | }, |
|
529 | 528 | ).result() |
|
530 | 529 | ) |
|
531 | 530 | revs = remoterevs |
|
532 | 531 | |
|
533 | 532 | # Obtain a lock before checking for or cloning the pooled repo otherwise |
|
534 | 533 | # 2 clients may race creating or populating it. |
|
535 | 534 | pooldir = os.path.dirname(sharepath) |
|
536 | 535 | # lock class requires the directory to exist. |
|
537 | 536 | try: |
|
538 | 537 | util.makedir(pooldir, False) |
|
539 | 538 | except OSError as e: |
|
540 | 539 | if e.errno != errno.EEXIST: |
|
541 | 540 | raise |
|
542 | 541 | |
|
543 | 542 | poolvfs = vfsmod.vfs(pooldir) |
|
544 | 543 | basename = os.path.basename(sharepath) |
|
545 | 544 | |
|
546 | 545 | with lock.lock(poolvfs, b'%s.lock' % basename): |
|
547 | 546 | if os.path.exists(sharepath): |
|
548 | 547 | ui.status( |
|
549 | 548 | _(b'(sharing from existing pooled repository %s)\n') % basename |
|
550 | 549 | ) |
|
551 | 550 | else: |
|
552 | 551 | ui.status( |
|
553 | 552 | _(b'(sharing from new pooled repository %s)\n') % basename |
|
554 | 553 | ) |
|
555 | 554 | # Always use pull mode because hardlinks in share mode don't work |
|
556 | 555 | # well. Never update because working copies aren't necessary in |
|
557 | 556 | # share mode. |
|
558 | 557 | clone( |
|
559 | 558 | ui, |
|
560 | 559 | peeropts, |
|
561 | 560 | source, |
|
562 | 561 | dest=sharepath, |
|
563 | 562 | pull=True, |
|
564 | 563 | revs=rev, |
|
565 | 564 | update=False, |
|
566 | 565 | stream=stream, |
|
567 | 566 | ) |
|
568 | 567 | |
|
569 | 568 | # Resolve the value to put in [paths] section for the source. |
|
570 | 569 | if islocal(source): |
|
571 | 570 | defaultpath = os.path.abspath(urlutil.urllocalpath(source)) |
|
572 | 571 | else: |
|
573 | 572 | defaultpath = source |
|
574 | 573 | |
|
575 | 574 | sharerepo = repository(ui, path=sharepath) |
|
576 | 575 | destrepo = share( |
|
577 | 576 | ui, |
|
578 | 577 | sharerepo, |
|
579 | 578 | dest=dest, |
|
580 | 579 | update=False, |
|
581 | 580 | bookmarks=False, |
|
582 | 581 | defaultpath=defaultpath, |
|
583 | 582 | ) |
|
584 | 583 | |
|
585 | 584 | # We need to perform a pull against the dest repo to fetch bookmarks |
|
586 | 585 | # and other non-store data that isn't shared by default. In the case of |
|
587 | 586 | # non-existing shared repo, this means we pull from the remote twice. This |
|
588 | 587 | # is a bit weird. But at the time it was implemented, there wasn't an easy |
|
589 | 588 | # way to pull just non-changegroup data. |
|
590 | 589 | exchange.pull(destrepo, srcpeer, heads=revs) |
|
591 | 590 | |
|
592 | 591 | _postshareupdate(destrepo, update) |
|
593 | 592 | |
|
594 | 593 | return srcpeer, peer(ui, peeropts, dest) |
|
595 | 594 | |
|
596 | 595 | |
|
597 | 596 | # Recomputing caches is often slow on big repos, so copy them. |
|
598 | 597 | def _copycache(srcrepo, dstcachedir, fname): |
|
599 | 598 | """copy a cache from srcrepo to destcachedir (if it exists)""" |
|
600 | 599 | srcfname = srcrepo.cachevfs.join(fname) |
|
601 | 600 | dstfname = os.path.join(dstcachedir, fname) |
|
602 | 601 | if os.path.exists(srcfname): |
|
603 | 602 | if not os.path.exists(dstcachedir): |
|
604 | 603 | os.mkdir(dstcachedir) |
|
605 | 604 | util.copyfile(srcfname, dstfname) |
|
606 | 605 | |
|
607 | 606 | |
|
608 | 607 | def clone( |
|
609 | 608 | ui, |
|
610 | 609 | peeropts, |
|
611 | 610 | source, |
|
612 | 611 | dest=None, |
|
613 | 612 | pull=False, |
|
614 | 613 | revs=None, |
|
615 | 614 | update=True, |
|
616 | 615 | stream=False, |
|
617 | 616 | branch=None, |
|
618 | 617 | shareopts=None, |
|
619 | 618 | storeincludepats=None, |
|
620 | 619 | storeexcludepats=None, |
|
621 | 620 | depth=None, |
|
622 | 621 | ): |
|
623 | 622 | """Make a copy of an existing repository. |
|
624 | 623 | |
|
625 | 624 | Create a copy of an existing repository in a new directory. The |
|
626 | 625 | source and destination are URLs, as passed to the repository |
|
627 | 626 | function. Returns a pair of repository peers, the source and |
|
628 | 627 | newly created destination. |
|
629 | 628 | |
|
630 | 629 | The location of the source is added to the new repository's |
|
631 | 630 | .hg/hgrc file, as the default to be used for future pulls and |
|
632 | 631 | pushes. |
|
633 | 632 | |
|
634 | 633 | If an exception is raised, the partly cloned/updated destination |
|
635 | 634 | repository will be deleted. |
|
636 | 635 | |
|
637 | 636 | Arguments: |
|
638 | 637 | |
|
639 | 638 | source: repository object or URL |
|
640 | 639 | |
|
641 | 640 | dest: URL of destination repository to create (defaults to base |
|
642 | 641 | name of source repository) |
|
643 | 642 | |
|
644 | 643 | pull: always pull from source repository, even in local case or if the |
|
645 | 644 | server prefers streaming |
|
646 | 645 | |
|
647 | 646 | stream: stream raw data uncompressed from repository (fast over |
|
648 | 647 | LAN, slow over WAN) |
|
649 | 648 | |
|
650 | 649 | revs: revision to clone up to (implies pull=True) |
|
651 | 650 | |
|
652 | 651 | update: update working directory after clone completes, if |
|
653 | 652 | destination is local repository (True means update to default rev, |
|
654 | 653 | anything else is treated as a revision) |
|
655 | 654 | |
|
656 | 655 | branch: branches to clone |
|
657 | 656 | |
|
658 | 657 | shareopts: dict of options to control auto sharing behavior. The "pool" key |
|
659 | 658 | activates auto sharing mode and defines the directory for stores. The |
|
660 | 659 | "mode" key determines how to construct the directory name of the shared |
|
661 | 660 | repository. "identity" means the name is derived from the node of the first |
|
662 | 661 | changeset in the repository. "remote" means the name is derived from the |
|
663 | 662 | remote's path/URL. Defaults to "identity." |
|
664 | 663 | |
|
665 | 664 | storeincludepats and storeexcludepats: sets of file patterns to include and |
|
666 | 665 | exclude in the repository copy, respectively. If not defined, all files |
|
667 | 666 | will be included (a "full" clone). Otherwise a "narrow" clone containing |
|
668 | 667 | only the requested files will be performed. If ``storeincludepats`` is not |
|
669 | 668 | defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be |
|
670 | 669 | ``path:.``. If both are empty sets, no files will be cloned. |
|
671 | 670 | """ |
|
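Aside: a minimal usage sketch of this API, following the docstring above. It assumes a ui object is already available; the URL and destination are hypothetical placeholders, not taken from the patch.

    # clone() returns a (srcpeer, destpeer) pair; the source URL is recorded as
    # the new repository's default path in .hg/hgrc.
    srcpeer, destpeer = clone(ui, {}, b'https://example.org/repo', dest=b'repo-copy')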
672 | 671 | |
|
673 | 672 | if isinstance(source, bytes): |
|
674 | 673 | src = urlutil.get_clone_path(ui, source, branch) |
|
675 | 674 | origsource, source, branches = src |
|
676 | 675 | srcpeer = peer(ui, peeropts, source) |
|
677 | 676 | else: |
|
678 | 677 | srcpeer = source.peer() # in case we were called with a localrepo |
|
679 | 678 | branches = (None, branch or []) |
|
680 | 679 | origsource = source = srcpeer.url() |
|
681 | 680 | srclock = destlock = cleandir = None |
|
682 | 681 | destpeer = None |
|
683 | 682 | try: |
|
684 | 683 | revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs) |
|
685 | 684 | |
|
686 | 685 | if dest is None: |
|
687 | 686 | dest = defaultdest(source) |
|
688 | 687 | if dest: |
|
689 | 688 | ui.status(_(b"destination directory: %s\n") % dest) |
|
690 | 689 | else: |
|
691 | 690 | dest = urlutil.get_clone_path(ui, dest)[0] |
|
692 | 691 | |
|
693 | 692 | dest = urlutil.urllocalpath(dest) |
|
694 | 693 | source = urlutil.urllocalpath(source) |
|
695 | 694 | |
|
696 | 695 | if not dest: |
|
697 | 696 | raise error.InputError(_(b"empty destination path is not valid")) |
|
698 | 697 | |
|
699 | 698 | destvfs = vfsmod.vfs(dest, expandpath=True) |
|
700 | 699 | if destvfs.lexists(): |
|
701 | 700 | if not destvfs.isdir(): |
|
702 | 701 | raise error.InputError( |
|
703 | 702 | _(b"destination '%s' already exists") % dest |
|
704 | 703 | ) |
|
705 | 704 | elif destvfs.listdir(): |
|
706 | 705 | raise error.InputError( |
|
707 | 706 | _(b"destination '%s' is not empty") % dest |
|
708 | 707 | ) |
|
709 | 708 | |
|
710 | 709 | createopts = {} |
|
711 | 710 | narrow = False |
|
712 | 711 | |
|
713 | 712 | if storeincludepats is not None: |
|
714 | 713 | narrowspec.validatepatterns(storeincludepats) |
|
715 | 714 | narrow = True |
|
716 | 715 | |
|
717 | 716 | if storeexcludepats is not None: |
|
718 | 717 | narrowspec.validatepatterns(storeexcludepats) |
|
719 | 718 | narrow = True |
|
720 | 719 | |
|
721 | 720 | if narrow: |
|
722 | 721 | # Include everything by default if only exclusion patterns defined. |
|
723 | 722 | if storeexcludepats and not storeincludepats: |
|
724 | 723 | storeincludepats = {b'path:.'} |
|
725 | 724 | |
|
726 | 725 | createopts[b'narrowfiles'] = True |
|
727 | 726 | |
|
728 | 727 | if depth: |
|
729 | 728 | createopts[b'shallowfilestore'] = True |
|
730 | 729 | |
|
731 | 730 | if srcpeer.capable(b'lfs-serve'): |
|
732 | 731 | # Repository creation honors the config if it disabled the extension, so |
|
733 | 732 | # we can't just announce that lfs will be enabled. This check avoids |
|
734 | 733 | # saying that lfs will be enabled, and then saying it's an unknown |
|
735 | 734 | # feature. The lfs creation option is set in either case so that a |
|
736 | 735 | # requirement is added. If the extension is explicitly disabled but the |
|
737 | 736 | # requirement is set, the clone aborts early, before transferring any |
|
738 | 737 | # data. |
|
739 | 738 | createopts[b'lfs'] = True |
|
740 | 739 | |
|
741 | 740 | if extensions.disabled_help(b'lfs'): |
|
742 | 741 | ui.status( |
|
743 | 742 | _( |
|
744 | 743 | b'(remote is using large file support (lfs), but it is ' |
|
745 | 744 | b'explicitly disabled in the local configuration)\n' |
|
746 | 745 | ) |
|
747 | 746 | ) |
|
748 | 747 | else: |
|
749 | 748 | ui.status( |
|
750 | 749 | _( |
|
751 | 750 | b'(remote is using large file support (lfs); lfs will ' |
|
752 | 751 | b'be enabled for this repository)\n' |
|
753 | 752 | ) |
|
754 | 753 | ) |
|
755 | 754 | |
|
756 | 755 | shareopts = shareopts or {} |
|
757 | 756 | sharepool = shareopts.get(b'pool') |
|
758 | 757 | sharenamemode = shareopts.get(b'mode') |
|
759 | 758 | if sharepool and islocal(dest): |
|
760 | 759 | sharepath = None |
|
761 | 760 | if sharenamemode == b'identity': |
|
762 | 761 | # Resolve the name from the initial changeset in the remote |
|
763 | 762 | # repository. This returns nullid when the remote is empty. It |
|
764 | 763 | # raises RepoLookupError if revision 0 is filtered or otherwise |
|
765 | 764 | # not available. If we fail to resolve, sharing is not enabled. |
|
766 | 765 | try: |
|
767 | 766 | with srcpeer.commandexecutor() as e: |
|
768 | 767 | rootnode = e.callcommand( |
|
769 | 768 | b'lookup', |
|
770 | 769 | { |
|
771 | 770 | b'key': b'0', |
|
772 | 771 | }, |
|
773 | 772 | ).result() |
|
774 | 773 | |
|
775 | 774 | if rootnode != sha1nodeconstants.nullid: |
|
776 | 775 | sharepath = os.path.join(sharepool, hex(rootnode)) |
|
777 | 776 | else: |
|
778 | 777 | ui.status( |
|
779 | 778 | _( |
|
780 | 779 | b'(not using pooled storage: ' |
|
781 | 780 | b'remote appears to be empty)\n' |
|
782 | 781 | ) |
|
783 | 782 | ) |
|
784 | 783 | except error.RepoLookupError: |
|
785 | 784 | ui.status( |
|
786 | 785 | _( |
|
787 | 786 | b'(not using pooled storage: ' |
|
788 | 787 | b'unable to resolve identity of remote)\n' |
|
789 | 788 | ) |
|
790 | 789 | ) |
|
791 | 790 | elif sharenamemode == b'remote': |
|
792 | 791 | sharepath = os.path.join( |
|
793 | 792 | sharepool, hex(hashutil.sha1(source).digest()) |
|
794 | 793 | ) |
|
795 | 794 | else: |
|
796 | 795 | raise error.Abort( |
|
797 | 796 | _(b'unknown share naming mode: %s') % sharenamemode |
|
798 | 797 | ) |
|
799 | 798 | |
|
800 | 799 | # TODO this is a somewhat arbitrary restriction. |
|
801 | 800 | if narrow: |
|
802 | 801 | ui.status( |
|
803 | 802 | _(b'(pooled storage not supported for narrow clones)\n') |
|
804 | 803 | ) |
|
805 | 804 | sharepath = None |
|
806 | 805 | |
|
807 | 806 | if sharepath: |
|
808 | 807 | return clonewithshare( |
|
809 | 808 | ui, |
|
810 | 809 | peeropts, |
|
811 | 810 | sharepath, |
|
812 | 811 | source, |
|
813 | 812 | srcpeer, |
|
814 | 813 | dest, |
|
815 | 814 | pull=pull, |
|
816 | 815 | rev=revs, |
|
817 | 816 | update=update, |
|
818 | 817 | stream=stream, |
|
819 | 818 | ) |
|
820 | 819 | |
|
821 | 820 | srcrepo = srcpeer.local() |
|
822 | 821 | |
|
823 | 822 | abspath = origsource |
|
824 | 823 | if islocal(origsource): |
|
825 | 824 | abspath = os.path.abspath(urlutil.urllocalpath(origsource)) |
|
826 | 825 | |
|
827 | 826 | if islocal(dest): |
|
828 | 827 | if os.path.exists(dest): |
|
829 | 828 | # only clean up directories we create ourselves |
|
830 | 829 | hgdir = os.path.realpath(os.path.join(dest, b".hg")) |
|
831 | 830 | cleandir = hgdir |
|
832 | 831 | else: |
|
833 | 832 | cleandir = dest |
|
834 | 833 | |
|
835 | 834 | copy = False |
|
836 | 835 | if ( |
|
837 | 836 | srcrepo |
|
838 | 837 | and srcrepo.cancopy() |
|
839 | 838 | and islocal(dest) |
|
840 | 839 | and not phases.hassecret(srcrepo) |
|
841 | 840 | ): |
|
842 | 841 | copy = not pull and not revs |
|
843 | 842 | |
|
844 | 843 | # TODO this is a somewhat arbitrary restriction. |
|
845 | 844 | if narrow: |
|
846 | 845 | copy = False |
|
847 | 846 | |
|
848 | 847 | if copy: |
|
849 | 848 | try: |
|
850 | 849 | # we use a lock here because if we race with commit, we |
|
851 | 850 | # can end up with extra data in the cloned revlogs that's |
|
852 | 851 | # not pointed to by changesets, thus causing verify to |
|
853 | 852 | # fail |
|
854 | 853 | srclock = srcrepo.lock(wait=False) |
|
855 | 854 | except error.LockError: |
|
856 | 855 | copy = False |
|
857 | 856 | |
|
858 | 857 | if copy: |
|
859 | 858 | srcrepo.hook(b'preoutgoing', throw=True, source=b'clone') |
|
860 | 859 | |
|
861 | 860 | destrootpath = urlutil.urllocalpath(dest) |
|
862 | 861 | dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo) |
|
863 | 862 | localrepo.createrepository( |
|
864 | 863 | ui, |
|
865 | 864 | destrootpath, |
|
866 | 865 | requirements=dest_reqs, |
|
867 | 866 | ) |
|
868 | 867 | destrepo = localrepo.makelocalrepository(ui, destrootpath) |
|
868 | destlock = destrepo.lock() | |
|
869 | from . import streamclone # avoid cycle | |
|
869 | 870 | |
|
870 | destpath = destrepo.vfs.base | |
|
871 | destlock = copystore(ui, srcrepo, destpath) | |
|
872 | # copy bookmarks over | |
|
873 | srcbookmarks = srcrepo.vfs.join(b'bookmarks') | |
|
874 | dstbookmarks = os.path.join(destpath, b'bookmarks') | |
|
875 | if os.path.exists(srcbookmarks): | |
|
876 | util.copyfile(srcbookmarks, dstbookmarks) | |
|
877 | ||
|
878 | dstcachedir = os.path.join(destpath, b'cache') | |
|
879 | for cache in cacheutil.cachetocopy(srcrepo): | |
|
880 | _copycache(srcrepo, dstcachedir, cache) | |
|
871 | streamclone.local_copy(srcrepo, destrepo) | |
|
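Aside: the removed lines above hand-copied the store via copystore(), the .hg/bookmarks file, and the warm caches via cacheutil.cachetocopy() (which is why the cacheutil import is dropped at the top of the file), while the new code takes the destination lock and delegates the local-copy step to streamclone. A rough illustrative sketch of the new shape, with the old responsibilities noted as comments; what local_copy() covers internally is inferred from the lines it replaces, not stated by the patch.

    destlock = destrepo.lock()        # lock previously acquired inside copystore()
    from . import streamclone         # deferred import to avoid a cycle, as in the patch
    # local_copy() takes over what the removed lines did by hand:
    #   - copying the store files      (old copystore() call)
    #   - copying .hg/bookmarks        (non-store data)
    #   - copying the warm caches      (old cacheutil.cachetocopy() loop)
    streamclone.local_copy(srcrepo, destrepo)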
881 | 872 | |
|
882 | 873 | # we need to re-init the repo after manually copying the data |
|
883 | 874 | # into it |
|
884 | 875 | destpeer = peer(srcrepo, peeropts, dest) |
|
885 | 876 | srcrepo.hook( |
|
886 | 877 | b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex |
|
887 | 878 | ) |
|
888 | 879 | else: |
|
889 | 880 | try: |
|
890 | 881 | # only pass ui when no srcrepo |
|
891 | 882 | destpeer = peer( |
|
892 | 883 | srcrepo or ui, |
|
893 | 884 | peeropts, |
|
894 | 885 | dest, |
|
895 | 886 | create=True, |
|
896 | 887 | createopts=createopts, |
|
897 | 888 | ) |
|
898 | 889 | except OSError as inst: |
|
899 | 890 | if inst.errno == errno.EEXIST: |
|
900 | 891 | cleandir = None |
|
901 | 892 | raise error.Abort( |
|
902 | 893 | _(b"destination '%s' already exists") % dest |
|
903 | 894 | ) |
|
904 | 895 | raise |
|
905 | 896 | |
|
906 | 897 | if revs: |
|
907 | 898 | if not srcpeer.capable(b'lookup'): |
|
908 | 899 | raise error.Abort( |
|
909 | 900 | _( |
|
910 | 901 | b"src repository does not support " |
|
911 | 902 | b"revision lookup and so doesn't " |
|
912 | 903 | b"support clone by revision" |
|
913 | 904 | ) |
|
914 | 905 | ) |
|
915 | 906 | |
|
916 | 907 | # TODO this is batchable. |
|
917 | 908 | remoterevs = [] |
|
918 | 909 | for rev in revs: |
|
919 | 910 | with srcpeer.commandexecutor() as e: |
|
920 | 911 | remoterevs.append( |
|
921 | 912 | e.callcommand( |
|
922 | 913 | b'lookup', |
|
923 | 914 | { |
|
924 | 915 | b'key': rev, |
|
925 | 916 | }, |
|
926 | 917 | ).result() |
|
927 | 918 | ) |
|
928 | 919 | revs = remoterevs |
|
929 | 920 | |
|
930 | 921 | checkout = revs[0] |
|
931 | 922 | else: |
|
932 | 923 | revs = None |
|
933 | 924 | local = destpeer.local() |
|
934 | 925 | if local: |
|
935 | 926 | if narrow: |
|
936 | 927 | with local.wlock(), local.lock(): |
|
937 | 928 | local.setnarrowpats(storeincludepats, storeexcludepats) |
|
938 | 929 | narrowspec.copytoworkingcopy(local) |
|
939 | 930 | |
|
940 | 931 | u = urlutil.url(abspath) |
|
941 | 932 | defaulturl = bytes(u) |
|
942 | 933 | local.ui.setconfig(b'paths', b'default', defaulturl, b'clone') |
|
943 | 934 | if not stream: |
|
944 | 935 | if pull: |
|
945 | 936 | stream = False |
|
946 | 937 | else: |
|
947 | 938 | stream = None |
|
948 | 939 | # internal config: ui.quietbookmarkmove |
|
949 | 940 | overrides = {(b'ui', b'quietbookmarkmove'): True} |
|
950 | 941 | with local.ui.configoverride(overrides, b'clone'): |
|
951 | 942 | exchange.pull( |
|
952 | 943 | local, |
|
953 | 944 | srcpeer, |
|
954 | 945 | revs, |
|
955 | 946 | streamclonerequested=stream, |
|
956 | 947 | includepats=storeincludepats, |
|
957 | 948 | excludepats=storeexcludepats, |
|
958 | 949 | depth=depth, |
|
959 | 950 | ) |
|
960 | 951 | elif srcrepo: |
|
961 | 952 | # TODO lift restriction once exchange.push() accepts narrow |
|
962 | 953 | # push. |
|
963 | 954 | if narrow: |
|
964 | 955 | raise error.Abort( |
|
965 | 956 | _( |
|
966 | 957 | b'narrow clone not available for ' |
|
967 | 958 | b'remote destinations' |
|
968 | 959 | ) |
|
969 | 960 | ) |
|
970 | 961 | |
|
971 | 962 | exchange.push( |
|
972 | 963 | srcrepo, |
|
973 | 964 | destpeer, |
|
974 | 965 | revs=revs, |
|
975 | 966 | bookmarks=srcrepo._bookmarks.keys(), |
|
976 | 967 | ) |
|
977 | 968 | else: |
|
978 | 969 | raise error.Abort( |
|
979 | 970 | _(b"clone from remote to remote not supported") |
|
980 | 971 | ) |
|
981 | 972 | |
|
982 | 973 | cleandir = None |
|
983 | 974 | |
|
984 | 975 | destrepo = destpeer.local() |
|
985 | 976 | if destrepo: |
|
986 | 977 | template = uimod.samplehgrcs[b'cloned'] |
|
987 | 978 | u = urlutil.url(abspath) |
|
988 | 979 | u.passwd = None |
|
989 | 980 | defaulturl = bytes(u) |
|
990 | 981 | destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl)) |
|
991 | 982 | destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone') |
|
992 | 983 | |
|
993 | 984 | if ui.configbool(b'experimental', b'remotenames'): |
|
994 | 985 | logexchange.pullremotenames(destrepo, srcpeer) |
|
995 | 986 | |
|
996 | 987 | if update: |
|
997 | 988 | if update is not True: |
|
998 | 989 | with srcpeer.commandexecutor() as e: |
|
999 | 990 | checkout = e.callcommand( |
|
1000 | 991 | b'lookup', |
|
1001 | 992 | { |
|
1002 | 993 | b'key': update, |
|
1003 | 994 | }, |
|
1004 | 995 | ).result() |
|
1005 | 996 | |
|
1006 | 997 | uprev = None |
|
1007 | 998 | status = None |
|
1008 | 999 | if checkout is not None: |
|
1009 | 1000 | # Some extensions (at least hg-git and hg-subversion) have |
|
1010 | 1001 | # a peer.lookup() implementation that returns a name instead |
|
1011 | 1002 | # of a nodeid. We work around it here until we've figured |
|
1012 | 1003 | # out a better solution. |
|
1013 | 1004 | if len(checkout) == 20 and checkout in destrepo: |
|
1014 | 1005 | uprev = checkout |
|
1015 | 1006 | elif scmutil.isrevsymbol(destrepo, checkout): |
|
1016 | 1007 | uprev = scmutil.revsymbol(destrepo, checkout).node() |
|
1017 | 1008 | else: |
|
1018 | 1009 | if update is not True: |
|
1019 | 1010 | try: |
|
1020 | 1011 | uprev = destrepo.lookup(update) |
|
1021 | 1012 | except error.RepoLookupError: |
|
1022 | 1013 | pass |
|
1023 | 1014 | if uprev is None: |
|
1024 | 1015 | try: |
|
1025 | 1016 | if destrepo._activebookmark: |
|
1026 | 1017 | uprev = destrepo.lookup(destrepo._activebookmark) |
|
1027 | 1018 | update = destrepo._activebookmark |
|
1028 | 1019 | else: |
|
1029 | 1020 | uprev = destrepo._bookmarks[b'@'] |
|
1030 | 1021 | update = b'@' |
|
1031 | 1022 | bn = destrepo[uprev].branch() |
|
1032 | 1023 | if bn == b'default': |
|
1033 | 1024 | status = _(b"updating to bookmark %s\n" % update) |
|
1034 | 1025 | else: |
|
1035 | 1026 | status = ( |
|
1036 | 1027 | _(b"updating to bookmark %s on branch %s\n") |
|
1037 | 1028 | ) % (update, bn) |
|
1038 | 1029 | except KeyError: |
|
1039 | 1030 | try: |
|
1040 | 1031 | uprev = destrepo.branchtip(b'default') |
|
1041 | 1032 | except error.RepoLookupError: |
|
1042 | 1033 | uprev = destrepo.lookup(b'tip') |
|
1043 | 1034 | if not status: |
|
1044 | 1035 | bn = destrepo[uprev].branch() |
|
1045 | 1036 | status = _(b"updating to branch %s\n") % bn |
|
1046 | 1037 | destrepo.ui.status(status) |
|
1047 | 1038 | _update(destrepo, uprev) |
|
1048 | 1039 | if update in destrepo._bookmarks: |
|
1049 | 1040 | bookmarks.activate(destrepo, update) |
|
1050 | 1041 | if destlock is not None: |
|
1051 | 1042 | release(destlock) |
|
1052 | 1043 | # here is a tiny window where someone could end up writing the |
|
1053 | 1044 | # repository before the caches are sure to be warm. This is "fine" |
|
1054 | 1045 | # as the only "bad" outcome would be some slowness. That potential |
|
1055 | 1046 | # slowness already affects readers. |
|
1056 | 1047 | with destrepo.lock(): |
|
1057 | 1048 | destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE) |
|
1058 | 1049 | finally: |
|
1059 | 1050 | release(srclock, destlock) |
|
1060 | 1051 | if cleandir is not None: |
|
1061 | 1052 | shutil.rmtree(cleandir, True) |
|
1062 | 1053 | if srcpeer is not None: |
|
1063 | 1054 | srcpeer.close() |
|
1064 | 1055 | if destpeer and destpeer.local() is None: |
|
1065 | 1056 | destpeer.close() |
|
1066 | 1057 | return srcpeer, destpeer |
|
1067 | 1058 | |
|
1068 | 1059 | |
|
1069 | 1060 | def _showstats(repo, stats, quietempty=False): |
|
1070 | 1061 | if quietempty and stats.isempty(): |
|
1071 | 1062 | return |
|
1072 | 1063 | repo.ui.status( |
|
1073 | 1064 | _( |
|
1074 | 1065 | b"%d files updated, %d files merged, " |
|
1075 | 1066 | b"%d files removed, %d files unresolved\n" |
|
1076 | 1067 | ) |
|
1077 | 1068 | % ( |
|
1078 | 1069 | stats.updatedcount, |
|
1079 | 1070 | stats.mergedcount, |
|
1080 | 1071 | stats.removedcount, |
|
1081 | 1072 | stats.unresolvedcount, |
|
1082 | 1073 | ) |
|
1083 | 1074 | ) |
|
1084 | 1075 | |
|
1085 | 1076 | |
|
1086 | 1077 | def updaterepo(repo, node, overwrite, updatecheck=None): |
|
1087 | 1078 | """Update the working directory to node. |
|
1088 | 1079 | |
|
1089 | 1080 | When overwrite is set, changes are clobbered, merged else |
|
1090 | 1081 | |
|
1091 | 1082 | returns stats (see pydoc mercurial.merge.applyupdates)""" |
|
1092 | 1083 | repo.ui.deprecwarn( |
|
1093 | 1084 | b'prefer merge.update() or merge.clean_update() over hg.updaterepo()', |
|
1094 | 1085 | b'5.7', |
|
1095 | 1086 | ) |
|
1096 | 1087 | return mergemod._update( |
|
1097 | 1088 | repo, |
|
1098 | 1089 | node, |
|
1099 | 1090 | branchmerge=False, |
|
1100 | 1091 | force=overwrite, |
|
1101 | 1092 | labels=[b'working copy', b'destination'], |
|
1102 | 1093 | updatecheck=updatecheck, |
|
1103 | 1094 | ) |
|
1104 | 1095 | |
|
1105 | 1096 | |
|
1106 | 1097 | def update(repo, node, quietempty=False, updatecheck=None): |
|
1107 | 1098 | """update the working directory to node""" |
|
1108 | 1099 | stats = mergemod.update(repo[node], updatecheck=updatecheck) |
|
1109 | 1100 | _showstats(repo, stats, quietempty) |
|
1110 | 1101 | if stats.unresolvedcount: |
|
1111 | 1102 | repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n")) |
|
1112 | 1103 | return stats.unresolvedcount > 0 |
|
1113 | 1104 | |
|
1114 | 1105 | |
|
1115 | 1106 | # naming conflict in clone() |
|
1116 | 1107 | _update = update |
|
1117 | 1108 | |
|
1118 | 1109 | |
|
1119 | 1110 | def clean(repo, node, show_stats=True, quietempty=False): |
|
1120 | 1111 | """forcibly switch the working directory to node, clobbering changes""" |
|
1121 | 1112 | stats = mergemod.clean_update(repo[node]) |
|
1122 | 1113 | assert stats.unresolvedcount == 0 |
|
1123 | 1114 | if show_stats: |
|
1124 | 1115 | _showstats(repo, stats, quietempty) |
|
1125 | 1116 | return False |
|
1126 | 1117 | |
|
1127 | 1118 | |
|
1128 | 1119 | # naming conflict in updatetotally() |
|
1129 | 1120 | _clean = clean |
|
1130 | 1121 | |
|
1131 | 1122 | _VALID_UPDATECHECKS = { |
|
1132 | 1123 | mergemod.UPDATECHECK_ABORT, |
|
1133 | 1124 | mergemod.UPDATECHECK_NONE, |
|
1134 | 1125 | mergemod.UPDATECHECK_LINEAR, |
|
1135 | 1126 | mergemod.UPDATECHECK_NO_CONFLICT, |
|
1136 | 1127 | } |
|
1137 | 1128 | |
|
1138 | 1129 | |
|
1139 | 1130 | def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None): |
|
1140 | 1131 | """Update the working directory with extra care for non-file components |
|
1141 | 1132 | |
|
1142 | 1133 | This takes care of non-file components below: |
|
1143 | 1134 | |
|
1144 | 1135 | :bookmark: might be advanced or (in)activated |
|
1145 | 1136 | |
|
1146 | 1137 | This takes arguments below: |
|
1147 | 1138 | |
|
1148 | 1139 | :checkout: to which revision the working directory is updated |
|
1149 | 1140 | :brev: a name, which might be a bookmark to be activated after updating |
|
1150 | 1141 | :clean: whether changes in the working directory can be discarded |
|
1151 | 1142 | :updatecheck: how to deal with a dirty working directory |
|
1152 | 1143 | |
|
1153 | 1144 | Valid values for updatecheck are the UPDATECHECK_* constants |
|
1154 | 1145 | defined in the merge module. Passing `None` will result in using the |
|
1155 | 1146 | configured default. |
|
1156 | 1147 | |
|
1157 | 1148 | * ABORT: abort if the working directory is dirty |
|
1158 | 1149 | * NONE: don't check (merge working directory changes into destination) |
|
1159 | 1150 | * LINEAR: check that update is linear before merging working directory |
|
1160 | 1151 | changes into destination |
|
1161 | 1152 | * NO_CONFLICT: check that the update does not result in file merges |
|
1162 | 1153 | |
|
1163 | 1154 | This returns whether conflict is detected at updating or not. |
|
1164 | 1155 | """ |
|
1165 | 1156 | if updatecheck is None: |
|
1166 | 1157 | updatecheck = ui.config(b'commands', b'update.check') |
|
1167 | 1158 | if updatecheck not in _VALID_UPDATECHECKS: |
|
1168 | 1159 | # If not configured, or invalid value configured |
|
1169 | 1160 | updatecheck = mergemod.UPDATECHECK_LINEAR |
|
1170 | 1161 | if updatecheck not in _VALID_UPDATECHECKS: |
|
1171 | 1162 | raise ValueError( |
|
1172 | 1163 | r'Invalid updatecheck value %r (can accept %r)' |
|
1173 | 1164 | % (updatecheck, _VALID_UPDATECHECKS) |
|
1174 | 1165 | ) |
|
1175 | 1166 | with repo.wlock(): |
|
1176 | 1167 | movemarkfrom = None |
|
1177 | 1168 | warndest = False |
|
1178 | 1169 | if checkout is None: |
|
1179 | 1170 | updata = destutil.destupdate(repo, clean=clean) |
|
1180 | 1171 | checkout, movemarkfrom, brev = updata |
|
1181 | 1172 | warndest = True |
|
1182 | 1173 | |
|
1183 | 1174 | if clean: |
|
1184 | 1175 | ret = _clean(repo, checkout) |
|
1185 | 1176 | else: |
|
1186 | 1177 | if updatecheck == mergemod.UPDATECHECK_ABORT: |
|
1187 | 1178 | cmdutil.bailifchanged(repo, merge=False) |
|
1188 | 1179 | updatecheck = mergemod.UPDATECHECK_NONE |
|
1189 | 1180 | ret = _update(repo, checkout, updatecheck=updatecheck) |
|
1190 | 1181 | |
|
1191 | 1182 | if not ret and movemarkfrom: |
|
1192 | 1183 | if movemarkfrom == repo[b'.'].node(): |
|
1193 | 1184 | pass # no-op update |
|
1194 | 1185 | elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()): |
|
1195 | 1186 | b = ui.label(repo._activebookmark, b'bookmarks.active') |
|
1196 | 1187 | ui.status(_(b"updating bookmark %s\n") % b) |
|
1197 | 1188 | else: |
|
1198 | 1189 | # this can happen with a non-linear update |
|
1199 | 1190 | b = ui.label(repo._activebookmark, b'bookmarks') |
|
1200 | 1191 | ui.status(_(b"(leaving bookmark %s)\n") % b) |
|
1201 | 1192 | bookmarks.deactivate(repo) |
|
1202 | 1193 | elif brev in repo._bookmarks: |
|
1203 | 1194 | if brev != repo._activebookmark: |
|
1204 | 1195 | b = ui.label(brev, b'bookmarks.active') |
|
1205 | 1196 | ui.status(_(b"(activating bookmark %s)\n") % b) |
|
1206 | 1197 | bookmarks.activate(repo, brev) |
|
1207 | 1198 | elif brev: |
|
1208 | 1199 | if repo._activebookmark: |
|
1209 | 1200 | b = ui.label(repo._activebookmark, b'bookmarks') |
|
1210 | 1201 | ui.status(_(b"(leaving bookmark %s)\n") % b) |
|
1211 | 1202 | bookmarks.deactivate(repo) |
|
1212 | 1203 | |
|
1213 | 1204 | if warndest: |
|
1214 | 1205 | destutil.statusotherdests(ui, repo) |
|
1215 | 1206 | |
|
1216 | 1207 | return ret |
|
1217 | 1208 | |
|
1218 | 1209 | |
|
1219 | 1210 | def merge( |
|
1220 | 1211 | ctx, |
|
1221 | 1212 | force=False, |
|
1222 | 1213 | remind=True, |
|
1223 | 1214 | labels=None, |
|
1224 | 1215 | ): |
|
1225 | 1216 | """Branch merge with node, resolving changes. Return true if any |
|
1226 | 1217 | unresolved conflicts.""" |
|
1227 | 1218 | repo = ctx.repo() |
|
1228 | 1219 | stats = mergemod.merge(ctx, force=force, labels=labels) |
|
1229 | 1220 | _showstats(repo, stats) |
|
1230 | 1221 | if stats.unresolvedcount: |
|
1231 | 1222 | repo.ui.status( |
|
1232 | 1223 | _( |
|
1233 | 1224 | b"use 'hg resolve' to retry unresolved file merges " |
|
1234 | 1225 | b"or 'hg merge --abort' to abandon\n" |
|
1235 | 1226 | ) |
|
1236 | 1227 | ) |
|
1237 | 1228 | elif remind: |
|
1238 | 1229 | repo.ui.status(_(b"(branch merge, don't forget to commit)\n")) |
|
1239 | 1230 | return stats.unresolvedcount > 0 |
|
1240 | 1231 | |
|
1241 | 1232 | |
|
1242 | 1233 | def abortmerge(ui, repo): |
|
1243 | 1234 | ms = mergestatemod.mergestate.read(repo) |
|
1244 | 1235 | if ms.active(): |
|
1245 | 1236 | # there were conflicts |
|
1246 | 1237 | node = ms.localctx.hex() |
|
1247 | 1238 | else: |
|
1248 | 1239 | # there were no conflicts, mergestate was not stored |
|
1249 | 1240 | node = repo[b'.'].hex() |
|
1250 | 1241 | |
|
1251 | 1242 | repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12]) |
|
1252 | 1243 | stats = mergemod.clean_update(repo[node]) |
|
1253 | 1244 | assert stats.unresolvedcount == 0 |
|
1254 | 1245 | _showstats(repo, stats) |
|
1255 | 1246 | |
|
1256 | 1247 | |
|
1257 | 1248 | def _incoming( |
|
1258 | 1249 | displaychlist, |
|
1259 | 1250 | subreporecurse, |
|
1260 | 1251 | ui, |
|
1261 | 1252 | repo, |
|
1262 | 1253 | source, |
|
1263 | 1254 | opts, |
|
1264 | 1255 | buffered=False, |
|
1265 | 1256 | subpath=None, |
|
1266 | 1257 | ): |
|
1267 | 1258 | """ |
|
1268 | 1259 | Helper for incoming / gincoming. |
|
1269 | 1260 | displaychlist gets called with |
|
1270 | 1261 | (remoterepo, incomingchangesetlist, displayer) parameters, |
|
1271 | 1262 | and is supposed to contain only code that can't be unified. |
|
1272 | 1263 | """ |
|
1273 | 1264 | srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch')) |
|
1274 | 1265 | srcs = list(srcs) |
|
1275 | 1266 | if len(srcs) != 1: |
|
1276 | 1267 | msg = _(b'for now, incoming supports only a single source, %d provided') |
|
1277 | 1268 | msg %= len(srcs) |
|
1278 | 1269 | raise error.Abort(msg) |
|
1279 | 1270 | source, branches = srcs[0] |
|
1280 | 1271 | if subpath is not None: |
|
1281 | 1272 | subpath = urlutil.url(subpath) |
|
1282 | 1273 | if subpath.isabs(): |
|
1283 | 1274 | source = bytes(subpath) |
|
1284 | 1275 | else: |
|
1285 | 1276 | p = urlutil.url(source) |
|
1286 | 1277 | p.path = os.path.normpath(b'%s/%s' % (p.path, subpath)) |
|
1287 | 1278 | source = bytes(p) |
|
1288 | 1279 | other = peer(repo, opts, source) |
|
1289 | 1280 | cleanupfn = other.close |
|
1290 | 1281 | try: |
|
1291 | 1282 | ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source)) |
|
1292 | 1283 | revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev')) |
|
1293 | 1284 | |
|
1294 | 1285 | if revs: |
|
1295 | 1286 | revs = [other.lookup(rev) for rev in revs] |
|
1296 | 1287 | other, chlist, cleanupfn = bundlerepo.getremotechanges( |
|
1297 | 1288 | ui, repo, other, revs, opts[b"bundle"], opts[b"force"] |
|
1298 | 1289 | ) |
|
1299 | 1290 | |
|
1300 | 1291 | if not chlist: |
|
1301 | 1292 | ui.status(_(b"no changes found\n")) |
|
1302 | 1293 | return subreporecurse() |
|
1303 | 1294 | ui.pager(b'incoming') |
|
1304 | 1295 | displayer = logcmdutil.changesetdisplayer( |
|
1305 | 1296 | ui, other, opts, buffered=buffered |
|
1306 | 1297 | ) |
|
1307 | 1298 | displaychlist(other, chlist, displayer) |
|
1308 | 1299 | displayer.close() |
|
1309 | 1300 | finally: |
|
1310 | 1301 | cleanupfn() |
|
1311 | 1302 | subreporecurse() |
|
1312 | 1303 | return 0 # exit code is zero since we found incoming changes |
|
1313 | 1304 | |
|
1314 | 1305 | |
|
1315 | 1306 | def incoming(ui, repo, source, opts, subpath=None): |
|
1316 | 1307 | def subreporecurse(): |
|
1317 | 1308 | ret = 1 |
|
1318 | 1309 | if opts.get(b'subrepos'): |
|
1319 | 1310 | ctx = repo[None] |
|
1320 | 1311 | for subpath in sorted(ctx.substate): |
|
1321 | 1312 | sub = ctx.sub(subpath) |
|
1322 | 1313 | ret = min(ret, sub.incoming(ui, source, opts)) |
|
1323 | 1314 | return ret |
|
1324 | 1315 | |
|
1325 | 1316 | def display(other, chlist, displayer): |
|
1326 | 1317 | limit = logcmdutil.getlimit(opts) |
|
1327 | 1318 | if opts.get(b'newest_first'): |
|
1328 | 1319 | chlist.reverse() |
|
1329 | 1320 | count = 0 |
|
1330 | 1321 | for n in chlist: |
|
1331 | 1322 | if limit is not None and count >= limit: |
|
1332 | 1323 | break |
|
1333 | 1324 | parents = [ |
|
1334 | 1325 | p for p in other.changelog.parents(n) if p != repo.nullid |
|
1335 | 1326 | ] |
|
1336 | 1327 | if opts.get(b'no_merges') and len(parents) == 2: |
|
1337 | 1328 | continue |
|
1338 | 1329 | count += 1 |
|
1339 | 1330 | displayer.show(other[n]) |
|
1340 | 1331 | |
|
1341 | 1332 | return _incoming( |
|
1342 | 1333 | display, subreporecurse, ui, repo, source, opts, subpath=subpath |
|
1343 | 1334 | ) |
|
1344 | 1335 | |
|
1345 | 1336 | |
|
1346 | 1337 | def _outgoing(ui, repo, dests, opts, subpath=None): |
|
1347 | 1338 | out = set() |
|
1348 | 1339 | others = [] |
|
1349 | 1340 | for path in urlutil.get_push_paths(repo, ui, dests): |
|
1350 | 1341 | dest = path.pushloc or path.loc |
|
1351 | 1342 | if subpath is not None: |
|
1352 | 1343 | subpath = urlutil.url(subpath) |
|
1353 | 1344 | if subpath.isabs(): |
|
1354 | 1345 | dest = bytes(subpath) |
|
1355 | 1346 | else: |
|
1356 | 1347 | p = urlutil.url(dest) |
|
1357 | 1348 | p.path = os.path.normpath(b'%s/%s' % (p.path, subpath)) |
|
1358 | 1349 | dest = bytes(p) |
|
1359 | 1350 | branches = path.branch, opts.get(b'branch') or [] |
|
1360 | 1351 | |
|
1361 | 1352 | ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest)) |
|
1362 | 1353 | revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev')) |
|
1363 | 1354 | if revs: |
|
1364 | 1355 | revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)] |
|
1365 | 1356 | |
|
1366 | 1357 | other = peer(repo, opts, dest) |
|
1367 | 1358 | try: |
|
1368 | 1359 | outgoing = discovery.findcommonoutgoing( |
|
1369 | 1360 | repo, other, revs, force=opts.get(b'force') |
|
1370 | 1361 | ) |
|
1371 | 1362 | o = outgoing.missing |
|
1372 | 1363 | out.update(o) |
|
1373 | 1364 | if not o: |
|
1374 | 1365 | scmutil.nochangesfound(repo.ui, repo, outgoing.excluded) |
|
1375 | 1366 | others.append(other) |
|
1376 | 1367 | except: # re-raises |
|
1377 | 1368 | other.close() |
|
1378 | 1369 | raise |
|
1379 | 1370 | # make sure this is ordered by revision number |
|
1380 | 1371 | outgoing_revs = list(out) |
|
1381 | 1372 | cl = repo.changelog |
|
1382 | 1373 | outgoing_revs.sort(key=cl.rev) |
|
1383 | 1374 | return outgoing_revs, others |
|
1384 | 1375 | |
|
1385 | 1376 | |
|
1386 | 1377 | def _outgoing_recurse(ui, repo, dests, opts): |
|
1387 | 1378 | ret = 1 |
|
1388 | 1379 | if opts.get(b'subrepos'): |
|
1389 | 1380 | ctx = repo[None] |
|
1390 | 1381 | for subpath in sorted(ctx.substate): |
|
1391 | 1382 | sub = ctx.sub(subpath) |
|
1392 | 1383 | ret = min(ret, sub.outgoing(ui, dests, opts)) |
|
1393 | 1384 | return ret |
|
1394 | 1385 | |
|
1395 | 1386 | |
|
1396 | 1387 | def _outgoing_filter(repo, revs, opts): |
|
1397 | 1388 | """apply revision filtering/ordering option for outgoing""" |
|
1398 | 1389 | limit = logcmdutil.getlimit(opts) |
|
1399 | 1390 | no_merges = opts.get(b'no_merges') |
|
1400 | 1391 | if opts.get(b'newest_first'): |
|
1401 | 1392 | revs.reverse() |
|
1402 | 1393 | if limit is None and not no_merges: |
|
1403 | 1394 | for r in revs: |
|
1404 | 1395 | yield r |
|
1405 | 1396 | return |
|
1406 | 1397 | |
|
1407 | 1398 | count = 0 |
|
1408 | 1399 | cl = repo.changelog |
|
1409 | 1400 | for n in revs: |
|
1410 | 1401 | if limit is not None and count >= limit: |
|
1411 | 1402 | break |
|
1412 | 1403 | parents = [p for p in cl.parents(n) if p != repo.nullid] |
|
1413 | 1404 | if no_merges and len(parents) == 2: |
|
1414 | 1405 | continue |
|
1415 | 1406 | count += 1 |
|
1416 | 1407 | yield n |
|
1417 | 1408 | |
|
1418 | 1409 | |
|
1419 | 1410 | def outgoing(ui, repo, dests, opts, subpath=None): |
|
1420 | 1411 | if opts.get(b'graph'): |
|
1421 | 1412 | logcmdutil.checkunsupportedgraphflags([], opts) |
|
1422 | 1413 | o, others = _outgoing(ui, repo, dests, opts, subpath=subpath) |
|
1423 | 1414 | ret = 1 |
|
1424 | 1415 | try: |
|
1425 | 1416 | if o: |
|
1426 | 1417 | ret = 0 |
|
1427 | 1418 | |
|
1428 | 1419 | if opts.get(b'graph'): |
|
1429 | 1420 | revdag = logcmdutil.graphrevs(repo, o, opts) |
|
1430 | 1421 | ui.pager(b'outgoing') |
|
1431 | 1422 | displayer = logcmdutil.changesetdisplayer( |
|
1432 | 1423 | ui, repo, opts, buffered=True |
|
1433 | 1424 | ) |
|
1434 | 1425 | logcmdutil.displaygraph( |
|
1435 | 1426 | ui, repo, revdag, displayer, graphmod.asciiedges |
|
1436 | 1427 | ) |
|
1437 | 1428 | else: |
|
1438 | 1429 | ui.pager(b'outgoing') |
|
1439 | 1430 | displayer = logcmdutil.changesetdisplayer(ui, repo, opts) |
|
1440 | 1431 | for n in _outgoing_filter(repo, o, opts): |
|
1441 | 1432 | displayer.show(repo[n]) |
|
1442 | 1433 | displayer.close() |
|
1443 | 1434 | for oth in others: |
|
1444 | 1435 | cmdutil.outgoinghooks(ui, repo, oth, opts, o) |
|
1445 | 1436 | ret = min(ret, _outgoing_recurse(ui, repo, dests, opts)) |
|
1446 | 1437 | return ret # exit code is zero since we found outgoing changes |
|
1447 | 1438 | finally: |
|
1448 | 1439 | for oth in others: |
|
1449 | 1440 | oth.close() |
|
1450 | 1441 | |
|
1451 | 1442 | |
|
1452 | 1443 | def verify(repo, level=None): |
|
1453 | 1444 | """verify the consistency of a repository""" |
|
1454 | 1445 | ret = verifymod.verify(repo, level=level) |
|
1455 | 1446 | |
|
1456 | 1447 | # Broken subrepo references in hidden csets don't seem worth worrying about, |
|
1457 | 1448 | # since they can't be pushed/pulled, and --hidden can be used if they are a |
|
1458 | 1449 | # concern. |
|
1459 | 1450 | |
|
1460 | 1451 | # pathto() is needed for -R case |
|
1461 | 1452 | revs = repo.revs( |
|
1462 | 1453 | b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate') |
|
1463 | 1454 | ) |
|
1464 | 1455 | |
|
1465 | 1456 | if revs: |
|
1466 | 1457 | repo.ui.status(_(b'checking subrepo links\n')) |
|
1467 | 1458 | for rev in revs: |
|
1468 | 1459 | ctx = repo[rev] |
|
1469 | 1460 | try: |
|
1470 | 1461 | for subpath in ctx.substate: |
|
1471 | 1462 | try: |
|
1472 | 1463 | ret = ( |
|
1473 | 1464 | ctx.sub(subpath, allowcreate=False).verify() or ret |
|
1474 | 1465 | ) |
|
1475 | 1466 | except error.RepoError as e: |
|
1476 | 1467 | repo.ui.warn(b'%d: %s\n' % (rev, e)) |
|
1477 | 1468 | except Exception: |
|
1478 | 1469 | repo.ui.warn( |
|
1479 | 1470 | _(b'.hgsubstate is corrupt in revision %s\n') |
|
1480 | 1471 | % short(ctx.node()) |
|
1481 | 1472 | ) |
|
1482 | 1473 | |
|
1483 | 1474 | return ret |
|
1484 | 1475 | |
|
1485 | 1476 | |
|
1486 | 1477 | def remoteui(src, opts): |
|
1487 | 1478 | """build a remote ui from ui or repo and opts""" |
|
1488 | 1479 | if util.safehasattr(src, b'baseui'): # looks like a repository |
|
1489 | 1480 | dst = src.baseui.copy() # drop repo-specific config |
|
1490 | 1481 | src = src.ui # copy target options from repo |
|
1491 | 1482 | else: # assume it's a global ui object |
|
1492 | 1483 | dst = src.copy() # keep all global options |
|
1493 | 1484 | |
|
1494 | 1485 | # copy ssh-specific options |
|
1495 | 1486 | for o in b'ssh', b'remotecmd': |
|
1496 | 1487 | v = opts.get(o) or src.config(b'ui', o) |
|
1497 | 1488 | if v: |
|
1498 | 1489 | dst.setconfig(b"ui", o, v, b'copied') |
|
1499 | 1490 | |
|
1500 | 1491 | # copy bundle-specific options |
|
1501 | 1492 | r = src.config(b'bundle', b'mainreporoot') |
|
1502 | 1493 | if r: |
|
1503 | 1494 | dst.setconfig(b'bundle', b'mainreporoot', r, b'copied') |
|
1504 | 1495 | |
|
1505 | 1496 | # copy selected local settings to the remote ui |
|
1506 | 1497 | for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'): |
|
1507 | 1498 | for key, val in src.configitems(sect): |
|
1508 | 1499 | dst.setconfig(sect, key, val, b'copied') |
|
1509 | 1500 | v = src.config(b'web', b'cacerts') |
|
1510 | 1501 | if v: |
|
1511 | 1502 | dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied') |
|
1512 | 1503 | |
|
1513 | 1504 | return dst |
|
1514 | 1505 | |
|
1515 | 1506 | |
|
1516 | 1507 | # Files of interest |
|
1517 | 1508 | # Used to check if the repository has changed by looking at the mtime and size of |
|
1518 | 1509 | # these files. |
|
1519 | 1510 | foi = [ |
|
1520 | 1511 | (b'spath', b'00changelog.i'), |
|
1521 | 1512 | (b'spath', b'phaseroots'), # ! phase can change content at the same size |
|
1522 | 1513 | (b'spath', b'obsstore'), |
|
1523 | 1514 | (b'path', b'bookmarks'), # ! bookmark can change content at the same size |
|
1524 | 1515 | ] |
|
1525 | 1516 | |
|
1526 | 1517 | |
|
1527 | 1518 | class cachedlocalrepo(object): |
|
1528 | 1519 | """Holds a localrepository that can be cached and reused.""" |
|
1529 | 1520 | |
|
1530 | 1521 | def __init__(self, repo): |
|
1531 | 1522 | """Create a new cached repo from an existing repo. |
|
1532 | 1523 | |
|
1533 | 1524 | We assume the passed in repo was recently created. If the |
|
1534 | 1525 | repo has changed between when it was created and when it was |
|
1535 | 1526 | turned into a cache, it may not refresh properly. |
|
1536 | 1527 | """ |
|
1537 | 1528 | assert isinstance(repo, localrepo.localrepository) |
|
1538 | 1529 | self._repo = repo |
|
1539 | 1530 | self._state, self.mtime = self._repostate() |
|
1540 | 1531 | self._filtername = repo.filtername |
|
1541 | 1532 | |
|
1542 | 1533 | def fetch(self): |
|
1543 | 1534 | """Refresh (if necessary) and return a repository. |
|
1544 | 1535 | |
|
1545 | 1536 | If the cached instance is out of date, it will be recreated |
|
1546 | 1537 | automatically and returned. |
|
1547 | 1538 | |
|
1548 | 1539 | Returns a tuple of the repo and a boolean indicating whether a new |
|
1549 | 1540 | repo instance was created. |
|
1550 | 1541 | """ |
|
1551 | 1542 | # We compare the mtimes and sizes of some well-known files to |
|
1552 | 1543 | # determine if the repo changed. This is not precise, as mtimes |
|
1553 | 1544 | # are susceptible to clock skew and imprecise filesystems and |
|
1554 | 1545 | # file content can change while maintaining the same size. |
|
1555 | 1546 | |
|
1556 | 1547 | state, mtime = self._repostate() |
|
1557 | 1548 | if state == self._state: |
|
1558 | 1549 | return self._repo, False |
|
1559 | 1550 | |
|
1560 | 1551 | repo = repository(self._repo.baseui, self._repo.url()) |
|
1561 | 1552 | if self._filtername: |
|
1562 | 1553 | self._repo = repo.filtered(self._filtername) |
|
1563 | 1554 | else: |
|
1564 | 1555 | self._repo = repo.unfiltered() |
|
1565 | 1556 | self._state = state |
|
1566 | 1557 | self.mtime = mtime |
|
1567 | 1558 | |
|
1568 | 1559 | return self._repo, True |
|
1569 | 1560 | |
|
1570 | 1561 | def _repostate(self): |
|
1571 | 1562 | state = [] |
|
1572 | 1563 | maxmtime = -1 |
|
1573 | 1564 | for attr, fname in foi: |
|
1574 | 1565 | prefix = getattr(self._repo, attr) |
|
1575 | 1566 | p = os.path.join(prefix, fname) |
|
1576 | 1567 | try: |
|
1577 | 1568 | st = os.stat(p) |
|
1578 | 1569 | except OSError: |
|
1579 | 1570 | st = os.stat(prefix) |
|
1580 | 1571 | state.append((st[stat.ST_MTIME], st.st_size)) |
|
1581 | 1572 | maxmtime = max(maxmtime, st[stat.ST_MTIME]) |
|
1582 | 1573 | |
|
1583 | 1574 | return tuple(state), maxmtime |
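(Illustrative aside, not part of this change: the fingerprinting that `_repostate()` performs can be sketched with the standard library alone. The `snapshot()` helper below is hypothetical; it records (mtime, size) pairs and falls back to the parent directory when a file is missing, mirroring the OSError branch above.)

import os
import stat

def snapshot(paths):
    # collect (mtime, size) for each file; if any pair changes, the cached
    # repository object may be stale and must be recreated
    state = []
    for p in paths:
        try:
            st = os.stat(p)
        except OSError:
            st = os.stat(os.path.dirname(p) or '.')
        state.append((st[stat.ST_MTIME], st.st_size))
    return tuple(state)

# usage sketch:
# before = snapshot(['.hg/store/00changelog.i', '.hg/bookmarks'])
# ...
# repo_changed = snapshot(['.hg/store/00changelog.i', '.hg/bookmarks']) != before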
|
1584 | 1575 | |
|
1585 | 1576 | def copy(self): |
|
1586 | 1577 | """Obtain a copy of this class instance. |
|
1587 | 1578 | |
|
1588 | 1579 | A new localrepository instance is obtained. The new instance should be |
|
1589 | 1580 | completely independent of the original. |
|
1590 | 1581 | """ |
|
1591 | 1582 | repo = repository(self._repo.baseui, self._repo.origroot) |
|
1592 | 1583 | if self._filtername: |
|
1593 | 1584 | repo = repo.filtered(self._filtername) |
|
1594 | 1585 | else: |
|
1595 | 1586 | repo = repo.unfiltered() |
|
1596 | 1587 | c = cachedlocalrepo(repo) |
|
1597 | 1588 | c._state = self._state |
|
1598 | 1589 | c.mtime = self.mtime |
|
1599 | 1590 | return c |
@@ -1,773 +1,879 b'' | |||
|
1 | 1 | # streamclone.py - producing and consuming streaming repository data |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import contextlib |
|
11 | import errno | |
|
11 | 12 | import os |
|
12 | 13 | import struct |
|
13 | 14 | |
|
14 | 15 | from .i18n import _ |
|
15 | 16 | from .pycompat import open |
|
16 | 17 | from .interfaces import repository |
|
17 | 18 | from . import ( |
|
19 | bookmarks, | |
|
18 | 20 | cacheutil, |
|
19 | 21 | error, |
|
20 | 22 | narrowspec, |
|
21 | 23 | phases, |
|
22 | 24 | pycompat, |
|
23 | 25 | requirements as requirementsmod, |
|
24 | 26 | scmutil, |
|
25 | 27 | store, |
|
26 | 28 | util, |
|
27 | 29 | ) |
|
30 | from .utils import ( | |
|
31 | stringutil, | |
|
32 | ) | |
|
28 | 33 | |
|
29 | 34 | |
|
30 | 35 | def canperformstreamclone(pullop, bundle2=False): |
|
31 | 36 | """Whether it is possible to perform a streaming clone as part of pull. |
|
32 | 37 | |
|
33 | 38 | ``bundle2`` will cause the function to consider stream clone through |
|
34 | 39 | bundle2 and only through bundle2. |
|
35 | 40 | |
|
36 | 41 | Returns a tuple of (supported, requirements). ``supported`` is True if |
|
37 | 42 | streaming clone is supported and False otherwise. ``requirements`` is |
|
38 | 43 | a set of repo requirements from the remote, or ``None`` if stream clone |
|
39 | 44 | isn't supported. |
|
40 | 45 | """ |
|
41 | 46 | repo = pullop.repo |
|
42 | 47 | remote = pullop.remote |
|
43 | 48 | |
|
44 | 49 | bundle2supported = False |
|
45 | 50 | if pullop.canusebundle2: |
|
46 | 51 | if b'v2' in pullop.remotebundle2caps.get(b'stream', []): |
|
47 | 52 | bundle2supported = True |
|
48 | 53 | # else |
|
49 | 54 | # Server doesn't support bundle2 stream clone or doesn't support |
|
50 | 55 | # the versions we support. Fall back and possibly allow legacy. |
|
51 | 56 | |
|
52 | 57 | # Ensures legacy code path uses available bundle2. |
|
53 | 58 | if bundle2supported and not bundle2: |
|
54 | 59 | return False, None |
|
55 | 60 | # Ensures bundle2 doesn't try to do a stream clone if it isn't supported. |
|
56 | 61 | elif bundle2 and not bundle2supported: |
|
57 | 62 | return False, None |
|
58 | 63 | |
|
59 | 64 | # Streaming clone only works on empty repositories. |
|
60 | 65 | if len(repo): |
|
61 | 66 | return False, None |
|
62 | 67 | |
|
63 | 68 | # Streaming clone only works if all data is being requested. |
|
64 | 69 | if pullop.heads: |
|
65 | 70 | return False, None |
|
66 | 71 | |
|
67 | 72 | streamrequested = pullop.streamclonerequested |
|
68 | 73 | |
|
69 | 74 | # If we don't have a preference, let the server decide for us. This |
|
70 | 75 | # likely only comes into play in LANs. |
|
71 | 76 | if streamrequested is None: |
|
72 | 77 | # The server can advertise whether to prefer streaming clone. |
|
73 | 78 | streamrequested = remote.capable(b'stream-preferred') |
|
74 | 79 | |
|
75 | 80 | if not streamrequested: |
|
76 | 81 | return False, None |
|
77 | 82 | |
|
78 | 83 | # In order for stream clone to work, the client has to support all the |
|
79 | 84 | # requirements advertised by the server. |
|
80 | 85 | # |
|
81 | 86 | # The server advertises its requirements via the "stream" and "streamreqs" |
|
82 | 87 | # capability. "stream" (a value-less capability) is advertised if and only |
|
83 | 88 | # if the only requirement is "revlogv1." Else, the "streamreqs" capability |
|
84 | 89 | # is advertised and contains a comma-delimited list of requirements. |
|
85 | 90 | requirements = set() |
|
86 | 91 | if remote.capable(b'stream'): |
|
87 | 92 | requirements.add(requirementsmod.REVLOGV1_REQUIREMENT) |
|
88 | 93 | else: |
|
89 | 94 | streamreqs = remote.capable(b'streamreqs') |
|
90 | 95 | # This is weird and shouldn't happen with modern servers. |
|
91 | 96 | if not streamreqs: |
|
92 | 97 | pullop.repo.ui.warn( |
|
93 | 98 | _( |
|
94 | 99 | b'warning: stream clone requested but server has them ' |
|
95 | 100 | b'disabled\n' |
|
96 | 101 | ) |
|
97 | 102 | ) |
|
98 | 103 | return False, None |
|
99 | 104 | |
|
100 | 105 | streamreqs = set(streamreqs.split(b',')) |
|
101 | 106 | # Server requires something we don't support. Bail. |
|
102 | 107 | missingreqs = streamreqs - repo.supportedformats |
|
103 | 108 | if missingreqs: |
|
104 | 109 | pullop.repo.ui.warn( |
|
105 | 110 | _( |
|
106 | 111 | b'warning: stream clone requested but client is missing ' |
|
107 | 112 | b'requirements: %s\n' |
|
108 | 113 | ) |
|
109 | 114 | % b', '.join(sorted(missingreqs)) |
|
110 | 115 | ) |
|
111 | 116 | pullop.repo.ui.warn( |
|
112 | 117 | _( |
|
113 | 118 | b'(see https://www.mercurial-scm.org/wiki/MissingRequirement ' |
|
114 | 119 | b'for more information)\n' |
|
115 | 120 | ) |
|
116 | 121 | ) |
|
117 | 122 | return False, None |
|
118 | 123 | requirements = streamreqs |
|
119 | 124 | |
|
120 | 125 | return True, requirements |
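(Editorial sketch, assuming nothing beyond the capability strings documented above: the client-side requirement check is plain set arithmetic on the comma-delimited `streamreqs` value. `missing_stream_requirements()` is a made-up name, not a Mercurial API.)

def missing_stream_requirements(streamreqs_cap, supported_formats):
    # streamreqs_cap is the raw capability value, e.g.
    # b'generaldelta,revlogv1,sparserevlog'; supported_formats is the set
    # of store formats this client can write
    server_reqs = set(streamreqs_cap.split(b','))
    return server_reqs - supported_formats

# missing_stream_requirements(b'revlogv1,exp-newformat', {b'revlogv1'})
# -> {b'exp-newformat'}, so the stream clone request would be refused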
|
121 | 126 | |
|
122 | 127 | |
|
123 | 128 | def maybeperformlegacystreamclone(pullop): |
|
124 | 129 | """Possibly perform a legacy stream clone operation. |
|
125 | 130 | |
|
126 | 131 | Legacy stream clones are performed as part of pull but before all other |
|
127 | 132 | operations. |
|
128 | 133 | |
|
129 | 134 | A legacy stream clone will not be performed if a bundle2 stream clone is |
|
130 | 135 | supported. |
|
131 | 136 | """ |
|
132 | 137 | from . import localrepo |
|
133 | 138 | |
|
134 | 139 | supported, requirements = canperformstreamclone(pullop) |
|
135 | 140 | |
|
136 | 141 | if not supported: |
|
137 | 142 | return |
|
138 | 143 | |
|
139 | 144 | repo = pullop.repo |
|
140 | 145 | remote = pullop.remote |
|
141 | 146 | |
|
142 | 147 | # Save remote branchmap. We will use it later to speed up branchcache |
|
143 | 148 | # creation. |
|
144 | 149 | rbranchmap = None |
|
145 | 150 | if remote.capable(b'branchmap'): |
|
146 | 151 | with remote.commandexecutor() as e: |
|
147 | 152 | rbranchmap = e.callcommand(b'branchmap', {}).result() |
|
148 | 153 | |
|
149 | 154 | repo.ui.status(_(b'streaming all changes\n')) |
|
150 | 155 | |
|
151 | 156 | with remote.commandexecutor() as e: |
|
152 | 157 | fp = e.callcommand(b'stream_out', {}).result() |
|
153 | 158 | |
|
154 | 159 | # TODO strictly speaking, this code should all be inside the context |
|
155 | 160 | # manager because the context manager is supposed to ensure all wire state |
|
156 | 161 | # is flushed when exiting. But the legacy peers don't do this, so it |
|
157 | 162 | # doesn't matter. |
|
158 | 163 | l = fp.readline() |
|
159 | 164 | try: |
|
160 | 165 | resp = int(l) |
|
161 | 166 | except ValueError: |
|
162 | 167 | raise error.ResponseError( |
|
163 | 168 | _(b'unexpected response from remote server:'), l |
|
164 | 169 | ) |
|
165 | 170 | if resp == 1: |
|
166 | 171 | raise error.Abort(_(b'operation forbidden by server')) |
|
167 | 172 | elif resp == 2: |
|
168 | 173 | raise error.Abort(_(b'locking the remote repository failed')) |
|
169 | 174 | elif resp != 0: |
|
170 | 175 | raise error.Abort(_(b'the server sent an unknown error code')) |
|
171 | 176 | |
|
172 | 177 | l = fp.readline() |
|
173 | 178 | try: |
|
174 | 179 | filecount, bytecount = map(int, l.split(b' ', 1)) |
|
175 | 180 | except (ValueError, TypeError): |
|
176 | 181 | raise error.ResponseError( |
|
177 | 182 | _(b'unexpected response from remote server:'), l |
|
178 | 183 | ) |
|
179 | 184 | |
|
180 | 185 | with repo.lock(): |
|
181 | 186 | consumev1(repo, fp, filecount, bytecount) |
|
182 | 187 | |
|
183 | 188 | # new requirements = old non-format requirements + |
|
184 | 189 | # new format-related remote requirements |
|
185 | 190 | # requirements from the streamed-in repository |
|
186 | 191 | repo.requirements = requirements | ( |
|
187 | 192 | repo.requirements - repo.supportedformats |
|
188 | 193 | ) |
|
189 | 194 | repo.svfs.options = localrepo.resolvestorevfsoptions( |
|
190 | 195 | repo.ui, repo.requirements, repo.features |
|
191 | 196 | ) |
|
192 | 197 | scmutil.writereporequirements(repo) |
|
193 | 198 | |
|
194 | 199 | if rbranchmap: |
|
195 | 200 | repo._branchcaches.replace(repo, rbranchmap) |
|
196 | 201 | |
|
197 | 202 | repo.invalidate() |
|
198 | 203 | |
|
199 | 204 | |
|
200 | 205 | def allowservergeneration(repo): |
|
201 | 206 | """Whether streaming clones are allowed from the server.""" |
|
202 | 207 | if repository.REPO_FEATURE_STREAM_CLONE not in repo.features: |
|
203 | 208 | return False |
|
204 | 209 | |
|
205 | 210 | if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True): |
|
206 | 211 | return False |
|
207 | 212 | |
|
208 | 213 | # The way stream clone works makes it impossible to hide secret changesets. |
|
209 | 214 | # So don't allow this by default. |
|
210 | 215 | secret = phases.hassecret(repo) |
|
211 | 216 | if secret: |
|
212 | 217 | return repo.ui.configbool(b'server', b'uncompressedallowsecret') |
|
213 | 218 | |
|
214 | 219 | return True |
|
215 | 220 | |
|
216 | 221 | |
|
217 | 222 | # This is its own function so extensions can override it. |
|
218 | 223 | def _walkstreamfiles(repo, matcher=None): |
|
219 | 224 | return repo.store.walk(matcher) |
|
220 | 225 | |
|
221 | 226 | |
|
222 | 227 | def generatev1(repo): |
|
223 | 228 | """Emit content for version 1 of a streaming clone. |
|
224 | 229 | |
|
225 | 230 | This returns a 3-tuple of (file count, byte size, data iterator). |
|
226 | 231 | |
|
227 | 232 | The data iterator consists of N entries for each file being transferred. |
|
228 | 233 | Each file entry starts as a line with the file name and integer size |
|
229 | 234 | delimited by a null byte. |
|
230 | 235 | |
|
231 | 236 | The raw file data follows. Following the raw file data is the next file |
|
232 | 237 | entry, or EOF. |
|
233 | 238 | |
|
234 | 239 | When used on the wire protocol, an additional line indicating protocol |
|
235 | 240 | success will be prepended to the stream. This function is not responsible |
|
236 | 241 | for adding it. |
|
237 | 242 | |
|
238 | 243 | This function will obtain a repository lock to ensure a consistent view of |
|
239 | 244 | the store is captured. It therefore may raise LockError. |
|
240 | 245 | """ |
|
241 | 246 | entries = [] |
|
242 | 247 | total_bytes = 0 |
|
243 | 248 | # Get consistent snapshot of repo, lock during scan. |
|
244 | 249 | with repo.lock(): |
|
245 | 250 | repo.ui.debug(b'scanning\n') |
|
246 | 251 | for file_type, name, ename, size in _walkstreamfiles(repo): |
|
247 | 252 | if size: |
|
248 | 253 | entries.append((name, size)) |
|
249 | 254 | total_bytes += size |
|
250 | 255 | _test_sync_point_walk_1(repo) |
|
251 | 256 | _test_sync_point_walk_2(repo) |
|
252 | 257 | |
|
253 | 258 | repo.ui.debug( |
|
254 | 259 | b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes) |
|
255 | 260 | ) |
|
256 | 261 | |
|
257 | 262 | svfs = repo.svfs |
|
258 | 263 | debugflag = repo.ui.debugflag |
|
259 | 264 | |
|
260 | 265 | def emitrevlogdata(): |
|
261 | 266 | for name, size in entries: |
|
262 | 267 | if debugflag: |
|
263 | 268 | repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size)) |
|
264 | 269 | # partially encode name over the wire for backwards compat |
|
265 | 270 | yield b'%s\0%d\n' % (store.encodedir(name), size) |
|
266 | 271 | # auditing at this stage is both pointless (paths are already |
|
267 | 272 | # trusted by the local repo) and expensive |
|
268 | 273 | with svfs(name, b'rb', auditpath=False) as fp: |
|
269 | 274 | if size <= 65536: |
|
270 | 275 | yield fp.read(size) |
|
271 | 276 | else: |
|
272 | 277 | for chunk in util.filechunkiter(fp, limit=size): |
|
273 | 278 | yield chunk |
|
274 | 279 | |
|
275 | 280 | return len(entries), total_bytes, emitrevlogdata() |
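(Editorial sketch of the entry framing described in the `generatev1()` docstring: each entry is a `<name>\0<size>\n` header followed by the raw bytes. The helpers below are hypothetical, use only the standard library, and ignore the wire-protocol status lines that `generatev1wireproto()` adds.)

import io

def emit_entry(name, data):
    # '<name>\0<size>\n' header, then the raw file content
    return b'%s\0%d\n' % (name, len(data)) + data

def read_entry(fp):
    header = fp.readline()
    name, size = header.rstrip(b'\n').split(b'\0', 1)
    return name, fp.read(int(size))

stream = io.BytesIO(emit_entry(b'data/a.i', b'revlog bytes')
                    + emit_entry(b'data/b.d', b'more bytes'))
assert read_entry(stream) == (b'data/a.i', b'revlog bytes')
assert read_entry(stream) == (b'data/b.d', b'more bytes')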
|
276 | 281 | |
|
277 | 282 | |
|
278 | 283 | def generatev1wireproto(repo): |
|
279 | 284 | """Emit content for version 1 of streaming clone suitable for the wire. |
|
280 | 285 | |
|
281 | 286 | This is the data output from ``generatev1()`` with 2 header lines. The |
|
282 | 287 | first line indicates overall success. The 2nd contains the file count and |
|
283 | 288 | byte size of payload. |
|
284 | 289 | |
|
285 | 290 | The success line contains "0" for success, "1" for stream generation not |
|
286 | 291 | allowed, and "2" for error locking the repository (possibly indicating |
|
287 | 292 | a permissions error for the server process). |
|
288 | 293 | """ |
|
289 | 294 | if not allowservergeneration(repo): |
|
290 | 295 | yield b'1\n' |
|
291 | 296 | return |
|
292 | 297 | |
|
293 | 298 | try: |
|
294 | 299 | filecount, bytecount, it = generatev1(repo) |
|
295 | 300 | except error.LockError: |
|
296 | 301 | yield b'2\n' |
|
297 | 302 | return |
|
298 | 303 | |
|
299 | 304 | # Indicates successful response. |
|
300 | 305 | yield b'0\n' |
|
301 | 306 | yield b'%d %d\n' % (filecount, bytecount) |
|
302 | 307 | for chunk in it: |
|
303 | 308 | yield chunk |
|
304 | 309 | |
|
305 | 310 | |
|
306 | 311 | def generatebundlev1(repo, compression=b'UN'): |
|
307 | 312 | """Emit content for version 1 of a stream clone bundle. |
|
308 | 313 | |
|
309 | 314 | The first 4 bytes of the output ("HGS1") denote this as stream clone |
|
310 | 315 | bundle version 1. |
|
311 | 316 | |
|
312 | 317 | The next 2 bytes indicate the compression type. Only "UN" is currently |
|
313 | 318 | supported. |
|
314 | 319 | |
|
315 | 320 | The next 16 bytes are two 64-bit big endian unsigned integers indicating |
|
316 | 321 | file count and byte count, respectively. |
|
317 | 322 | |
|
318 | 323 | The next 2 bytes is a 16-bit big endian unsigned short declaring the length |
|
319 | 324 | of the requirements string, including a trailing \0. The following N bytes |
|
320 | 325 | are the requirements string, which is ASCII containing a comma-delimited |
|
321 | 326 | list of repo requirements that are needed to support the data. |
|
322 | 327 | |
|
323 | 328 | The remaining content is the output of ``generatev1()`` (which may be |
|
324 | 329 | compressed in the future). |
|
325 | 330 | |
|
326 | 331 | Returns a tuple of (requirements, data generator). |
|
327 | 332 | """ |
|
328 | 333 | if compression != b'UN': |
|
329 | 334 | raise ValueError(b'we do not support the compression argument yet') |
|
330 | 335 | |
|
331 | 336 | requirements = repo.requirements & repo.supportedformats |
|
332 | 337 | requires = b','.join(sorted(requirements)) |
|
333 | 338 | |
|
334 | 339 | def gen(): |
|
335 | 340 | yield b'HGS1' |
|
336 | 341 | yield compression |
|
337 | 342 | |
|
338 | 343 | filecount, bytecount, it = generatev1(repo) |
|
339 | 344 | repo.ui.status( |
|
340 | 345 | _(b'writing %d bytes for %d files\n') % (bytecount, filecount) |
|
341 | 346 | ) |
|
342 | 347 | |
|
343 | 348 | yield struct.pack(b'>QQ', filecount, bytecount) |
|
344 | 349 | yield struct.pack(b'>H', len(requires) + 1) |
|
345 | 350 | yield requires + b'\0' |
|
346 | 351 | |
|
347 | 352 | # This is where we'll add compression in the future. |
|
348 | 353 | assert compression == b'UN' |
|
349 | 354 | |
|
350 | 355 | progress = repo.ui.makeprogress( |
|
351 | 356 | _(b'bundle'), total=bytecount, unit=_(b'bytes') |
|
352 | 357 | ) |
|
353 | 358 | progress.update(0) |
|
354 | 359 | |
|
355 | 360 | for chunk in it: |
|
356 | 361 | progress.increment(step=len(chunk)) |
|
357 | 362 | yield chunk |
|
358 | 363 | |
|
359 | 364 | progress.complete() |
|
360 | 365 | |
|
361 | 366 | return requirements, gen() |
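(Editorial sketch: the fixed part of the HGS1 bundle header described in the docstring can be packed and unpacked with `struct` alone. This mirrors `readbundle1header()` further down; the helper names are made up.)

import struct

def pack_bundle1_header(filecount, bytecount, requirements):
    requires = b','.join(sorted(requirements)) + b'\0'
    return (b'HGS1' + b'UN'
            + struct.pack(b'>QQ', filecount, bytecount)
            + struct.pack(b'>H', len(requires))
            + requires)

def unpack_bundle1_header(blob):
    assert blob[:4] == b'HGS1' and blob[4:6] == b'UN'
    filecount, bytecount = struct.unpack(b'>QQ', blob[6:22])
    (requireslen,) = struct.unpack(b'>H', blob[22:24])
    requires = blob[24:24 + requireslen]
    return filecount, bytecount, set(requires.rstrip(b'\0').split(b','))

hdr = pack_bundle1_header(2, 4096, {b'revlogv1', b'generaldelta'})
assert unpack_bundle1_header(hdr) == (2, 4096, {b'revlogv1', b'generaldelta'})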
|
362 | 367 | |
|
363 | 368 | |
|
364 | 369 | def consumev1(repo, fp, filecount, bytecount): |
|
365 | 370 | """Apply the contents from version 1 of a streaming clone file handle. |
|
366 | 371 | |
|
367 | 372 | This takes the output from "stream_out" and applies it to the specified |
|
368 | 373 | repository. |
|
369 | 374 | |
|
370 | 375 | Like "stream_out," the status line added by the wire protocol is not |
|
371 | 376 | handled by this function. |
|
372 | 377 | """ |
|
373 | 378 | with repo.lock(): |
|
374 | 379 | repo.ui.status( |
|
375 | 380 | _(b'%d files to transfer, %s of data\n') |
|
376 | 381 | % (filecount, util.bytecount(bytecount)) |
|
377 | 382 | ) |
|
378 | 383 | progress = repo.ui.makeprogress( |
|
379 | 384 | _(b'clone'), total=bytecount, unit=_(b'bytes') |
|
380 | 385 | ) |
|
381 | 386 | progress.update(0) |
|
382 | 387 | start = util.timer() |
|
383 | 388 | |
|
384 | 389 | # TODO: get rid of (potential) inconsistency |
|
385 | 390 | # |
|
386 | 391 | # If transaction is started and any @filecache property is |
|
387 | 392 | # changed at this point, it causes inconsistency between |
|
388 | 393 | # in-memory cached property and streamclone-ed file on the |
|
389 | 394 | # disk. Nested transaction prevents transaction scope "clone" |
|
390 | 395 | # below from writing in-memory changes out at the end of it, |
|
391 | 396 | # even though in-memory changes are discarded at the end of it |
|
392 | 397 | # regardless of transaction nesting. |
|
393 | 398 | # |
|
394 | 399 | # But transaction nesting can't be simply prohibited, because |
|
395 | 400 | # nesting occurs also in ordinary case (e.g. enabling |
|
396 | 401 | # clonebundles). |
|
397 | 402 | |
|
398 | 403 | with repo.transaction(b'clone'): |
|
399 | 404 | with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount): |
|
400 | 405 | for i in pycompat.xrange(filecount): |
|
401 | 406 | # XXX doesn't support '\n' or '\r' in filenames |
|
402 | 407 | l = fp.readline() |
|
403 | 408 | try: |
|
404 | 409 | name, size = l.split(b'\0', 1) |
|
405 | 410 | size = int(size) |
|
406 | 411 | except (ValueError, TypeError): |
|
407 | 412 | raise error.ResponseError( |
|
408 | 413 | _(b'unexpected response from remote server:'), l |
|
409 | 414 | ) |
|
410 | 415 | if repo.ui.debugflag: |
|
411 | 416 | repo.ui.debug( |
|
412 | 417 | b'adding %s (%s)\n' % (name, util.bytecount(size)) |
|
413 | 418 | ) |
|
414 | 419 | # for backwards compat, name was partially encoded |
|
415 | 420 | path = store.decodedir(name) |
|
416 | 421 | with repo.svfs(path, b'w', backgroundclose=True) as ofp: |
|
417 | 422 | for chunk in util.filechunkiter(fp, limit=size): |
|
418 | 423 | progress.increment(step=len(chunk)) |
|
419 | 424 | ofp.write(chunk) |
|
420 | 425 | |
|
421 | 426 | # force @filecache properties to be reloaded from |
|
422 | 427 | # streamclone-ed file at next access |
|
423 | 428 | repo.invalidate(clearfilecache=True) |
|
424 | 429 | |
|
425 | 430 | elapsed = util.timer() - start |
|
426 | 431 | if elapsed <= 0: |
|
427 | 432 | elapsed = 0.001 |
|
428 | 433 | progress.complete() |
|
429 | 434 | repo.ui.status( |
|
430 | 435 | _(b'transferred %s in %.1f seconds (%s/sec)\n') |
|
431 | 436 | % ( |
|
432 | 437 | util.bytecount(bytecount), |
|
433 | 438 | elapsed, |
|
434 | 439 | util.bytecount(bytecount / elapsed), |
|
435 | 440 | ) |
|
436 | 441 | ) |
|
437 | 442 | |
|
438 | 443 | |
|
439 | 444 | def readbundle1header(fp): |
|
440 | 445 | compression = fp.read(2) |
|
441 | 446 | if compression != b'UN': |
|
442 | 447 | raise error.Abort( |
|
443 | 448 | _( |
|
444 | 449 | b'only uncompressed stream clone bundles are ' |
|
445 | 450 | b'supported; got %s' |
|
446 | 451 | ) |
|
447 | 452 | % compression |
|
448 | 453 | ) |
|
449 | 454 | |
|
450 | 455 | filecount, bytecount = struct.unpack(b'>QQ', fp.read(16)) |
|
451 | 456 | requireslen = struct.unpack(b'>H', fp.read(2))[0] |
|
452 | 457 | requires = fp.read(requireslen) |
|
453 | 458 | |
|
454 | 459 | if not requires.endswith(b'\0'): |
|
455 | 460 | raise error.Abort( |
|
456 | 461 | _( |
|
457 | 462 | b'malformed stream clone bundle: ' |
|
458 | 463 | b'requirements not properly encoded' |
|
459 | 464 | ) |
|
460 | 465 | ) |
|
461 | 466 | |
|
462 | 467 | requirements = set(requires.rstrip(b'\0').split(b',')) |
|
463 | 468 | |
|
464 | 469 | return filecount, bytecount, requirements |
|
465 | 470 | |
|
466 | 471 | |
|
467 | 472 | def applybundlev1(repo, fp): |
|
468 | 473 | """Apply the content from a stream clone bundle version 1. |
|
469 | 474 | |
|
470 | 475 | We assume the 4 byte header has been read and validated and the file handle |
|
471 | 476 | is at the 2 byte compression identifier. |
|
472 | 477 | """ |
|
473 | 478 | if len(repo): |
|
474 | 479 | raise error.Abort( |
|
475 | 480 | _(b'cannot apply stream clone bundle on non-empty repo') |
|
476 | 481 | ) |
|
477 | 482 | |
|
478 | 483 | filecount, bytecount, requirements = readbundle1header(fp) |
|
479 | 484 | missingreqs = requirements - repo.supportedformats |
|
480 | 485 | if missingreqs: |
|
481 | 486 | raise error.Abort( |
|
482 | 487 | _(b'unable to apply stream clone: unsupported format: %s') |
|
483 | 488 | % b', '.join(sorted(missingreqs)) |
|
484 | 489 | ) |
|
485 | 490 | |
|
486 | 491 | consumev1(repo, fp, filecount, bytecount) |
|
487 | 492 | |
|
488 | 493 | |
|
489 | 494 | class streamcloneapplier(object): |
|
490 | 495 | """Class to manage applying streaming clone bundles. |
|
491 | 496 | |
|
492 | 497 | We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle |
|
493 | 498 | readers to perform bundle type-specific functionality. |
|
494 | 499 | """ |
|
495 | 500 | |
|
496 | 501 | def __init__(self, fh): |
|
497 | 502 | self._fh = fh |
|
498 | 503 | |
|
499 | 504 | def apply(self, repo): |
|
500 | 505 | return applybundlev1(repo, self._fh) |
|
501 | 506 | |
|
502 | 507 | |
|
503 | 508 | # type of file to stream |
|
504 | 509 | _fileappend = 0 # append only file |
|
505 | 510 | _filefull = 1 # full snapshot file |
|
506 | 511 | |
|
507 | 512 | # Source of the file |
|
508 | 513 | _srcstore = b's' # store (svfs) |
|
509 | 514 | _srccache = b'c' # cache (cache) |
|
510 | 515 | |
|
511 | 516 | # This is its own function so extensions can override it. |
|
512 | 517 | def _walkstreamfullstorefiles(repo): |
|
513 | 518 | """list snapshot files from the store""" |
|
514 | 519 | fnames = [] |
|
515 | 520 | if not repo.publishing(): |
|
516 | 521 | fnames.append(b'phaseroots') |
|
517 | 522 | return fnames |
|
518 | 523 | |
|
519 | 524 | |
|
520 | 525 | def _filterfull(entry, copy, vfsmap): |
|
521 | 526 | """actually copy the snapshot files""" |
|
522 | 527 | src, name, ftype, data = entry |
|
523 | 528 | if ftype != _filefull: |
|
524 | 529 | return entry |
|
525 | 530 | return (src, name, ftype, copy(vfsmap[src].join(name))) |
|
526 | 531 | |
|
527 | 532 | |
|
528 | 533 | @contextlib.contextmanager |
|
529 | 534 | def maketempcopies(): |
|
530 | 535 | """return a function to temporarily copy files""" |
|
531 | 536 | files = [] |
|
532 | 537 | try: |
|
533 | 538 | |
|
534 | 539 | def copy(src): |
|
535 | 540 | fd, dst = pycompat.mkstemp() |
|
536 | 541 | os.close(fd) |
|
537 | 542 | files.append(dst) |
|
538 | 543 | util.copyfiles(src, dst, hardlink=True) |
|
539 | 544 | return dst |
|
540 | 545 | |
|
541 | 546 | yield copy |
|
542 | 547 | finally: |
|
543 | 548 | for tmp in files: |
|
544 | 549 | util.tryunlink(tmp) |
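(Editorial sketch of the same pattern with only the standard library, for readers unfamiliar with the vfs layer: a context manager that hands back a `copy()` helper and removes every temporary it created on exit, whether or not an error occurred. `temp_copies()` is hypothetical; the real code above hardlinks via `util.copyfiles` when it can.)

import contextlib
import os
import shutil
import tempfile

@contextlib.contextmanager
def temp_copies():
    files = []
    try:
        def copy(src):
            fd, dst = tempfile.mkstemp()
            os.close(fd)
            files.append(dst)
            shutil.copy2(src, dst)
            return dst

        yield copy
    finally:
        for tmp in files:
            try:
                os.unlink(tmp)
            except OSError:
                pass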
|
545 | 550 | |
|
546 | 551 | |
|
547 | 552 | def _makemap(repo): |
|
548 | 553 | """make a (src -> vfs) map for the repo""" |
|
549 | 554 | vfsmap = { |
|
550 | 555 | _srcstore: repo.svfs, |
|
551 | 556 | _srccache: repo.cachevfs, |
|
552 | 557 | } |
|
553 | 558 | # we keep repo.vfs out of the map on purpose, there are too many dangers there |
|
554 | 559 | # (eg: .hg/hgrc) |
|
555 | 560 | assert repo.vfs not in vfsmap.values() |
|
556 | 561 | |
|
557 | 562 | return vfsmap |
|
558 | 563 | |
|
559 | 564 | |
|
560 | 565 | def _emit2(repo, entries, totalfilesize): |
|
561 | 566 | """actually emit the stream bundle""" |
|
562 | 567 | vfsmap = _makemap(repo) |
|
563 | 568 | progress = repo.ui.makeprogress( |
|
564 | 569 | _(b'bundle'), total=totalfilesize, unit=_(b'bytes') |
|
565 | 570 | ) |
|
566 | 571 | progress.update(0) |
|
567 | 572 | with maketempcopies() as copy, progress: |
|
568 | 573 | # copy is delayed until we are in the try |
|
569 | 574 | entries = [_filterfull(e, copy, vfsmap) for e in entries] |
|
570 | 575 | yield None # this release the lock on the repository |
|
571 | 576 | seen = 0 |
|
572 | 577 | |
|
573 | 578 | for src, name, ftype, data in entries: |
|
574 | 579 | vfs = vfsmap[src] |
|
575 | 580 | yield src |
|
576 | 581 | yield util.uvarintencode(len(name)) |
|
577 | 582 | if ftype == _fileappend: |
|
578 | 583 | fp = vfs(name) |
|
579 | 584 | size = data |
|
580 | 585 | elif ftype == _filefull: |
|
581 | 586 | fp = open(data, b'rb') |
|
582 | 587 | size = util.fstat(fp).st_size |
|
583 | 588 | try: |
|
584 | 589 | yield util.uvarintencode(size) |
|
585 | 590 | yield name |
|
586 | 591 | if size <= 65536: |
|
587 | 592 | chunks = (fp.read(size),) |
|
588 | 593 | else: |
|
589 | 594 | chunks = util.filechunkiter(fp, limit=size) |
|
590 | 595 | for chunk in chunks: |
|
591 | 596 | seen += len(chunk) |
|
592 | 597 | progress.update(seen) |
|
593 | 598 | yield chunk |
|
594 | 599 | finally: |
|
595 | 600 | fp.close() |
|
596 | 601 | |
|
597 | 602 | |
|
598 | 603 | def _test_sync_point_walk_1(repo): |
|
599 | 604 | """a function for synchronisation during tests""" |
|
600 | 605 | |
|
601 | 606 | |
|
602 | 607 | def _test_sync_point_walk_2(repo): |
|
603 | 608 | """a function for synchronisation during tests""" |
|
604 | 609 | |
|
605 | 610 | |
|
606 | 611 | def _v2_walk(repo, includes, excludes, includeobsmarkers): |
|
607 | 612 | """emit a series of file information useful to clone a repo |
|
608 | 613 | |
|
609 | 614 | return (entries, totalfilesize) |
|
610 | 615 | |
|
611 | 616 | entries is a list of tuple (vfs-key, file-path, file-type, size) |
|
612 | 617 | |
|
613 | 618 | - `vfs-key`: is a key to the right vfs to write the file (see _makemap) |
|
614 | 619 | - `name`: file path of the file to copy (to be fed to the vfs) |
|
615 | 620 | - `file-type`: does this file need to be copied with the source lock? |
|
616 | 621 | - `size`: the size of the file (or None) |
|
617 | 622 | """ |
|
618 | 623 | assert repo._currentlock(repo._lockref) is not None |
|
619 | 624 | entries = [] |
|
620 | 625 | totalfilesize = 0 |
|
621 | 626 | |
|
622 | 627 | matcher = None |
|
623 | 628 | if includes or excludes: |
|
624 | 629 | matcher = narrowspec.match(repo.root, includes, excludes) |
|
625 | 630 | |
|
626 | 631 | for rl_type, name, ename, size in _walkstreamfiles(repo, matcher): |
|
627 | 632 | if size: |
|
628 | 633 | ft = _fileappend |
|
629 | 634 | if rl_type & store.FILEFLAGS_VOLATILE: |
|
630 | 635 | ft = _filefull |
|
631 | 636 | entries.append((_srcstore, name, ft, size)) |
|
632 | 637 | totalfilesize += size |
|
633 | 638 | for name in _walkstreamfullstorefiles(repo): |
|
634 | 639 | if repo.svfs.exists(name): |
|
635 | 640 | totalfilesize += repo.svfs.lstat(name).st_size |
|
636 | 641 | entries.append((_srcstore, name, _filefull, None)) |
|
637 | 642 | if includeobsmarkers and repo.svfs.exists(b'obsstore'): |
|
638 | 643 | totalfilesize += repo.svfs.lstat(b'obsstore').st_size |
|
639 | 644 | entries.append((_srcstore, b'obsstore', _filefull, None)) |
|
640 | 645 | for name in cacheutil.cachetocopy(repo): |
|
641 | 646 | if repo.cachevfs.exists(name): |
|
642 | 647 | totalfilesize += repo.cachevfs.lstat(name).st_size |
|
643 | 648 | entries.append((_srccache, name, _filefull, None)) |
|
644 | 649 | return entries, totalfilesize |
|
645 | 650 | |
|
646 | 651 | |
|
647 | 652 | def generatev2(repo, includes, excludes, includeobsmarkers): |
|
648 | 653 | """Emit content for version 2 of a streaming clone. |
|
649 | 654 | |
|
650 | 655 | the data stream consists of the following entries: |
|
651 | 656 | 1) A char representing the file destination (eg: store or cache) |
|
652 | 657 | 2) A varint containing the length of the filename |
|
653 | 658 | 3) A varint containing the length of file data |
|
654 | 659 | 4) N bytes containing the filename (the internal, store-agnostic form) |
|
655 | 660 | 5) N bytes containing the file data |
|
656 | 661 | |
|
657 | 662 | Returns a 3-tuple of (file count, file size, data iterator). |
|
658 | 663 | """ |
|
659 | 664 | |
|
660 | 665 | with repo.lock(): |
|
661 | 666 | |
|
662 | 667 | repo.ui.debug(b'scanning\n') |
|
663 | 668 | |
|
664 | 669 | entries, totalfilesize = _v2_walk( |
|
665 | 670 | repo, |
|
666 | 671 | includes=includes, |
|
667 | 672 | excludes=excludes, |
|
668 | 673 | includeobsmarkers=includeobsmarkers, |
|
669 | 674 | ) |
|
670 | 675 | |
|
671 | 676 | chunks = _emit2(repo, entries, totalfilesize) |
|
672 | 677 | first = next(chunks) |
|
673 | 678 | assert first is None |
|
674 | 679 | _test_sync_point_walk_1(repo) |
|
675 | 680 | _test_sync_point_walk_2(repo) |
|
676 | 681 | |
|
677 | 682 | return len(entries), totalfilesize, chunks |
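(Editorial sketch of the per-entry framing listed in the docstring above, built on a toy base-128 varint codec. `uvarint_encode`/`uvarint_decode` are hedged stand-ins for `util.uvarintencode`/`util.uvarintdecodestream`, which are defined elsewhere; the exact on-wire encoding may differ from this toy version.)

import io

def uvarint_encode(value):
    # 7 payload bits per byte, high bit set while more bytes follow
    out = bytearray()
    while True:
        out.append((value & 0x7F) | (0x80 if value > 0x7F else 0))
        value >>= 7
        if not value:
            return bytes(out)

def uvarint_decode(fp):
    result = shift = 0
    while True:
        byte = fp.read(1)[0]
        result |= (byte & 0x7F) << shift
        if not byte & 0x80:
            return result
        shift += 7

def pack_v2_entry(src, name, data):
    # 1) destination char 2) varint name length 3) varint data length
    # 4) name bytes 5) data bytes
    return src + uvarint_encode(len(name)) + uvarint_encode(len(data)) + name + data

fp = io.BytesIO(pack_v2_entry(b's', b'00changelog.i', b'payload'))
assert fp.read(1) == b's'
namelen, datalen = uvarint_decode(fp), uvarint_decode(fp)
assert fp.read(namelen) == b'00changelog.i' and fp.read(datalen) == b'payload'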
|
678 | 683 | |
|
679 | 684 | |
|
680 | 685 | @contextlib.contextmanager |
|
681 | 686 | def nested(*ctxs): |
|
682 | 687 | this = ctxs[0] |
|
683 | 688 | rest = ctxs[1:] |
|
684 | 689 | with this: |
|
685 | 690 | if rest: |
|
686 | 691 | with nested(*rest): |
|
687 | 692 | yield |
|
688 | 693 | else: |
|
689 | 694 | yield |
|
690 | 695 | |
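(Usage sketch for `nested()` above, assuming it is in scope: it enters any number of context managers and unwinds them in reverse order, like a variadic `with` statement. The `tracked()` context manager is made up for the example.)

import contextlib

@contextlib.contextmanager
def tracked(name, log):
    log.append('enter ' + name)
    try:
        yield
    finally:
        log.append('exit ' + name)

log = []
with nested(tracked('a', log), tracked('b', log), tracked('c', log)):
    log.append('body')
# log == ['enter a', 'enter b', 'enter c', 'body', 'exit c', 'exit b', 'exit a']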
|
691 | 696 | |
|
692 | 697 | def consumev2(repo, fp, filecount, filesize): |
|
693 | 698 | """Apply the contents from a version 2 streaming clone. |
|
694 | 699 | |
|
695 | 700 | Data is read from an object that only needs to provide a ``read(size)`` |
|
696 | 701 | method. |
|
697 | 702 | """ |
|
698 | 703 | with repo.lock(): |
|
699 | 704 | repo.ui.status( |
|
700 | 705 | _(b'%d files to transfer, %s of data\n') |
|
701 | 706 | % (filecount, util.bytecount(filesize)) |
|
702 | 707 | ) |
|
703 | 708 | |
|
704 | 709 | start = util.timer() |
|
705 | 710 | progress = repo.ui.makeprogress( |
|
706 | 711 | _(b'clone'), total=filesize, unit=_(b'bytes') |
|
707 | 712 | ) |
|
708 | 713 | progress.update(0) |
|
709 | 714 | |
|
710 | 715 | vfsmap = _makemap(repo) |
|
711 | 716 | |
|
712 | 717 | with repo.transaction(b'clone'): |
|
713 | 718 | ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values()) |
|
714 | 719 | with nested(*ctxs): |
|
715 | 720 | for i in range(filecount): |
|
716 | 721 | src = util.readexactly(fp, 1) |
|
717 | 722 | vfs = vfsmap[src] |
|
718 | 723 | namelen = util.uvarintdecodestream(fp) |
|
719 | 724 | datalen = util.uvarintdecodestream(fp) |
|
720 | 725 | |
|
721 | 726 | name = util.readexactly(fp, namelen) |
|
722 | 727 | |
|
723 | 728 | if repo.ui.debugflag: |
|
724 | 729 | repo.ui.debug( |
|
725 | 730 | b'adding [%s] %s (%s)\n' |
|
726 | 731 | % (src, name, util.bytecount(datalen)) |
|
727 | 732 | ) |
|
728 | 733 | |
|
729 | 734 | with vfs(name, b'w') as ofp: |
|
730 | 735 | for chunk in util.filechunkiter(fp, limit=datalen): |
|
731 | 736 | progress.increment(step=len(chunk)) |
|
732 | 737 | ofp.write(chunk) |
|
733 | 738 | |
|
734 | 739 | # force @filecache properties to be reloaded from |
|
735 | 740 | # streamclone-ed file at next access |
|
736 | 741 | repo.invalidate(clearfilecache=True) |
|
737 | 742 | |
|
738 | 743 | elapsed = util.timer() - start |
|
739 | 744 | if elapsed <= 0: |
|
740 | 745 | elapsed = 0.001 |
|
741 | 746 | repo.ui.status( |
|
742 | 747 | _(b'transferred %s in %.1f seconds (%s/sec)\n') |
|
743 | 748 | % ( |
|
744 | 749 | util.bytecount(progress.pos), |
|
745 | 750 | elapsed, |
|
746 | 751 | util.bytecount(progress.pos / elapsed), |
|
747 | 752 | ) |
|
748 | 753 | ) |
|
749 | 754 | progress.complete() |
|
750 | 755 | |
|
751 | 756 | |
|
752 | 757 | def applybundlev2(repo, fp, filecount, filesize, requirements): |
|
753 | 758 | from . import localrepo |
|
754 | 759 | |
|
755 | 760 | missingreqs = [r for r in requirements if r not in repo.supported] |
|
756 | 761 | if missingreqs: |
|
757 | 762 | raise error.Abort( |
|
758 | 763 | _(b'unable to apply stream clone: unsupported format: %s') |
|
759 | 764 | % b', '.join(sorted(missingreqs)) |
|
760 | 765 | ) |
|
761 | 766 | |
|
762 | 767 | consumev2(repo, fp, filecount, filesize) |
|
763 | 768 | |
|
764 | 769 | # new requirements = old non-format requirements + |
|
765 | 770 | # new format-related remote requirements |
|
766 | 771 | # requirements from the streamed-in repository |
|
767 | 772 | repo.requirements = set(requirements) | ( |
|
768 | 773 | repo.requirements - repo.supportedformats |
|
769 | 774 | ) |
|
770 | 775 | repo.svfs.options = localrepo.resolvestorevfsoptions( |
|
771 | 776 | repo.ui, repo.requirements, repo.features |
|
772 | 777 | ) |
|
773 | 778 | scmutil.writereporequirements(repo) |
|
779 | ||
|
780 | ||
|
781 | def _copy_files(src_vfs_map, dst_vfs_map, entries, progress): | |
|
782 | hardlink = [True] | |
|
783 | ||
|
784 | def copy_used(): | |
|
785 | hardlink[0] = False | |
|
786 | progress.topic = _(b'copying') | |
|
787 | ||
|
788 | for k, path, size in entries: | |
|
789 | src_vfs = src_vfs_map[k] | |
|
790 | dst_vfs = dst_vfs_map[k] | |
|
791 | src_path = src_vfs.join(path) | |
|
792 | dst_path = dst_vfs.join(path) | |
|
793 | dirname = dst_vfs.dirname(path) | |
|
794 | if not dst_vfs.exists(dirname): | |
|
795 | dst_vfs.makedirs(dirname) | |
|
796 | dst_vfs.register_file(path) | |
|
797 | # XXX we could use the #nb_bytes argument. | |
|
798 | util.copyfile( | |
|
799 | src_path, | |
|
800 | dst_path, | |
|
801 | hardlink=hardlink[0], | |
|
802 | no_hardlink_cb=copy_used, | |
|
803 | check_fs_hardlink=False, | |
|
804 | ) | |
|
805 | progress.increment() | |
|
806 | return hardlink[0] | |
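(Editorial sketch of the hardlink-then-copy strategy `_copy_files()` uses, with plain os/shutil instead of `util.copyfile`: try `os.link` first and switch to real copies for the rest of the run the first time linking fails, for example across filesystems. `copy_entries()` is a made-up helper.)

import os
import shutil

def copy_entries(pairs):
    # pairs is an iterable of (src, dst); returns True when every entry
    # could be hardlinked, which drives the 'linked N files' versus
    # 'copied N files' message in local_copy() below
    hardlink = True
    for src, dst in pairs:
        os.makedirs(os.path.dirname(dst) or '.', exist_ok=True)
        if hardlink:
            try:
                os.link(src, dst)
                continue
            except OSError:
                hardlink = False  # like copy_used(): stop trying to link
        shutil.copy2(src, dst)
    return hardlink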
|
807 | ||
|
808 | ||
|
809 | def local_copy(src_repo, dest_repo): | |
|
810 | """copy all content from one local repository to another | |
|
811 | ||
|
812 | This is useful for local clone""" | |
|
813 | src_store_requirements = { | |
|
814 | r | |
|
815 | for r in src_repo.requirements | |
|
816 | if r not in requirementsmod.WORKING_DIR_REQUIREMENTS | |
|
817 | } | |
|
818 | dest_store_requirements = { | |
|
819 | r | |
|
820 | for r in dest_repo.requirements | |
|
821 | if r not in requirementsmod.WORKING_DIR_REQUIREMENTS | |
|
822 | } | |
|
823 | assert src_store_requirements == dest_store_requirements | |
|
824 | ||
|
825 | with dest_repo.lock(): | |
|
826 | with src_repo.lock(): | |
|
827 | entries, totalfilesize = _v2_walk( | |
|
828 | src_repo, | |
|
829 | includes=None, | |
|
830 | excludes=None, | |
|
831 | includeobsmarkers=True, | |
|
832 | ) | |
|
833 | src_vfs_map = _makemap(src_repo) | |
|
834 | dest_vfs_map = _makemap(dest_repo) | |
|
835 | progress = src_repo.ui.makeprogress( | |
|
836 | topic=_(b'linking'), | |
|
837 | total=len(entries), | |
|
838 | unit=_(b'files'), | |
|
839 | ) | |
|
840 | # copy files | |
|
841 | # | |
|
842 | # We could copy the full file while the source repository is locked | |
|
843 | # and the other one without the lock. However, in the linking case, | |
|
844 | # this would also require checks that nobody is appending any data | |
|
845 | # to the files while we do the clone, so this is not done yet. We | |
|
846 | # could do this blindly when copying files. | |
|
847 | files = ((k, path, size) for k, path, ftype, size in entries) | |
|
848 | hardlink = _copy_files(src_vfs_map, dest_vfs_map, files, progress) | |
|
849 | ||
|
850 | # copy bookmarks over | |
|
851 | src_book_vfs = bookmarks.bookmarksvfs(src_repo) | |
|
852 | srcbookmarks = src_book_vfs.join(b'bookmarks') | |
|
853 | dst_book_vfs = bookmarks.bookmarksvfs(dest_repo) | |
|
854 | dstbookmarks = dst_book_vfs.join(b'bookmarks') | |
|
855 | if os.path.exists(srcbookmarks): | |
|
856 | util.copyfile(srcbookmarks, dstbookmarks) | |
|
857 | progress.complete() | |
|
858 | if hardlink: | |
|
859 | msg = b'linked %d files\n' | |
|
860 | else: | |
|
861 | msg = b'copied %d files\n' | |
|
862 | src_repo.ui.debug(msg % len(entries)) | |
|
863 | ||
|
864 | with dest_repo.transaction(b"localclone") as tr: | |
|
865 | dest_repo.store.write(tr) | |
|
866 | ||
|
867 | # clean up transaction files as they do not make sense | |
|
868 | undo_files = [(dest_repo.svfs, b'undo.backupfiles')] | |
|
869 | undo_files.extend(dest_repo.undofiles()) | |
|
870 | for undovfs, undofile in undo_files: | |
|
871 | try: | |
|
872 | undovfs.unlink(undofile) | |
|
873 | except OSError as e: | |
|
874 | if e.errno != errno.ENOENT: | |
|
875 | msg = _(b'error removing %s: %s\n') | |
|
876 | path = undovfs.join(undofile) | |
|
877 | e_msg = stringutil.forcebytestr(e) | |
|
878 | msg %= (path, e_msg) | |
|
879 | dest_repo.ui.warn(msg) |
@@ -1,1324 +1,1308 b'' | |||
|
1 | 1 | #testcases sshv1 sshv2 |
|
2 | 2 | |
|
3 | 3 | #if sshv2 |
|
4 | 4 | $ cat >> $HGRCPATH << EOF |
|
5 | 5 | > [experimental] |
|
6 | 6 | > sshpeer.advertise-v2 = true |
|
7 | 7 | > sshserver.support-v2 = true |
|
8 | 8 | > EOF |
|
9 | 9 | #endif |
|
10 | 10 | |
|
11 | 11 | Prepare repo a: |
|
12 | 12 | |
|
13 | 13 | $ hg init a |
|
14 | 14 | $ cd a |
|
15 | 15 | $ echo a > a |
|
16 | 16 | $ hg add a |
|
17 | 17 | $ hg commit -m test |
|
18 | 18 | $ echo first line > b |
|
19 | 19 | $ hg add b |
|
20 | 20 | |
|
21 | 21 | Create a non-inlined filelog: |
|
22 | 22 | |
|
23 | 23 | $ "$PYTHON" -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))' |
|
24 | 24 | $ for j in 0 1 2 3 4 5 6 7 8 9; do |
|
25 | 25 | > cat data1 >> b |
|
26 | 26 | > hg commit -m test |
|
27 | 27 | > done |
|
28 | 28 | |
|
29 | 29 | List files in store/data (should show a 'b.d'): |
|
30 | 30 | |
|
31 | 31 | #if reporevlogstore |
|
32 | 32 | $ for i in .hg/store/data/*; do |
|
33 | 33 | > echo $i |
|
34 | 34 | > done |
|
35 | 35 | .hg/store/data/a.i |
|
36 | 36 | .hg/store/data/b.d |
|
37 | 37 | .hg/store/data/b.i |
|
38 | 38 | #endif |
|
39 | 39 | |
|
40 | 40 | Trigger branchcache creation: |
|
41 | 41 | |
|
42 | 42 | $ hg branches |
|
43 | 43 | default 10:a7949464abda |
|
44 | 44 | $ ls .hg/cache |
|
45 | 45 | branch2-served |
|
46 | 46 | rbc-names-v1 |
|
47 | 47 | rbc-revs-v1 |
|
48 | 48 | |
|
49 | 49 | Default operation: |
|
50 | 50 | |
|
51 | 51 | $ hg clone . ../b |
|
52 | 52 | updating to branch default |
|
53 | 53 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
54 | 54 | $ cd ../b |
|
55 | 55 | |
|
56 | 56 | Ensure branchcache got copied over: |
|
57 | 57 | |
|
58 | 58 | $ ls .hg/cache |
|
59 | 59 | branch2-base |
|
60 | 60 | branch2-immutable |
|
61 | 61 | branch2-served |
|
62 | 62 | branch2-served.hidden |
|
63 | 63 | branch2-visible |
|
64 | 64 | branch2-visible-hidden |
|
65 | 65 | rbc-names-v1 |
|
66 | 66 | rbc-revs-v1 |
|
67 | 67 | tags2 |
|
68 | 68 | tags2-served |
|
69 | 69 | |
|
70 | 70 | $ cat a |
|
71 | 71 | a |
|
72 | 72 | $ hg verify |
|
73 | 73 | checking changesets |
|
74 | 74 | checking manifests |
|
75 | 75 | crosschecking files in changesets and manifests |
|
76 | 76 | checking files |
|
77 | 77 | checked 11 changesets with 11 changes to 2 files |
|
78 | 78 | |
|
79 | 79 | Invalid dest '' must abort: |
|
80 | 80 | |
|
81 | 81 | $ hg clone . '' |
|
82 | 82 | abort: empty destination path is not valid |
|
83 | 83 | [10] |
|
84 | 84 | |
|
85 | 85 | No update, with debug option: |
|
86 | 86 | |
|
87 | 87 | #if hardlink |
|
88 | 88 | $ hg --debug clone -U . ../c --config progress.debug=true |
|
89 | linking: 1 files | |
|
90 | linking: 2 files | |
|
91 | linking: 3 files | |
|
92 | linking: 4 files | |
|
93 | linking: 5 files | |
|
94 | linking: 6 files | |
|
95 | linking: 7 files | |
|
96 | linking: 8 files | |
|
97 | linked 8 files (reporevlogstore !) | |
|
98 | linking: 9 files (reposimplestore !) | |
|
99 | linking: 10 files (reposimplestore !) | |
|
100 | linking: 11 files (reposimplestore !) | |
|
101 | linking: 12 files (reposimplestore !) | |
|
102 | linking: 13 files (reposimplestore !) | |
|
103 | linking: 14 files (reposimplestore !) | |
|
104 | linking: 15 files (reposimplestore !) | |
|
105 | linking: 16 files (reposimplestore !) | |
|
106 | linking: 17 files (reposimplestore !) | |
|
107 | linking: 18 files (reposimplestore !) | |
|
108 | linked 18 files (reposimplestore !) | |
|
89 | linking: 1/15 files (6.67%) | |
|
90 | linking: 2/15 files (13.33%) | |
|
91 | linking: 3/15 files (20.00%) | |
|
92 | linking: 4/15 files (26.67%) | |
|
93 | linking: 5/15 files (33.33%) | |
|
94 | linking: 6/15 files (40.00%) | |
|
95 | linking: 7/15 files (46.67%) | |
|
96 | linking: 8/15 files (53.33%) | |
|
97 | linking: 9/15 files (60.00%) | |
|
98 | linking: 10/15 files (66.67%) | |
|
99 | linking: 11/15 files (73.33%) | |
|
100 | linking: 12/15 files (80.00%) | |
|
101 | linking: 13/15 files (86.67%) | |
|
102 | linking: 14/15 files (93.33%) | |
|
103 | linking: 15/15 files (100.00%) | |
|
104 | linked 15 files | |
|
109 | 105 | updating the branch cache |
|
110 | 106 | #else |
|
111 | 107 | $ hg --debug clone -U . ../c --config progress.debug=true |
|
112 | 108 | linking: 1 files |
|
113 | 109 | copying: 2 files |
|
114 | 110 | copying: 3 files |
|
115 | 111 | copying: 4 files |
|
116 | 112 | copying: 5 files |
|
117 | 113 | copying: 6 files |
|
118 | 114 | copying: 7 files |
|
119 | 115 | copying: 8 files |
|
120 | copied 8 files (reporevlogstore !) | |
|
121 | copying: 9 files (reposimplestore !) | |
|
122 | copying: 10 files (reposimplestore !) | |
|
123 | copying: 11 files (reposimplestore !) | |
|
124 | copying: 12 files (reposimplestore !) | |
|
125 | copying: 13 files (reposimplestore !) | |
|
126 | copying: 14 files (reposimplestore !) | |
|
127 | copying: 15 files (reposimplestore !) | |
|
128 | copying: 16 files (reposimplestore !) | |
|
129 | copying: 17 files (reposimplestore !) | |
|
130 | copying: 18 files (reposimplestore !) | |
|
131 | copied 18 files (reposimplestore !) | |
|
132 | 116 | #endif |
|
133 | 117 | $ cd ../c |
|
134 | 118 | |
|
135 | 119 | Ensure branchcache got copied over: |
|
136 | 120 | |
|
137 | 121 | $ ls .hg/cache |
|
138 | 122 | branch2-base |
|
139 | 123 | branch2-immutable |
|
140 | 124 | branch2-served |
|
141 | 125 | branch2-served.hidden |
|
142 | 126 | branch2-visible |
|
143 | 127 | branch2-visible-hidden |
|
144 | 128 | rbc-names-v1 |
|
145 | 129 | rbc-revs-v1 |
|
146 | 130 | tags2 |
|
147 | 131 | tags2-served |
|
148 | 132 | |
|
149 | 133 | $ cat a 2>/dev/null || echo "a not present" |
|
150 | 134 | a not present |
|
151 | 135 | $ hg verify |
|
152 | 136 | checking changesets |
|
153 | 137 | checking manifests |
|
154 | 138 | crosschecking files in changesets and manifests |
|
155 | 139 | checking files |
|
156 | 140 | checked 11 changesets with 11 changes to 2 files |
|
157 | 141 | |
|
158 | 142 | Default destination: |
|
159 | 143 | |
|
160 | 144 | $ mkdir ../d |
|
161 | 145 | $ cd ../d |
|
162 | 146 | $ hg clone ../a |
|
163 | 147 | destination directory: a |
|
164 | 148 | updating to branch default |
|
165 | 149 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
166 | 150 | $ cd a |
|
167 | 151 | $ hg cat a |
|
168 | 152 | a |
|
169 | 153 | $ cd ../.. |
|
170 | 154 | |
|
171 | 155 | Check that we drop the 'file:' from the path before writing the .hgrc: |
|
172 | 156 | |
|
173 | 157 | $ hg clone file:a e |
|
174 | 158 | updating to branch default |
|
175 | 159 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
176 | 160 | $ grep 'file:' e/.hg/hgrc |
|
177 | 161 | [1] |
|
178 | 162 | |
|
179 | 163 | Check that path aliases are expanded: |
|
180 | 164 | |
|
181 | 165 | $ hg clone -q -U --config 'paths.foobar=a#0' foobar f |
|
182 | 166 | $ hg -R f showconfig paths.default |
|
183 | 167 | $TESTTMP/a#0 |
|
184 | 168 | |
|
185 | 169 | Use --pull: |
|
186 | 170 | |
|
187 | 171 | $ hg clone --pull a g |
|
188 | 172 | requesting all changes |
|
189 | 173 | adding changesets |
|
190 | 174 | adding manifests |
|
191 | 175 | adding file changes |
|
192 | 176 | added 11 changesets with 11 changes to 2 files |
|
193 | 177 | new changesets acb14030fe0a:a7949464abda |
|
194 | 178 | updating to branch default |
|
195 | 179 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
196 | 180 | $ hg -R g verify |
|
197 | 181 | checking changesets |
|
198 | 182 | checking manifests |
|
199 | 183 | crosschecking files in changesets and manifests |
|
200 | 184 | checking files |
|
201 | 185 | checked 11 changesets with 11 changes to 2 files |
|
202 | 186 | |
|
203 | 187 | Invalid dest '' with --pull must abort (issue2528): |
|
204 | 188 | |
|
205 | 189 | $ hg clone --pull a '' |
|
206 | 190 | abort: empty destination path is not valid |
|
207 | 191 | [10] |
|
208 | 192 | |
|
209 | 193 | Clone to '.': |
|
210 | 194 | |
|
211 | 195 | $ mkdir h |
|
212 | 196 | $ cd h |
|
213 | 197 | $ hg clone ../a . |
|
214 | 198 | updating to branch default |
|
215 | 199 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
216 | 200 | $ cd .. |
|
217 | 201 | |
|
218 | 202 | |
|
219 | 203 | *** Tests for option -u *** |
|
220 | 204 | |
|
221 | 205 | Adding some more history to repo a: |
|
222 | 206 | |
|
223 | 207 | $ cd a |
|
224 | 208 | $ hg tag ref1 |
|
225 | 209 | $ echo the quick brown fox >a |
|
226 | 210 | $ hg ci -m "hacked default" |
|
227 | 211 | $ hg up ref1 |
|
228 | 212 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
229 | 213 | $ hg branch stable |
|
230 | 214 | marked working directory as branch stable |
|
231 | 215 | (branches are permanent and global, did you want a bookmark?) |
|
232 | 216 | $ echo some text >a |
|
233 | 217 | $ hg ci -m "starting branch stable" |
|
234 | 218 | $ hg tag ref2 |
|
235 | 219 | $ echo some more text >a |
|
236 | 220 | $ hg ci -m "another change for branch stable" |
|
237 | 221 | $ hg up ref2 |
|
238 | 222 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
239 | 223 | $ hg parents |
|
240 | 224 | changeset: 13:e8ece76546a6 |
|
241 | 225 | branch: stable |
|
242 | 226 | tag: ref2 |
|
243 | 227 | parent: 10:a7949464abda |
|
244 | 228 | user: test |
|
245 | 229 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
246 | 230 | summary: starting branch stable |
|
247 | 231 | |
|
248 | 232 | |
|
249 | 233 | Repo a has two heads: |
|
250 | 234 | |
|
251 | 235 | $ hg heads |
|
252 | 236 | changeset: 15:0aae7cf88f0d |
|
253 | 237 | branch: stable |
|
254 | 238 | tag: tip |
|
255 | 239 | user: test |
|
256 | 240 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
257 | 241 | summary: another change for branch stable |
|
258 | 242 | |
|
259 | 243 | changeset: 12:f21241060d6a |
|
260 | 244 | user: test |
|
261 | 245 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
262 | 246 | summary: hacked default |
|
263 | 247 | |
|
264 | 248 | |
|
265 | 249 | $ cd .. |
|
266 | 250 | |
|
267 | 251 | |
|
268 | 252 | Testing --noupdate with --updaterev (must abort): |
|
269 | 253 | |
|
270 | 254 | $ hg clone --noupdate --updaterev 1 a ua |
|
271 | 255 | abort: cannot specify both --noupdate and --updaterev |
|
272 | 256 | [10] |
|
273 | 257 | |
|
274 | 258 | |
|
275 | 259 | Testing clone -u: |
|
276 | 260 | |
|
277 | 261 | $ hg clone -u . a ua |
|
278 | 262 | updating to branch stable |
|
279 | 263 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
280 | 264 | |
|
281 | 265 | Repo ua has both heads: |
|
282 | 266 | |
|
283 | 267 | $ hg -R ua heads |
|
284 | 268 | changeset: 15:0aae7cf88f0d |
|
285 | 269 | branch: stable |
|
286 | 270 | tag: tip |
|
287 | 271 | user: test |
|
288 | 272 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
289 | 273 | summary: another change for branch stable |
|
290 | 274 | |
|
291 | 275 | changeset: 12:f21241060d6a |
|
292 | 276 | user: test |
|
293 | 277 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
294 | 278 | summary: hacked default |
|
295 | 279 | |
|
296 | 280 | |
|
297 | 281 | Same revision checked out in repo a and ua: |
|
298 | 282 | |
|
299 | 283 | $ hg -R a parents --template "{node|short}\n" |
|
300 | 284 | e8ece76546a6 |
|
301 | 285 | $ hg -R ua parents --template "{node|short}\n" |
|
302 | 286 | e8ece76546a6 |
|
303 | 287 | |
|
304 | 288 | $ rm -r ua |
|
305 | 289 | |
|
306 | 290 | |
|
307 | 291 | Testing clone --pull -u: |
|
308 | 292 | |
|
309 | 293 | $ hg clone --pull -u . a ua |
|
310 | 294 | requesting all changes |
|
311 | 295 | adding changesets |
|
312 | 296 | adding manifests |
|
313 | 297 | adding file changes |
|
314 | 298 | added 16 changesets with 16 changes to 3 files (+1 heads) |
|
315 | 299 | new changesets acb14030fe0a:0aae7cf88f0d |
|
316 | 300 | updating to branch stable |
|
317 | 301 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
318 | 302 | |
|
319 | 303 | Repo ua has both heads: |
|
320 | 304 | |
|
321 | 305 | $ hg -R ua heads |
|
322 | 306 | changeset: 15:0aae7cf88f0d |
|
323 | 307 | branch: stable |
|
324 | 308 | tag: tip |
|
325 | 309 | user: test |
|
326 | 310 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
327 | 311 | summary: another change for branch stable |
|
328 | 312 | |
|
329 | 313 | changeset: 12:f21241060d6a |
|
330 | 314 | user: test |
|
331 | 315 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
332 | 316 | summary: hacked default |
|
333 | 317 | |
|
334 | 318 | |
|
335 | 319 | Same revision checked out in repo a and ua: |
|
336 | 320 | |
|
337 | 321 | $ hg -R a parents --template "{node|short}\n" |
|
338 | 322 | e8ece76546a6 |
|
339 | 323 | $ hg -R ua parents --template "{node|short}\n" |
|
340 | 324 | e8ece76546a6 |
|
341 | 325 | |
|
342 | 326 | $ rm -r ua |
|
343 | 327 | |
|
344 | 328 | |
|
345 | 329 | Testing clone -u <branch>: |
|
346 | 330 | |
|
347 | 331 | $ hg clone -u stable a ua |
|
348 | 332 | updating to branch stable |
|
349 | 333 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
350 | 334 | |
|
351 | 335 | Repo ua has both heads: |
|
352 | 336 | |
|
353 | 337 | $ hg -R ua heads |
|
354 | 338 | changeset: 15:0aae7cf88f0d |
|
355 | 339 | branch: stable |
|
356 | 340 | tag: tip |
|
357 | 341 | user: test |
|
358 | 342 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
359 | 343 | summary: another change for branch stable |
|
360 | 344 | |
|
361 | 345 | changeset: 12:f21241060d6a |
|
362 | 346 | user: test |
|
363 | 347 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
364 | 348 | summary: hacked default |
|
365 | 349 | |
|
366 | 350 | |
|
367 | 351 | Branch 'stable' is checked out: |
|
368 | 352 | |
|
369 | 353 | $ hg -R ua parents |
|
370 | 354 | changeset: 15:0aae7cf88f0d |
|
371 | 355 | branch: stable |
|
372 | 356 | tag: tip |
|
373 | 357 | user: test |
|
374 | 358 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
375 | 359 | summary: another change for branch stable |
|
376 | 360 | |
|
377 | 361 | |
|
378 | 362 | $ rm -r ua |
|
379 | 363 | |
|
380 | 364 | |
|
381 | 365 | Testing default checkout: |
|
382 | 366 | |
|
383 | 367 | $ hg clone a ua |
|
384 | 368 | updating to branch default |
|
385 | 369 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
386 | 370 | |
|
387 | 371 | Repo ua has both heads: |
|
388 | 372 | |
|
389 | 373 | $ hg -R ua heads |
|
390 | 374 | changeset: 15:0aae7cf88f0d |
|
391 | 375 | branch: stable |
|
392 | 376 | tag: tip |
|
393 | 377 | user: test |
|
394 | 378 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
395 | 379 | summary: another change for branch stable |
|
396 | 380 | |
|
397 | 381 | changeset: 12:f21241060d6a |
|
398 | 382 | user: test |
|
399 | 383 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
400 | 384 | summary: hacked default |
|
401 | 385 | |
|
402 | 386 | |
|
403 | 387 | Branch 'default' is checked out: |
|
404 | 388 | |
|
405 | 389 | $ hg -R ua parents |
|
406 | 390 | changeset: 12:f21241060d6a |
|
407 | 391 | user: test |
|
408 | 392 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
409 | 393 | summary: hacked default |
|
410 | 394 | |
|
411 | 395 | Test clone with a branch named "@" (issue3677) |
|
412 | 396 | |
|
413 | 397 | $ hg -R ua branch @ |
|
414 | 398 | marked working directory as branch @ |
|
415 | 399 | $ hg -R ua commit -m 'created branch @' |
|
416 | 400 | $ hg clone ua atbranch |
|
417 | 401 | updating to branch default |
|
418 | 402 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
419 | 403 | $ hg -R atbranch heads |
|
420 | 404 | changeset: 16:798b6d97153e |
|
421 | 405 | branch: @ |
|
422 | 406 | tag: tip |
|
423 | 407 | parent: 12:f21241060d6a |
|
424 | 408 | user: test |
|
425 | 409 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
426 | 410 | summary: created branch @ |
|
427 | 411 | |
|
428 | 412 | changeset: 15:0aae7cf88f0d |
|
429 | 413 | branch: stable |
|
430 | 414 | user: test |
|
431 | 415 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
432 | 416 | summary: another change for branch stable |
|
433 | 417 | |
|
434 | 418 | changeset: 12:f21241060d6a |
|
435 | 419 | user: test |
|
436 | 420 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
437 | 421 | summary: hacked default |
|
438 | 422 | |
|
439 | 423 | $ hg -R atbranch parents |
|
440 | 424 | changeset: 12:f21241060d6a |
|
441 | 425 | user: test |
|
442 | 426 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
443 | 427 | summary: hacked default |
|
444 | 428 | |
|
445 | 429 | |
|
446 | 430 | $ rm -r ua atbranch |
|
447 | 431 | |
|
448 | 432 | |
|
449 | 433 | Testing #<branch>: |
|
450 | 434 | |
|
451 | 435 | $ hg clone -u . a#stable ua |
|
452 | 436 | adding changesets |
|
453 | 437 | adding manifests |
|
454 | 438 | adding file changes |
|
455 | 439 | added 14 changesets with 14 changes to 3 files |
|
456 | 440 | new changesets acb14030fe0a:0aae7cf88f0d |
|
457 | 441 | updating to branch stable |
|
458 | 442 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
459 | 443 | |
|
460 | 444 | Repo ua has branches 'stable' and 'default' (was changed in fd511e9eeea6):
|
461 | 445 | |
|
462 | 446 | $ hg -R ua heads |
|
463 | 447 | changeset: 13:0aae7cf88f0d |
|
464 | 448 | branch: stable |
|
465 | 449 | tag: tip |
|
466 | 450 | user: test |
|
467 | 451 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
468 | 452 | summary: another change for branch stable |
|
469 | 453 | |
|
470 | 454 | changeset: 10:a7949464abda |
|
471 | 455 | user: test |
|
472 | 456 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
473 | 457 | summary: test |
|
474 | 458 | |
|
475 | 459 | |
|
476 | 460 | Same revision checked out in repo a and ua: |
|
477 | 461 | |
|
478 | 462 | $ hg -R a parents --template "{node|short}\n" |
|
479 | 463 | e8ece76546a6 |
|
480 | 464 | $ hg -R ua parents --template "{node|short}\n" |
|
481 | 465 | e8ece76546a6 |
|
482 | 466 | |
|
483 | 467 | $ rm -r ua |
|
484 | 468 | |
|
485 | 469 | |
|
486 | 470 | Testing -u -r <branch>: |
|
487 | 471 | |
|
488 | 472 | $ hg clone -u . -r stable a ua |
|
489 | 473 | adding changesets |
|
490 | 474 | adding manifests |
|
491 | 475 | adding file changes |
|
492 | 476 | added 14 changesets with 14 changes to 3 files |
|
493 | 477 | new changesets acb14030fe0a:0aae7cf88f0d |
|
494 | 478 | updating to branch stable |
|
495 | 479 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
496 | 480 | |
|
497 | 481 | Repo ua has branches 'stable' and 'default' (was changed in fd511e9eeea6):
|
498 | 482 | |
|
499 | 483 | $ hg -R ua heads |
|
500 | 484 | changeset: 13:0aae7cf88f0d |
|
501 | 485 | branch: stable |
|
502 | 486 | tag: tip |
|
503 | 487 | user: test |
|
504 | 488 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
505 | 489 | summary: another change for branch stable |
|
506 | 490 | |
|
507 | 491 | changeset: 10:a7949464abda |
|
508 | 492 | user: test |
|
509 | 493 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
510 | 494 | summary: test |
|
511 | 495 | |
|
512 | 496 | |
|
513 | 497 | Same revision checked out in repo a and ua: |
|
514 | 498 | |
|
515 | 499 | $ hg -R a parents --template "{node|short}\n" |
|
516 | 500 | e8ece76546a6 |
|
517 | 501 | $ hg -R ua parents --template "{node|short}\n" |
|
518 | 502 | e8ece76546a6 |
|
519 | 503 | |
|
520 | 504 | $ rm -r ua |
|
521 | 505 | |
|
522 | 506 | |
|
523 | 507 | Testing -r <branch>: |
|
524 | 508 | |
|
525 | 509 | $ hg clone -r stable a ua |
|
526 | 510 | adding changesets |
|
527 | 511 | adding manifests |
|
528 | 512 | adding file changes |
|
529 | 513 | added 14 changesets with 14 changes to 3 files |
|
530 | 514 | new changesets acb14030fe0a:0aae7cf88f0d |
|
531 | 515 | updating to branch stable |
|
532 | 516 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
533 | 517 | |
|
534 | 518 | Repo ua has branches 'stable' and 'default' (was changed in fd511e9eeea6):
|
535 | 519 | |
|
536 | 520 | $ hg -R ua heads |
|
537 | 521 | changeset: 13:0aae7cf88f0d |
|
538 | 522 | branch: stable |
|
539 | 523 | tag: tip |
|
540 | 524 | user: test |
|
541 | 525 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
542 | 526 | summary: another change for branch stable |
|
543 | 527 | |
|
544 | 528 | changeset: 10:a7949464abda |
|
545 | 529 | user: test |
|
546 | 530 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
547 | 531 | summary: test |
|
548 | 532 | |
|
549 | 533 | |
|
550 | 534 | Branch 'stable' is checked out: |
|
551 | 535 | |
|
552 | 536 | $ hg -R ua parents |
|
553 | 537 | changeset: 13:0aae7cf88f0d |
|
554 | 538 | branch: stable |
|
555 | 539 | tag: tip |
|
556 | 540 | user: test |
|
557 | 541 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
558 | 542 | summary: another change for branch stable |
|
559 | 543 | |
|
560 | 544 | |
|
561 | 545 | $ rm -r ua |
|
562 | 546 | |
|
563 | 547 | |
|
564 | 548 | Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not |
|
565 | 549 | iterable in addbranchrevs() |
|
566 | 550 | |
|
567 | 551 | $ cat <<EOF > simpleclone.py |
|
568 | 552 | > from mercurial import hg, ui as uimod |
|
569 | 553 | > myui = uimod.ui.load() |
|
570 | 554 | > repo = hg.repository(myui, b'a') |
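> # cloning with an empty options dict used to hit the TypeError in addbranchrevs() (issue2267)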
|
571 | 555 | > hg.clone(myui, {}, repo, dest=b"ua") |
|
572 | 556 | > EOF |
|
573 | 557 | |
|
574 | 558 | $ "$PYTHON" simpleclone.py |
|
575 | 559 | updating to branch default |
|
576 | 560 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
577 | 561 | |
|
578 | 562 | $ rm -r ua |
|
579 | 563 | |
|
580 | 564 | $ cat <<EOF > branchclone.py |
|
581 | 565 | > from mercurial import extensions, hg, ui as uimod |
|
582 | 566 | > myui = uimod.ui.load() |
|
583 | 567 | > extensions.loadall(myui) |
|
584 | 568 | > extensions.populateui(myui) |
|
585 | 569 | > repo = hg.repository(myui, b'a') |
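> # request only the "stable" branch through the API, mirroring "hg clone -b stable"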
|
586 | 570 | > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable"]) |
|
587 | 571 | > EOF |
|
588 | 572 | |
|
589 | 573 | $ "$PYTHON" branchclone.py |
|
590 | 574 | adding changesets |
|
591 | 575 | adding manifests |
|
592 | 576 | adding file changes |
|
593 | 577 | added 14 changesets with 14 changes to 3 files |
|
594 | 578 | new changesets acb14030fe0a:0aae7cf88f0d |
|
595 | 579 | updating to branch stable |
|
596 | 580 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
597 | 581 | $ rm -r ua |
|
598 | 582 | |
|
599 | 583 | |
|
600 | 584 | Test clone with special '@' bookmark: |
|
601 | 585 | $ cd a |
|
602 | 586 | $ hg bookmark -r a7949464abda @ # branch point of stable from default |
|
603 | 587 | $ hg clone . ../i |
|
604 | 588 | updating to bookmark @ |
|
605 | 589 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
606 | 590 | $ hg id -i ../i |
|
607 | 591 | a7949464abda |
|
608 | 592 | $ rm -r ../i |
|
609 | 593 | |
|
610 | 594 | $ hg bookmark -f -r stable @ |
|
611 | 595 | $ hg bookmarks |
|
612 | 596 | @ 15:0aae7cf88f0d |
|
613 | 597 | $ hg clone . ../i |
|
614 | 598 | updating to bookmark @ on branch stable |
|
615 | 599 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
616 | 600 | $ hg id -i ../i |
|
617 | 601 | 0aae7cf88f0d |
|
618 | 602 | $ cd "$TESTTMP" |
|
619 | 603 | |
|
620 | 604 | |
|
621 | 605 | Testing failures: |
|
622 | 606 | |
|
623 | 607 | $ mkdir fail |
|
624 | 608 | $ cd fail |
|
625 | 609 | |
|
626 | 610 | No local source |
|
627 | 611 | |
|
628 | 612 | $ hg clone a b |
|
629 | 613 | abort: repository a not found |
|
630 | 614 | [255] |
|
631 | 615 | |
|
632 | 616 | Invalid URL |
|
633 | 617 | |
|
634 | 618 | $ hg clone http://invalid:url/a b |
|
635 | 619 | abort: error: nonnumeric port: 'url' |
|
636 | 620 | [100] |
|
637 | 621 | |
|
638 | 622 | No remote source |
|
639 | 623 | |
|
640 | 624 | #if windows |
|
641 | 625 | $ hg clone http://$LOCALIP:3121/a b |
|
642 | 626 | abort: error: * (glob) |
|
643 | 627 | [100] |
|
644 | 628 | #else |
|
645 | 629 | $ hg clone http://$LOCALIP:3121/a b |
|
646 | 630 | abort: error: *refused* (glob) |
|
647 | 631 | [100] |
|
648 | 632 | #endif |
|
649 | 633 | $ rm -rf b # work around bug with http clone |
|
650 | 634 | |
|
651 | 635 | |
|
652 | 636 | #if unix-permissions no-root |
|
653 | 637 | |
|
654 | 638 | Inaccessible source |
|
655 | 639 | |
|
656 | 640 | $ mkdir a |
|
657 | 641 | $ chmod 000 a |
|
658 | 642 | $ hg clone a b |
|
659 | 643 | abort: Permission denied: *$TESTTMP/fail/a/.hg* (glob) |
|
660 | 644 | [255] |
|
661 | 645 | |
|
662 | 646 | Inaccessible destination |
|
663 | 647 | |
|
664 | 648 | $ hg init b |
|
665 | 649 | $ cd b |
|
666 | 650 | $ hg clone . ../a |
|
667 | 651 | abort: Permission denied: *../a* (glob) |
|
668 | 652 | [255] |
|
669 | 653 | $ cd .. |
|
670 | 654 | $ chmod 700 a |
|
671 | 655 | $ rm -r a b |
|
672 | 656 | |
|
673 | 657 | #endif |
|
674 | 658 | |
|
675 | 659 | |
|
676 | 660 | #if fifo |
|
677 | 661 | |
|
678 | 662 | Source of wrong type |
|
679 | 663 | |
|
680 | 664 | $ mkfifo a |
|
681 | 665 | $ hg clone a b |
|
682 | 666 | abort: $ENOTDIR$: *$TESTTMP/fail/a/.hg* (glob) |
|
683 | 667 | [255] |
|
684 | 668 | $ rm a |
|
685 | 669 | |
|
686 | 670 | #endif |
|
687 | 671 | |
|
688 | 672 | Default destination, same directory |
|
689 | 673 | |
|
690 | 674 | $ hg init q |
|
691 | 675 | $ hg clone q |
|
692 | 676 | destination directory: q |
|
693 | 677 | abort: destination 'q' is not empty |
|
694 | 678 | [10] |
|
695 | 679 | |
|
696 | 680 | destination directory not empty |
|
697 | 681 | |
|
698 | 682 | $ mkdir a |
|
699 | 683 | $ echo stuff > a/a |
|
700 | 684 | $ hg clone q a |
|
701 | 685 | abort: destination 'a' is not empty |
|
702 | 686 | [10] |
|
703 | 687 | |
|
704 | 688 | |
|
705 | 689 | #if unix-permissions no-root |
|
706 | 690 | |
|
707 | 691 | leave existing directory in place after clone failure |
|
708 | 692 | |
|
709 | 693 | $ hg init c |
|
710 | 694 | $ cd c |
|
711 | 695 | $ echo c > c |
|
712 | 696 | $ hg commit -A -m test |
|
713 | 697 | adding c |
|
714 | 698 | $ chmod -rx .hg/store/data |
|
715 | 699 | $ cd .. |
|
716 | 700 | $ mkdir d |
|
717 | 701 | $ hg clone c d 2> err |
|
718 | 702 | [255] |
|
719 | 703 | $ test -d d |
|
720 | 704 | $ test -d d/.hg |
|
721 | 705 | [1] |
|
722 | 706 | |
|
723 | 707 | re-enable permissions to allow deletion
|
724 | 708 | |
|
725 | 709 | $ chmod +rx c/.hg/store/data |
|
726 | 710 | |
|
727 | 711 | #endif |
|
728 | 712 | |
|
729 | 713 | $ cd .. |
|
730 | 714 | |
|
731 | 715 | Test clone from a repository in (emulated) revlog format 0 (issue4203):
|
732 | 716 | |
|
733 | 717 | $ mkdir issue4203 |
|
734 | 718 | $ mkdir -p src/.hg |
|
735 | 719 | $ echo foo > src/foo |
|
736 | 720 | $ hg -R src add src/foo |
|
737 | 721 | $ hg -R src commit -m '#0' |
|
738 | 722 | $ hg -R src log -q |
|
739 | 723 | 0:e1bab28bca43 |
|
740 | 724 | $ hg -R src debugrevlog -c | egrep 'format|flags' |
|
741 | 725 | format : 0 |
|
742 | 726 | flags : (none) |
|
743 | 727 | $ hg root -R src -T json | sed 's|\\\\|\\|g' |
|
744 | 728 | [ |
|
745 | 729 | { |
|
746 | 730 | "hgpath": "$TESTTMP/src/.hg", |
|
747 | 731 | "reporoot": "$TESTTMP/src", |
|
748 | 732 | "storepath": "$TESTTMP/src/.hg" |
|
749 | 733 | } |
|
750 | 734 | ] |
|
751 | 735 | $ hg clone -U -q src dst |
|
752 | 736 | $ hg -R dst log -q |
|
753 | 737 | 0:e1bab28bca43 |
|
754 | 738 | |
|
755 | 739 | Create repositories to test auto sharing functionality |
|
756 | 740 | |
|
757 | 741 | $ cat >> $HGRCPATH << EOF |
|
758 | 742 | > [extensions] |
|
759 | 743 | > share= |
|
760 | 744 | > EOF |
|
761 | 745 | |
|
762 | 746 | $ hg init empty |
|
763 | 747 | $ hg init source1a |
|
764 | 748 | $ cd source1a |
|
765 | 749 | $ echo initial1 > foo |
|
766 | 750 | $ hg -q commit -A -m initial |
|
767 | 751 | $ echo second > foo |
|
768 | 752 | $ hg commit -m second |
|
769 | 753 | $ cd .. |
|
770 | 754 | |
|
771 | 755 | $ hg init filteredrev0 |
|
772 | 756 | $ cd filteredrev0 |
|
773 | 757 | $ cat >> .hg/hgrc << EOF |
|
774 | 758 | > [experimental] |
|
775 | 759 | > evolution.createmarkers=True |
|
776 | 760 | > EOF |
|
777 | 761 | $ echo initial1 > foo |
|
778 | 762 | $ hg -q commit -A -m initial0 |
|
779 | 763 | $ hg -q up -r null |
|
780 | 764 | $ echo initial2 > foo |
|
781 | 765 | $ hg -q commit -A -m initial1 |
|
782 | 766 | $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8 |
|
783 | 767 | 1 new obsolescence markers |
|
784 | 768 | obsoleted 1 changesets |
|
785 | 769 | $ cd .. |
|
786 | 770 | |
|
787 | 771 | $ hg -q clone --pull source1a source1b |
|
788 | 772 | $ cd source1a |
|
789 | 773 | $ hg bookmark bookA |
|
790 | 774 | $ echo 1a > foo |
|
791 | 775 | $ hg commit -m 1a |
|
792 | 776 | $ cd ../source1b |
|
793 | 777 | $ hg -q up -r 0 |
|
794 | 778 | $ echo head1 > foo |
|
795 | 779 | $ hg commit -m head1 |
|
796 | 780 | created new head |
|
797 | 781 | $ hg bookmark head1 |
|
798 | 782 | $ hg -q up -r 0 |
|
799 | 783 | $ echo head2 > foo |
|
800 | 784 | $ hg commit -m head2 |
|
801 | 785 | created new head |
|
802 | 786 | $ hg bookmark head2 |
|
803 | 787 | $ hg -q up -r 0 |
|
804 | 788 | $ hg branch branch1 |
|
805 | 789 | marked working directory as branch branch1 |
|
806 | 790 | (branches are permanent and global, did you want a bookmark?) |
|
807 | 791 | $ echo branch1 > foo |
|
808 | 792 | $ hg commit -m branch1 |
|
809 | 793 | $ hg -q up -r 0 |
|
810 | 794 | $ hg branch branch2 |
|
811 | 795 | marked working directory as branch branch2 |
|
812 | 796 | $ echo branch2 > foo |
|
813 | 797 | $ hg commit -m branch2 |
|
814 | 798 | $ cd .. |
|
815 | 799 | $ hg init source2 |
|
816 | 800 | $ cd source2 |
|
817 | 801 | $ echo initial2 > foo |
|
818 | 802 | $ hg -q commit -A -m initial2 |
|
819 | 803 | $ echo second > foo |
|
820 | 804 | $ hg commit -m second |
|
821 | 805 | $ cd .. |
|
822 | 806 | |
|
823 | 807 | Clone with auto share from an empty repo should not result in share |
|
824 | 808 | |
|
825 | 809 | $ mkdir share |
|
826 | 810 | $ hg --config share.pool=share clone empty share-empty |
|
827 | 811 | (not using pooled storage: remote appears to be empty) |
|
828 | 812 | updating to branch default |
|
829 | 813 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
830 | 814 | $ ls share |
|
831 | 815 | $ test -d share-empty/.hg/store |
|
832 | 816 | $ test -f share-empty/.hg/sharedpath |
|
833 | 817 | [1] |
|
834 | 818 | |
|
835 | 819 | Clone with auto share from a repo with filtered revision 0 should not result in share |
|
836 | 820 | |
|
837 | 821 | $ hg --config share.pool=share clone filteredrev0 share-filtered |
|
838 | 822 | (not using pooled storage: unable to resolve identity of remote) |
|
839 | 823 | requesting all changes |
|
840 | 824 | adding changesets |
|
841 | 825 | adding manifests |
|
842 | 826 | adding file changes |
|
843 | 827 | added 1 changesets with 1 changes to 1 files |
|
844 | 828 | new changesets e082c1832e09 |
|
845 | 829 | updating to branch default |
|
846 | 830 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
847 | 831 | |
|
848 | 832 | Clone from repo with content should result in shared store being created |
|
849 | 833 | |
|
850 | 834 | $ hg --config share.pool=share clone source1a share-dest1a |
|
851 | 835 | (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1) |
|
852 | 836 | requesting all changes |
|
853 | 837 | adding changesets |
|
854 | 838 | adding manifests |
|
855 | 839 | adding file changes |
|
856 | 840 | added 3 changesets with 3 changes to 1 files |
|
857 | 841 | new changesets b5f04eac9d8f:e5bfe23c0b47 |
|
858 | 842 | searching for changes |
|
859 | 843 | no changes found |
|
860 | 844 | adding remote bookmark bookA |
|
861 | 845 | updating working directory |
|
862 | 846 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
863 | 847 | |
|
864 | 848 | The shared repo should have been created |
|
865 | 849 | |
|
866 | 850 | $ ls share |
|
867 | 851 | b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1 |
|
868 | 852 | |
|
869 | 853 | The destination should point to it |
|
870 | 854 | |
|
871 | 855 | $ cat share-dest1a/.hg/sharedpath; echo |
|
872 | 856 | $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg |
|
873 | 857 | |
|
874 | 858 | The destination should have bookmarks |
|
875 | 859 | |
|
876 | 860 | $ hg -R share-dest1a bookmarks |
|
877 | 861 | bookA 2:e5bfe23c0b47 |
|
878 | 862 | |
|
879 | 863 | The default path should be the remote, not the share |
|
880 | 864 | |
|
881 | 865 | $ hg -R share-dest1a config paths.default |
|
882 | 866 | $TESTTMP/source1a |
|
883 | 867 | |
|
884 | 868 | Clone with existing share dir should result in pull + share |
|
885 | 869 | |
|
886 | 870 | $ hg --config share.pool=share clone source1b share-dest1b |
|
887 | 871 | (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1) |
|
888 | 872 | searching for changes |
|
889 | 873 | adding changesets |
|
890 | 874 | adding manifests |
|
891 | 875 | adding file changes |
|
892 | 876 | adding remote bookmark head1 |
|
893 | 877 | adding remote bookmark head2 |
|
894 | 878 | added 4 changesets with 4 changes to 1 files (+4 heads) |
|
895 | 879 | new changesets 4a8dc1ab4c13:6bacf4683960 |
|
896 | 880 | updating working directory |
|
897 | 881 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
898 | 882 | |
|
899 | 883 | $ ls share |
|
900 | 884 | b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1 |
|
901 | 885 | |
|
902 | 886 | $ cat share-dest1b/.hg/sharedpath; echo |
|
903 | 887 | $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg |
|
904 | 888 | |
|
905 | 889 | We only get bookmarks from the remote, not everything in the share |
|
906 | 890 | |
|
907 | 891 | $ hg -R share-dest1b bookmarks |
|
908 | 892 | head1 3:4a8dc1ab4c13 |
|
909 | 893 | head2 4:99f71071f117 |
|
910 | 894 | |
|
911 | 895 | Default path should be source, not share. |
|
912 | 896 | |
|
913 | 897 | $ hg -R share-dest1b config paths.default |
|
914 | 898 | $TESTTMP/source1b |
|
915 | 899 | |
|
916 | 900 | Checked out revision should be head of default branch |
|
917 | 901 | |
|
918 | 902 | $ hg -R share-dest1b log -r . |
|
919 | 903 | changeset: 4:99f71071f117 |
|
920 | 904 | bookmark: head2 |
|
921 | 905 | parent: 0:b5f04eac9d8f |
|
922 | 906 | user: test |
|
923 | 907 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
924 | 908 | summary: head2 |
|
925 | 909 | |
|
926 | 910 | |
|
927 | 911 | Clone from unrelated repo should result in new share |
|
928 | 912 | |
|
929 | 913 | $ hg --config share.pool=share clone source2 share-dest2 |
|
930 | 914 | (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e) |
|
931 | 915 | requesting all changes |
|
932 | 916 | adding changesets |
|
933 | 917 | adding manifests |
|
934 | 918 | adding file changes |
|
935 | 919 | added 2 changesets with 2 changes to 1 files |
|
936 | 920 | new changesets 22aeff664783:63cf6c3dba4a |
|
937 | 921 | searching for changes |
|
938 | 922 | no changes found |
|
939 | 923 | updating working directory |
|
940 | 924 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
941 | 925 | |
|
942 | 926 | $ ls share |
|
943 | 927 | 22aeff664783fd44c6d9b435618173c118c3448e |
|
944 | 928 | b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1 |
|
945 | 929 | |
|
946 | 930 | remote naming mode works as advertised |
|
947 | 931 | |
|
948 | 932 | $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a |
|
949 | 933 | (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde) |
|
950 | 934 | requesting all changes |
|
951 | 935 | adding changesets |
|
952 | 936 | adding manifests |
|
953 | 937 | adding file changes |
|
954 | 938 | added 3 changesets with 3 changes to 1 files |
|
955 | 939 | new changesets b5f04eac9d8f:e5bfe23c0b47 |
|
956 | 940 | searching for changes |
|
957 | 941 | no changes found |
|
958 | 942 | adding remote bookmark bookA |
|
959 | 943 | updating working directory |
|
960 | 944 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
961 | 945 | |
|
962 | 946 | $ ls shareremote |
|
963 | 947 | 195bb1fcdb595c14a6c13e0269129ed78f6debde |
|
964 | 948 | |
|
965 | 949 | $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b |
|
966 | 950 | (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46) |
|
967 | 951 | requesting all changes |
|
968 | 952 | adding changesets |
|
969 | 953 | adding manifests |
|
970 | 954 | adding file changes |
|
971 | 955 | added 6 changesets with 6 changes to 1 files (+4 heads) |
|
972 | 956 | new changesets b5f04eac9d8f:6bacf4683960 |
|
973 | 957 | searching for changes |
|
974 | 958 | no changes found |
|
975 | 959 | adding remote bookmark head1 |
|
976 | 960 | adding remote bookmark head2 |
|
977 | 961 | updating working directory |
|
978 | 962 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
979 | 963 | |
|
980 | 964 | $ ls shareremote |
|
981 | 965 | 195bb1fcdb595c14a6c13e0269129ed78f6debde |
|
982 | 966 | c0d4f83847ca2a873741feb7048a45085fd47c46 |
|
983 | 967 | |
|
984 | 968 | request to clone a single revision is respected in sharing mode |
|
985 | 969 | |
|
986 | 970 | $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev |
|
987 | 971 | (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1) |
|
988 | 972 | adding changesets |
|
989 | 973 | adding manifests |
|
990 | 974 | adding file changes |
|
991 | 975 | added 2 changesets with 2 changes to 1 files |
|
992 | 976 | new changesets b5f04eac9d8f:4a8dc1ab4c13 |
|
993 | 977 | no changes found |
|
994 | 978 | adding remote bookmark head1 |
|
995 | 979 | updating working directory |
|
996 | 980 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
997 | 981 | |
|
998 | 982 | $ hg -R share-1arev log -G |
|
999 | 983 | @ changeset: 1:4a8dc1ab4c13 |
|
1000 | 984 | | bookmark: head1 |
|
1001 | 985 | | tag: tip |
|
1002 | 986 | | user: test |
|
1003 | 987 | | date: Thu Jan 01 00:00:00 1970 +0000 |
|
1004 | 988 | | summary: head1 |
|
1005 | 989 | | |
|
1006 | 990 | o changeset: 0:b5f04eac9d8f |
|
1007 | 991 | user: test |
|
1008 | 992 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
1009 | 993 | summary: initial |
|
1010 | 994 | |
|
1011 | 995 | |
|
1012 | 996 | making another clone should only pull down requested rev |
|
1013 | 997 | |
|
1014 | 998 | $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev |
|
1015 | 999 | (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1) |
|
1016 | 1000 | searching for changes |
|
1017 | 1001 | adding changesets |
|
1018 | 1002 | adding manifests |
|
1019 | 1003 | adding file changes |
|
1020 | 1004 | adding remote bookmark head1 |
|
1021 | 1005 | adding remote bookmark head2 |
|
1022 | 1006 | added 1 changesets with 1 changes to 1 files (+1 heads) |
|
1023 | 1007 | new changesets 99f71071f117 |
|
1024 | 1008 | updating working directory |
|
1025 | 1009 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1026 | 1010 | |
|
1027 | 1011 | $ hg -R share-1brev log -G |
|
1028 | 1012 | @ changeset: 2:99f71071f117 |
|
1029 | 1013 | | bookmark: head2 |
|
1030 | 1014 | | tag: tip |
|
1031 | 1015 | | parent: 0:b5f04eac9d8f |
|
1032 | 1016 | | user: test |
|
1033 | 1017 | | date: Thu Jan 01 00:00:00 1970 +0000 |
|
1034 | 1018 | | summary: head2 |
|
1035 | 1019 | | |
|
1036 | 1020 | | o changeset: 1:4a8dc1ab4c13 |
|
1037 | 1021 | |/ bookmark: head1 |
|
1038 | 1022 | | user: test |
|
1039 | 1023 | | date: Thu Jan 01 00:00:00 1970 +0000 |
|
1040 | 1024 | | summary: head1 |
|
1041 | 1025 | | |
|
1042 | 1026 | o changeset: 0:b5f04eac9d8f |
|
1043 | 1027 | user: test |
|
1044 | 1028 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
1045 | 1029 | summary: initial |
|
1046 | 1030 | |
|
1047 | 1031 | |
|
1048 | 1032 | Request to clone a single branch is respected in sharing mode |
|
1049 | 1033 | |
|
1050 | 1034 | $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1 |
|
1051 | 1035 | (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1) |
|
1052 | 1036 | adding changesets |
|
1053 | 1037 | adding manifests |
|
1054 | 1038 | adding file changes |
|
1055 | 1039 | added 2 changesets with 2 changes to 1 files |
|
1056 | 1040 | new changesets b5f04eac9d8f:5f92a6c1a1b1 |
|
1057 | 1041 | no changes found |
|
1058 | 1042 | updating working directory |
|
1059 | 1043 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1060 | 1044 | |
|
1061 | 1045 | $ hg -R share-1bbranch1 log -G |
|
1062 | 1046 | o changeset: 1:5f92a6c1a1b1 |
|
1063 | 1047 | | branch: branch1 |
|
1064 | 1048 | | tag: tip |
|
1065 | 1049 | | user: test |
|
1066 | 1050 | | date: Thu Jan 01 00:00:00 1970 +0000 |
|
1067 | 1051 | | summary: branch1 |
|
1068 | 1052 | | |
|
1069 | 1053 | @ changeset: 0:b5f04eac9d8f |
|
1070 | 1054 | user: test |
|
1071 | 1055 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
1072 | 1056 | summary: initial |
|
1073 | 1057 | |
|
1074 | 1058 | |
|
1075 | 1059 | $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2 |
|
1076 | 1060 | (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1) |
|
1077 | 1061 | searching for changes |
|
1078 | 1062 | adding changesets |
|
1079 | 1063 | adding manifests |
|
1080 | 1064 | adding file changes |
|
1081 | 1065 | added 1 changesets with 1 changes to 1 files (+1 heads) |
|
1082 | 1066 | new changesets 6bacf4683960 |
|
1083 | 1067 | updating working directory |
|
1084 | 1068 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1085 | 1069 | |
|
1086 | 1070 | $ hg -R share-1bbranch2 log -G |
|
1087 | 1071 | o changeset: 2:6bacf4683960 |
|
1088 | 1072 | | branch: branch2 |
|
1089 | 1073 | | tag: tip |
|
1090 | 1074 | | parent: 0:b5f04eac9d8f |
|
1091 | 1075 | | user: test |
|
1092 | 1076 | | date: Thu Jan 01 00:00:00 1970 +0000 |
|
1093 | 1077 | | summary: branch2 |
|
1094 | 1078 | | |
|
1095 | 1079 | | o changeset: 1:5f92a6c1a1b1 |
|
1096 | 1080 | |/ branch: branch1 |
|
1097 | 1081 | | user: test |
|
1098 | 1082 | | date: Thu Jan 01 00:00:00 1970 +0000 |
|
1099 | 1083 | | summary: branch1 |
|
1100 | 1084 | | |
|
1101 | 1085 | @ changeset: 0:b5f04eac9d8f |
|
1102 | 1086 | user: test |
|
1103 | 1087 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
1104 | 1088 | summary: initial |
|
1105 | 1089 | |
|
1106 | 1090 | |
|
1107 | 1091 | -U is respected in share clone mode |
|
1108 | 1092 | |
|
1109 | 1093 | $ hg --config share.pool=share clone -U source1a share-1anowc |
|
1110 | 1094 | (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1) |
|
1111 | 1095 | searching for changes |
|
1112 | 1096 | no changes found |
|
1113 | 1097 | adding remote bookmark bookA |
|
1114 | 1098 | |
|
1115 | 1099 | $ ls -A share-1anowc |
|
1116 | 1100 | .hg |
|
1117 | 1101 | |
|
1118 | 1102 | Test that auto sharing doesn't cause failure of "hg clone local remote" |
|
1119 | 1103 | |
|
1120 | 1104 | $ cd $TESTTMP |
|
1121 | 1105 | $ hg -R a id -r 0 |
|
1122 | 1106 | acb14030fe0a |
|
1123 | 1107 | $ hg id -R remote -r 0 |
|
1124 | 1108 | abort: repository remote not found |
|
1125 | 1109 | [255] |
|
1126 | 1110 | $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote |
|
1127 | 1111 | $ hg -R remote id -r 0 |
|
1128 | 1112 | acb14030fe0a |
|
1129 | 1113 | |
|
1130 | 1114 | Cloning into pooled storage doesn't race (issue5104) |
|
1131 | 1115 | |
|
1132 | 1116 | $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 & |
|
1133 | 1117 | $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1 |
|
1134 | 1118 | $ wait |
|
1135 | 1119 | |
|
1136 | 1120 | $ hg -R share-destrace1 log -r tip |
|
1137 | 1121 | changeset: 2:e5bfe23c0b47 |
|
1138 | 1122 | bookmark: bookA |
|
1139 | 1123 | tag: tip |
|
1140 | 1124 | user: test |
|
1141 | 1125 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
1142 | 1126 | summary: 1a |
|
1143 | 1127 | |
|
1144 | 1128 | |
|
1145 | 1129 | $ hg -R share-destrace2 log -r tip |
|
1146 | 1130 | changeset: 2:e5bfe23c0b47 |
|
1147 | 1131 | bookmark: bookA |
|
1148 | 1132 | tag: tip |
|
1149 | 1133 | user: test |
|
1150 | 1134 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
1151 | 1135 | summary: 1a |
|
1152 | 1136 | |
|
1153 | 1137 | One repo should be new, the other should be shared from the pool. We |
|
1154 | 1138 | don't care which is which, so we just make sure we always print the |
|
1155 | 1139 | one containing "new pooled" first, then the one containing "existing
|
1156 | 1140 | pooled". |
|
1157 | 1141 | |
|
1158 | 1142 | $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock |
|
1159 | 1143 | (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1) |
|
1160 | 1144 | requesting all changes |
|
1161 | 1145 | adding changesets |
|
1162 | 1146 | adding manifests |
|
1163 | 1147 | adding file changes |
|
1164 | 1148 | added 3 changesets with 3 changes to 1 files |
|
1165 | 1149 | new changesets b5f04eac9d8f:e5bfe23c0b47 |
|
1166 | 1150 | searching for changes |
|
1167 | 1151 | no changes found |
|
1168 | 1152 | adding remote bookmark bookA |
|
1169 | 1153 | updating working directory |
|
1170 | 1154 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1171 | 1155 | |
|
1172 | 1156 | $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock |
|
1173 | 1157 | (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1) |
|
1174 | 1158 | searching for changes |
|
1175 | 1159 | no changes found |
|
1176 | 1160 | adding remote bookmark bookA |
|
1177 | 1161 | updating working directory |
|
1178 | 1162 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1179 | 1163 | |
|
1180 | 1164 | SEC: check for unsafe ssh url |
|
1181 | 1165 | |
|
1182 | 1166 | $ cat >> $HGRCPATH << EOF |
|
1183 | 1167 | > [ui] |
|
1184 | 1168 | > ssh = sh -c "read l; read l; read l" |
|
1185 | 1169 | > EOF |
|
1186 | 1170 | |
|
1187 | 1171 | $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path' |
|
1188 | 1172 | abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path' |
|
1189 | 1173 | [255] |
|
1190 | 1174 | $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path' |
|
1191 | 1175 | abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path' |
|
1192 | 1176 | [255] |
|
1193 | 1177 | $ hg clone 'ssh://fakehost|touch%20owned/path' |
|
1194 | 1178 | abort: no suitable response from remote hg |
|
1195 | 1179 | [255] |
|
1196 | 1180 | $ hg clone 'ssh://fakehost%7Ctouch%20owned/path' |
|
1197 | 1181 | abort: no suitable response from remote hg |
|
1198 | 1182 | [255] |
|
1199 | 1183 | |
|
1200 | 1184 | $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path' |
|
1201 | 1185 | abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path' |
|
1202 | 1186 | [255] |
|
1203 | 1187 | |
|
1204 | 1188 | #if windows |
|
1205 | 1189 | $ hg clone "ssh://%26touch%20owned%20/" --debug |
|
1206 | 1190 | running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio" |
|
1207 | 1191 | sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !) |
|
1208 | 1192 | sending hello command |
|
1209 | 1193 | sending between command |
|
1210 | 1194 | abort: no suitable response from remote hg |
|
1211 | 1195 | [255] |
|
1212 | 1196 | $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug |
|
1213 | 1197 | running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio" |
|
1214 | 1198 | sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !) |
|
1215 | 1199 | sending hello command |
|
1216 | 1200 | sending between command |
|
1217 | 1201 | abort: no suitable response from remote hg |
|
1218 | 1202 | [255] |
|
1219 | 1203 | #else |
|
1220 | 1204 | $ hg clone "ssh://%3btouch%20owned%20/" --debug |
|
1221 | 1205 | running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio' |
|
1222 | 1206 | sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !) |
|
1223 | 1207 | sending hello command |
|
1224 | 1208 | sending between command |
|
1225 | 1209 | abort: no suitable response from remote hg |
|
1226 | 1210 | [255] |
|
1227 | 1211 | $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug |
|
1228 | 1212 | running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio' |
|
1229 | 1213 | sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !) |
|
1230 | 1214 | sending hello command |
|
1231 | 1215 | sending between command |
|
1232 | 1216 | abort: no suitable response from remote hg |
|
1233 | 1217 | [255] |
|
1234 | 1218 | #endif |
|
1235 | 1219 | |
|
1236 | 1220 | $ hg clone "ssh://v-alid.example.com/" --debug |
|
1237 | 1221 | running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re) |
|
1238 | 1222 | sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !) |
|
1239 | 1223 | sending hello command |
|
1240 | 1224 | sending between command |
|
1241 | 1225 | abort: no suitable response from remote hg |
|
1242 | 1226 | [255] |
|
1243 | 1227 | |
|
1244 | 1228 | We should not have created a file named owned - if it exists, the |
|
1245 | 1229 | attack succeeded. |
|
1246 | 1230 | $ if test -f owned; then echo 'you got owned'; fi |
|
1247 | 1231 | |
|
1248 | 1232 | Cloning without fsmonitor enabled does not print a warning for small repos |
|
1249 | 1233 | |
|
1250 | 1234 | $ hg clone a fsmonitor-default |
|
1251 | 1235 | updating to bookmark @ on branch stable |
|
1252 | 1236 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1253 | 1237 | |
|
1254 | 1238 | Lower the warning threshold to simulate a large repo |
|
1255 | 1239 | |
|
1256 | 1240 | $ cat >> $HGRCPATH << EOF |
|
1257 | 1241 | > [fsmonitor] |
|
1258 | 1242 | > warn_update_file_count = 2 |
|
1259 | 1243 | > warn_update_file_count_rust = 2 |
|
1260 | 1244 | > EOF |
|
1261 | 1245 | |
|
1262 | 1246 | We should see a warning about no fsmonitor on supported platforms |
|
1263 | 1247 | |
|
1264 | 1248 | #if linuxormacos no-fsmonitor |
|
1265 | 1249 | $ hg clone a nofsmonitor |
|
1266 | 1250 | updating to bookmark @ on branch stable |
|
1267 | 1251 | (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") |
|
1268 | 1252 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1269 | 1253 | #else |
|
1270 | 1254 | $ hg clone a nofsmonitor |
|
1271 | 1255 | updating to bookmark @ on branch stable |
|
1272 | 1256 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1273 | 1257 | #endif |
|
1274 | 1258 | |
|
1275 | 1259 | We should not see warning about fsmonitor when it is enabled |
|
1276 | 1260 | |
|
1277 | 1261 | #if fsmonitor |
|
1278 | 1262 | $ hg clone a fsmonitor-enabled |
|
1279 | 1263 | updating to bookmark @ on branch stable |
|
1280 | 1264 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1281 | 1265 | #endif |
|
1282 | 1266 | |
|
1283 | 1267 | We can disable the fsmonitor warning |
|
1284 | 1268 | |
|
1285 | 1269 | $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning |
|
1286 | 1270 | updating to bookmark @ on branch stable |
|
1287 | 1271 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1288 | 1272 | |
|
1289 | 1273 | fsmonitor loaded but disabled in config should still print the warning
|
1290 | 1274 | |
|
1291 | 1275 | #if linuxormacos fsmonitor |
|
1292 | 1276 | $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off |
|
1293 | 1277 | updating to bookmark @ on branch stable |
|
1294 | 1278 | (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !) |
|
1295 | 1279 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1296 | 1280 | #endif |
|
1297 | 1281 | |
|
1298 | 1282 | Warning not printed if working directory isn't empty |
|
1299 | 1283 | |
|
1300 | 1284 | $ hg -q clone a fsmonitor-update |
|
1301 | 1285 | (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?) |
|
1302 | 1286 | $ cd fsmonitor-update |
|
1303 | 1287 | $ hg up acb14030fe0a |
|
1304 | 1288 | 1 files updated, 0 files merged, 2 files removed, 0 files unresolved |
|
1305 | 1289 | (leaving bookmark @) |
|
1306 | 1290 | $ hg up cf0fe1914066 |
|
1307 | 1291 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1308 | 1292 | |
|
1309 | 1293 | `hg update` from the null revision also prints the warning
|
1310 | 1294 | |
|
1311 | 1295 | $ hg up null |
|
1312 | 1296 | 0 files updated, 0 files merged, 2 files removed, 0 files unresolved |
|
1313 | 1297 | |
|
1314 | 1298 | #if linuxormacos no-fsmonitor |
|
1315 | 1299 | $ hg up cf0fe1914066 |
|
1316 | 1300 | (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") |
|
1317 | 1301 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1318 | 1302 | #else |
|
1319 | 1303 | $ hg up cf0fe1914066 |
|
1320 | 1304 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1321 | 1305 | #endif |
|
1322 | 1306 | |
|
1323 | 1307 | $ cd .. |
|
1324 | 1308 |
@@ -1,57 +1,58 b''
|
1 | 1 | Create an empty repo: |
|
2 | 2 | |
|
3 | 3 | $ hg init a |
|
4 | 4 | $ cd a |
|
5 | 5 | |
|
6 | 6 | Try some commands: |
|
7 | 7 | |
|
8 | 8 | $ hg log |
|
9 | 9 | $ hg grep wah |
|
10 | 10 | [1] |
|
11 | 11 | $ hg manifest |
|
12 | 12 | $ hg verify |
|
13 | 13 | checking changesets |
|
14 | 14 | checking manifests |
|
15 | 15 | crosschecking files in changesets and manifests |
|
16 | 16 | checking files |
|
17 | 17 | checked 0 changesets with 0 changes to 0 files |
|
18 | 18 | |
|
19 | 19 | Check the basic files created: |
|
20 | 20 | |
|
21 | 21 | $ ls .hg |
|
22 | 22 | 00changelog.i |
|
23 | 23 | cache |
|
24 | 24 | requires |
|
25 | 25 | store |
|
26 | 26 | wcache |
|
27 | 27 | |
|
28 | 28 | Should be empty: |
|
29 | 29 | |
|
30 | 30 | $ ls .hg/store |
|
31 | 31 | |
|
32 | 32 | Poke at a clone: |
|
33 | 33 | |
|
34 | 34 | $ cd .. |
|
35 | 35 | $ hg clone a b |
|
36 | 36 | updating to branch default |
|
37 | 37 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
38 | 38 | $ cd b |
|
39 | 39 | $ hg verify |
|
40 | 40 | checking changesets |
|
41 | 41 | checking manifests |
|
42 | 42 | crosschecking files in changesets and manifests |
|
43 | 43 | checking files |
|
44 | 44 | checked 0 changesets with 0 changes to 0 files |
|
45 | 45 | $ ls .hg |
|
46 | 46 | 00changelog.i |
|
47 | 47 | cache |
|
48 | dirstate | |
|
48 | 49 | hgrc |
|
49 | 50 | requires |
|
50 | 51 | store |
|
51 | 52 | wcache |
|
52 | 53 | |
|
53 | 54 | Should be empty: |
|
54 | 55 | |
|
55 | 56 | $ ls .hg/store |
|
56 | 57 | |
|
57 | 58 | $ cd .. |
@@ -1,545 +1,549 b''
|
1 | 1 | #require repofncache |
|
2 | 2 | |
|
3 | 3 | An extension which sets the fncache chunksize to 1 byte to make sure that logic
|
4 | 4 | does not break |
|
5 | 5 | |
|
6 | 6 | $ cat > chunksize.py <<EOF |
|
7 | 7 | > from __future__ import absolute_import |
|
8 | 8 | > from mercurial import store |
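> # write the fncache in one-byte chunks to exercise the chunked-write logic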
|
9 | 9 | > store.fncache_chunksize = 1 |
|
10 | 10 | > EOF |
|
11 | 11 | |
|
12 | 12 | $ cat >> $HGRCPATH <<EOF |
|
13 | 13 | > [extensions] |
|
14 | 14 | > chunksize = $TESTTMP/chunksize.py |
|
15 | 15 | > EOF |
|
16 | 16 | |
|
17 | 17 | Init repo1: |
|
18 | 18 | |
|
19 | 19 | $ hg init repo1 |
|
20 | 20 | $ cd repo1 |
|
21 | 21 | $ echo "some text" > a |
|
22 | 22 | $ hg add |
|
23 | 23 | adding a |
|
24 | 24 | $ hg ci -m first |
|
25 | 25 | $ cat .hg/store/fncache | sort |
|
26 | 26 | data/a.i |
|
27 | 27 | |
|
28 | 28 | Testing a.i/b: |
|
29 | 29 | |
|
30 | 30 | $ mkdir a.i |
|
31 | 31 | $ echo "some other text" > a.i/b |
|
32 | 32 | $ hg add |
|
33 | 33 | adding a.i/b |
|
34 | 34 | $ hg ci -m second |
|
35 | 35 | $ cat .hg/store/fncache | sort |
|
36 | 36 | data/a.i |
|
37 | 37 | data/a.i.hg/b.i |
|
38 | 38 | |
|
39 | 39 | Testing a.i.hg/c: |
|
40 | 40 | |
|
41 | 41 | $ mkdir a.i.hg |
|
42 | 42 | $ echo "yet another text" > a.i.hg/c |
|
43 | 43 | $ hg add |
|
44 | 44 | adding a.i.hg/c |
|
45 | 45 | $ hg ci -m third |
|
46 | 46 | $ cat .hg/store/fncache | sort |
|
47 | 47 | data/a.i |
|
48 | 48 | data/a.i.hg.hg/c.i |
|
49 | 49 | data/a.i.hg/b.i |
|
50 | 50 | |
|
51 | 51 | Testing verify: |
|
52 | 52 | |
|
53 | 53 | $ hg verify |
|
54 | 54 | checking changesets |
|
55 | 55 | checking manifests |
|
56 | 56 | crosschecking files in changesets and manifests |
|
57 | 57 | checking files |
|
58 | 58 | checked 3 changesets with 3 changes to 3 files |
|
59 | 59 | |
|
60 | 60 | $ rm .hg/store/fncache |
|
61 | 61 | |
|
62 | 62 | $ hg verify |
|
63 | 63 | checking changesets |
|
64 | 64 | checking manifests |
|
65 | 65 | crosschecking files in changesets and manifests |
|
66 | 66 | checking files |
|
67 | 67 | warning: revlog 'data/a.i' not in fncache! |
|
68 | 68 | warning: revlog 'data/a.i.hg/c.i' not in fncache! |
|
69 | 69 | warning: revlog 'data/a.i/b.i' not in fncache! |
|
70 | 70 | checked 3 changesets with 3 changes to 3 files |
|
71 | 71 | 3 warnings encountered! |
|
72 | 72 | hint: run "hg debugrebuildfncache" to recover from corrupt fncache |
|
73 | 73 | |
|
74 | 74 | Follow the hint to make sure it works |
|
75 | 75 | |
|
76 | 76 | $ hg debugrebuildfncache |
|
77 | 77 | adding data/a.i |
|
78 | 78 | adding data/a.i.hg/c.i |
|
79 | 79 | adding data/a.i/b.i |
|
80 | 80 | 3 items added, 0 removed from fncache |
|
81 | 81 | |
|
82 | 82 | $ hg verify |
|
83 | 83 | checking changesets |
|
84 | 84 | checking manifests |
|
85 | 85 | crosschecking files in changesets and manifests |
|
86 | 86 | checking files |
|
87 | 87 | checked 3 changesets with 3 changes to 3 files |
|
88 | 88 | |
|
89 | 89 | $ cd .. |
|
90 | 90 | |
|
91 | 91 | Non-store repo:
|
92 | 92 | |
|
93 | 93 | $ hg --config format.usestore=False init foo |
|
94 | 94 | $ cd foo |
|
95 | 95 | $ mkdir tst.d |
|
96 | 96 | $ echo foo > tst.d/foo |
|
97 | 97 | $ hg ci -Amfoo |
|
98 | 98 | adding tst.d/foo |
|
99 | 99 | $ find .hg | sort |
|
100 | 100 | .hg |
|
101 | 101 | .hg/00changelog.i |
|
102 | 102 | .hg/00manifest.i |
|
103 | 103 | .hg/cache |
|
104 | 104 | .hg/cache/branch2-served |
|
105 | 105 | .hg/cache/rbc-names-v1 |
|
106 | 106 | .hg/cache/rbc-revs-v1 |
|
107 | 107 | .hg/data |
|
108 | 108 | .hg/data/tst.d.hg |
|
109 | 109 | .hg/data/tst.d.hg/foo.i |
|
110 | 110 | .hg/dirstate |
|
111 | 111 | .hg/fsmonitor.state (fsmonitor !) |
|
112 | 112 | .hg/last-message.txt |
|
113 | 113 | .hg/phaseroots |
|
114 | 114 | .hg/requires |
|
115 | 115 | .hg/undo |
|
116 | 116 | .hg/undo.backup.dirstate |
|
117 | 117 | .hg/undo.backupfiles |
|
118 | 118 | .hg/undo.bookmarks |
|
119 | 119 | .hg/undo.branch |
|
120 | 120 | .hg/undo.desc |
|
121 | 121 | .hg/undo.dirstate |
|
122 | 122 | .hg/undo.phaseroots |
|
123 | 123 | .hg/wcache |
|
124 | 124 | .hg/wcache/checkisexec (execbit !) |
|
125 | 125 | .hg/wcache/checklink (symlink !) |
|
126 | 126 | .hg/wcache/checklink-target (symlink !) |
|
127 | 127 | .hg/wcache/manifestfulltextcache (reporevlogstore !) |
|
128 | 128 | $ cd .. |
|
129 | 129 | |
|
130 | 130 | Non-fncache repo:
|
131 | 131 | |
|
132 | 132 | $ hg --config format.usefncache=False init bar |
|
133 | 133 | $ cd bar |
|
134 | 134 | $ mkdir tst.d |
|
135 | 135 | $ echo foo > tst.d/Foo |
|
136 | 136 | $ hg ci -Amfoo |
|
137 | 137 | adding tst.d/Foo |
|
138 | 138 | $ find .hg | sort |
|
139 | 139 | .hg |
|
140 | 140 | .hg/00changelog.i |
|
141 | 141 | .hg/cache |
|
142 | 142 | .hg/cache/branch2-served |
|
143 | 143 | .hg/cache/rbc-names-v1 |
|
144 | 144 | .hg/cache/rbc-revs-v1 |
|
145 | 145 | .hg/dirstate |
|
146 | 146 | .hg/fsmonitor.state (fsmonitor !) |
|
147 | 147 | .hg/last-message.txt |
|
148 | 148 | .hg/requires |
|
149 | 149 | .hg/store |
|
150 | 150 | .hg/store/00changelog.i |
|
151 | 151 | .hg/store/00manifest.i |
|
152 | 152 | .hg/store/data |
|
153 | 153 | .hg/store/data/tst.d.hg |
|
154 | 154 | .hg/store/data/tst.d.hg/_foo.i |
|
155 | 155 | .hg/store/phaseroots |
|
156 | 156 | .hg/store/undo |
|
157 | 157 | .hg/store/undo.backupfiles |
|
158 | 158 | .hg/store/undo.phaseroots |
|
159 | 159 | .hg/undo.backup.dirstate |
|
160 | 160 | .hg/undo.bookmarks |
|
161 | 161 | .hg/undo.branch |
|
162 | 162 | .hg/undo.desc |
|
163 | 163 | .hg/undo.dirstate |
|
164 | 164 | .hg/wcache |
|
165 | 165 | .hg/wcache/checkisexec (execbit !) |
|
166 | 166 | .hg/wcache/checklink (symlink !) |
|
167 | 167 | .hg/wcache/checklink-target (symlink !) |
|
168 | 168 | .hg/wcache/manifestfulltextcache (reporevlogstore !) |
|
169 | 169 | $ cd .. |
|
170 | 170 | |
|
171 | 171 | Encoding of reserved / long paths in the store |
|
172 | 172 | |
|
173 | 173 | $ hg init r2 |
|
174 | 174 | $ cd r2 |
|
175 | 175 | $ cat <<EOF > .hg/hgrc |
|
176 | 176 | > [ui] |
|
177 | 177 | > portablefilenames = ignore |
|
178 | 178 | > EOF |
|
179 | 179 | |
|
180 | 180 | $ hg import -q --bypass - <<EOF |
|
181 | 181 | > # HG changeset patch |
|
182 | 182 | > # User test |
|
183 | 183 | > # Date 0 0 |
|
184 | 184 | > # Node ID 1c7a2f7cb77be1a0def34e4c7cabc562ad98fbd7 |
|
185 | 185 | > # Parent 0000000000000000000000000000000000000000 |
|
186 | 186 | > 1 |
|
187 | 187 | > |
|
188 | 188 | > diff --git a/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz |
|
189 | 189 | > new file mode 100644 |
|
190 | 190 | > --- /dev/null |
|
191 | 191 | > +++ b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz |
|
192 | 192 | > @@ -0,0 +1,1 @@ |
|
193 | 193 | > +foo |
|
194 | 194 | > diff --git a/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT |
|
195 | 195 | > new file mode 100644 |
|
196 | 196 | > --- /dev/null |
|
197 | 197 | > +++ b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT |
|
198 | 198 | > @@ -0,0 +1,1 @@ |
|
199 | 199 | > +foo |
|
200 | 200 | > diff --git a/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt |
|
201 | 201 | > new file mode 100644 |
|
202 | 202 | > --- /dev/null |
|
203 | 203 | > +++ b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt |
|
204 | 204 | > @@ -0,0 +1,1 @@ |
|
205 | 205 | > +foo |
|
206 | 206 | > diff --git a/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c |
|
207 | 207 | > new file mode 100644 |
|
208 | 208 | > --- /dev/null |
|
209 | 209 | > +++ b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c |
|
210 | 210 | > @@ -0,0 +1,1 @@ |
|
211 | 211 | > +foo |
|
212 | 212 | > diff --git a/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider |
|
213 | 213 | > new file mode 100644 |
|
214 | 214 | > --- /dev/null |
|
215 | 215 | > +++ b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider |
|
216 | 216 | > @@ -0,0 +1,1 @@ |
|
217 | 217 | > +foo |
|
218 | 218 | > EOF |
|
219 | 219 | |
|
220 | 220 | $ find .hg/store -name *.i | sort |
|
221 | 221 | .hg/store/00changelog.i |
|
222 | 222 | .hg/store/00manifest.i |
|
223 | 223 | .hg/store/data/bla.aux/pr~6e/_p_r_n/lpt/co~6d3/nu~6c/coma/foo._n_u_l/normal.c.i |
|
224 | 224 | .hg/store/dh/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxx168e07b38e65eff86ab579afaaa8e30bfbe0f35f.i |
|
225 | 225 | .hg/store/dh/au~78/second/x.prn/fourth/fi~3afth/sixth/seventh/eighth/nineth/tenth/loremia20419e358ddff1bf8751e38288aff1d7c32ec05.i |
|
226 | 226 | .hg/store/dh/enterpri/openesba/contrib-/corba-bc/netbeans/wsdlexte/src/main/java/org.net7018f27961fdf338a598a40c4683429e7ffb9743.i |
|
227 | 227 | .hg/store/dh/project_/resource/anotherl/followed/andanoth/andthenanextremelylongfilename0d8e1f4187c650e2f1fdca9fd90f786bc0976b6b.i |
|
228 | 228 | |
|
229 | 229 | $ cd .. |
|
230 | 230 | |
|
231 | 231 | Aborting lock does not prevent fncache writes |
|
232 | 232 | |
|
233 | 233 | $ cat > exceptionext.py <<EOF |
|
234 | 234 | > from __future__ import absolute_import |
|
235 | 235 | > import os |
|
236 | 236 | > from mercurial import commands, error, extensions |
|
237 | 237 | > |
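> # wrap repo._lock() so that releasing the acquired lock raises an Abort,
> # simulating a lock failure at release time; the test checks that fncache
> # writes are not lost because of it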
|
238 | 238 | > def lockexception(orig, vfs, lockname, wait, releasefn, *args, **kwargs): |
|
239 | 239 | > def releasewrap(): |
|
240 | 240 | > l.held = False # ensure __del__ is a noop |
|
241 | 241 | > raise error.Abort(b"forced lock failure") |
|
242 | 242 | > l = orig(vfs, lockname, wait, releasewrap, *args, **kwargs) |
|
243 | 243 | > return l |
|
244 | 244 | > |
|
245 | 245 | > def reposetup(ui, repo): |
|
246 | 246 | > extensions.wrapfunction(repo, '_lock', lockexception) |
|
247 | 247 | > |
|
248 | 248 | > cmdtable = {} |
|
249 | 249 | > |
|
250 | 250 | > # wrap "commit" command to prevent wlock from being '__del__()'-ed |
|
251 | 251 | > # at the end of dispatching (for intentional "forced lock failure")
|
252 | 252 | > def commitwrap(orig, ui, repo, *pats, **opts): |
|
253 | 253 | > repo = repo.unfiltered() # to use replaced repo._lock certainly |
|
254 | 254 | > wlock = repo.wlock() |
|
255 | 255 | > try: |
|
256 | 256 | > return orig(ui, repo, *pats, **opts) |
|
257 | 257 | > finally: |
|
258 | 258 | > # multiple 'release()' calls are needed to fully release wlock,
|
259 | 259 | > # because the "forced" abort at the final store-lock release
|
260 | 260 | > # prevents wlock from being released by the same 'lockmod.release()'
|
261 | 261 | > for i in range(wlock.held): |
|
262 | 262 | > wlock.release() |
|
263 | 263 | > |
|
264 | 264 | > def extsetup(ui): |
|
265 | 265 | > extensions.wrapcommand(commands.table, b"commit", commitwrap) |
|
266 | 266 | > EOF |
|
267 | 267 | $ extpath=`pwd`/exceptionext.py |
|
268 | 268 | $ hg init fncachetxn |
|
269 | 269 | $ cd fncachetxn |
|
270 | 270 | $ printf "[extensions]\nexceptionext=$extpath\n" >> .hg/hgrc |
|
271 | 271 | $ touch y |
|
272 | 272 | $ hg ci -qAm y |
|
273 | 273 | abort: forced lock failure |
|
274 | 274 | [255] |
|
275 | 275 | $ cat .hg/store/fncache |
|
276 | 276 | data/y.i |
|
277 | 277 | |
|
278 | 278 | Aborting transaction prevents fncache change |
|
279 | 279 | |
|
280 | 280 | $ cat > ../exceptionext.py <<EOF |
|
281 | 281 | > from __future__ import absolute_import |
|
282 | 282 | > import os |
|
283 | 283 | > from mercurial import commands, error, extensions, localrepo |
|
284 | 284 | > |
|
285 | 285 | > def wrapper(orig, self, *args, **kwargs): |
|
286 | 286 | > tr = orig(self, *args, **kwargs) |
|
287 | 287 | > def fail(tr): |
|
288 | 288 | > raise error.Abort(b"forced transaction failure") |
|
289 | 289 | > # zzz prefix to ensure it sorts after store.write
|
290 | 290 | > tr.addfinalize(b'zzz-forcefails', fail) |
|
291 | 291 | > return tr |
|
292 | 292 | > |
|
293 | 293 | > def uisetup(ui): |
|
294 | 294 | > extensions.wrapfunction( |
|
295 | 295 | > localrepo.localrepository, b'transaction', wrapper) |
|
296 | 296 | > |
|
297 | 297 | > cmdtable = {} |
|
298 | 298 | > |
|
299 | 299 | > EOF |
|
300 | 300 | |
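The 'zzz-' trick above relies on the transaction running its finalize callbacks in an order derived from their category names, so this one fires after the store has queued its own work but before the transaction is committed. A toy model of that ordering assumption (hypothetical class, not Mercurial's transaction):

    class toytransaction:
        def __init__(self):
            self._finalizers = {}  # category name -> callback

        def addfinalize(self, category, callback):
            self._finalizers[category] = callback

        def close(self):
            # run callbacks in category-name order; a b'zzz-' prefix therefore
            # sorts (and runs) after categories registered by the store itself
            for category in sorted(self._finalizers):
                self._finalizers[category](self)
            # a callback raising here aborts the close, which is what the test
            # uses to force "transaction abort! / rollback completed"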
|
301 | 301 | Clean cached version |
|
302 | 302 | $ rm -f "${extpath}c" |
|
303 | 303 | $ rm -Rf "`dirname $extpath`/__pycache__" |
|
304 | 304 | |
|
305 | 305 | $ touch z |
|
306 | 306 | $ hg ci -qAm z |
|
307 | 307 | transaction abort! |
|
308 | 308 | rollback completed |
|
309 | 309 | abort: forced transaction failure |
|
310 | 310 | [255] |
|
311 | 311 | $ cat .hg/store/fncache |
|
312 | 312 | data/y.i |
|
313 | 313 | |
|
314 | 314 | Aborted transactions can be recovered later |
|
315 | 315 | |
|
316 | 316 | $ cat > ../exceptionext.py <<EOF |
|
317 | 317 | > from __future__ import absolute_import |
|
318 | 318 | > import os |
|
319 | 319 | > from mercurial import ( |
|
320 | 320 | > commands, |
|
321 | 321 | > error, |
|
322 | 322 | > extensions, |
|
323 | 323 | > localrepo, |
|
324 | 324 | > transaction, |
|
325 | 325 | > ) |
|
326 | 326 | > |
|
327 | 327 | > def trwrapper(orig, self, *args, **kwargs): |
|
328 | 328 | > tr = orig(self, *args, **kwargs) |
|
329 | 329 | > def fail(tr): |
|
330 | 330 | > raise error.Abort(b"forced transaction failure") |
|
331 | 331 | > # zzz prefix to ensure it is sorted after store.write
|
332 | 332 | > tr.addfinalize(b'zzz-forcefails', fail) |
|
333 | 333 | > return tr |
|
334 | 334 | > |
|
335 | 335 | > def abortwrapper(orig, self, *args, **kwargs): |
|
336 | 336 | > raise error.Abort(b"forced transaction failure") |
|
337 | 337 | > |
|
338 | 338 | > def uisetup(ui): |
|
339 | 339 | > extensions.wrapfunction(localrepo.localrepository, 'transaction', |
|
340 | 340 | > trwrapper) |
|
341 | 341 | > extensions.wrapfunction(transaction.transaction, '_abort', |
|
342 | 342 | > abortwrapper) |
|
343 | 343 | > |
|
344 | 344 | > cmdtable = {} |
|
345 | 345 | > |
|
346 | 346 | > EOF |
|
347 | 347 | |
|
348 | 348 | Clean cached versions |
|
349 | 349 | $ rm -f "${extpath}c" |
|
350 | 350 | $ rm -Rf "`dirname $extpath`/__pycache__" |
|
351 | 351 | |
|
352 | 352 | $ hg up -q 1 |
|
353 | 353 | $ touch z |
|
354 | 354 | $ hg ci -qAm z 2>/dev/null |
|
355 | 355 | [255] |
|
356 | 356 | $ cat .hg/store/fncache | sort |
|
357 | 357 | data/y.i |
|
358 | 358 | data/z.i |
|
359 | 359 | $ hg recover --verify |
|
360 | 360 | rolling back interrupted transaction |
|
361 | 361 | checking changesets |
|
362 | 362 | checking manifests |
|
363 | 363 | crosschecking files in changesets and manifests |
|
364 | 364 | checking files |
|
365 | 365 | checked 1 changesets with 1 changes to 1 files |
|
366 | 366 | $ cat .hg/store/fncache |
|
367 | 367 | data/y.i |
|
368 | 368 | |
|
369 | 369 | $ cd .. |
|
370 | 370 | |
|
371 | 371 | debugrebuildfncache does nothing unless repo has fncache requirement |
|
372 | 372 | |
|
373 | 373 | $ hg --config format.usefncache=false init nofncache |
|
374 | 374 | $ cd nofncache |
|
375 | 375 | $ hg debugrebuildfncache |
|
376 | 376 | (not rebuilding fncache because repository does not support fncache) |
|
377 | 377 | |
|
378 | 378 | $ cd .. |
|
379 | 379 | |
|
380 | 380 | debugrebuildfncache works on empty repository |
|
381 | 381 | |
|
382 | 382 | $ hg init empty |
|
383 | 383 | $ cd empty |
|
384 | 384 | $ hg debugrebuildfncache |
|
385 | 385 | fncache already up to date |
|
386 | 386 | $ cd .. |
|
387 | 387 | |
|
388 | 388 | debugrebuildfncache on an up to date repository no-ops |
|
389 | 389 | |
|
390 | 390 | $ hg init repo |
|
391 | 391 | $ cd repo |
|
392 | 392 | $ echo initial > foo |
|
393 | 393 | $ echo initial > .bar |
|
394 | 394 | $ hg commit -A -m initial |
|
395 | 395 | adding .bar |
|
396 | 396 | adding foo |
|
397 | 397 | |
|
398 | 398 | $ cat .hg/store/fncache | sort |
|
399 | 399 | data/.bar.i |
|
400 | 400 | data/foo.i |
|
401 | 401 | |
|
402 | 402 | $ hg debugrebuildfncache |
|
403 | 403 | fncache already up to date |
|
404 | 404 | |
|
405 | 405 | debugrebuildfncache restores deleted fncache file |
|
406 | 406 | |
|
407 | 407 | $ rm -f .hg/store/fncache |
|
408 | 408 | $ hg debugrebuildfncache |
|
409 | 409 | adding data/.bar.i |
|
410 | 410 | adding data/foo.i |
|
411 | 411 | 2 items added, 0 removed from fncache |
|
412 | 412 | |
|
413 | 413 | $ cat .hg/store/fncache | sort |
|
414 | 414 | data/.bar.i |
|
415 | 415 | data/foo.i |
|
416 | 416 | |
|
417 | 417 | Rebuild after rebuild should no-op |
|
418 | 418 | |
|
419 | 419 | $ hg debugrebuildfncache |
|
420 | 420 | fncache already up to date |
|
421 | 421 | |
|
422 | 422 | A single missing file should get restored, an extra file should be removed |
|
423 | 423 | |
|
424 | 424 | $ cat > .hg/store/fncache << EOF |
|
425 | 425 | > data/foo.i |
|
426 | 426 | > data/bad-entry.i |
|
427 | 427 | > EOF |
|
428 | 428 | |
|
429 | 429 | $ hg debugrebuildfncache |
|
430 | 430 | removing data/bad-entry.i |
|
431 | 431 | adding data/.bar.i |
|
432 | 432 | 1 items added, 1 removed from fncache |
|
433 | 433 | |
|
434 | 434 | $ cat .hg/store/fncache | sort |
|
435 | 435 | data/.bar.i |
|
436 | 436 | data/foo.i |
|
437 | 437 | |
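The repair shown just above boils down to a set comparison between what the fncache records and what actually exists in the store. A minimal sketch of that idea (hypothetical helper, not the actual debugrebuildfncache code):

    def rebuild(recorded, actual):
        """Return (toadd, toremove) needed to make `recorded` match `actual`."""
        recorded = set(recorded)
        actual = set(actual)
        toadd = actual - recorded      # files present in the store but not listed
        toremove = recorded - actual   # stale entries pointing at nothing
        return sorted(toadd), sorted(toremove)

    # e.g. rebuild([b'data/foo.i', b'data/bad-entry.i'],
    #              [b'data/foo.i', b'data/.bar.i'])
    # -> ([b'data/.bar.i'], [b'data/bad-entry.i'])

This matches the output above: one item added, one removed.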
|
438 | 438 | debugrebuildfncache recovers from truncated line in fncache |
|
439 | 439 | |
|
440 | 440 | $ printf a > .hg/store/fncache |
|
441 | 441 | $ hg debugrebuildfncache |
|
442 | 442 | fncache does not ends with a newline |
|
443 | 443 | adding data/.bar.i |
|
444 | 444 | adding data/foo.i |
|
445 | 445 | 2 items added, 0 removed from fncache |
|
446 | 446 | |
|
447 | 447 | $ cat .hg/store/fncache | sort |
|
448 | 448 | data/.bar.i |
|
449 | 449 | data/foo.i |
|
450 | 450 | |
|
451 | 451 | $ cd .. |
|
452 | 452 | |
|
453 | 453 | Try a simple variation without dotencode to ensure fncache is ignorant of encoding |
|
454 | 454 | |
|
455 | 455 | $ hg --config format.dotencode=false init nodotencode |
|
456 | 456 | $ cd nodotencode |
|
457 | 457 | $ echo initial > foo |
|
458 | 458 | $ echo initial > .bar |
|
459 | 459 | $ hg commit -A -m initial |
|
460 | 460 | adding .bar |
|
461 | 461 | adding foo |
|
462 | 462 | |
|
463 | 463 | $ cat .hg/store/fncache | sort |
|
464 | 464 | data/.bar.i |
|
465 | 465 | data/foo.i |
|
466 | 466 | |
|
467 | 467 | $ rm .hg/store/fncache |
|
468 | 468 | $ hg debugrebuildfncache |
|
469 | 469 | adding data/.bar.i |
|
470 | 470 | adding data/foo.i |
|
471 | 471 | 2 items added, 0 removed from fncache |
|
472 | 472 | |
|
473 | 473 | $ cat .hg/store/fncache | sort |
|
474 | 474 | data/.bar.i |
|
475 | 475 | data/foo.i |
|
476 | 476 | |
|
477 | 477 | $ cd .. |
|
478 | 478 | |
|
479 | 479 | In repositories that have accumulated a large number of files over time, the

480 | 480 | fncache file is going to be large. If we can avoid loading it at all, so much the better.

481 | 481 | The cache should not be loaded when committing changes to existing files, or when unbundling

482 | 482 | changesets that only contain changes to existing files:
|
483 | 483 | |
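The extension below warns whenever a load actually happens, so the lazy behaviour can be asserted in the transcript. For orientation, the laziness itself can be pictured as a set that only reads its backing file on first use; this is an illustrative approximation, not Mercurial's fncache class (the file name and structure here are hypothetical):

    import os

    class lazyfileset(object):
        """A set of names backed by a file, read only on first access."""

        def __init__(self, path):
            self.path = path
            self._entries = None  # not loaded yet

        def _load(self):
            if self._entries is None:
                self._entries = set()
                if os.path.exists(self.path):
                    with open(self.path, 'rb') as fp:
                        self._entries.update(fp.read().splitlines())

        def __contains__(self, name):
            self._load()          # a membership test forces a load
            return name in self._entries

        def add(self, name):
            self._load()          # adding a new name forces a load too
            self._entries.add(name)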
|
484 | 484 | $ cat > fncacheloadwarn.py << EOF |
|
485 | 485 | > from __future__ import absolute_import |
|
486 | 486 | > from mercurial import extensions, localrepo |
|
487 | 487 | > |
|
488 | 488 | > def extsetup(ui): |
|
489 | 489 | > def wrapstore(orig, requirements, *args): |
|
490 | 490 | > store = orig(requirements, *args) |
|
491 | 491 | > if b'store' in requirements and b'fncache' in requirements: |
|
492 | 492 | > instrumentfncachestore(store, ui) |
|
493 | 493 | > return store |
|
494 | 494 | > extensions.wrapfunction(localrepo, 'makestore', wrapstore) |
|
495 | 495 | > |
|
496 | 496 | > def instrumentfncachestore(fncachestore, ui): |
|
497 | 497 | > class instrumentedfncache(type(fncachestore.fncache)): |
|
498 | 498 | > def _load(self): |
|
499 | 499 | > ui.warn(b'fncache load triggered!\n') |
|
500 | 500 | > super(instrumentedfncache, self)._load() |
|
501 | 501 | > fncachestore.fncache.__class__ = instrumentedfncache |
|
502 | 502 | > EOF |
|
503 | 503 | |
|
504 | 504 | $ fncachextpath=`pwd`/fncacheloadwarn.py |
|
505 | 505 | $ hg init nofncacheload |
|
506 | 506 | $ cd nofncacheload |
|
507 | 507 | $ printf "[extensions]\nfncacheloadwarn=$fncachextpath\n" >> .hg/hgrc |
|
508 | 508 | |
|
509 | 509 | A new file should trigger a load, as we'd want to update the fncache set in that case: |
|
510 | 510 | |
|
511 | 511 | $ touch foo |
|
512 | 512 | $ hg ci -qAm foo |
|
513 | 513 | fncache load triggered! |
|
514 | 514 | |
|
515 | 515 | But modifying that file should not: |
|
516 | 516 | |
|
517 | 517 | $ echo bar >> foo |
|
518 | 518 | $ hg ci -qm foo |
|
519 | 519 | |
|
520 | 520 | If a transaction has been aborted, the zero-size truncated index file will |
|
521 | 521 | not prevent the fncache from being loaded; rather than actually abort |
|
522 | 522 | a transaction, we simulate the situation by creating a zero-size index file: |
|
523 | 523 | |
|
524 | 524 | $ touch .hg/store/data/bar.i |
|
525 | 525 | $ touch bar |
|
526 | 526 | $ hg ci -qAm bar |
|
527 | 527 | fncache load triggered! |
|
528 | 528 | |
|
529 | 529 | Unbundling should follow the same rules; existing files should not cause a load: |
|
530 | 530 | |
|
531 | (loading during the clone is expected) | |
|
531 | 532 | $ hg clone -q . tobundle |
|
533 | fncache load triggered! | |
|
534 | fncache load triggered! | |
|
535 | ||
|
532 | 536 | $ echo 'new line' > tobundle/bar |
|
533 | 537 | $ hg -R tobundle ci -qm bar |
|
534 | 538 | $ hg -R tobundle bundle -q barupdated.hg |
|
535 | 539 | $ hg unbundle -q barupdated.hg |
|
536 | 540 | |
|
537 | 541 | but adding new files should: |
|
538 | 542 | |
|
539 | 543 | $ touch tobundle/newfile |
|
540 | 544 | $ hg -R tobundle ci -qAm newfile |
|
541 | 545 | $ hg -R tobundle bundle -q newfile.hg |
|
542 | 546 | $ hg unbundle -q newfile.hg |
|
543 | 547 | fncache load triggered! |
|
544 | 548 | |
|
545 | 549 | $ cd .. |
@@ -1,445 +1,445 b'' | |||
|
1 | 1 | #require hardlink reporevlogstore |
|
2 | 2 | |
|
3 | 3 | $ cat > nlinks.py <<EOF |
|
4 | 4 | > from __future__ import print_function |
|
5 | 5 | > import sys |
|
6 | 6 | > from mercurial import pycompat, util |
|
7 | 7 | > for f in sorted(sys.stdin.readlines()): |
|
8 | 8 | > f = f[:-1] |
|
9 | 9 | > print(util.nlinks(pycompat.fsencode(f)), f) |
|
10 | 10 | > EOF |
|
11 | 11 | |
|
12 | 12 | $ nlinksdir() |
|
13 | 13 | > { |
|
14 | 14 | > find "$@" -type f | "$PYTHON" $TESTTMP/nlinks.py |
|
15 | 15 | > } |
|
16 | 16 | |
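util.nlinks reports the hard link count of each file; on POSIX platforms this is essentially the st_nlink field from lstat (stated here as an assumption about the helper, not a guarantee):

    import os

    def nlinks(path):
        """Hard link count for path; 1 means the file is not hardlinked elsewhere."""
        return os.lstat(path).st_nlink

A count of 2 in the listings below therefore means the store file is shared with another repository.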
|
17 | 17 | Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux): |
|
18 | 18 | |
|
19 | 19 | $ cat > linkcp.py <<EOF |
|
20 | 20 | > from __future__ import absolute_import |
|
21 | 21 | > import sys |
|
22 | 22 | > from mercurial import pycompat, util |
|
23 | 23 | > util.copyfiles(pycompat.fsencode(sys.argv[1]), |
|
24 | 24 | > pycompat.fsencode(sys.argv[2]), hardlink=True) |
|
25 | 25 | > EOF |
|
26 | 26 | |
|
27 | 27 | $ linkcp() |
|
28 | 28 | > { |
|
29 | 29 | > "$PYTHON" $TESTTMP/linkcp.py $1 $2 |
|
30 | 30 | > } |
|
31 | 31 | |
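util.copyfiles(..., hardlink=True) mirrors a tree while hardlinking regular files where the platform allows it. A simplified sketch of the same idea using only the standard library (illustrative, not the util.copyfiles implementation):

    import os
    import shutil

    def linktree(src, dst):
        """Mirror src into dst, hardlinking files when possible."""
        for root, _dirs, files in os.walk(src):
            rel = os.path.relpath(root, src)
            target = dst if rel == '.' else os.path.join(dst, rel)
            os.makedirs(target, exist_ok=True)
            for name in files:
                s = os.path.join(root, name)
                d = os.path.join(target, name)
                try:
                    os.link(s, d)        # share the inode; st_nlink increases
                except OSError:
                    shutil.copy2(s, d)   # fall back to a plain copy

Note that hardlinking a symlink is exactly the ambiguous case discussed further down around 'checklink'.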
|
32 | 32 | Prepare repo r1: |
|
33 | 33 | |
|
34 | 34 | $ hg init r1 |
|
35 | 35 | $ cd r1 |
|
36 | 36 | |
|
37 | 37 | $ echo c1 > f1 |
|
38 | 38 | $ hg add f1 |
|
39 | 39 | $ hg ci -m0 |
|
40 | 40 | |
|
41 | 41 | $ mkdir d1 |
|
42 | 42 | $ cd d1 |
|
43 | 43 | $ echo c2 > f2 |
|
44 | 44 | $ hg add f2 |
|
45 | 45 | $ hg ci -m1 |
|
46 | 46 | $ cd ../.. |
|
47 | 47 | |
|
48 | 48 | $ nlinksdir r1/.hg/store |
|
49 | 49 | 1 r1/.hg/store/00changelog.i |
|
50 | 50 | 1 r1/.hg/store/00manifest.i |
|
51 | 51 | 1 r1/.hg/store/data/d1/f2.i |
|
52 | 52 | 1 r1/.hg/store/data/f1.i |
|
53 | 53 | 1 r1/.hg/store/fncache (repofncache !) |
|
54 | 54 | 1 r1/.hg/store/phaseroots |
|
55 | 55 | 1 r1/.hg/store/undo |
|
56 | 56 | 1 r1/.hg/store/undo.backup.fncache (repofncache !) |
|
57 | 57 | 1 r1/.hg/store/undo.backupfiles |
|
58 | 58 | 1 r1/.hg/store/undo.phaseroots |
|
59 | 59 | |
|
60 | 60 | |
|
61 | 61 | Create hardlinked clone r2: |
|
62 | 62 | |
|
63 | 63 | $ hg clone -U --debug r1 r2 --config progress.debug=true |
|
64 | linking: 1 files | |
|
65 | linking: 2 files | |
|
66 | linking: 3 files | |
|
67 | linking: 4 files | |
|
68 | linking: 5 files | |
|
69 | linking: 6 files | |
|
70 | linking: 7 files | |
|
64 | linking: 1/7 files (14.29%) | |
|
65 | linking: 2/7 files (28.57%) | |
|
66 | linking: 3/7 files (42.86%) | |
|
67 | linking: 4/7 files (57.14%) | |
|
68 | linking: 5/7 files (71.43%) | |
|
69 | linking: 6/7 files (85.71%) | |
|
70 | linking: 7/7 files (100.00%) | |
|
71 | 71 | linked 7 files |
|
72 | 72 | updating the branch cache |
|
73 | 73 | |
|
74 | 74 | Create non-hardlinked clone r3: |
|
75 | 75 | |
|
76 | 76 | $ hg clone --pull r1 r3 |
|
77 | 77 | requesting all changes |
|
78 | 78 | adding changesets |
|
79 | 79 | adding manifests |
|
80 | 80 | adding file changes |
|
81 | 81 | added 2 changesets with 2 changes to 2 files |
|
82 | 82 | new changesets 40d85e9847f2:7069c422939c |
|
83 | 83 | updating to branch default |
|
84 | 84 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
85 | 85 | |
|
86 | 86 | |
|
87 | 87 | Repos r1 and r2 should now contain hardlinked files: |
|
88 | 88 | |
|
89 | 89 | $ nlinksdir r1/.hg/store |
|
90 | 90 | 2 r1/.hg/store/00changelog.i |
|
91 | 91 | 2 r1/.hg/store/00manifest.i |
|
92 | 92 | 2 r1/.hg/store/data/d1/f2.i |
|
93 | 93 | 2 r1/.hg/store/data/f1.i |
|
94 | 1 r1/.hg/store/fncache (repofncache !) | |
|
95 | 95 | 1 r1/.hg/store/phaseroots |
|
96 | 96 | 1 r1/.hg/store/undo |
|
97 | 97 | 1 r1/.hg/store/undo.backup.fncache (repofncache !) |
|
98 | 98 | 1 r1/.hg/store/undo.backupfiles |
|
99 | 99 | 1 r1/.hg/store/undo.phaseroots |
|
100 | 100 | |
|
101 | 101 | $ nlinksdir r2/.hg/store |
|
102 | 102 | 2 r2/.hg/store/00changelog.i |
|
103 | 103 | 2 r2/.hg/store/00manifest.i |
|
104 | 104 | 2 r2/.hg/store/data/d1/f2.i |
|
105 | 105 | 2 r2/.hg/store/data/f1.i |
|
106 | 1 r2/.hg/store/fncache (repofncache !) | |
|
107 | 107 | |
|
108 | 108 | Repo r3 should not be hardlinked: |
|
109 | 109 | |
|
110 | 110 | $ nlinksdir r3/.hg/store |
|
111 | 111 | 1 r3/.hg/store/00changelog.i |
|
112 | 112 | 1 r3/.hg/store/00manifest.i |
|
113 | 113 | 1 r3/.hg/store/data/d1/f2.i |
|
114 | 114 | 1 r3/.hg/store/data/f1.i |
|
115 | 115 | 1 r3/.hg/store/fncache (repofncache !) |
|
116 | 116 | 1 r3/.hg/store/phaseroots |
|
117 | 117 | 1 r3/.hg/store/undo |
|
118 | 118 | 1 r3/.hg/store/undo.backupfiles |
|
119 | 119 | 1 r3/.hg/store/undo.phaseroots |
|
120 | 120 | |
|
121 | 121 | |
|
122 | 122 | Create a non-inlined filelog in r3: |
|
123 | 123 | |
|
124 | 124 | $ cd r3/d1 |
|
125 | 125 | >>> f = open('data1', 'wb') |
|
126 | 126 | >>> for x in range(10000): |
|
127 | 127 | ... f.write(b"%d\n" % x) and None |
|
128 | 128 | >>> f.close() |
|
129 | 129 | $ for j in 0 1 2 3 4 5 6 7 8 9; do |
|
130 | 130 | > cat data1 >> f2 |
|
131 | 131 | > hg commit -m$j |
|
132 | 132 | > done |
|
133 | 133 | $ cd ../.. |
|
134 | 134 | |
|
135 | 135 | $ nlinksdir r3/.hg/store |
|
136 | 136 | 1 r3/.hg/store/00changelog.i |
|
137 | 137 | 1 r3/.hg/store/00manifest.i |
|
138 | 138 | 1 r3/.hg/store/data/d1/f2.d |
|
139 | 139 | 1 r3/.hg/store/data/d1/f2.i |
|
140 | 140 | 1 r3/.hg/store/data/f1.i |
|
141 | 141 | 1 r3/.hg/store/fncache (repofncache !) |
|
142 | 142 | 1 r3/.hg/store/phaseroots |
|
143 | 143 | 1 r3/.hg/store/undo |
|
144 | 144 | 1 r3/.hg/store/undo.backup.fncache (repofncache !) |
|
145 | 145 | 1 r3/.hg/store/undo.backup.phaseroots |
|
146 | 146 | 1 r3/.hg/store/undo.backupfiles |
|
147 | 147 | 1 r3/.hg/store/undo.phaseroots |
|
148 | 148 | |
|
149 | 149 | Push to repo r1 should break up most hardlinks in r2: |
|
150 | 150 | |
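'Breaking up' a hardlink here means that before the push destination rewrites a revlog, the shared inode must be replaced with a private copy so the other clone keeps seeing the old data. A rough sketch of that copy-before-write idea (illustrative only, not Mercurial's vfs code):

    import os
    import shutil

    def open_breaking_hardlinks(path, mode='ab'):
        """Make sure path has a private inode before opening it for writing."""
        try:
            nlink = os.lstat(path).st_nlink
        except OSError:
            nlink = 0
        if nlink > 1:
            tmp = path + '.breaklink'
            shutil.copy2(path, tmp)   # private copy with a link count of 1
            os.rename(tmp, path)      # other hardlinks keep the old inode
        return open(path, mode)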
|
151 | 151 | $ hg -R r2 verify |
|
152 | 152 | checking changesets |
|
153 | 153 | checking manifests |
|
154 | 154 | crosschecking files in changesets and manifests |
|
155 | 155 | checking files |
|
156 | 156 | checked 2 changesets with 2 changes to 2 files |
|
157 | 157 | |
|
158 | 158 | $ cd r3 |
|
159 | 159 | $ hg push |
|
160 | 160 | pushing to $TESTTMP/r1 |
|
161 | 161 | searching for changes |
|
162 | 162 | adding changesets |
|
163 | 163 | adding manifests |
|
164 | 164 | adding file changes |
|
165 | 165 | added 10 changesets with 10 changes to 1 files |
|
166 | 166 | |
|
167 | 167 | $ cd .. |
|
168 | 168 | |
|
169 | 169 | $ nlinksdir r2/.hg/store |
|
170 | 170 | 1 r2/.hg/store/00changelog.i |
|
171 | 171 | 1 r2/.hg/store/00manifest.i |
|
172 | 172 | 1 r2/.hg/store/data/d1/f2.i |
|
173 | 173 | 2 r2/.hg/store/data/f1.i |
|
174 | 174 | [12] r2/\.hg/store/fncache (re) (repofncache !) |
|
175 | 175 | |
|
176 | 176 | #if hardlink-whitelisted repofncache |
|
177 | 177 | $ nlinksdir r2/.hg/store/fncache |
|
178 | 1 r2/.hg/store/fncache | |
|
179 | 179 | #endif |
|
180 | 180 | |
|
181 | 181 | $ hg -R r2 verify |
|
182 | 182 | checking changesets |
|
183 | 183 | checking manifests |
|
184 | 184 | crosschecking files in changesets and manifests |
|
185 | 185 | checking files |
|
186 | 186 | checked 2 changesets with 2 changes to 2 files |
|
187 | 187 | |
|
188 | 188 | |
|
189 | 189 | $ cd r1 |
|
190 | 190 | $ hg up |
|
191 | 191 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
192 | 192 | |
|
193 | 193 | Committing a change to f1 in r1 must break up hardlink f1.i in r2: |
|
194 | 194 | |
|
195 | 195 | $ echo c1c1 >> f1 |
|
196 | 196 | $ hg ci -m00 |
|
197 | 197 | $ cd .. |
|
198 | 198 | |
|
199 | 199 | $ nlinksdir r2/.hg/store |
|
200 | 200 | 1 r2/.hg/store/00changelog.i |
|
201 | 201 | 1 r2/.hg/store/00manifest.i |
|
202 | 202 | 1 r2/.hg/store/data/d1/f2.i |
|
203 | 203 | 1 r2/.hg/store/data/f1.i |
|
204 | 1 r2/.hg/store/fncache (repofncache !) | |
|
205 | 205 | |
|
206 | 206 | #if hardlink-whitelisted repofncache |
|
207 | 207 | $ nlinksdir r2/.hg/store/fncache |
|
208 | 1 r2/.hg/store/fncache | |
|
209 | 209 | #endif |
|
210 | 210 | |
|
211 | 211 | Create a file whose exec permissions we will change
|
212 | 212 | $ cd r3 |
|
213 | 213 | $ echo "echo hello world" > f3 |
|
214 | 214 | $ hg add f3 |
|
215 | 215 | $ hg ci -mf3 |
|
216 | 216 | $ cd .. |
|
217 | 217 | |
|
218 | 218 | $ cd r3 |
|
219 | 219 | $ hg tip --template '{rev}:{node|short}\n' |
|
220 | 220 | 12:d3b77733a28a |
|
221 | 221 | $ echo bla > f1 |
|
222 | 222 | $ chmod +x f3 |
|
223 | 223 | $ hg ci -m1 |
|
224 | 224 | $ cd .. |
|
225 | 225 | |
|
226 | 226 | Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'): |
|
227 | 227 | |
|
228 | 228 | $ linkcp r3 r4 |
|
229 | 229 | |
|
230 | 230 | 'checklink' is produced by hardlinking a symlink, and it is undefined whether

231 | 231 | the symlink should be followed or not. The behavior differs between Linux and

232 | 232 | BSD. Just remove it so the test passes on both platforms.
|
233 | 233 | |
|
234 | 234 | $ rm -f r4/.hg/wcache/checklink |
|
235 | 235 | |
|
236 | 236 | r4 has hardlinks in the working dir (not just inside .hg): |
|
237 | 237 | |
|
238 | 238 | $ nlinksdir r4 |
|
239 | 239 | 2 r4/.hg/00changelog.i |
|
240 | 240 | 2 r4/.hg/branch |
|
241 | 241 | 2 r4/.hg/cache/branch2-base |
|
242 | 242 | 2 r4/.hg/cache/branch2-immutable |
|
243 | 243 | 2 r4/.hg/cache/branch2-served |
|
244 | 244 | 2 r4/.hg/cache/branch2-served.hidden |
|
245 | 245 | 2 r4/.hg/cache/branch2-visible |
|
246 | 246 | 2 r4/.hg/cache/branch2-visible-hidden |
|
247 | 247 | 2 r4/.hg/cache/rbc-names-v1 |
|
248 | 248 | 2 r4/.hg/cache/rbc-revs-v1 |
|
249 | 249 | 2 r4/.hg/cache/tags2 |
|
250 | 250 | 2 r4/.hg/cache/tags2-served |
|
251 | 251 | 2 r4/.hg/dirstate |
|
252 | 252 | 2 r4/.hg/fsmonitor.state (fsmonitor !) |
|
253 | 253 | 2 r4/.hg/hgrc |
|
254 | 254 | 2 r4/.hg/last-message.txt |
|
255 | 255 | 2 r4/.hg/requires |
|
256 | 256 | 2 r4/.hg/store/00changelog.i |
|
257 | 257 | 2 r4/.hg/store/00manifest.i |
|
258 | 258 | 2 r4/.hg/store/data/d1/f2.d |
|
259 | 259 | 2 r4/.hg/store/data/d1/f2.i |
|
260 | 260 | 2 r4/.hg/store/data/f1.i |
|
261 | 261 | 2 r4/.hg/store/data/f3.i |
|
262 | 262 | 2 r4/.hg/store/fncache (repofncache !) |
|
263 | 263 | 2 r4/.hg/store/phaseroots |
|
264 | 264 | 2 r4/.hg/store/undo |
|
265 | 265 | 2 r4/.hg/store/undo.backup.fncache (repofncache !) |
|
266 | 266 | 2 r4/.hg/store/undo.backup.phaseroots |
|
267 | 267 | 2 r4/.hg/store/undo.backupfiles |
|
268 | 268 | 2 r4/.hg/store/undo.phaseroots |
|
269 | 269 | [24] r4/\.hg/undo\.backup\.dirstate (re) |
|
270 | 270 | 2 r4/.hg/undo.bookmarks |
|
271 | 271 | 2 r4/.hg/undo.branch |
|
272 | 272 | 2 r4/.hg/undo.desc |
|
273 | 273 | [24] r4/\.hg/undo\.dirstate (re) |
|
274 | 274 | 2 r4/.hg/wcache/checkisexec (execbit !) |
|
275 | 275 | 2 r4/.hg/wcache/checklink-target (symlink !) |
|
276 | 276 | 2 r4/.hg/wcache/checknoexec (execbit !) |
|
277 | 277 | 2 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !) |
|
278 | 278 | 2 r4/d1/data1 |
|
279 | 279 | 2 r4/d1/f2 |
|
280 | 280 | 2 r4/f1 |
|
281 | 281 | 2 r4/f3 |
|
282 | 282 | |
|
283 | 283 | Updating back to revision 12 in r4 should break the hardlinks of files f1 and f3:
|
284 | 284 | #if hardlink-whitelisted |
|
285 | 285 | $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate |
|
286 | 286 | 4 r4/.hg/undo.backup.dirstate |
|
287 | 287 | 4 r4/.hg/undo.dirstate |
|
288 | 288 | #endif |
|
289 | 289 | |
|
290 | 290 | |
|
291 | 291 | $ hg -R r4 up 12 |
|
292 | 292 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (execbit !) |
|
293 | 293 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-execbit !) |
|
294 | 294 | |
|
295 | 295 | $ nlinksdir r4 |
|
296 | 296 | 2 r4/.hg/00changelog.i |
|
297 | 297 | 1 r4/.hg/branch |
|
298 | 298 | 2 r4/.hg/cache/branch2-base |
|
299 | 299 | 2 r4/.hg/cache/branch2-immutable |
|
300 | 300 | 2 r4/.hg/cache/branch2-served |
|
301 | 301 | 2 r4/.hg/cache/branch2-served.hidden |
|
302 | 302 | 2 r4/.hg/cache/branch2-visible |
|
303 | 303 | 2 r4/.hg/cache/branch2-visible-hidden |
|
304 | 304 | 2 r4/.hg/cache/rbc-names-v1 |
|
305 | 305 | 2 r4/.hg/cache/rbc-revs-v1 |
|
306 | 306 | 2 r4/.hg/cache/tags2 |
|
307 | 307 | 2 r4/.hg/cache/tags2-served |
|
308 | 308 | 1 r4/.hg/dirstate |
|
309 | 309 | 1 r4/.hg/fsmonitor.state (fsmonitor !) |
|
310 | 310 | 2 r4/.hg/hgrc |
|
311 | 311 | 2 r4/.hg/last-message.txt |
|
312 | 312 | 2 r4/.hg/requires |
|
313 | 313 | 2 r4/.hg/store/00changelog.i |
|
314 | 314 | 2 r4/.hg/store/00manifest.i |
|
315 | 315 | 2 r4/.hg/store/data/d1/f2.d |
|
316 | 316 | 2 r4/.hg/store/data/d1/f2.i |
|
317 | 317 | 2 r4/.hg/store/data/f1.i |
|
318 | 318 | 2 r4/.hg/store/data/f3.i |
|
319 | 319 | 2 r4/.hg/store/fncache |
|
320 | 320 | 2 r4/.hg/store/phaseroots |
|
321 | 321 | 2 r4/.hg/store/undo |
|
322 | 322 | 2 r4/.hg/store/undo.backup.fncache (repofncache !) |
|
323 | 323 | 2 r4/.hg/store/undo.backup.phaseroots |
|
324 | 324 | 2 r4/.hg/store/undo.backupfiles |
|
325 | 325 | 2 r4/.hg/store/undo.phaseroots |
|
326 | 326 | [24] r4/\.hg/undo\.backup\.dirstate (re) |
|
327 | 327 | 2 r4/.hg/undo.bookmarks |
|
328 | 328 | 2 r4/.hg/undo.branch |
|
329 | 329 | 2 r4/.hg/undo.desc |
|
330 | 330 | [24] r4/\.hg/undo\.dirstate (re) |
|
331 | 331 | 2 r4/.hg/wcache/checkisexec (execbit !) |
|
332 | 332 | 2 r4/.hg/wcache/checklink-target (symlink !) |
|
333 | 333 | 2 r4/.hg/wcache/checknoexec (execbit !) |
|
334 | 334 | 1 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !) |
|
335 | 335 | 2 r4/d1/data1 |
|
336 | 336 | 2 r4/d1/f2 |
|
337 | 337 | 1 r4/f1 |
|
338 | 338 | 1 r4/f3 (execbit !) |
|
339 | 339 | 2 r4/f3 (no-execbit !) |
|
340 | 340 | |
|
341 | 341 | #if hardlink-whitelisted |
|
342 | 342 | $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate |
|
343 | 343 | 4 r4/.hg/undo.backup.dirstate |
|
344 | 344 | 4 r4/.hg/undo.dirstate |
|
345 | 345 | #endif |
|
346 | 346 | |
|
347 | 347 | Test hardlinking outside hg: |
|
348 | 348 | |
|
349 | 349 | $ mkdir x |
|
350 | 350 | $ echo foo > x/a |
|
351 | 351 | |
|
352 | 352 | $ linkcp x y |
|
353 | 353 | $ echo bar >> y/a |
|
354 | 354 | |
|
355 | 355 | No diff if hardlink: |
|
356 | 356 | |
|
357 | 357 | $ diff x/a y/a |
|
358 | 358 | |
|
359 | 359 | Test mq hardlinking: |
|
360 | 360 | |
|
361 | 361 | $ echo "[extensions]" >> $HGRCPATH |
|
362 | 362 | $ echo "mq=" >> $HGRCPATH |
|
363 | 363 | |
|
364 | 364 | $ hg init a |
|
365 | 365 | $ cd a |
|
366 | 366 | |
|
367 | 367 | $ hg qimport -n foo - << EOF |
|
368 | 368 | > # HG changeset patch |
|
369 | 369 | > # Date 1 0 |
|
370 | 370 | > diff -r 2588a8b53d66 a |
|
371 | 371 | > --- /dev/null Thu Jan 01 00:00:00 1970 +0000 |
|
372 | 372 | > +++ b/a Wed Jul 23 15:54:29 2008 +0200 |
|
373 | 373 | > @@ -0,0 +1,1 @@ |
|
374 | 374 | > +a |
|
375 | 375 | > EOF |
|
376 | 376 | adding foo to series file |
|
377 | 377 | |
|
378 | 378 | $ hg qpush |
|
379 | 379 | applying foo |
|
380 | 380 | now at: foo |
|
381 | 381 | |
|
382 | 382 | $ cd .. |
|
383 | 383 | $ linkcp a b |
|
384 | 384 | $ cd b |
|
385 | 385 | |
|
386 | 386 | $ hg qimport -n bar - << EOF |
|
387 | 387 | > # HG changeset patch |
|
388 | 388 | > # Date 2 0 |
|
389 | 389 | > diff -r 2588a8b53d66 a |
|
390 | 390 | > --- /dev/null Thu Jan 01 00:00:00 1970 +0000 |
|
391 | 391 | > +++ b/b Wed Jul 23 15:54:29 2008 +0200 |
|
392 | 392 | > @@ -0,0 +1,1 @@ |
|
393 | 393 | > +b |
|
394 | 394 | > EOF |
|
395 | 395 | adding bar to series file |
|
396 | 396 | |
|
397 | 397 | $ hg qpush |
|
398 | 398 | applying bar |
|
399 | 399 | now at: bar |
|
400 | 400 | |
|
401 | 401 | $ cat .hg/patches/status |
|
402 | 402 | 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo |
|
403 | 403 | 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar |
|
404 | 404 | |
|
405 | 405 | $ cat .hg/patches/series |
|
406 | 406 | foo |
|
407 | 407 | bar |
|
408 | 408 | |
|
409 | 409 | $ cat ../a/.hg/patches/status |
|
410 | 410 | 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo |
|
411 | 411 | |
|
412 | 412 | $ cat ../a/.hg/patches/series |
|
413 | 413 | foo |
|
414 | 414 | |
|
415 | 415 | Test tags hardlinking: |
|
416 | 416 | |
|
417 | 417 | $ hg qdel -r qbase:qtip |
|
418 | 418 | patch foo finalized without changeset message |
|
419 | 419 | patch bar finalized without changeset message |
|
420 | 420 | |
|
421 | 421 | $ hg tag -l lfoo |
|
422 | 422 | $ hg tag foo |
|
423 | 423 | |
|
424 | 424 | $ cd .. |
|
425 | 425 | $ linkcp b c |
|
426 | 426 | $ cd c |
|
427 | 427 | |
|
428 | 428 | $ hg tag -l -r 0 lbar |
|
429 | 429 | $ hg tag -r 0 bar |
|
430 | 430 | |
|
431 | 431 | $ cat .hgtags |
|
432 | 432 | 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo |
|
433 | 433 | 430ed4828a74fa4047bc816a25500f7472ab4bfe bar |
|
434 | 434 | |
|
435 | 435 | $ cat .hg/localtags |
|
436 | 436 | 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo |
|
437 | 437 | 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar |
|
438 | 438 | |
|
439 | 439 | $ cat ../b/.hgtags |
|
440 | 440 | 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo |
|
441 | 441 | |
|
442 | 442 | $ cat ../b/.hg/localtags |
|
443 | 443 | 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo |
|
444 | 444 | |
|
445 | 445 | $ cd .. |
@@ -1,1182 +1,1170 b'' | |||
|
1 | 1 | $ cat >> $HGRCPATH <<EOF |
|
2 | 2 | > [extdiff] |
|
3 | 3 | > # for portability: |
|
4 | 4 | > pdiff = sh "$RUNTESTDIR/pdiff" |
|
5 | 5 | > [progress] |
|
6 | 6 | > disable=False |
|
7 | 7 | > assume-tty = 1 |
|
8 | 8 | > delay = 0 |
|
9 | 9 | > # set changedelay really large so we don't see nested topics |
|
10 | 10 | > changedelay = 30000 |
|
11 | 11 | > format = topic bar number |
|
12 | 12 | > refresh = 0 |
|
13 | 13 | > width = 60 |
|
14 | 14 | > EOF |
|
15 | 15 | |
|
16 | 16 | Preparing the subrepository 'sub2' |
|
17 | 17 | |
|
18 | 18 | $ hg init sub2 |
|
19 | 19 | $ echo sub2 > sub2/sub2 |
|
20 | 20 | $ hg add -R sub2 |
|
21 | 21 | adding sub2/sub2 |
|
22 | 22 | $ hg commit -R sub2 -m "sub2 import" |
|
23 | 23 | |
|
24 | 24 | Preparing the 'sub1' repo which depends on the subrepo 'sub2' |
|
25 | 25 | |
|
26 | 26 | $ hg init sub1 |
|
27 | 27 | $ echo sub1 > sub1/sub1 |
|
28 | 28 | $ echo "sub2 = ../sub2" > sub1/.hgsub |
|
29 | 29 | $ hg clone sub2 sub1/sub2 |
|
30 | 30 | \r (no-eol) (esc) |
|
31 | linking [ <=> ] 1\r (no-eol) (esc) |

32 | linking [ <=> ] 2\r (no-eol) (esc) |

33 | linking [ <=> ] 3\r (no-eol) (esc) |

34 | linking [ <=> ] 4\r (no-eol) (esc) |
35 | linking [ <=> ] 5\r (no-eol) (esc) | |
|
36 | linking [ <=> ] 6\r (no-eol) (esc) | |
|
31 | linking [======> ] 1/6\r (no-eol) (esc) | |
|
32 | linking [==============> ] 2/6\r (no-eol) (esc) | |
|
33 | linking [=====================> ] 3/6\r (no-eol) (esc) | |
|
34 | linking [=============================> ] 4/6\r (no-eol) (esc) | |
|
35 | linking [====================================> ] 5/6\r (no-eol) (esc) | |
|
36 | linking [============================================>] 6/6\r (no-eol) (esc) | |
|
37 | 37 | \r (no-eol) (esc) |
|
38 | 38 | \r (no-eol) (esc) |
|
39 | 39 | updating [===========================================>] 1/1\r (no-eol) (esc) |
|
40 | 40 | \r (no-eol) (esc) |
|
41 | 41 | updating to branch default |
|
42 | 42 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
43 | 43 | $ hg add -R sub1 |
|
44 | 44 | adding sub1/.hgsub |
|
45 | 45 | adding sub1/sub1 |
|
46 | 46 | $ hg commit -R sub1 -m "sub1 import" |
|
47 | 47 | |
|
48 | 48 | Preparing the 'main' repo which depends on the subrepo 'sub1' |
|
49 | 49 | |
|
50 | 50 | $ hg init main |
|
51 | 51 | $ echo main > main/main |
|
52 | 52 | $ echo "sub1 = ../sub1" > main/.hgsub |
|
53 | 53 | $ hg clone sub1 main/sub1 |
|
54 | 54 | \r (no-eol) (esc) |
|
55 | linking [ <=> ] 1\r (no-eol) (esc) |

56 | linking [ <=> ] 2\r (no-eol) (esc) |

57 | linking [ <=> ] 3\r (no-eol) (esc) |

58 | linking [ <=> ] 4\r (no-eol) (esc) |

59 | linking [ <=> ] 5\r (no-eol) (esc) |
60 | linking [ <=> ] 6\r (no-eol) (esc) | |
|
61 | linking [ <=> ] 7\r (no-eol) (esc) | |
|
62 | linking [ <=> ] 8\r (no-eol) (esc) | |
|
63 | linking [ <=> ] 9\r (no-eol) (esc) (reposimplestore !) | |
|
64 | linking [ <=> ] 10\r (no-eol) (esc) (reposimplestore !) | |
|
55 | linking [====> ] 1/8\r (no-eol) (esc) | |
|
56 | linking [==========> ] 2/8\r (no-eol) (esc) | |
|
57 | linking [===============> ] 3/8\r (no-eol) (esc) | |
|
58 | linking [=====================> ] 4/8\r (no-eol) (esc) | |
|
59 | linking [===========================> ] 5/8\r (no-eol) (esc) | |
|
60 | linking [================================> ] 6/8\r (no-eol) (esc) | |
|
61 | linking [======================================> ] 7/8\r (no-eol) (esc) | |
|
62 | linking [============================================>] 8/8\r (no-eol) (esc) | |
|
65 | 63 | \r (no-eol) (esc) |
|
66 | 64 | \r (no-eol) (esc) |
|
67 | 65 | updating [===========================================>] 3/3\r (no-eol) (esc) |
|
68 | 66 | \r (no-eol) (esc) |
|
69 | 67 | \r (no-eol) (esc) |
|
70 | linking [ <=> ] 1\r (no-eol) (esc) |

71 | linking [ <=> ] 2\r (no-eol) (esc) |

72 | linking [ <=> ] 3\r (no-eol) (esc) |

73 | linking [ <=> ] 4\r (no-eol) (esc) |
74 | linking [ <=> ] 5\r (no-eol) (esc) | |
|
75 | linking [ <=> ] 6\r (no-eol) (esc) | |
|
68 | linking [======> ] 1/6\r (no-eol) (esc) | |
|
69 | linking [==============> ] 2/6\r (no-eol) (esc) | |
|
70 | linking [=====================> ] 3/6\r (no-eol) (esc) | |
|
71 | linking [=============================> ] 4/6\r (no-eol) (esc) | |
|
72 | linking [====================================> ] 5/6\r (no-eol) (esc) | |
|
73 | linking [============================================>] 6/6\r (no-eol) (esc) | |
|
76 | 74 | updating [===========================================>] 1/1\r (no-eol) (esc) |
|
77 | 75 | \r (no-eol) (esc) |
|
78 | 76 | updating to branch default |
|
79 | 77 | cloning subrepo sub2 from $TESTTMP/sub2 |
|
80 | 78 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
81 | 79 | $ hg add -R main |
|
82 | 80 | adding main/.hgsub |
|
83 | 81 | adding main/main |
|
84 | 82 | $ hg commit -R main -m "main import" |
|
85 | 83 | |
|
86 | 84 | #if serve |
|
87 | 85 | |
|
88 | 86 | Unfortunately, subrepos not at their nominal location cannot be cloned. But

89 | 87 | they are still served from their location within the local repository. The only

90 | 88 | reason 'main' can be cloned via the filesystem is that 'sub1' and 'sub2'

91 | 89 | are also available as siblings of 'main'.
|
92 | 90 | |
|
93 | 91 | $ hg serve -R main --debug -S -p $HGPORT -d --pid-file=hg1.pid -E error.log -A access.log |
|
94 | 92 | adding = $TESTTMP/main |
|
95 | 93 | adding sub1 = $TESTTMP/main/sub1 |
|
96 | 94 | adding sub1/sub2 = $TESTTMP/main/sub1/sub2 |
|
97 | 95 | listening at http://*:$HGPORT/ (bound to *:$HGPORT) (glob) (?) |
|
98 | 96 | adding = $TESTTMP/main (?) |
|
99 | 97 | adding sub1 = $TESTTMP/main/sub1 (?) |
|
100 | 98 | adding sub1/sub2 = $TESTTMP/main/sub1/sub2 (?) |
|
101 | 99 | $ cat hg1.pid >> $DAEMON_PIDS |
|
102 | 100 | |
|
103 | 101 | $ hg clone http://localhost:$HGPORT httpclone --config progress.disable=True |
|
104 | 102 | requesting all changes |
|
105 | 103 | adding changesets |
|
106 | 104 | adding manifests |
|
107 | 105 | adding file changes |
|
108 | 106 | added 1 changesets with 3 changes to 3 files |
|
109 | 107 | new changesets 7f491f53a367 |
|
110 | 108 | updating to branch default |
|
111 | 109 | cloning subrepo sub1 from http://localhost:$HGPORT/../sub1 |
|
112 | 110 | abort: HTTP Error 404: Not Found |
|
113 | 111 | [100] |
|
114 | 112 | |
|
115 | 113 | $ cat access.log |
|
116 | 114 | * "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) |
|
117 | 115 | * "GET /?cmd=batch HTTP/1.1" 200 - * (glob) |
|
118 | 116 | * "GET /?cmd=getbundle HTTP/1.1" 200 - * (glob) |
|
119 | 117 | * "GET /../sub1?cmd=capabilities HTTP/1.1" 404 - (glob) |
|
120 | 118 | $ cat error.log |
|
121 | 119 | |
|
122 | 120 | $ killdaemons.py |
|
123 | 121 | $ rm hg1.pid error.log access.log |
|
124 | 122 | #endif |
|
125 | 123 | |
|
126 | 124 | Cleaning both repositories, just as a clone -U |
|
127 | 125 | |
|
128 | 126 | $ hg up -C -R sub2 null |
|
129 | 127 | \r (no-eol) (esc) |
|
130 | 128 | updating [===========================================>] 1/1\r (no-eol) (esc) |
|
131 | 129 | \r (no-eol) (esc) |
|
132 | 130 | 0 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
133 | 131 | $ hg up -C -R sub1 null |
|
134 | 132 | \r (no-eol) (esc) |
|
135 | 133 | updating [===========================================>] 1/1\r (no-eol) (esc) |
|
136 | 134 | \r (no-eol) (esc) |
|
137 | 135 | \r (no-eol) (esc) |
|
138 | 136 | updating [===========================================>] 3/3\r (no-eol) (esc) |
|
139 | 137 | \r (no-eol) (esc) |
|
140 | 138 | 0 files updated, 0 files merged, 3 files removed, 0 files unresolved |
|
141 | 139 | $ hg up -C -R main null |
|
142 | 140 | \r (no-eol) (esc) |
|
143 | 141 | updating [===========================================>] 1/1\r (no-eol) (esc) |
|
144 | 142 | \r (no-eol) (esc) |
|
145 | 143 | \r (no-eol) (esc) |
|
146 | 144 | updating [===========================================>] 3/3\r (no-eol) (esc) |
|
147 | 145 | \r (no-eol) (esc) |
|
148 | 146 | \r (no-eol) (esc) |
|
149 | 147 | updating [===========================================>] 3/3\r (no-eol) (esc) |
|
150 | 148 | \r (no-eol) (esc) |
|
151 | 149 | 0 files updated, 0 files merged, 3 files removed, 0 files unresolved |
|
152 | 150 | $ rm -rf main/sub1 |
|
153 | 151 | $ rm -rf sub1/sub2 |
|
154 | 152 | |
|
155 | 153 | Clone main |
|
156 | 154 | |
|
157 | 155 | $ hg --config extensions.largefiles= clone main cloned |
|
158 | 156 | \r (no-eol) (esc) |
|
159 | linking [ <=> ] 1\r (no-eol) (esc) |

160 | linking [ <=> ] 2\r (no-eol) (esc) |

161 | linking [ <=> ] 3\r (no-eol) (esc) |

162 | linking [ <=> ] 4\r (no-eol) (esc) |

163 | linking [ <=> ] 5\r (no-eol) (esc) |
164 | linking [ <=> ] 6\r (no-eol) (esc) | |
|
165 | linking [ <=> ] 7\r (no-eol) (esc) | |
|
166 | linking [ <=> ] 8\r (no-eol) (esc) | |
|
167 | linking [ <=> ] 9\r (no-eol) (esc) (reposimplestore !) | |
|
168 | linking [ <=> ] 10\r (no-eol) (esc) (reposimplestore !) | |
|
157 | linking [====> ] 1/8\r (no-eol) (esc) | |
|
158 | linking [==========> ] 2/8\r (no-eol) (esc) | |
|
159 | linking [===============> ] 3/8\r (no-eol) (esc) | |
|
160 | linking [=====================> ] 4/8\r (no-eol) (esc) | |
|
161 | linking [===========================> ] 5/8\r (no-eol) (esc) | |
|
162 | linking [================================> ] 6/8\r (no-eol) (esc) | |
|
163 | linking [======================================> ] 7/8\r (no-eol) (esc) | |
|
164 | linking [============================================>] 8/8\r (no-eol) (esc) | |
|
169 | 165 | \r (no-eol) (esc) |
|
170 | 166 | \r (no-eol) (esc) |
|
171 | 167 | updating [===========================================>] 3/3\r (no-eol) (esc) |
|
172 | 168 | \r (no-eol) (esc) |
|
173 | 169 | \r (no-eol) (esc) |
|
174 | linking [ <=> ] 1\r (no-eol) (esc) |

175 | linking [ <=> ] 2\r (no-eol) (esc) |

176 | linking [ <=> ] 3\r (no-eol) (esc) |

177 | linking [ <=> ] 4\r (no-eol) (esc) |

178 | linking [ <=> ] 5\r (no-eol) (esc) |
179 | linking [ <=> ] 6\r (no-eol) (esc) | |
|
180 | linking [ <=> ] 7\r (no-eol) (esc) | |
|
181 | linking [ <=> ] 8\r (no-eol) (esc) | |
|
182 | linking [ <=> ] 9\r (no-eol) (esc) (reposimplestore !) | |
|
183 | linking [ <=> ] 10\r (no-eol) (esc) (reposimplestore !) | |
|
170 | linking [====> ] 1/8\r (no-eol) (esc) | |
|
171 | linking [==========> ] 2/8\r (no-eol) (esc) | |
|
172 | linking [===============> ] 3/8\r (no-eol) (esc) | |
|
173 | linking [=====================> ] 4/8\r (no-eol) (esc) | |
|
174 | linking [===========================> ] 5/8\r (no-eol) (esc) | |
|
175 | linking [================================> ] 6/8\r (no-eol) (esc) | |
|
176 | linking [======================================> ] 7/8\r (no-eol) (esc) | |
|
177 | linking [============================================>] 8/8\r (no-eol) (esc) | |
|
184 | 178 | updating [===========================================>] 3/3\r (no-eol) (esc) |
|
185 | 179 | \r (no-eol) (esc) |
|
186 | 180 | \r (no-eol) (esc) |
|
187 | linking [ <=> ] 1\r (no-eol) (esc) (reporevlogstore !) |

188 | linking [ <=> ] 2\r (no-eol) (esc) (reporevlogstore !) |

189 | linking [ <=> ] 3\r (no-eol) (esc) (reporevlogstore !) |
190 | linking [ <=> ] 4\r (no-eol) (esc) (reporevlogstore !) | |
|
191 | linking [ <=> ] 5\r (no-eol) (esc) (reporevlogstore !) | |
|
192 | linking [ <=> ] 6\r (no-eol) (esc) (reporevlogstore !) | |
|
193 | linking [ <=> ] 1\r (no-eol) (esc) (reposimplestore !) | |
|
194 | linking [ <=> ] 2\r (no-eol) (esc) (reposimplestore !) | |
|
195 | linking [ <=> ] 3\r (no-eol) (esc) (reposimplestore !) | |
|
196 | linking [ <=> ] 4\r (no-eol) (esc) (reposimplestore !) | |
|
197 | linking [ <=> ] 5\r (no-eol) (esc) (reposimplestore !) | |
|
198 | linking [ <=> ] 6\r (no-eol) (esc) (reposimplestore !) | |
|
181 | linking [======> ] 1/6\r (no-eol) (esc) | |
|
182 | linking [==============> ] 2/6\r (no-eol) (esc) | |
|
183 | linking [=====================> ] 3/6\r (no-eol) (esc) | |
|
184 | linking [=============================> ] 4/6\r (no-eol) (esc) | |
|
185 | linking [====================================> ] 5/6\r (no-eol) (esc) | |
|
186 | linking [============================================>] 6/6\r (no-eol) (esc) | |
|
199 | 187 | updating [===========================================>] 1/1\r (no-eol) (esc) |
|
200 | 188 | \r (no-eol) (esc) |
|
201 | 189 | updating to branch default |
|
202 | 190 | cloning subrepo sub1 from $TESTTMP/sub1 |
|
203 | 191 | cloning subrepo sub1/sub2 from $TESTTMP/sub2 |
|
204 | 192 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
205 | 193 | |
|
206 | 194 | Largefiles is NOT enabled in the clone if the source repo doesn't require it |
|
207 | 195 | $ grep largefiles cloned/.hg/hgrc |
|
208 | 196 | [1] |
|
209 | 197 | |
|
210 | 198 | Checking cloned repo ids |
|
211 | 199 | |
|
212 | 200 | $ printf "cloned " ; hg id -R cloned |
|
213 | 201 | cloned 7f491f53a367 tip |
|
214 | 202 | $ printf "cloned/sub1 " ; hg id -R cloned/sub1 |
|
215 | 203 | cloned/sub1 fc3b4ce2696f tip |
|
216 | 204 | $ printf "cloned/sub1/sub2 " ; hg id -R cloned/sub1/sub2 |
|
217 | 205 | cloned/sub1/sub2 c57a0840e3ba tip |
|
218 | 206 | |
|
219 | 207 | debugsub output for main and sub1 |
|
220 | 208 | |
|
221 | 209 | $ hg debugsub -R cloned |
|
222 | 210 | path sub1 |
|
223 | 211 | source ../sub1 |
|
224 | 212 | revision fc3b4ce2696f7741438c79207583768f2ce6b0dd |
|
225 | 213 | $ hg debugsub -R cloned/sub1 |
|
226 | 214 | path sub2 |
|
227 | 215 | source ../sub2 |
|
228 | 216 | revision c57a0840e3badd667ef3c3ef65471609acb2ba3c |
|
229 | 217 | |
|
230 | 218 | Modifying deeply nested 'sub2' |
|
231 | 219 | |
|
232 | 220 | $ echo modified > cloned/sub1/sub2/sub2 |
|
233 | 221 | $ hg commit --subrepos -m "deep nested modif should trigger a commit" -R cloned |
|
234 | 222 | committing subrepository sub1 |
|
235 | 223 | committing subrepository sub1/sub2 |
|
236 | 224 | |
|
237 | 225 | Checking modified node ids |
|
238 | 226 | |
|
239 | 227 | $ printf "cloned " ; hg id -R cloned |
|
240 | 228 | cloned ffe6649062fe tip |
|
241 | 229 | $ printf "cloned/sub1 " ; hg id -R cloned/sub1 |
|
242 | 230 | cloned/sub1 2ecb03bf44a9 tip |
|
243 | 231 | $ printf "cloned/sub1/sub2 " ; hg id -R cloned/sub1/sub2 |
|
244 | 232 | cloned/sub1/sub2 53dd3430bcaf tip |
|
245 | 233 | |
|
246 | 234 | debugsub output for main and sub1 |
|
247 | 235 | |
|
248 | 236 | $ hg debugsub -R cloned |
|
249 | 237 | path sub1 |
|
250 | 238 | source ../sub1 |
|
251 | 239 | revision 2ecb03bf44a94e749e8669481dd9069526ce7cb9 |
|
252 | 240 | $ hg debugsub -R cloned/sub1 |
|
253 | 241 | path sub2 |
|
254 | 242 | source ../sub2 |
|
255 | 243 | revision 53dd3430bcaf5ab4a7c48262bcad6d441f510487 |
|
256 | 244 | |
|
257 | 245 | Check that deep archiving works |
|
258 | 246 | |
|
259 | 247 | $ cd cloned |
|
260 | 248 | $ echo 'test' > sub1/sub2/test.txt |
|
261 | 249 | $ hg --config extensions.largefiles=! add sub1/sub2/test.txt |
|
262 | 250 | $ mkdir sub1/sub2/folder |
|
263 | 251 | $ echo 'subfolder' > sub1/sub2/folder/test.txt |
|
264 | 252 | $ hg ci -ASm "add test.txt" |
|
265 | 253 | adding sub1/sub2/folder/test.txt |
|
266 | 254 | committing subrepository sub1 |
|
267 | 255 | committing subrepository sub1/sub2 |
|
268 | 256 | |
|
269 | 257 | $ rm -r main |
|
270 | 258 | $ hg archive -S -qr 'wdir()' ../wdir |
|
271 | 259 | $ cat ../wdir/.hg_archival.txt |
|
272 | 260 | repo: 7f491f53a367861f47ee64a80eb997d1f341b77a |
|
273 | 261 | node: 9bb10eebee29dc0f1201dcf5977b811a540255fd+ |
|
274 | 262 | branch: default |
|
275 | 263 | latesttag: null |
|
276 | 264 | latesttagdistance: 4 |
|
277 | 265 | changessincelatesttag: 4 |
|
278 | 266 | $ hg update -Cq . |
|
279 | 267 | |
|
280 | 268 | A deleted subrepo file is flagged as dirty, like the top level repo |
|
281 | 269 | |
|
282 | 270 | $ rm -r ../wdir sub1/sub2/folder/test.txt |
|
283 | 271 | $ hg archive -S -qr 'wdir()' ../wdir |
|
284 | 272 | $ cat ../wdir/.hg_archival.txt |
|
285 | 273 | repo: 7f491f53a367861f47ee64a80eb997d1f341b77a |
|
286 | 274 | node: 9bb10eebee29dc0f1201dcf5977b811a540255fd+ |
|
287 | 275 | branch: default |
|
288 | 276 | latesttag: null |
|
289 | 277 | latesttagdistance: 4 |
|
290 | 278 | changessincelatesttag: 4 |
|
291 | 279 | $ hg update -Cq . |
|
292 | 280 | $ rm -r ../wdir |
|
293 | 281 | |
|
294 | 282 | $ hg archive -S -qr 'wdir()' ../wdir \ |
|
295 | 283 | > --config 'experimental.archivemetatemplate=archived {node|short}\n' |
|
296 | 284 | $ cat ../wdir/.hg_archival.txt |
|
297 | 285 | archived ffffffffffff |
|
298 | 286 | $ rm -r ../wdir |
|
299 | 287 | |
|
300 | 288 | .. but first take a detour through some deep removal testing |
|
301 | 289 | |
|
302 | 290 | $ hg remove -S -I 're:.*.txt' . |
|
303 | 291 | \r (no-eol) (esc) |
|
304 | 292 | searching [==========================================>] 1/1\r (no-eol) (esc) |
|
305 | 293 | searching [==========================================>] 1/1\r (no-eol) (esc) |
|
306 | 294 | \r (no-eol) (esc) |
|
307 | 295 | \r (no-eol) (esc) |
|
308 | 296 | deleting [=====================> ] 1/2\r (no-eol) (esc) |
|
309 | 297 | \r (no-eol) (esc) |
|
310 | 298 | \r (no-eol) (esc) |
|
311 | 299 | deleting [===========================================>] 2/2\r (no-eol) (esc) |
|
312 | 300 | \r (no-eol) (esc) |
|
313 | 301 | removing sub1/sub2/folder/test.txt |
|
314 | 302 | removing sub1/sub2/test.txt |
|
315 | 303 | $ hg status -S |
|
316 | 304 | R sub1/sub2/folder/test.txt |
|
317 | 305 | R sub1/sub2/test.txt |
|
318 | 306 | $ hg update -Cq |
|
319 | 307 | $ hg remove -I 're:.*.txt' sub1 |
|
320 | 308 | \r (no-eol) (esc) |
|
321 | 309 | searching [==========================================>] 1/1\r (no-eol) (esc) |
|
322 | 310 | \r (no-eol) (esc) |
|
323 | 311 | \r (no-eol) (esc) |
|
324 | 312 | deleting [===========================================>] 1/1\r (no-eol) (esc) |
|
325 | 313 | \r (no-eol) (esc) |
|
326 | 314 | $ hg status -S |
|
327 | 315 | $ hg remove sub1/sub2/folder/test.txt |
|
328 | 316 | \r (no-eol) (esc) |
|
329 | 317 | searching [==========================================>] 1/1\r (no-eol) (esc) |
|
330 | 318 | searching [==========================================>] 1/1\r (no-eol) (esc) |
|
331 | 319 | \r (no-eol) (esc) |
|
332 | 320 | \r (no-eol) (esc) |
|
333 | 321 | deleting [===========================================>] 1/1\r (no-eol) (esc) |
|
334 | 322 | \r (no-eol) (esc) |
|
335 | 323 | \r (no-eol) (esc) |
|
336 | 324 | deleting [===========================================>] 1/1\r (no-eol) (esc) |
|
337 | 325 | \r (no-eol) (esc) |
|
338 | 326 | \r (no-eol) (esc) |
|
339 | 327 | deleting [===========================================>] 1/1\r (no-eol) (esc) |
|
340 | 328 | \r (no-eol) (esc) |
|
341 | 329 | $ hg remove sub1/.hgsubstate |
|
342 | 330 | \r (no-eol) (esc) |
|
343 | 331 | searching [==========================================>] 1/1\r (no-eol) (esc) |
|
344 | 332 | \r (no-eol) (esc) |
|
345 | 333 | \r (no-eol) (esc) |
|
346 | 334 | deleting [===========================================>] 1/1\r (no-eol) (esc) |
|
347 | 335 | \r (no-eol) (esc) |
|
348 | 336 | \r (no-eol) (esc) |
|
349 | 337 | deleting [===========================================>] 1/1\r (no-eol) (esc) |
|
350 | 338 | \r (no-eol) (esc) |
|
351 | 339 | $ mv sub1/.hgsub sub1/x.hgsub |
|
352 | 340 | $ hg status -S |
|
353 | 341 | warning: subrepo spec file 'sub1/.hgsub' not found |
|
354 | 342 | R sub1/.hgsubstate |
|
355 | 343 | R sub1/sub2/folder/test.txt |
|
356 | 344 | ! sub1/.hgsub |
|
357 | 345 | ? sub1/x.hgsub |
|
358 | 346 | $ hg status -R sub1 |
|
359 | 347 | warning: subrepo spec file 'sub1/.hgsub' not found |
|
360 | 348 | R .hgsubstate |
|
361 | 349 | ! .hgsub |
|
362 | 350 | ? x.hgsub |
|
363 | 351 | $ mv sub1/x.hgsub sub1/.hgsub |
|
364 | 352 | $ hg update -Cq |
|
365 | 353 | $ touch sub1/foo |
|
366 | 354 | $ hg forget sub1/sub2/folder/test.txt |
|
367 | 355 | $ rm sub1/sub2/test.txt |
|
368 | 356 | |
|
369 | 357 | Test relative path printing + subrepos |
|
370 | 358 | $ mkdir -p foo/bar |
|
371 | 359 | $ cd foo |
|
372 | 360 | $ touch bar/abc |
|
373 | 361 | $ hg addremove -S .. |
|
374 | 362 | \r (no-eol) (esc) |
|
375 | 363 | searching for exact renames [========================>] 1/1\r (no-eol) (esc) |
|
376 | 364 | \r (no-eol) (esc) |
|
377 | 365 | adding ../sub1/sub2/folder/test.txt |
|
378 | 366 | removing ../sub1/sub2/test.txt |
|
379 | 367 | adding ../sub1/foo |
|
380 | 368 | adding bar/abc |
|
381 | 369 | $ cd .. |
|
382 | 370 | $ hg status -S |
|
383 | 371 | A foo/bar/abc |
|
384 | 372 | A sub1/foo |
|
385 | 373 | R sub1/sub2/test.txt |
|
386 | 374 | |
|
387 | 375 | Archive wdir() with subrepos |
|
388 | 376 | $ hg rm main |
|
389 | 377 | \r (no-eol) (esc) |
|
390 | 378 | deleting [===========================================>] 1/1\r (no-eol) (esc) |
|
391 | 379 | \r (no-eol) (esc) |
|
392 | 380 | $ hg archive -S -r 'wdir()' ../wdir |
|
393 | 381 | \r (no-eol) (esc) |
|
394 | 382 | archiving [ ] 0/3\r (no-eol) (esc) |
|
395 | 383 | archiving [=============> ] 1/3\r (no-eol) (esc) |
|
396 | 384 | archiving [===========================> ] 2/3\r (no-eol) (esc) |
|
397 | 385 | archiving [==========================================>] 3/3\r (no-eol) (esc) |
|
398 | 386 | \r (no-eol) (esc) |
|
399 | 387 | \r (no-eol) (esc) |
|
400 | 388 | archiving (sub1) [ ] 0/4\r (no-eol) (esc) |
|
401 | 389 | archiving (sub1) [========> ] 1/4\r (no-eol) (esc) |
|
402 | 390 | archiving (sub1) [=================> ] 2/4\r (no-eol) (esc) |
|
403 | 391 | archiving (sub1) [==========================> ] 3/4\r (no-eol) (esc) |
|
404 | 392 | archiving (sub1) [===================================>] 4/4\r (no-eol) (esc) |
|
405 | 393 | \r (no-eol) (esc) |
|
406 | 394 | \r (no-eol) (esc) |
|
407 | 395 | archiving (sub1/sub2) [ ] 0/2\r (no-eol) (esc) |
|
408 | 396 | archiving (sub1/sub2) [==============> ] 1/2\r (no-eol) (esc) |
|
409 | 397 | archiving (sub1/sub2) [==============================>] 2/2\r (no-eol) (esc) |
|
410 | 398 | \r (no-eol) (esc) |
|
411 | 399 | $ diff -r . ../wdir | egrep -v '\.hg$|^Common subdirectories:' |
|
412 | 400 | Only in ../wdir: .hg_archival.txt |
|
413 | 401 | |
|
414 | 402 | $ find ../wdir -type f | sort |
|
415 | 403 | ../wdir/.hg_archival.txt |
|
416 | 404 | ../wdir/.hgsub |
|
417 | 405 | ../wdir/.hgsubstate |
|
418 | 406 | ../wdir/foo/bar/abc |
|
419 | 407 | ../wdir/sub1/.hgsub |
|
420 | 408 | ../wdir/sub1/.hgsubstate |
|
421 | 409 | ../wdir/sub1/foo |
|
422 | 410 | ../wdir/sub1/sub1 |
|
423 | 411 | ../wdir/sub1/sub2/folder/test.txt |
|
424 | 412 | ../wdir/sub1/sub2/sub2 |
|
425 | 413 | |
|
426 | 414 | $ cat ../wdir/.hg_archival.txt |
|
427 | 415 | repo: 7f491f53a367861f47ee64a80eb997d1f341b77a |
|
428 | 416 | node: 9bb10eebee29dc0f1201dcf5977b811a540255fd+ |
|
429 | 417 | branch: default |
|
430 | 418 | latesttag: null |
|
431 | 419 | latesttagdistance: 4 |
|
432 | 420 | changessincelatesttag: 4 |
|
433 | 421 | |
|
434 | 422 | Attempting to archive 'wdir()' with a missing file is handled gracefully |
|
435 | 423 | $ rm sub1/sub1 |
|
436 | 424 | $ rm -r ../wdir |
|
437 | 425 | $ hg archive -v -S -r 'wdir()' ../wdir |
|
438 | 426 | \r (no-eol) (esc) |
|
439 | 427 | archiving [ ] 0/3\r (no-eol) (esc) |
|
440 | 428 | archiving [=============> ] 1/3\r (no-eol) (esc) |
|
441 | 429 | archiving [===========================> ] 2/3\r (no-eol) (esc) |
|
442 | 430 | archiving [==========================================>] 3/3\r (no-eol) (esc) |
|
443 | 431 | \r (no-eol) (esc) |
|
444 | 432 | \r (no-eol) (esc) |
|
445 | 433 | archiving (sub1) [ ] 0/3\r (no-eol) (esc) |
|
446 | 434 | archiving (sub1) [===========> ] 1/3\r (no-eol) (esc) |
|
447 | 435 | archiving (sub1) [=======================> ] 2/3\r (no-eol) (esc) |
|
448 | 436 | archiving (sub1) [===================================>] 3/3\r (no-eol) (esc) |
|
449 | 437 | \r (no-eol) (esc) |
|
450 | 438 | \r (no-eol) (esc) |
|
451 | 439 | archiving (sub1/sub2) [ ] 0/2\r (no-eol) (esc) |
|
452 | 440 | archiving (sub1/sub2) [==============> ] 1/2\r (no-eol) (esc) |
|
453 | 441 | archiving (sub1/sub2) [==============================>] 2/2\r (no-eol) (esc) |
|
454 | 442 | \r (no-eol) (esc) |
|
455 | 443 | $ find ../wdir -type f | sort |
|
456 | 444 | ../wdir/.hg_archival.txt |
|
457 | 445 | ../wdir/.hgsub |
|
458 | 446 | ../wdir/.hgsubstate |
|
459 | 447 | ../wdir/foo/bar/abc |
|
460 | 448 | ../wdir/sub1/.hgsub |
|
461 | 449 | ../wdir/sub1/.hgsubstate |
|
462 | 450 | ../wdir/sub1/foo |
|
463 | 451 | ../wdir/sub1/sub2/folder/test.txt |
|
464 | 452 | ../wdir/sub1/sub2/sub2 |
|
465 | 453 | |
|
466 | 454 | Continue relative path printing + subrepos |
|
467 | 455 | $ hg update -Cq |
|
468 | 456 | $ rm -r ../wdir |
|
469 | 457 | $ hg archive -S -r 'wdir()' ../wdir |
|
470 | 458 | \r (no-eol) (esc) |
|
471 | 459 | archiving [ ] 0/3\r (no-eol) (esc) |
|
472 | 460 | archiving [=============> ] 1/3\r (no-eol) (esc) |
|
473 | 461 | archiving [===========================> ] 2/3\r (no-eol) (esc) |
|
474 | 462 | archiving [==========================================>] 3/3\r (no-eol) (esc) |
|
475 | 463 | \r (no-eol) (esc) |
|
476 | 464 | \r (no-eol) (esc) |
|
477 | 465 | archiving (sub1) [ ] 0/3\r (no-eol) (esc) |
|
478 | 466 | archiving (sub1) [===========> ] 1/3\r (no-eol) (esc) |
|
479 | 467 | archiving (sub1) [=======================> ] 2/3\r (no-eol) (esc) |
|
480 | 468 | archiving (sub1) [===================================>] 3/3\r (no-eol) (esc) |
|
481 | 469 | \r (no-eol) (esc) |
|
482 | 470 | \r (no-eol) (esc) |
|
483 | 471 | archiving (sub1/sub2) [ ] 0/3\r (no-eol) (esc) |
|
484 | 472 | archiving (sub1/sub2) [=========> ] 1/3\r (no-eol) (esc) |
|
485 | 473 | archiving (sub1/sub2) [===================> ] 2/3\r (no-eol) (esc) |
|
486 | 474 | archiving (sub1/sub2) [==============================>] 3/3\r (no-eol) (esc) |
|
487 | 475 | \r (no-eol) (esc) |
|
488 | 476 | $ cat ../wdir/.hg_archival.txt |
|
489 | 477 | repo: 7f491f53a367861f47ee64a80eb997d1f341b77a |
|
490 | 478 | node: 9bb10eebee29dc0f1201dcf5977b811a540255fd |
|
491 | 479 | branch: default |
|
492 | 480 | latesttag: null |
|
493 | 481 | latesttagdistance: 4 |
|
494 | 482 | changessincelatesttag: 4 |
|
495 | 483 | |
|
496 | 484 | $ touch sub1/sub2/folder/bar |
|
497 | 485 | $ hg addremove sub1/sub2 |
|
498 | 486 | adding sub1/sub2/folder/bar |
|
499 | 487 | $ hg status -S |
|
500 | 488 | A sub1/sub2/folder/bar |
|
501 | 489 | ? foo/bar/abc |
|
502 | 490 | ? sub1/foo |
|
503 | 491 | $ hg update -Cq |
|
504 | 492 | $ hg addremove sub1 |
|
505 | 493 | adding sub1/sub2/folder/bar |
|
506 | 494 | adding sub1/foo |
|
507 | 495 | $ hg update -Cq |
|
508 | 496 | $ rm sub1/sub2/folder/test.txt |
|
509 | 497 | $ rm sub1/sub2/test.txt |
|
510 | 498 | $ hg ci -ASm "remove test.txt" |
|
511 | 499 | adding sub1/sub2/folder/bar |
|
512 | 500 | removing sub1/sub2/folder/test.txt |
|
513 | 501 | removing sub1/sub2/test.txt |
|
514 | 502 | adding sub1/foo |
|
515 | 503 | adding foo/bar/abc |
|
516 | 504 | committing subrepository sub1 |
|
517 | 505 | committing subrepository sub1/sub2 |
|
518 | 506 | |
|
519 | 507 | $ hg forget sub1/sub2/sub2 |
|
520 | 508 | $ echo x > sub1/sub2/x.txt |
|
521 | 509 | $ hg add sub1/sub2/x.txt |
|
522 | 510 | |
|
523 | 511 | 'hg files' sees uncommitted adds and removes in subrepos
|
524 | 512 | $ hg files -S |
|
525 | 513 | .hgsub |
|
526 | 514 | .hgsubstate |
|
527 | 515 | foo/bar/abc |
|
528 | 516 | main |
|
529 | 517 | sub1/.hgsub |
|
530 | 518 | sub1/.hgsubstate |
|
531 | 519 | sub1/foo |
|
532 | 520 | sub1/sub1 |
|
533 | 521 | sub1/sub2/folder/bar |
|
534 | 522 | sub1/sub2/x.txt |
|
535 | 523 | |
|
536 | 524 | $ hg files -S "set:eol('dos') or eol('unix') or size('<= 0')" |
|
537 | 525 | .hgsub |
|
538 | 526 | .hgsubstate |
|
539 | 527 | foo/bar/abc |
|
540 | 528 | main |
|
541 | 529 | sub1/.hgsub |
|
542 | 530 | sub1/.hgsubstate |
|
543 | 531 | sub1/foo |
|
544 | 532 | sub1/sub1 |
|
545 | 533 | sub1/sub2/folder/bar |
|
546 | 534 | sub1/sub2/x.txt |
|
547 | 535 | |
|
548 | 536 | $ hg files -r '.^' -S "set:eol('dos') or eol('unix')" |
|
549 | 537 | .hgsub |
|
550 | 538 | .hgsubstate |
|
551 | 539 | main |
|
552 | 540 | sub1/.hgsub |
|
553 | 541 | sub1/.hgsubstate |
|
554 | 542 | sub1/sub1 |
|
555 | 543 | sub1/sub2/folder/test.txt |
|
556 | 544 | sub1/sub2/sub2 |
|
557 | 545 | sub1/sub2/test.txt |
|
558 | 546 | |
|
559 | 547 | $ hg files sub1 |
|
560 | 548 | sub1/.hgsub |
|
561 | 549 | sub1/.hgsubstate |
|
562 | 550 | sub1/foo |
|
563 | 551 | sub1/sub1 |
|
564 | 552 | sub1/sub2/folder/bar |
|
565 | 553 | sub1/sub2/x.txt |
|
566 | 554 | |
|
567 | 555 | $ hg files sub1/sub2 |
|
568 | 556 | sub1/sub2/folder/bar |
|
569 | 557 | sub1/sub2/x.txt |
|
570 | 558 | |
|
571 | 559 | $ hg files |
|
572 | 560 | .hgsub |
|
573 | 561 | .hgsubstate |
|
574 | 562 | foo/bar/abc |
|
575 | 563 | main |
|
576 | 564 | |
|
577 | 565 | $ hg files -S -r '.^' sub1/sub2/folder |
|
578 | 566 | sub1/sub2/folder/test.txt |
|
579 | 567 | |
|
580 | 568 | $ hg files -S -r '.^' sub1/sub2/missing |
|
581 | 569 | sub1/sub2/missing: no such file in rev 78026e779ea6 |
|
582 | 570 | [1] |
|
583 | 571 | |
|
584 | 572 | $ hg files -r '.^' sub1/ |
|
585 | 573 | sub1/.hgsub |
|
586 | 574 | sub1/.hgsubstate |
|
587 | 575 | sub1/sub1 |
|
588 | 576 | sub1/sub2/folder/test.txt |
|
589 | 577 | sub1/sub2/sub2 |
|
590 | 578 | sub1/sub2/test.txt |
|
591 | 579 | |
|
592 | 580 | $ hg files -r '.^' sub1/sub2 |
|
593 | 581 | sub1/sub2/folder/test.txt |
|
594 | 582 | sub1/sub2/sub2 |
|
595 | 583 | sub1/sub2/test.txt |
|
596 | 584 | |
|
597 | 585 | $ hg rollback -q |
|
598 | 586 | $ hg up -Cq |
|
599 | 587 | |
|
600 | 588 | $ hg --config extensions.largefiles=! archive -S ../archive_all |
|
601 | 589 | \r (no-eol) (esc) |
|
602 | 590 | archiving [ ] 0/3\r (no-eol) (esc) |
|
603 | 591 | archiving [=============> ] 1/3\r (no-eol) (esc) |
|
604 | 592 | archiving [===========================> ] 2/3\r (no-eol) (esc) |
|
605 | 593 | archiving [==========================================>] 3/3\r (no-eol) (esc) |
|
606 | 594 | \r (no-eol) (esc) |
|
607 | 595 | \r (no-eol) (esc) |
|
608 | 596 | archiving (sub1) [ ] 0/3\r (no-eol) (esc) |
|
609 | 597 | archiving (sub1) [===========> ] 1/3\r (no-eol) (esc) |
|
610 | 598 | archiving (sub1) [=======================> ] 2/3\r (no-eol) (esc) |
|
611 | 599 | archiving (sub1) [===================================>] 3/3\r (no-eol) (esc) |
|
612 | 600 | \r (no-eol) (esc) |
|
613 | 601 | \r (no-eol) (esc) |
|
614 | 602 | archiving (sub1/sub2) [ ] 0/3\r (no-eol) (esc) |
|
615 | 603 | archiving (sub1/sub2) [=========> ] 1/3\r (no-eol) (esc) |
|
616 | 604 | archiving (sub1/sub2) [===================> ] 2/3\r (no-eol) (esc) |
|
617 | 605 | archiving (sub1/sub2) [==============================>] 3/3\r (no-eol) (esc) |
|
618 | 606 | \r (no-eol) (esc) |
|
619 | 607 | $ find ../archive_all | sort |
|
620 | 608 | ../archive_all |
|
621 | 609 | ../archive_all/.hg_archival.txt |
|
622 | 610 | ../archive_all/.hgsub |
|
623 | 611 | ../archive_all/.hgsubstate |
|
624 | 612 | ../archive_all/main |
|
625 | 613 | ../archive_all/sub1 |
|
626 | 614 | ../archive_all/sub1/.hgsub |
|
627 | 615 | ../archive_all/sub1/.hgsubstate |
|
628 | 616 | ../archive_all/sub1/sub1 |
|
629 | 617 | ../archive_all/sub1/sub2 |
|
630 | 618 | ../archive_all/sub1/sub2/folder |
|
631 | 619 | ../archive_all/sub1/sub2/folder/test.txt |
|
632 | 620 | ../archive_all/sub1/sub2/sub2 |
|
633 | 621 | ../archive_all/sub1/sub2/test.txt |
|
634 | 622 | |
|
635 | 623 | Check that archive -X works in deep subrepos |
|
636 | 624 | |
|
637 | 625 | $ hg --config extensions.largefiles=! archive -S -X '**test*' ../archive_exclude |
|
638 | 626 | \r (no-eol) (esc) |
|
639 | 627 | archiving [ ] 0/3\r (no-eol) (esc) |
|
640 | 628 | archiving [=============> ] 1/3\r (no-eol) (esc) |
|
641 | 629 | archiving [===========================> ] 2/3\r (no-eol) (esc) |
|
642 | 630 | archiving [==========================================>] 3/3\r (no-eol) (esc) |
|
643 | 631 | \r (no-eol) (esc) |
|
644 | 632 | \r (no-eol) (esc) |
|
645 | 633 | archiving (sub1) [ ] 0/3\r (no-eol) (esc) |
|
646 | 634 | archiving (sub1) [===========> ] 1/3\r (no-eol) (esc) |
|
647 | 635 | archiving (sub1) [=======================> ] 2/3\r (no-eol) (esc) |
|
648 | 636 | archiving (sub1) [===================================>] 3/3\r (no-eol) (esc) |
|
649 | 637 | \r (no-eol) (esc) |
|
650 | 638 | \r (no-eol) (esc) |
|
651 | 639 | archiving (sub1/sub2) [ ] 0/1\r (no-eol) (esc) |
|
652 | 640 | archiving (sub1/sub2) [==============================>] 1/1\r (no-eol) (esc) |
|
653 | 641 | \r (no-eol) (esc) |
|
654 | 642 | $ find ../archive_exclude | sort |
|
655 | 643 | ../archive_exclude |
|
656 | 644 | ../archive_exclude/.hg_archival.txt |
|
657 | 645 | ../archive_exclude/.hgsub |
|
658 | 646 | ../archive_exclude/.hgsubstate |
|
659 | 647 | ../archive_exclude/main |
|
660 | 648 | ../archive_exclude/sub1 |
|
661 | 649 | ../archive_exclude/sub1/.hgsub |
|
662 | 650 | ../archive_exclude/sub1/.hgsubstate |
|
663 | 651 | ../archive_exclude/sub1/sub1 |
|
664 | 652 | ../archive_exclude/sub1/sub2 |
|
665 | 653 | ../archive_exclude/sub1/sub2/sub2 |
|
666 | 654 | |
|
667 | 655 | $ hg --config extensions.largefiles=! archive -S -I '**test*' ../archive_include |
|
668 | 656 | \r (no-eol) (esc) |
|
669 | 657 | archiving (sub1) [ <=> ] 0\r (no-eol) (esc) |
|
670 | 658 | \r (no-eol) (esc) |
|
671 | 659 | \r (no-eol) (esc) |
|
672 | 660 | archiving (sub1/sub2) [ ] 0/2\r (no-eol) (esc) |
|
673 | 661 | archiving (sub1/sub2) [==============> ] 1/2\r (no-eol) (esc) |
|
674 | 662 | archiving (sub1/sub2) [==============================>] 2/2\r (no-eol) (esc) |
|
675 | 663 | \r (no-eol) (esc) |
|
676 | 664 | $ find ../archive_include | sort |
|
677 | 665 | ../archive_include |
|
678 | 666 | ../archive_include/sub1 |
|
679 | 667 | ../archive_include/sub1/sub2 |
|
680 | 668 | ../archive_include/sub1/sub2/folder |
|
681 | 669 | ../archive_include/sub1/sub2/folder/test.txt |
|
682 | 670 | ../archive_include/sub1/sub2/test.txt |
|
683 | 671 | |
|
684 | 672 | Check that deep archive works with largefiles (which overrides hgsubrepo impl) |
|
685 | 673 | This also tests the repo.ui regression in 43fb170a23bd, and that the subrepos

686 | 674 | of a largefiles subrepo are archived properly.
|
687 | 675 | Note that add --large through a subrepo currently adds the file as a normal file |
|
688 | 676 | |
|
689 | 677 | $ echo "large" > sub1/sub2/large.bin |
|
690 | 678 | $ hg --config extensions.largefiles= add --large -R sub1/sub2 sub1/sub2/large.bin |
|
691 | 679 | $ echo "large" > large.bin |
|
692 | 680 | $ hg --config extensions.largefiles= add --large large.bin |
|
693 | 681 | $ hg --config extensions.largefiles= ci -S -m "add large files" |
|
694 | 682 | committing subrepository sub1 |
|
695 | 683 | committing subrepository sub1/sub2 |
|
696 | 684 | |
|
697 | 685 | $ hg --config extensions.largefiles= archive -S ../archive_lf |
|
698 | 686 | $ find ../archive_lf | sort |
|
699 | 687 | ../archive_lf |
|
700 | 688 | ../archive_lf/.hg_archival.txt |
|
701 | 689 | ../archive_lf/.hgsub |
|
702 | 690 | ../archive_lf/.hgsubstate |
|
703 | 691 | ../archive_lf/large.bin |
|
704 | 692 | ../archive_lf/main |
|
705 | 693 | ../archive_lf/sub1 |
|
706 | 694 | ../archive_lf/sub1/.hgsub |
|
707 | 695 | ../archive_lf/sub1/.hgsubstate |
|
708 | 696 | ../archive_lf/sub1/sub1 |
|
709 | 697 | ../archive_lf/sub1/sub2 |
|
710 | 698 | ../archive_lf/sub1/sub2/folder |
|
711 | 699 | ../archive_lf/sub1/sub2/folder/test.txt |
|
712 | 700 | ../archive_lf/sub1/sub2/large.bin |
|
713 | 701 | ../archive_lf/sub1/sub2/sub2 |
|
714 | 702 | ../archive_lf/sub1/sub2/test.txt |
|
715 | 703 | $ rm -rf ../archive_lf |
|
716 | 704 | |
|
717 | 705 | Exclude large files from main and sub-sub repo |
|
718 | 706 | |
|
719 | 707 | $ hg --config extensions.largefiles= archive -S -X '**.bin' ../archive_lf |
|
720 | 708 | $ find ../archive_lf | sort |
|
721 | 709 | ../archive_lf |
|
722 | 710 | ../archive_lf/.hg_archival.txt |
|
723 | 711 | ../archive_lf/.hgsub |
|
724 | 712 | ../archive_lf/.hgsubstate |
|
725 | 713 | ../archive_lf/main |
|
726 | 714 | ../archive_lf/sub1 |
|
727 | 715 | ../archive_lf/sub1/.hgsub |
|
728 | 716 | ../archive_lf/sub1/.hgsubstate |
|
729 | 717 | ../archive_lf/sub1/sub1 |
|
730 | 718 | ../archive_lf/sub1/sub2 |
|
731 | 719 | ../archive_lf/sub1/sub2/folder |
|
732 | 720 | ../archive_lf/sub1/sub2/folder/test.txt |
|
733 | 721 | ../archive_lf/sub1/sub2/sub2 |
|
734 | 722 | ../archive_lf/sub1/sub2/test.txt |
|
735 | 723 | $ rm -rf ../archive_lf |
|
736 | 724 | |
|
737 | 725 | Exclude normal files from main and sub-sub repo |
|
738 | 726 | |
|
739 | 727 | $ hg --config extensions.largefiles= archive -S -X '**.txt' -p '.' ../archive_lf.tgz |
|
740 | 728 | $ tar -tzf ../archive_lf.tgz | sort |
|
741 | 729 | .hgsub |
|
742 | 730 | .hgsubstate |
|
743 | 731 | large.bin |
|
744 | 732 | main |
|
745 | 733 | sub1/.hgsub |
|
746 | 734 | sub1/.hgsubstate |
|
747 | 735 | sub1/sub1 |
|
748 | 736 | sub1/sub2/large.bin |
|
749 | 737 | sub1/sub2/sub2 |
|
750 | 738 | |
|
751 | 739 | Include normal files from within a largefiles subrepo |
|
752 | 740 | |
|
753 | 741 | $ hg --config extensions.largefiles= archive -S -I '**.txt' ../archive_lf |
|
754 | 742 | $ find ../archive_lf | sort |
|
755 | 743 | ../archive_lf |
|
756 | 744 | ../archive_lf/.hg_archival.txt |
|
757 | 745 | ../archive_lf/sub1 |
|
758 | 746 | ../archive_lf/sub1/sub2 |
|
759 | 747 | ../archive_lf/sub1/sub2/folder |
|
760 | 748 | ../archive_lf/sub1/sub2/folder/test.txt |
|
761 | 749 | ../archive_lf/sub1/sub2/test.txt |
|
762 | 750 | $ rm -rf ../archive_lf |
|
763 | 751 | |
|
764 | 752 | Include large files from within a largefiles subrepo |
|
765 | 753 | |
|
766 | 754 | $ hg --config extensions.largefiles= archive -S -I '**.bin' ../archive_lf |
|
767 | 755 | $ find ../archive_lf | sort |
|
768 | 756 | ../archive_lf |
|
769 | 757 | ../archive_lf/large.bin |
|
770 | 758 | ../archive_lf/sub1 |
|
771 | 759 | ../archive_lf/sub1/sub2 |
|
772 | 760 | ../archive_lf/sub1/sub2/large.bin |
|
773 | 761 | $ rm -rf ../archive_lf |
|
774 | 762 | |
|
775 | 763 | Find an exact largefile match in a largefiles subrepo |
|
776 | 764 | |
|
777 | 765 | $ hg --config extensions.largefiles= archive -S -I 'sub1/sub2/large.bin' ../archive_lf |
|
778 | 766 | $ find ../archive_lf | sort |
|
779 | 767 | ../archive_lf |
|
780 | 768 | ../archive_lf/sub1 |
|
781 | 769 | ../archive_lf/sub1/sub2 |
|
782 | 770 | ../archive_lf/sub1/sub2/large.bin |
|
783 | 771 | $ rm -rf ../archive_lf |
|
784 | 772 | |
|
785 | 773 | The local repo enables largefiles if a largefiles repo is cloned |
|
786 | 774 | |
|
787 | 775 | $ hg showconfig extensions |
|
788 | 776 | extensions.largefiles= |
|
789 | 777 | |
|
790 | 778 | $ hg --config extensions.largefiles= clone -qU . ../lfclone |
|
791 | 779 | $ grep largefiles ../lfclone/.hg/requires |
|
792 | 780 | largefiles |
|
793 | 781 | |
|
794 | 782 | Find an exact match to a standin (should archive nothing) |
|
795 | 783 | $ hg --config extensions.largefiles= archive -S -I 'sub/sub2/.hglf/large.bin' ../archive_lf |
|
796 | 784 | $ find ../archive_lf 2> /dev/null | sort |
|
797 | 785 | |
|
798 | 786 | $ cat >> $HGRCPATH <<EOF |
|
799 | 787 | > [extensions] |
|
800 | 788 | > largefiles= |
|
801 | 789 | > [largefiles] |
|
802 | 790 | > patterns=glob:**.dat |
|
803 | 791 | > EOF |
|
804 | 792 | |
|
805 | 793 | Test forget through a deep subrepo with the largefiles extension, both a |
|
806 | 794 | largefile and a normal file. Then a largefile that hasn't been committed yet. |
|
807 | 795 | $ touch sub1/sub2/untracked.txt |
|
808 | 796 | $ touch sub1/sub2/large.dat |
|
809 | 797 | $ hg forget sub1/sub2/large.bin sub1/sub2/test.txt sub1/sub2/untracked.txt |
|
810 | 798 | not removing sub1/sub2/untracked.txt: file is already untracked |
|
811 | 799 | [1] |
|
812 | 800 | $ hg add --large --dry-run -v sub1/sub2/untracked.txt |
|
813 | 801 | adding sub1/sub2/untracked.txt as a largefile |
|
814 | 802 | $ hg add --large -v sub1/sub2/untracked.txt |
|
815 | 803 | adding sub1/sub2/untracked.txt as a largefile |
|
816 | 804 | $ hg add --normal -v sub1/sub2/large.dat |
|
817 | 805 | adding sub1/sub2/large.dat |
|
818 | 806 | $ hg forget -v sub1/sub2/untracked.txt |
|
819 | 807 | removing sub1/sub2/untracked.txt |
|
820 | 808 | $ hg status -S |
|
821 | 809 | A sub1/sub2/large.dat |
|
822 | 810 | R sub1/sub2/large.bin |
|
823 | 811 | R sub1/sub2/test.txt |
|
824 | 812 | ? foo/bar/abc |
|
825 | 813 | ? sub1/sub2/untracked.txt |
|
826 | 814 | ? sub1/sub2/x.txt |
|
827 | 815 | $ hg add sub1/sub2 |
|
828 | 816 | |
|
829 | 817 | $ hg archive -S -r 'wdir()' ../wdir2 |
|
830 | 818 | $ diff -r . ../wdir2 | egrep -v '\.hg$|^Common subdirectories:' |
|
831 | 819 | Only in ../wdir2: .hg_archival.txt |
|
832 | 820 | Only in .: .hglf |
|
833 | 821 | Only in .: foo |
|
834 | 822 | Only in ./sub1/sub2: large.bin |
|
835 | 823 | Only in ./sub1/sub2: test.txt |
|
836 | 824 | Only in ./sub1/sub2: untracked.txt |
|
837 | 825 | Only in ./sub1/sub2: x.txt |
|
838 | 826 | $ find ../wdir2 -type f | sort |
|
839 | 827 | ../wdir2/.hg_archival.txt |
|
840 | 828 | ../wdir2/.hgsub |
|
841 | 829 | ../wdir2/.hgsubstate |
|
842 | 830 | ../wdir2/large.bin |
|
843 | 831 | ../wdir2/main |
|
844 | 832 | ../wdir2/sub1/.hgsub |
|
845 | 833 | ../wdir2/sub1/.hgsubstate |
|
846 | 834 | ../wdir2/sub1/sub1 |
|
847 | 835 | ../wdir2/sub1/sub2/folder/test.txt |
|
848 | 836 | ../wdir2/sub1/sub2/large.dat |
|
849 | 837 | ../wdir2/sub1/sub2/sub2 |
|
850 | 838 | $ hg status -S -mac -n | sort |
|
851 | 839 | .hgsub |
|
852 | 840 | .hgsubstate |
|
853 | 841 | large.bin |
|
854 | 842 | main |
|
855 | 843 | sub1/.hgsub |
|
856 | 844 | sub1/.hgsubstate |
|
857 | 845 | sub1/sub1 |
|
858 | 846 | sub1/sub2/folder/test.txt |
|
859 | 847 | sub1/sub2/large.dat |
|
860 | 848 | sub1/sub2/sub2 |
|
861 | 849 | |
|
862 | 850 | $ hg ci -Sqm 'forget testing' |
|
863 | 851 | |
|
864 | 852 | Test 'wdir()' modified file archiving with largefiles |
|
865 | 853 | $ echo 'mod' > main |
|
866 | 854 | $ echo 'mod' > large.bin |
|
867 | 855 | $ echo 'mod' > sub1/sub2/large.dat |
|
868 | 856 | $ hg archive -S -r 'wdir()' ../wdir3 |
|
869 | 857 | $ diff -r . ../wdir3 | egrep -v '\.hg$|^Common subdirectories' |
|
870 | 858 | Only in ../wdir3: .hg_archival.txt |
|
871 | 859 | Only in .: .hglf |
|
872 | 860 | Only in .: foo |
|
873 | 861 | Only in ./sub1/sub2: large.bin |
|
874 | 862 | Only in ./sub1/sub2: test.txt |
|
875 | 863 | Only in ./sub1/sub2: untracked.txt |
|
876 | 864 | Only in ./sub1/sub2: x.txt |
|
877 | 865 | $ find ../wdir3 -type f | sort |
|
878 | 866 | ../wdir3/.hg_archival.txt |
|
879 | 867 | ../wdir3/.hgsub |
|
880 | 868 | ../wdir3/.hgsubstate |
|
881 | 869 | ../wdir3/large.bin |
|
882 | 870 | ../wdir3/main |
|
883 | 871 | ../wdir3/sub1/.hgsub |
|
884 | 872 | ../wdir3/sub1/.hgsubstate |
|
885 | 873 | ../wdir3/sub1/sub1 |
|
886 | 874 | ../wdir3/sub1/sub2/folder/test.txt |
|
887 | 875 | ../wdir3/sub1/sub2/large.dat |
|
888 | 876 | ../wdir3/sub1/sub2/sub2 |
|
889 | 877 | $ hg up -Cq |
|
890 | 878 | |
|
891 | 879 | Test issue4330: commit a directory where only normal files have changed |
|
892 | 880 | $ touch foo/bar/large.dat |
|
893 | 881 | $ hg add --large foo/bar/large.dat |
|
894 | 882 | $ hg ci -m 'add foo/bar/large.dat' |
|
895 | 883 | $ touch a.txt |
|
896 | 884 | $ touch a.dat |
|
897 | 885 | $ hg add -v foo/bar/abc a.txt a.dat |
|
898 | 886 | adding a.dat as a largefile |
|
899 | 887 | adding a.txt |
|
900 | 888 | adding foo/bar/abc |
|
901 | 889 | $ hg ci -m 'dir commit with only normal file deltas' foo/bar |
|
902 | 890 | $ hg status |
|
903 | 891 | A a.dat |
|
904 | 892 | A a.txt |
|
905 | 893 | |
|
906 | 894 | Test a directory commit with a changed largefile and a changed normal file |
|
907 | 895 | $ echo changed > foo/bar/large.dat |
|
908 | 896 | $ echo changed > foo/bar/abc |
|
909 | 897 | $ hg ci -m 'dir commit with normal and lf file deltas' foo |
|
910 | 898 | $ hg status |
|
911 | 899 | A a.dat |
|
912 | 900 | A a.txt |
|
913 | 901 | |
|
914 | 902 | $ hg ci -m "add a.*" |
|
915 | 903 | $ hg mv a.dat b.dat |
|
916 | 904 | $ hg mv foo/bar/abc foo/bar/def |
|
917 | 905 | $ hg status -C |
|
918 | 906 | A b.dat |
|
919 | 907 | a.dat |
|
920 | 908 | A foo/bar/def |
|
921 | 909 | foo/bar/abc |
|
922 | 910 | R a.dat |
|
923 | 911 | R foo/bar/abc |
|
924 | 912 | |
|
925 | 913 | $ hg ci -m "move large and normal" |
|
926 | 914 | $ hg status -C --rev '.^' --rev . |
|
927 | 915 | A b.dat |
|
928 | 916 | a.dat |
|
929 | 917 | A foo/bar/def |
|
930 | 918 | foo/bar/abc |
|
931 | 919 | R a.dat |
|
932 | 920 | R foo/bar/abc |
|
933 | 921 | |
|
934 | 922 | |
|
935 | 923 | $ echo foo > main |
|
936 | 924 | $ hg ci -m "mod parent only" |
|
937 | 925 | $ hg init sub3 |
|
938 | 926 | $ echo "sub3 = sub3" >> .hgsub |
|
939 | 927 | $ echo xyz > sub3/a.txt |
|
940 | 928 | $ hg add sub3/a.txt |
|
941 | 929 | $ hg ci -Sm "add sub3" |
|
942 | 930 | committing subrepository sub3 |
|
943 | 931 | $ cat .hgsub | grep -v sub3 > .hgsub1 |
|
944 | 932 | $ mv .hgsub1 .hgsub |
|
945 | 933 | $ hg ci -m "remove sub3" |
|
946 | 934 | |
|
947 | 935 | $ hg log -r "subrepo()" --style compact |
|
948 | 936 | 0 7f491f53a367 1970-01-01 00:00 +0000 test |
|
949 | 937 | main import |
|
950 | 938 | |
|
951 | 939 | 1 ffe6649062fe 1970-01-01 00:00 +0000 test |
|
952 | 940 | deep nested modif should trigger a commit |
|
953 | 941 | |
|
954 | 942 | 2 9bb10eebee29 1970-01-01 00:00 +0000 test |
|
955 | 943 | add test.txt |
|
956 | 944 | |
|
957 | 945 | 3 7c64f035294f 1970-01-01 00:00 +0000 test |
|
958 | 946 | add large files |
|
959 | 947 | |
|
960 | 948 | 4 f734a59e2e35 1970-01-01 00:00 +0000 test |
|
961 | 949 | forget testing |
|
962 | 950 | |
|
963 | 951 | 11 9685a22af5db 1970-01-01 00:00 +0000 test |
|
964 | 952 | add sub3 |
|
965 | 953 | |
|
966 | 954 | 12[tip] 2e0485b475b9 1970-01-01 00:00 +0000 test |
|
967 | 955 | remove sub3 |
|
968 | 956 | |
|
969 | 957 | $ hg log -r "subrepo('sub3')" --style compact |
|
970 | 958 | 11 9685a22af5db 1970-01-01 00:00 +0000 test |
|
971 | 959 | add sub3 |
|
972 | 960 | |
|
973 | 961 | 12[tip] 2e0485b475b9 1970-01-01 00:00 +0000 test |
|
974 | 962 | remove sub3 |
|
975 | 963 | |
|
976 | 964 | $ hg log -r "subrepo('bogus')" --style compact |
|
977 | 965 | |
|
978 | 966 | |
|
979 | 967 | Test .hgsubstate in the R state |
|
980 | 968 | |
|
981 | 969 | $ hg rm .hgsub .hgsubstate |
|
982 | 970 | \r (no-eol) (esc) |
|
983 | 971 | deleting [=====================> ] 1/2\r (no-eol) (esc) |
|
984 | 972 | deleting [===========================================>] 2/2\r (no-eol) (esc) |
|
985 | 973 | \r (no-eol) (esc) |
|
986 | 974 | $ hg ci -m 'trash subrepo tracking' |
|
987 | 975 | |
|
988 | 976 | $ hg log -r "subrepo('re:sub\d+')" --style compact |
|
989 | 977 | 0 7f491f53a367 1970-01-01 00:00 +0000 test |
|
990 | 978 | main import |
|
991 | 979 | |
|
992 | 980 | 1 ffe6649062fe 1970-01-01 00:00 +0000 test |
|
993 | 981 | deep nested modif should trigger a commit |
|
994 | 982 | |
|
995 | 983 | 2 9bb10eebee29 1970-01-01 00:00 +0000 test |
|
996 | 984 | add test.txt |
|
997 | 985 | |
|
998 | 986 | 3 7c64f035294f 1970-01-01 00:00 +0000 test |
|
999 | 987 | add large files |
|
1000 | 988 | |
|
1001 | 989 | 4 f734a59e2e35 1970-01-01 00:00 +0000 test |
|
1002 | 990 | forget testing |
|
1003 | 991 | |
|
1004 | 992 | 11 9685a22af5db 1970-01-01 00:00 +0000 test |
|
1005 | 993 | add sub3 |
|
1006 | 994 | |
|
1007 | 995 | 12 2e0485b475b9 1970-01-01 00:00 +0000 test |
|
1008 | 996 | remove sub3 |
|
1009 | 997 | |
|
1010 | 998 | 13[tip] a68b2c361653 1970-01-01 00:00 +0000 test |
|
1011 | 999 | trash subrepo tracking |
|
1012 | 1000 | |
|
1013 | 1001 | |
|
1014 | 1002 | Restore the trashed subrepo tracking |
|
1015 | 1003 | |
|
1016 | 1004 | $ hg rollback -q |
|
1017 | 1005 | $ hg update -Cq . |
|
1018 | 1006 | |
|
1019 | 1007 | Interaction with extdiff, largefiles and subrepos |
|
1020 | 1008 | |
|
1021 | 1009 | $ hg --config extensions.extdiff= pdiff -S |
|
1022 | 1010 | |
|
1023 | 1011 | $ hg --config extensions.extdiff= pdiff -r '.^' -S |
|
1024 | 1012 | \r (no-eol) (esc) |
|
1025 | 1013 | archiving [ ] 0/2\r (no-eol) (esc) |
|
1026 | 1014 | archiving [====================> ] 1/2\r (no-eol) (esc) |
|
1027 | 1015 | archiving [==========================================>] 2/2\r (no-eol) (esc) |
|
1028 | 1016 | \r (no-eol) (esc) |
|
1029 | 1017 | \r (no-eol) (esc) |
|
1030 | 1018 | archiving (sub1) [ <=> ] 0\r (no-eol) (esc) |
|
1031 | 1019 | \r (no-eol) (esc) |
|
1032 | 1020 | \r (no-eol) (esc) |
|
1033 | 1021 | archiving (sub1/sub2) [ <=> ] 0\r (no-eol) (esc) |
|
1034 | 1022 | \r (no-eol) (esc) |
|
1035 | 1023 | \r (no-eol) (esc) |
|
1036 | 1024 | archiving (sub3) [ <=> ] 0\r (no-eol) (esc) |
|
1037 | 1025 | \r (no-eol) (esc) |
|
1038 | 1026 | \r (no-eol) (esc) |
|
1039 | 1027 | archiving [ ] 0/2\r (no-eol) (esc) |
|
1040 | 1028 | archiving [====================> ] 1/2\r (no-eol) (esc) |
|
1041 | 1029 | archiving [==========================================>] 2/2\r (no-eol) (esc) |
|
1042 | 1030 | \r (no-eol) (esc) |
|
1043 | 1031 | \r (no-eol) (esc) |
|
1044 | 1032 | archiving (sub1) [ <=> ] 0\r (no-eol) (esc) |
|
1045 | 1033 | \r (no-eol) (esc) |
|
1046 | 1034 | \r (no-eol) (esc) |
|
1047 | 1035 | archiving (sub1/sub2) [ <=> ] 0\r (no-eol) (esc) |
|
1048 | 1036 | \r (no-eol) (esc) |
|
1049 | 1037 | diff -Nru cloned.*/.hgsub cloned/.hgsub (glob) |
|
1050 | 1038 | --- cloned.*/.hgsub * (glob) |
|
1051 | 1039 | +++ cloned/.hgsub * (glob) |
|
1052 | 1040 | @@ -1,2 +1* @@ (glob) |
|
1053 | 1041 | sub1 = ../sub1 |
|
1054 | 1042 | -sub3 = sub3 |
|
1055 | 1043 | diff -Nru cloned.*/.hgsubstate cloned/.hgsubstate (glob) |
|
1056 | 1044 | --- cloned.*/.hgsubstate * (glob) |
|
1057 | 1045 | +++ cloned/.hgsubstate * (glob) |
|
1058 | 1046 | @@ -1,2 +1* @@ (glob) |
|
1059 | 1047 | 7a36fa02b66e61f27f3d4a822809f159479b8ab2 sub1 |
|
1060 | 1048 | -b1a26de6f2a045a9f079323693614ee322f1ff7e sub3 |
|
1061 | 1049 | [1] |
|
1062 | 1050 | |
|
1063 | 1051 | $ hg --config extensions.extdiff= pdiff -r 0 -r '.^' -S |
|
1064 | 1052 | \r (no-eol) (esc) |
|
1065 | 1053 | archiving [ ] 0/3\r (no-eol) (esc) |
|
1066 | 1054 | archiving [=============> ] 1/3\r (no-eol) (esc) |
|
1067 | 1055 | archiving [===========================> ] 2/3\r (no-eol) (esc) |
|
1068 | 1056 | archiving [==========================================>] 3/3\r (no-eol) (esc) |
|
1069 | 1057 | \r (no-eol) (esc) |
|
1070 | 1058 | \r (no-eol) (esc) |
|
1071 | 1059 | archiving (sub1) [ ] 0/1\r (no-eol) (esc) |
|
1072 | 1060 | archiving (sub1) [===================================>] 1/1\r (no-eol) (esc) |
|
1073 | 1061 | \r (no-eol) (esc) |
|
1074 | 1062 | \r (no-eol) (esc) |
|
1075 | 1063 | archiving (sub1/sub2) [ ] 0/1\r (no-eol) (esc) |
|
1076 | 1064 | archiving (sub1/sub2) [==============================>] 1/1\r (no-eol) (esc) |
|
1077 | 1065 | \r (no-eol) (esc) |
|
1078 | 1066 | \r (no-eol) (esc) |
|
1079 | 1067 | archiving [ ] 0/8\r (no-eol) (esc) |
|
1080 | 1068 | archiving [====> ] 1/8\r (no-eol) (esc) |
|
1081 | 1069 | archiving [=========> ] 2/8\r (no-eol) (esc) |
|
1082 | 1070 | archiving [===============> ] 3/8\r (no-eol) (esc) |
|
1083 | 1071 | archiving [====================> ] 4/8\r (no-eol) (esc) |
|
1084 | 1072 | archiving [=========================> ] 5/8\r (no-eol) (esc) |
|
1085 | 1073 | archiving [===============================> ] 6/8\r (no-eol) (esc) |
|
1086 | 1074 | archiving [====================================> ] 7/8\r (no-eol) (esc) |
|
1087 | 1075 | archiving [==========================================>] 8/8\r (no-eol) (esc) |
|
1088 | 1076 | \r (no-eol) (esc) |
|
1089 | 1077 | \r (no-eol) (esc) |
|
1090 | 1078 | archiving (sub1) [ ] 0/1\r (no-eol) (esc) |
|
1091 | 1079 | archiving (sub1) [===================================>] 1/1\r (no-eol) (esc) |
|
1092 | 1080 | \r (no-eol) (esc) |
|
1093 | 1081 | \r (no-eol) (esc) |
|
1094 | 1082 | archiving (sub1/sub2) [ ] 0/3\r (no-eol) (esc) |
|
1095 | 1083 | archiving (sub1/sub2) [=========> ] 1/3\r (no-eol) (esc) |
|
1096 | 1084 | archiving (sub1/sub2) [===================> ] 2/3\r (no-eol) (esc) |
|
1097 | 1085 | archiving (sub1/sub2) [==============================>] 3/3\r (no-eol) (esc) |
|
1098 | 1086 | \r (no-eol) (esc) |
|
1099 | 1087 | \r (no-eol) (esc) |
|
1100 | 1088 | archiving (sub3) [ ] 0/1\r (no-eol) (esc) |
|
1101 | 1089 | archiving (sub3) [===================================>] 1/1\r (no-eol) (esc) |
|
1102 | 1090 | \r (no-eol) (esc) |
|
1103 | 1091 | diff -Nru cloned.*/.hglf/b.dat cloned.*/.hglf/b.dat (glob) |
|
1104 | 1092 | --- cloned.*/.hglf/b.dat * (glob) |
|
1105 | 1093 | +++ cloned.*/.hglf/b.dat * (glob) |
|
1106 | 1094 | @@ -*,0 +1* @@ (glob) |
|
1107 | 1095 | +da39a3ee5e6b4b0d3255bfef95601890afd80709 |
|
1108 | 1096 | diff -Nru cloned.*/.hglf/foo/bar/large.dat cloned.*/.hglf/foo/bar/large.dat (glob) |
|
1109 | 1097 | --- cloned.*/.hglf/foo/bar/large.dat * (glob) |
|
1110 | 1098 | +++ cloned.*/.hglf/foo/bar/large.dat * (glob) |
|
1111 | 1099 | @@ -*,0 +1* @@ (glob) |
|
1112 | 1100 | +2f6933b5ee0f5fdd823d9717d8729f3c2523811b |
|
1113 | 1101 | diff -Nru cloned.*/.hglf/large.bin cloned.*/.hglf/large.bin (glob) |
|
1114 | 1102 | --- cloned.*/.hglf/large.bin * (glob) |
|
1115 | 1103 | +++ cloned.*/.hglf/large.bin * (glob) |
|
1116 | 1104 | @@ -*,0 +1* @@ (glob) |
|
1117 | 1105 | +7f7097b041ccf68cc5561e9600da4655d21c6d18 |
|
1118 | 1106 | diff -Nru cloned.*/.hgsub cloned.*/.hgsub (glob) |
|
1119 | 1107 | --- cloned.*/.hgsub * (glob) |
|
1120 | 1108 | +++ cloned.*/.hgsub * (glob) |
|
1121 | 1109 | @@ -1* +1,2 @@ (glob) |
|
1122 | 1110 | sub1 = ../sub1 |
|
1123 | 1111 | +sub3 = sub3 |
|
1124 | 1112 | diff -Nru cloned.*/.hgsubstate cloned.*/.hgsubstate (glob) |
|
1125 | 1113 | --- cloned.*/.hgsubstate * (glob) |
|
1126 | 1114 | +++ cloned.*/.hgsubstate * (glob) |
|
1127 | 1115 | @@ -1* +1,2 @@ (glob) |
|
1128 | 1116 | -fc3b4ce2696f7741438c79207583768f2ce6b0dd sub1 |
|
1129 | 1117 | +7a36fa02b66e61f27f3d4a822809f159479b8ab2 sub1 |
|
1130 | 1118 | +b1a26de6f2a045a9f079323693614ee322f1ff7e sub3 |
|
1131 | 1119 | diff -Nru cloned.*/foo/bar/def cloned.*/foo/bar/def (glob) |
|
1132 | 1120 | --- cloned.*/foo/bar/def * (glob) |
|
1133 | 1121 | +++ cloned.*/foo/bar/def * (glob) |
|
1134 | 1122 | @@ -*,0 +1* @@ (glob) |
|
1135 | 1123 | +changed |
|
1136 | 1124 | diff -Nru cloned.*/main cloned.*/main (glob) |
|
1137 | 1125 | --- cloned.*/main * (glob) |
|
1138 | 1126 | +++ cloned.*/main * (glob) |
|
1139 | 1127 | @@ -1* +1* @@ (glob) |
|
1140 | 1128 | -main |
|
1141 | 1129 | +foo |
|
1142 | 1130 | diff -Nru cloned.*/sub1/.hgsubstate cloned.*/sub1/.hgsubstate (glob) |
|
1143 | 1131 | --- cloned.*/sub1/.hgsubstate * (glob) |
|
1144 | 1132 | +++ cloned.*/sub1/.hgsubstate * (glob) |
|
1145 | 1133 | @@ -1* +1* @@ (glob) |
|
1146 | 1134 | -c57a0840e3badd667ef3c3ef65471609acb2ba3c sub2 |
|
1147 | 1135 | +c77908c81ccea3794a896c79e98b0e004aee2e9e sub2 |
|
1148 | 1136 | diff -Nru cloned.*/sub1/sub2/folder/test.txt cloned.*/sub1/sub2/folder/test.txt (glob) |
|
1149 | 1137 | --- cloned.*/sub1/sub2/folder/test.txt * (glob) |
|
1150 | 1138 | +++ cloned.*/sub1/sub2/folder/test.txt * (glob) |
|
1151 | 1139 | @@ -*,0 +1* @@ (glob) |
|
1152 | 1140 | +subfolder |
|
1153 | 1141 | diff -Nru cloned.*/sub1/sub2/sub2 cloned.*/sub1/sub2/sub2 (glob) |
|
1154 | 1142 | --- cloned.*/sub1/sub2/sub2 * (glob) |
|
1155 | 1143 | +++ cloned.*/sub1/sub2/sub2 * (glob) |
|
1156 | 1144 | @@ -1* +1* @@ (glob) |
|
1157 | 1145 | -sub2 |
|
1158 | 1146 | +modified |
|
1159 | 1147 | diff -Nru cloned.*/sub3/a.txt cloned.*/sub3/a.txt (glob) |
|
1160 | 1148 | --- cloned.*/sub3/a.txt * (glob) |
|
1161 | 1149 | +++ cloned.*/sub3/a.txt * (glob) |
|
1162 | 1150 | @@ -*,0 +1* @@ (glob) |
|
1163 | 1151 | +xyz |
|
1164 | 1152 | [1] |
|
1165 | 1153 | |
|
1166 | 1154 | $ echo mod > sub1/sub2/sub2 |
|
1167 | 1155 | $ hg --config extensions.extdiff= pdiff -S |
|
1168 | 1156 | \r (no-eol) (esc) |
|
1169 | 1157 | archiving (sub1) [ <=> ] 0\r (no-eol) (esc) |
|
1170 | 1158 | \r (no-eol) (esc) |
|
1171 | 1159 | \r (no-eol) (esc) |
|
1172 | 1160 | archiving (sub1/sub2) [ ] 0/1\r (no-eol) (esc) |
|
1173 | 1161 | archiving (sub1/sub2) [==============================>] 1/1\r (no-eol) (esc) |
|
1174 | 1162 | \r (no-eol) (esc) |
|
1175 | 1163 | --- */cloned.*/sub1/sub2/sub2 * (glob) |
|
1176 | 1164 | +++ */cloned/sub1/sub2/sub2 * (glob) |
|
1177 | 1165 | @@ -1* +1* @@ (glob) |
|
1178 | 1166 | -modified |
|
1179 | 1167 | +mod |
|
1180 | 1168 | [1] |
|
1181 | 1169 | |
|
1182 | 1170 | $ cd .. |
@@ -1,715 +1,700 b'' | |||
|
1 | 1 | Create test repository: |
|
2 | 2 | |
|
3 | 3 | $ hg init repo |
|
4 | 4 | $ cd repo |
|
5 | 5 | $ echo x1 > x.txt |
|
6 | 6 | |
|
7 | 7 | $ hg init foo |
|
8 | 8 | $ cd foo |
|
9 | 9 | $ echo y1 > y.txt |
|
10 | 10 | |
|
11 | 11 | $ hg init bar |
|
12 | 12 | $ cd bar |
|
13 | 13 | $ echo z1 > z.txt |
|
14 | 14 | |
|
15 | 15 | $ cd .. |
|
16 | 16 | $ echo 'bar = bar' > .hgsub |
|
17 | 17 | |
|
18 | 18 | $ cd .. |
|
19 | 19 | $ echo 'foo = foo' > .hgsub |
|
20 | 20 | |
|
21 | 21 | Add files --- .hgsub files must go first to trigger subrepos: |
|
22 | 22 | |
|
23 | 23 | $ hg add -S .hgsub |
|
24 | 24 | $ hg add -S foo/.hgsub |
|
25 | 25 | $ hg add -S foo/bar |
|
26 | 26 | adding foo/bar/z.txt |
|
27 | 27 | $ hg add -S |
|
28 | 28 | adding x.txt |
|
29 | 29 | adding foo/y.txt |
|
30 | 30 | |
|
31 | 31 | Test recursive status without committing anything: |
|
32 | 32 | |
|
33 | 33 | $ hg status -S |
|
34 | 34 | A .hgsub |
|
35 | 35 | A foo/.hgsub |
|
36 | 36 | A foo/bar/z.txt |
|
37 | 37 | A foo/y.txt |
|
38 | 38 | A x.txt |
|
39 | 39 | |
|
40 | 40 | Test recursive diff without committing anything: |
|
41 | 41 | |
|
42 | 42 | $ hg diff --nodates -S foo |
|
43 | 43 | diff -r 000000000000 foo/.hgsub |
|
44 | 44 | --- /dev/null |
|
45 | 45 | +++ b/foo/.hgsub |
|
46 | 46 | @@ -0,0 +1,1 @@ |
|
47 | 47 | +bar = bar |
|
48 | 48 | diff -r 000000000000 foo/y.txt |
|
49 | 49 | --- /dev/null |
|
50 | 50 | +++ b/foo/y.txt |
|
51 | 51 | @@ -0,0 +1,1 @@ |
|
52 | 52 | +y1 |
|
53 | 53 | diff -r 000000000000 foo/bar/z.txt |
|
54 | 54 | --- /dev/null |
|
55 | 55 | +++ b/foo/bar/z.txt |
|
56 | 56 | @@ -0,0 +1,1 @@ |
|
57 | 57 | +z1 |
|
58 | 58 | |
|
59 | 59 | Commits: |
|
60 | 60 | |
|
61 | 61 | $ hg commit -m fails |
|
62 | 62 | abort: uncommitted changes in subrepository "foo" |
|
63 | 63 | (use --subrepos for recursive commit) |
|
64 | 64 | [255] |
|
65 | 65 | |
|
66 | 66 | The --subrepos flag overwrites the config setting:
|
67 | 67 | |
|
68 | 68 | $ hg commit -m 0-0-0 --config ui.commitsubrepos=No --subrepos |
|
69 | 69 | committing subrepository foo |
|
70 | 70 | committing subrepository foo/bar |
|
71 | 71 | |
|
72 | 72 | $ cd foo |
|
73 | 73 | $ echo y2 >> y.txt |
|
74 | 74 | $ hg commit -m 0-1-0 |
|
75 | 75 | |
|
76 | 76 | $ cd bar |
|
77 | 77 | $ echo z2 >> z.txt |
|
78 | 78 | $ hg commit -m 0-1-1 |
|
79 | 79 | |
|
80 | 80 | $ cd .. |
|
81 | 81 | $ hg commit -m 0-2-1 |
|
82 | 82 | |
|
83 | 83 | $ cd .. |
|
84 | 84 | $ hg commit -m 1-2-1 |
|
85 | 85 | |
|
86 | 86 | Change working directory: |
|
87 | 87 | |
|
88 | 88 | $ echo y3 >> foo/y.txt |
|
89 | 89 | $ echo z3 >> foo/bar/z.txt |
|
90 | 90 | $ hg status -S |
|
91 | 91 | M foo/bar/z.txt |
|
92 | 92 | M foo/y.txt |
|
93 | 93 | $ hg diff --nodates -S |
|
94 | 94 | diff -r d254738c5f5e foo/y.txt |
|
95 | 95 | --- a/foo/y.txt |
|
96 | 96 | +++ b/foo/y.txt |
|
97 | 97 | @@ -1,2 +1,3 @@ |
|
98 | 98 | y1 |
|
99 | 99 | y2 |
|
100 | 100 | +y3 |
|
101 | 101 | diff -r 9647f22de499 foo/bar/z.txt |
|
102 | 102 | --- a/foo/bar/z.txt |
|
103 | 103 | +++ b/foo/bar/z.txt |
|
104 | 104 | @@ -1,2 +1,3 @@ |
|
105 | 105 | z1 |
|
106 | 106 | z2 |
|
107 | 107 | +z3 |
|
108 | 108 | |
|
109 | 109 | Status call crossing repository boundaries: |
|
110 | 110 | |
|
111 | 111 | $ hg status -S foo/bar/z.txt |
|
112 | 112 | M foo/bar/z.txt |
|
113 | 113 | $ hg status -S -I 'foo/?.txt' |
|
114 | 114 | M foo/y.txt |
|
115 | 115 | $ hg status -S -I '**/?.txt' |
|
116 | 116 | M foo/bar/z.txt |
|
117 | 117 | M foo/y.txt |
|
118 | 118 | $ hg diff --nodates -S -I '**/?.txt' |
|
119 | 119 | diff -r d254738c5f5e foo/y.txt |
|
120 | 120 | --- a/foo/y.txt |
|
121 | 121 | +++ b/foo/y.txt |
|
122 | 122 | @@ -1,2 +1,3 @@ |
|
123 | 123 | y1 |
|
124 | 124 | y2 |
|
125 | 125 | +y3 |
|
126 | 126 | diff -r 9647f22de499 foo/bar/z.txt |
|
127 | 127 | --- a/foo/bar/z.txt |
|
128 | 128 | +++ b/foo/bar/z.txt |
|
129 | 129 | @@ -1,2 +1,3 @@ |
|
130 | 130 | z1 |
|
131 | 131 | z2 |
|
132 | 132 | +z3 |
|
133 | 133 | |
|
134 | 134 | Status from within a subdirectory: |
|
135 | 135 | |
|
136 | 136 | $ mkdir dir |
|
137 | 137 | $ cd dir |
|
138 | 138 | $ echo a1 > a.txt |
|
139 | 139 | $ hg status -S |
|
140 | 140 | M foo/bar/z.txt |
|
141 | 141 | M foo/y.txt |
|
142 | 142 | ? dir/a.txt |
|
143 | 143 | $ hg diff --nodates -S |
|
144 | 144 | diff -r d254738c5f5e foo/y.txt |
|
145 | 145 | --- a/foo/y.txt |
|
146 | 146 | +++ b/foo/y.txt |
|
147 | 147 | @@ -1,2 +1,3 @@ |
|
148 | 148 | y1 |
|
149 | 149 | y2 |
|
150 | 150 | +y3 |
|
151 | 151 | diff -r 9647f22de499 foo/bar/z.txt |
|
152 | 152 | --- a/foo/bar/z.txt |
|
153 | 153 | +++ b/foo/bar/z.txt |
|
154 | 154 | @@ -1,2 +1,3 @@ |
|
155 | 155 | z1 |
|
156 | 156 | z2 |
|
157 | 157 | +z3 |
|
158 | 158 | |
|
159 | 159 | Status with relative path: |
|
160 | 160 | |
|
161 | 161 | $ hg status -S .. |
|
162 | 162 | M ../foo/bar/z.txt |
|
163 | 163 | M ../foo/y.txt |
|
164 | 164 | ? a.txt |
|
165 | 165 | |
|
166 | 166 | XXX: filtering lfilesrepo.status() in 3.3-rc causes these files to be listed as |
|
167 | 167 | added instead of modified. |
|
168 | 168 | $ hg status -S .. --config extensions.largefiles= |
|
169 | 169 | M ../foo/bar/z.txt |
|
170 | 170 | M ../foo/y.txt |
|
171 | 171 | ? a.txt |
|
172 | 172 | |
|
173 | 173 | $ hg diff --nodates -S .. |
|
174 | 174 | diff -r d254738c5f5e foo/y.txt |
|
175 | 175 | --- a/foo/y.txt |
|
176 | 176 | +++ b/foo/y.txt |
|
177 | 177 | @@ -1,2 +1,3 @@ |
|
178 | 178 | y1 |
|
179 | 179 | y2 |
|
180 | 180 | +y3 |
|
181 | 181 | diff -r 9647f22de499 foo/bar/z.txt |
|
182 | 182 | --- a/foo/bar/z.txt |
|
183 | 183 | +++ b/foo/bar/z.txt |
|
184 | 184 | @@ -1,2 +1,3 @@ |
|
185 | 185 | z1 |
|
186 | 186 | z2 |
|
187 | 187 | +z3 |
|
188 | 188 | $ cd .. |
|
189 | 189 | |
|
190 | 190 | Cleanup and final commit: |
|
191 | 191 | |
|
192 | 192 | $ rm -r dir |
|
193 | 193 | $ hg commit --subrepos -m 2-3-2 |
|
194 | 194 | committing subrepository foo |
|
195 | 195 | committing subrepository foo/bar |
|
196 | 196 | |
|
197 | 197 | Test explicit path commands within subrepos: add/forget |
|
198 | 198 | $ echo z1 > foo/bar/z2.txt |
|
199 | 199 | $ hg status -S |
|
200 | 200 | ? foo/bar/z2.txt |
|
201 | 201 | $ hg add foo/bar/z2.txt |
|
202 | 202 | $ hg status -S |
|
203 | 203 | A foo/bar/z2.txt |
|
204 | 204 | $ hg forget foo/bar/z2.txt |
|
205 | 205 | $ hg status -S |
|
206 | 206 | ? foo/bar/z2.txt |
|
207 | 207 | $ hg forget foo/bar/z2.txt |
|
208 | 208 | not removing foo/bar/z2.txt: file is already untracked |
|
209 | 209 | [1] |
|
210 | 210 | $ hg status -S |
|
211 | 211 | ? foo/bar/z2.txt |
|
212 | 212 | $ rm foo/bar/z2.txt |
|
213 | 213 | |
|
214 | 214 | Log with the relationships between repo and its subrepo: |
|
215 | 215 | |
|
216 | 216 | $ hg log --template '{rev}:{node|short} {desc}\n' |
|
217 | 217 | 2:1326fa26d0c0 2-3-2 |
|
218 | 218 | 1:4b3c9ff4f66b 1-2-1 |
|
219 | 219 | 0:23376cbba0d8 0-0-0 |
|
220 | 220 | |
|
221 | 221 | $ hg -R foo log --template '{rev}:{node|short} {desc}\n' |
|
222 | 222 | 3:65903cebad86 2-3-2 |
|
223 | 223 | 2:d254738c5f5e 0-2-1 |
|
224 | 224 | 1:8629ce7dcc39 0-1-0 |
|
225 | 225 | 0:af048e97ade2 0-0-0 |
|
226 | 226 | |
|
227 | 227 | $ hg -R foo/bar log --template '{rev}:{node|short} {desc}\n' |
|
228 | 228 | 2:31ecbdafd357 2-3-2 |
|
229 | 229 | 1:9647f22de499 0-1-1 |
|
230 | 230 | 0:4904098473f9 0-0-0 |
|
231 | 231 | |
|
232 | 232 | Status between revisions: |
|
233 | 233 | |
|
234 | 234 | $ hg status -S |
|
235 | 235 | $ hg status -S --rev 0:1 |
|
236 | 236 | M .hgsubstate |
|
237 | 237 | M foo/.hgsubstate |
|
238 | 238 | M foo/bar/z.txt |
|
239 | 239 | M foo/y.txt |
|
240 | 240 | $ hg diff --nodates -S -I '**/?.txt' --rev 0:1 |
|
241 | 241 | diff -r af048e97ade2 -r d254738c5f5e foo/y.txt |
|
242 | 242 | --- a/foo/y.txt |
|
243 | 243 | +++ b/foo/y.txt |
|
244 | 244 | @@ -1,1 +1,2 @@ |
|
245 | 245 | y1 |
|
246 | 246 | +y2 |
|
247 | 247 | diff -r 4904098473f9 -r 9647f22de499 foo/bar/z.txt |
|
248 | 248 | --- a/foo/bar/z.txt |
|
249 | 249 | +++ b/foo/bar/z.txt |
|
250 | 250 | @@ -1,1 +1,2 @@ |
|
251 | 251 | z1 |
|
252 | 252 | +z2 |
|
253 | 253 | |
|
254 | 254 | #if serve |
|
255 | 255 | $ cd .. |
|
256 | 256 | $ hg serve -R repo --debug -S -p $HGPORT -d --pid-file=hg1.pid -E error.log -A access.log |
|
257 | 257 | adding = $TESTTMP/repo |
|
258 | 258 | adding foo = $TESTTMP/repo/foo |
|
259 | 259 | adding foo/bar = $TESTTMP/repo/foo/bar |
|
260 | 260 | listening at http://*:$HGPORT/ (bound to *:$HGPORT) (glob) (?) |
|
261 | 261 | adding = $TESTTMP/repo (?) |
|
262 | 262 | adding foo = $TESTTMP/repo/foo (?) |
|
263 | 263 | adding foo/bar = $TESTTMP/repo/foo/bar (?) |
|
264 | 264 | $ cat hg1.pid >> $DAEMON_PIDS |
|
265 | 265 | |
|
266 | 266 | $ hg clone http://localhost:$HGPORT clone --config progress.disable=True |
|
267 | 267 | requesting all changes |
|
268 | 268 | adding changesets |
|
269 | 269 | adding manifests |
|
270 | 270 | adding file changes |
|
271 | 271 | added 3 changesets with 5 changes to 3 files |
|
272 | 272 | new changesets 23376cbba0d8:1326fa26d0c0 |
|
273 | 273 | updating to branch default |
|
274 | 274 | cloning subrepo foo from http://localhost:$HGPORT/foo |
|
275 | 275 | requesting all changes |
|
276 | 276 | adding changesets |
|
277 | 277 | adding manifests |
|
278 | 278 | adding file changes |
|
279 | 279 | added 4 changesets with 7 changes to 3 files |
|
280 | 280 | new changesets af048e97ade2:65903cebad86 |
|
281 | 281 | cloning subrepo foo/bar from http://localhost:$HGPORT/foo/bar |
|
282 | 282 | requesting all changes |
|
283 | 283 | adding changesets |
|
284 | 284 | adding manifests |
|
285 | 285 | adding file changes |
|
286 | 286 | added 3 changesets with 3 changes to 1 files |
|
287 | 287 | new changesets 4904098473f9:31ecbdafd357 |
|
288 | 288 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
289 | 289 | |
|
290 | 290 | $ cat clone/foo/bar/z.txt |
|
291 | 291 | z1 |
|
292 | 292 | z2 |
|
293 | 293 | z3 |
|
294 | 294 | |
|
295 | 295 | Clone pooling from a remote URL will share the top level repo and the subrepos, |
|
296 | 296 | even if they are referenced by remote URL. |
|
297 | 297 | |
|
298 | 298 | $ hg --config extensions.share= --config share.pool=$TESTTMP/pool \ |
|
299 | 299 | > clone http://localhost:$HGPORT shared |
|
300 | 300 | (sharing from new pooled repository 23376cbba0d87c15906bb3652584927c140907bf) |
|
301 | 301 | requesting all changes |
|
302 | 302 | adding changesets |
|
303 | 303 | adding manifests |
|
304 | 304 | adding file changes |
|
305 | 305 | added 3 changesets with 5 changes to 3 files |
|
306 | 306 | new changesets 23376cbba0d8:1326fa26d0c0 |
|
307 | 307 | searching for changes |
|
308 | 308 | no changes found |
|
309 | 309 | updating working directory |
|
310 | 310 | cloning subrepo foo from http://localhost:$HGPORT/foo |
|
311 | 311 | (sharing from new pooled repository af048e97ade2e236f754f05d07013e586af0f8bf) |
|
312 | 312 | requesting all changes |
|
313 | 313 | adding changesets |
|
314 | 314 | adding manifests |
|
315 | 315 | adding file changes |
|
316 | 316 | added 4 changesets with 7 changes to 3 files |
|
317 | 317 | new changesets af048e97ade2:65903cebad86 |
|
318 | 318 | searching for changes |
|
319 | 319 | no changes found |
|
320 | 320 | cloning subrepo foo/bar from http://localhost:$HGPORT/foo/bar |
|
321 | 321 | (sharing from new pooled repository 4904098473f96c900fec436dad267edd4da59fad) |
|
322 | 322 | requesting all changes |
|
323 | 323 | adding changesets |
|
324 | 324 | adding manifests |
|
325 | 325 | adding file changes |
|
326 | 326 | added 3 changesets with 3 changes to 1 files |
|
327 | 327 | new changesets 4904098473f9:31ecbdafd357 |
|
328 | 328 | searching for changes |
|
329 | 329 | no changes found |
|
330 | 330 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
331 | 331 | |
|
332 | 332 | $ cat access.log |
|
333 | 333 | * "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) |
|
334 | 334 | * "GET /?cmd=batch HTTP/1.1" 200 - * (glob) |
|
335 | 335 | * "GET /?cmd=getbundle HTTP/1.1" 200 - * (glob) |
|
336 | 336 | * "GET /foo?cmd=capabilities HTTP/1.1" 200 - (glob) |
|
337 | 337 | * "GET /foo?cmd=batch HTTP/1.1" 200 - * (glob) |
|
338 | 338 | * "GET /foo?cmd=getbundle HTTP/1.1" 200 - * (glob) |
|
339 | 339 | * "GET /foo/bar?cmd=capabilities HTTP/1.1" 200 - (glob) |
|
340 | 340 | * "GET /foo/bar?cmd=batch HTTP/1.1" 200 - * (glob) |
|
341 | 341 | * "GET /foo/bar?cmd=getbundle HTTP/1.1" 200 - * (glob) |
|
342 | 342 | $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) |
|
343 | 343 | $LOCALIP - - [$LOGDATE$] "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=0 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
344 | 344 | $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) |
|
345 | 345 | $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
346 | 346 | $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=1326fa26d0c00d2146c63b56bb6a45149d7325ac&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
347 | 347 | $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D1326fa26d0c00d2146c63b56bb6a45149d7325ac x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
348 | 348 | $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=1326fa26d0c00d2146c63b56bb6a45149d7325ac&heads=1326fa26d0c00d2146c63b56bb6a45149d7325ac&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
349 | 349 | $LOCALIP - - [$LOGDATE$] "GET /foo?cmd=capabilities HTTP/1.1" 200 - (glob) |
|
350 | 350 | $LOCALIP - - [$LOGDATE$] "GET /foo?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=0 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
351 | 351 | $LOCALIP - - [$LOGDATE$] "GET /foo?cmd=capabilities HTTP/1.1" 200 - (glob) |
|
352 | 352 | $LOCALIP - - [$LOGDATE$] "GET /foo?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
353 | 353 | $LOCALIP - - [$LOGDATE$] "GET /foo?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=65903cebad86f1a84bd4f1134f62fa7dcb7a1c98&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
354 | 354 | $LOCALIP - - [$LOGDATE$] "GET /foo?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D65903cebad86f1a84bd4f1134f62fa7dcb7a1c98 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
355 | 355 | $LOCALIP - - [$LOGDATE$] "GET /foo?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=65903cebad86f1a84bd4f1134f62fa7dcb7a1c98&heads=65903cebad86f1a84bd4f1134f62fa7dcb7a1c98&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
356 | 356 | $LOCALIP - - [$LOGDATE$] "GET /foo/bar?cmd=capabilities HTTP/1.1" 200 - (glob) |
|
357 | 357 | $LOCALIP - - [$LOGDATE$] "GET /foo/bar?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=0 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
358 | 358 | $LOCALIP - - [$LOGDATE$] "GET /foo/bar?cmd=capabilities HTTP/1.1" 200 - (glob) |
|
359 | 359 | $LOCALIP - - [$LOGDATE$] "GET /foo/bar?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
360 | 360 | $LOCALIP - - [$LOGDATE$] "GET /foo/bar?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=31ecbdafd357f54b281c9bd1d681bb90de219e22&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
361 | 361 | $LOCALIP - - [$LOGDATE$] "GET /foo/bar?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D31ecbdafd357f54b281c9bd1d681bb90de219e22 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
362 | 362 | $LOCALIP - - [$LOGDATE$] "GET /foo/bar?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=31ecbdafd357f54b281c9bd1d681bb90de219e22&heads=31ecbdafd357f54b281c9bd1d681bb90de219e22&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob) |
|
363 | 363 | |
|
364 | 364 | $ killdaemons.py |
|
365 | 365 | $ rm hg1.pid error.log access.log |
|
366 | 366 | $ cd repo |
|
367 | 367 | #endif |
|
368 | 368 | |
|
369 | 369 | Enable progress extension for archive tests: |
|
370 | 370 | |
|
371 | 371 | $ cp $HGRCPATH $HGRCPATH.no-progress |
|
372 | 372 | $ cat >> $HGRCPATH <<EOF |
|
373 | 373 | > [progress] |
|
374 | 374 | > disable=False |
|
375 | 375 | > assume-tty = 1 |
|
376 | 376 | > delay = 0 |
|
377 | 377 | > # set changedelay really large so we don't see nested topics |
|
378 | 378 | > changedelay = 30000 |
|
379 | 379 | > format = topic bar number |
|
380 | 380 | > refresh = 0 |
|
381 | 381 | > width = 60 |
|
382 | 382 | > EOF |
|
383 | 383 | |
|
384 | 384 | Test archiving to a directory tree (the doubled lines in the output |
|
385 | 385 | only show up in the test output, not in real usage): |
|
386 | 386 | |
|
387 | 387 | $ hg archive --subrepos ../archive |
|
388 | 388 | \r (no-eol) (esc) |
|
389 | 389 | archiving [ ] 0/3\r (no-eol) (esc) |
|
390 | 390 | archiving [=============> ] 1/3\r (no-eol) (esc) |
|
391 | 391 | archiving [===========================> ] 2/3\r (no-eol) (esc) |
|
392 | 392 | archiving [==========================================>] 3/3\r (no-eol) (esc) |
|
393 | 393 | \r (no-eol) (esc) |
|
394 | 394 | \r (no-eol) (esc) |
|
395 | 395 | archiving (foo) [ ] 0/3\r (no-eol) (esc) |
|
396 | 396 | archiving (foo) [===========> ] 1/3\r (no-eol) (esc) |
|
397 | 397 | archiving (foo) [=======================> ] 2/3\r (no-eol) (esc) |
|
398 | 398 | archiving (foo) [====================================>] 3/3\r (no-eol) (esc) |
|
399 | 399 | \r (no-eol) (esc) |
|
400 | 400 | \r (no-eol) (esc) |
|
401 | 401 | archiving (foo/bar) [ ] 0/1\r (no-eol) (esc) |
|
402 | 402 | archiving (foo/bar) [================================>] 1/1\r (no-eol) (esc) |
|
403 | 403 | \r (no-eol) (esc) |
|
404 | 404 | $ find ../archive | sort |
|
405 | 405 | ../archive |
|
406 | 406 | ../archive/.hg_archival.txt |
|
407 | 407 | ../archive/.hgsub |
|
408 | 408 | ../archive/.hgsubstate |
|
409 | 409 | ../archive/foo |
|
410 | 410 | ../archive/foo/.hgsub |
|
411 | 411 | ../archive/foo/.hgsubstate |
|
412 | 412 | ../archive/foo/bar |
|
413 | 413 | ../archive/foo/bar/z.txt |
|
414 | 414 | ../archive/foo/y.txt |
|
415 | 415 | ../archive/x.txt |
|
416 | 416 | |
|
417 | 417 | Test archiving to zip file (unzip output is unstable): |
|
418 | 418 | |
|
419 | 419 | $ hg archive --subrepos --prefix '.' ../archive.zip |
|
420 | 420 | \r (no-eol) (esc) |
|
421 | 421 | archiving [ ] 0/3\r (no-eol) (esc) |
|
422 | 422 | archiving [=============> ] 1/3\r (no-eol) (esc) |
|
423 | 423 | archiving [===========================> ] 2/3\r (no-eol) (esc) |
|
424 | 424 | archiving [==========================================>] 3/3\r (no-eol) (esc) |
|
425 | 425 | \r (no-eol) (esc) |
|
426 | 426 | \r (no-eol) (esc) |
|
427 | 427 | archiving (foo) [ ] 0/3\r (no-eol) (esc) |
|
428 | 428 | archiving (foo) [===========> ] 1/3\r (no-eol) (esc) |
|
429 | 429 | archiving (foo) [=======================> ] 2/3\r (no-eol) (esc) |
|
430 | 430 | archiving (foo) [====================================>] 3/3\r (no-eol) (esc) |
|
431 | 431 | \r (no-eol) (esc) |
|
432 | 432 | \r (no-eol) (esc) |
|
433 | 433 | archiving (foo/bar) [ ] 0/1\r (no-eol) (esc) |
|
434 | 434 | archiving (foo/bar) [================================>] 1/1\r (no-eol) (esc) |
|
435 | 435 | \r (no-eol) (esc) |
|
436 | 436 | |
|
437 | 437 | (unzip date formatting is unstable, we do not care about it and glob it out)
|
438 | 438 | |
|
439 | 439 | $ unzip -l ../archive.zip | grep -v -- ----- | egrep -v files$ |
|
440 | 440 | Archive: ../archive.zip |
|
441 | 441 | Length [ ]* Date [ ]* Time [ ]* Name (re) |
|
442 | 442 | 172 [0-9:\- ]* .hg_archival.txt (re) |
|
443 | 443 | 10 [0-9:\- ]* .hgsub (re) |
|
444 | 444 | 45 [0-9:\- ]* .hgsubstate (re) |
|
445 | 445 | 3 [0-9:\- ]* x.txt (re) |
|
446 | 446 | 10 [0-9:\- ]* foo/.hgsub (re) |
|
447 | 447 | 45 [0-9:\- ]* foo/.hgsubstate (re) |
|
448 | 448 | 9 [0-9:\- ]* foo/y.txt (re) |
|
449 | 449 | 9 [0-9:\- ]* foo/bar/z.txt (re) |
|
450 | 450 | |
|
451 | 451 | Test archiving a revision that references a subrepo that is not yet |
|
452 | 452 | cloned: |
|
453 | 453 | |
|
454 | 454 | #if hardlink |
|
455 | 455 | $ hg clone -U . ../empty |
|
456 | 456 | \r (no-eol) (esc) |
|
457 | linking [               <=>               ] 1\r (no-eol) (esc) | |

458 | linking [               <=>               ] 2\r (no-eol) (esc) | |

459 | linking [               <=>               ] 3\r (no-eol) (esc) | |

460 | linking [               <=>               ] 4\r (no-eol) (esc) | |

461 | linking [               <=>               ] 5\r (no-eol) (esc) | |

462 | linking [               <=>               ] 6\r (no-eol) (esc) | |

463 | linking [ <=> ] 7\r (no-eol) (esc) | |
|
464 | linking [ <=> ] 8\r (no-eol) (esc) | |
|
465 | linking [ <=> ] 9\r (no-eol) (esc) (reposimplestore !) | |
|
466 | linking [ <=> ] 10\r (no-eol) (esc) (reposimplestore !) | |
|
467 | linking [ <=> ] 11\r (no-eol) (esc) (reposimplestore !) | |
|
468 | linking [ <=> ] 12\r (no-eol) (esc) (reposimplestore !) | |
|
469 | linking [ <=> ] 13\r (no-eol) (esc) (reposimplestore !) | |
|
457 | linking [====> ] 1/9\r (no-eol) (esc) | |
|
458 | linking [=========> ] 2/9\r (no-eol) (esc) | |
|
459 | linking [==============> ] 3/9\r (no-eol) (esc) | |
|
460 | linking [===================> ] 4/9\r (no-eol) (esc) | |
|
461 | linking [========================> ] 5/9\r (no-eol) (esc) | |
|
462 | linking [=============================> ] 6/9\r (no-eol) (esc) | |
|
463 | linking [==================================> ] 7/9\r (no-eol) (esc) | |
|
464 | linking [=======================================> ] 8/9\r (no-eol) (esc) | |
|
465 | linking [============================================>] 9/9\r (no-eol) (esc) | |
|
470 | 466 | \r (no-eol) (esc) |
|
471 | 467 | #else |
|
472 | 468 | $ hg clone -U . ../empty |
|
473 | 469 | \r (no-eol) (esc) |
|
474 | 470 | linking [ <=> ] 1 (no-eol) |
|
475 | 471 | #endif |
|
476 | 472 | |
|
477 | 473 | $ cd ../empty |
|
478 | 474 | #if hardlink |
|
479 | 475 | $ hg archive --subrepos -r tip --prefix './' ../archive.tar.gz |
|
480 | 476 | \r (no-eol) (esc) |
|
481 | 477 | archiving [ ] 0/3\r (no-eol) (esc) |
|
482 | 478 | archiving [=============> ] 1/3\r (no-eol) (esc) |
|
483 | 479 | archiving [===========================> ] 2/3\r (no-eol) (esc) |
|
484 | 480 | archiving [==========================================>] 3/3\r (no-eol) (esc) |
|
485 | 481 | \r (no-eol) (esc) |
|
486 | 482 | \r (no-eol) (esc) |
|
487 | linking [               <=>               ] 1\r (no-eol) (esc) | |

488 | linking [               <=>               ] 2\r (no-eol) (esc) | |

489 | linking [               <=>               ] 3\r (no-eol) (esc) | |

490 | linking [               <=>               ] 4\r (no-eol) (esc) | |

491 | linking [               <=>               ] 5\r (no-eol) (esc) | |

492 | linking [ <=> ] 6\r (no-eol) (esc) | |
|
493 | linking [ <=> ] 7\r (no-eol) (esc) | |
|
494 | linking [ <=> ] 8\r (no-eol) (esc) | |
|
495 | linking [ <=> ] 9\r (no-eol) (esc) (reposimplestore !) | |
|
496 | linking [ <=> ] 10\r (no-eol) (esc) (reposimplestore !) | |
|
497 | linking [ <=> ] 11\r (no-eol) (esc) (reposimplestore !) | |
|
498 | linking [ <=> ] 12\r (no-eol) (esc) (reposimplestore !) | |
|
499 | linking [ <=> ] 13\r (no-eol) (esc) (reposimplestore !) | |
|
500 | linking [ <=> ] 14\r (no-eol) (esc) (reposimplestore !) | |
|
501 | linking [ <=> ] 15\r (no-eol) (esc) (reposimplestore !) | |
|
502 | linking [ <=> ] 16\r (no-eol) (esc) (reposimplestore !) | |
|
483 | linking [====> ] 1/8\r (no-eol) (esc) | |
|
484 | linking [==========> ] 2/8\r (no-eol) (esc) | |
|
485 | linking [===============> ] 3/8\r (no-eol) (esc) | |
|
486 | linking [=====================> ] 4/8\r (no-eol) (esc) | |
|
487 | linking [===========================> ] 5/8\r (no-eol) (esc) | |
|
488 | linking [================================> ] 6/8\r (no-eol) (esc) | |
|
489 | linking [======================================> ] 7/8\r (no-eol) (esc) | |
|
490 | linking [============================================>] 8/8\r (no-eol) (esc) | |
|
503 | 491 | \r (no-eol) (esc) |
|
504 | 492 | \r (no-eol) (esc) |
|
505 | 493 | archiving (foo) [ ] 0/3\r (no-eol) (esc) |
|
506 | 494 | archiving (foo) [===========> ] 1/3\r (no-eol) (esc) |
|
507 | 495 | archiving (foo) [=======================> ] 2/3\r (no-eol) (esc) |
|
508 | 496 | archiving (foo) [====================================>] 3/3\r (no-eol) (esc) |
|
509 | 497 | \r (no-eol) (esc) |
|
510 | 498 | \r (no-eol) (esc) |
|
511 | linking [               <=>               ] 1\r (no-eol) (esc) | |

512 | linking [               <=>               ] 2\r (no-eol) (esc) | |

513 | linking [               <=>               ] 3\r (no-eol) (esc) | |

514 | linking [               <=>               ] 4\r (no-eol) (esc) | |

515 | linking [ <=> ] 5\r (no-eol) (esc) | |
|
516 | linking [ <=> ] 6\r (no-eol) (esc) | |
|
517 | linking [ <=> ] 7\r (no-eol) (esc) (reposimplestore !) | |
|
518 | linking [ <=> ] 8\r (no-eol) (esc) (reposimplestore !) | |
|
519 | linking [ <=> ] 9\r (no-eol) (esc) (reposimplestore !) | |
|
499 | linking [======> ] 1/6\r (no-eol) (esc) | |
|
500 | linking [==============> ] 2/6\r (no-eol) (esc) | |
|
501 | linking [=====================> ] 3/6\r (no-eol) (esc) | |
|
502 | linking [=============================> ] 4/6\r (no-eol) (esc) | |
|
503 | linking [====================================> ] 5/6\r (no-eol) (esc) | |
|
504 | linking [============================================>] 6/6\r (no-eol) (esc) | |
|
520 | 505 | \r (no-eol) (esc) |
|
521 | 506 | \r (no-eol) (esc) |
|
522 | 507 | archiving (foo/bar) [ ] 0/1\r (no-eol) (esc) |
|
523 | 508 | archiving (foo/bar) [================================>] 1/1\r (no-eol) (esc) |
|
524 | 509 | \r (no-eol) (esc) |
|
525 | 510 | cloning subrepo foo from $TESTTMP/repo/foo |
|
526 | 511 | cloning subrepo foo/bar from $TESTTMP/repo/foo/bar |
|
527 | 512 | #else |
|
528 | 513 | Note there's a slight output glitch on non-hardlink systems: the last |
|
529 | 514 | "linking" progress topic never gets closed, leading to slight output corruption on that platform. |
|
530 | 515 | $ hg archive --subrepos -r tip --prefix './' ../archive.tar.gz |
|
531 | 516 | \r (no-eol) (esc) |
|
532 | 517 | archiving [ ] 0/3\r (no-eol) (esc) |
|
533 | 518 | archiving [=============> ] 1/3\r (no-eol) (esc) |
|
534 | 519 | archiving [===========================> ] 2/3\r (no-eol) (esc) |
|
535 | 520 | archiving [==========================================>] 3/3\r (no-eol) (esc) |
|
536 | 521 | \r (no-eol) (esc) |
|
537 | 522 | \r (no-eol) (esc) |
|
538 | 523 | linking [ <=> ] 1\r (no-eol) (esc) |
|
539 | 524 | cloning subrepo foo/bar from $TESTTMP/repo/foo/bar |
|
540 | 525 | #endif |
|
541 | 526 | |
|
542 | 527 | Archive + subrepos uses '/' for all component separators |
|
543 | 528 | |
|
544 | 529 | $ tar -tzf ../archive.tar.gz | sort |
|
545 | 530 | .hg_archival.txt |
|
546 | 531 | .hgsub |
|
547 | 532 | .hgsubstate |
|
548 | 533 | foo/.hgsub |
|
549 | 534 | foo/.hgsubstate |
|
550 | 535 | foo/bar/z.txt |
|
551 | 536 | foo/y.txt |
|
552 | 537 | x.txt |
|
553 | 538 | |
|
554 | 539 | The newly cloned subrepos contain no working copy: |
|
555 | 540 | |
|
556 | 541 | $ hg -R foo summary |
|
557 | 542 | parent: -1:000000000000 (no revision checked out) |
|
558 | 543 | branch: default |
|
559 | 544 | commit: (clean) |
|
560 | 545 | update: 4 new changesets (update) |
|
561 | 546 | |
|
562 | 547 | Sharing a local repo with missing local subrepos (i.e. it was never updated |
|
563 | 548 | from null) works because the default path is copied from the source repo, |
|
564 | 549 | whereas clone should fail. |
|
565 | 550 | |
|
566 | 551 | $ hg --config progress.disable=True clone -U ../empty ../empty2 |
|
567 | 552 | |
|
568 | 553 | $ hg --config extensions.share= --config progress.disable=True \ |
|
569 | 554 | > share ../empty2 ../empty_share |
|
570 | 555 | updating working directory |
|
571 | 556 | sharing subrepo foo from $TESTTMP/empty/foo |
|
572 | 557 | sharing subrepo foo/bar from $TESTTMP/empty/foo/bar |
|
573 | 558 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
574 | 559 | |
|
575 | 560 | $ hg --config progress.disable=True clone ../empty2 ../empty_clone |
|
576 | 561 | updating to branch default |
|
577 | 562 | cloning subrepo foo from $TESTTMP/empty2/foo |
|
578 | 563 | abort: repository $TESTTMP/empty2/foo not found |
|
579 | 564 | [255] |
|
580 | 565 | |
|
581 | 566 | Disable progress extension and cleanup: |
|
582 | 567 | |
|
583 | 568 | $ mv $HGRCPATH.no-progress $HGRCPATH |
|
584 | 569 | |
|
585 | 570 | Test archiving when there is a directory in the way for a subrepo |
|
586 | 571 | created by archive: |
|
587 | 572 | |
|
588 | 573 | $ hg clone -U . ../almost-empty |
|
589 | 574 | $ cd ../almost-empty |
|
590 | 575 | $ mkdir foo |
|
591 | 576 | $ echo f > foo/f |
|
592 | 577 | $ hg archive --subrepos -r tip archive |
|
593 | 578 | cloning subrepo foo from $TESTTMP/empty/foo |
|
594 | 579 | abort: destination '$TESTTMP/almost-empty/foo' is not empty (in subrepository "foo") |
|
595 | 580 | [255] |
|
596 | 581 | |
|
597 | 582 | Clone and test outgoing: |
|
598 | 583 | |
|
599 | 584 | $ cd .. |
|
600 | 585 | $ hg clone repo repo2 |
|
601 | 586 | updating to branch default |
|
602 | 587 | cloning subrepo foo from $TESTTMP/repo/foo |
|
603 | 588 | cloning subrepo foo/bar from $TESTTMP/repo/foo/bar |
|
604 | 589 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
605 | 590 | $ cd repo2 |
|
606 | 591 | $ hg outgoing -S |
|
607 | 592 | comparing with $TESTTMP/repo |
|
608 | 593 | searching for changes |
|
609 | 594 | no changes found |
|
610 | 595 | comparing with $TESTTMP/repo/foo |
|
611 | 596 | searching for changes |
|
612 | 597 | no changes found |
|
613 | 598 | comparing with $TESTTMP/repo/foo/bar |
|
614 | 599 | searching for changes |
|
615 | 600 | no changes found |
|
616 | 601 | [1] |
|
617 | 602 | |
|
618 | 603 | Make nested change: |
|
619 | 604 | |
|
620 | 605 | $ echo y4 >> foo/y.txt |
|
621 | 606 | $ hg diff --nodates -S |
|
622 | 607 | diff -r 65903cebad86 foo/y.txt |
|
623 | 608 | --- a/foo/y.txt |
|
624 | 609 | +++ b/foo/y.txt |
|
625 | 610 | @@ -1,3 +1,4 @@ |
|
626 | 611 | y1 |
|
627 | 612 | y2 |
|
628 | 613 | y3 |
|
629 | 614 | +y4 |
|
630 | 615 | $ hg commit --subrepos -m 3-4-2 |
|
631 | 616 | committing subrepository foo |
|
632 | 617 | $ hg outgoing -S |
|
633 | 618 | comparing with $TESTTMP/repo |
|
634 | 619 | searching for changes |
|
635 | 620 | changeset: 3:2655b8ecc4ee |
|
636 | 621 | tag: tip |
|
637 | 622 | user: test |
|
638 | 623 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
639 | 624 | summary: 3-4-2 |
|
640 | 625 | |
|
641 | 626 | comparing with $TESTTMP/repo/foo |
|
642 | 627 | searching for changes |
|
643 | 628 | changeset: 4:e96193d6cb36 |
|
644 | 629 | tag: tip |
|
645 | 630 | user: test |
|
646 | 631 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
647 | 632 | summary: 3-4-2 |
|
648 | 633 | |
|
649 | 634 | comparing with $TESTTMP/repo/foo/bar |
|
650 | 635 | searching for changes |
|
651 | 636 | no changes found |
|
652 | 637 | |
|
653 | 638 | |
|
654 | 639 | Switch to original repo and setup default path: |
|
655 | 640 | |
|
656 | 641 | $ cd ../repo |
|
657 | 642 | $ echo '[paths]' >> .hg/hgrc |
|
658 | 643 | $ echo 'default = ../repo2' >> .hg/hgrc |
|
659 | 644 | |
|
660 | 645 | Test incoming: |
|
661 | 646 | |
|
662 | 647 | $ hg incoming -S |
|
663 | 648 | comparing with $TESTTMP/repo2 |
|
664 | 649 | searching for changes |
|
665 | 650 | changeset: 3:2655b8ecc4ee |
|
666 | 651 | tag: tip |
|
667 | 652 | user: test |
|
668 | 653 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
669 | 654 | summary: 3-4-2 |
|
670 | 655 | |
|
671 | 656 | comparing with $TESTTMP/repo2/foo |
|
672 | 657 | searching for changes |
|
673 | 658 | changeset: 4:e96193d6cb36 |
|
674 | 659 | tag: tip |
|
675 | 660 | user: test |
|
676 | 661 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
677 | 662 | summary: 3-4-2 |
|
678 | 663 | |
|
679 | 664 | comparing with $TESTTMP/repo2/foo/bar |
|
680 | 665 | searching for changes |
|
681 | 666 | no changes found |
|
682 | 667 | |
|
683 | 668 | $ hg incoming -S --bundle incoming.hg |
|
684 | 669 | abort: cannot specify both --subrepos and --bundle |
|
685 | 670 | [10] |
|
686 | 671 | |
|
687 | 672 | Test missing subrepo: |
|
688 | 673 | |
|
689 | 674 | $ rm -r foo |
|
690 | 675 | $ hg status -S |
|
691 | 676 | warning: error "unknown revision '65903cebad86f1a84bd4f1134f62fa7dcb7a1c98'" in subrepository "foo" |
|
692 | 677 | |
|
693 | 678 | Issue2619: IndexError: list index out of range on hg add with subrepos |
|
694 | 679 | The subrepo must sort after the explicit filename.
|
695 | 680 | |
|
696 | 681 | $ cd .. |
|
697 | 682 | $ hg init test |
|
698 | 683 | $ cd test |
|
699 | 684 | $ hg init x |
|
700 | 685 | $ echo abc > abc.txt |
|
701 | 686 | $ hg ci -Am "abc" |
|
702 | 687 | adding abc.txt |
|
703 | 688 | $ echo "x = x" >> .hgsub |
|
704 | 689 | $ hg add .hgsub |
|
705 | 690 | $ touch a x/a |
|
706 | 691 | $ hg add a x/a |
|
707 | 692 | |
|
708 | 693 | $ hg ci -Sm "added x" |
|
709 | 694 | committing subrepository x |
|
710 | 695 | $ echo abc > x/a |
|
711 | 696 | $ hg revert --rev '.^' "set:subrepo('glob:x*')" |
|
712 | 697 | abort: subrepository 'x' does not exist in 25ac2c9b3180! |
|
713 | 698 | [255] |
|
714 | 699 | |
|
715 | 700 | $ cd .. |