##// END OF EJS Templates
Merge with crew-stable
Patrick Mezard -
r5913:7c2921a6 merge default
parent child Browse files
Show More
@@ -1,984 +1,984 b''
1 1 # Subversion 1.4/1.5 Python API backend
2 2 #
3 3 # Copyright(C) 2007 Daniel Holth et al
4 4 #
5 5 # Configuration options:
6 6 #
7 7 # convert.svn.trunk
8 8 # Relative path to the trunk (default: "trunk")
9 9 # convert.svn.branches
10 10 # Relative path to tree of branches (default: "branches")
11 11 # convert.svn.tags
12 12 # Relative path to tree of tags (default: "tags")
13 13 #
14 14 # Set these in a hgrc, or on the command line as follows:
15 15 #
16 16 # hg convert --config convert.svn.trunk=wackoname [...]
17 17
18 18 import locale
19 19 import os
20 20 import re
21 21 import sys
22 22 import cPickle as pickle
23 23 import tempfile
24 24
25 25 from mercurial import strutil, util
26 26 from mercurial.i18n import _
27 27
28 28 # Subversion stuff. Works best with very recent Python SVN bindings
29 29 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
30 30 # these bindings.
31 31
32 32 from cStringIO import StringIO
33 33
34 34 from common import NoRepo, commit, converter_source, encodeargs, decodeargs
35 35 from common import commandline, converter_sink, mapfile
36 36
37 37 try:
38 38 from svn.core import SubversionException, Pool
39 39 import svn
40 40 import svn.client
41 41 import svn.core
42 42 import svn.ra
43 43 import svn.delta
44 44 import transport
45 45 except ImportError:
46 46 pass
47 47
def geturl(path):
    """Derive a Subversion URL from a path or URL string.

    Ask the SVN bindings first; failing that, turn an existing local
    directory into a file:// URL, otherwise hand the string back as-is.
    """
    try:
        canonical = svn.core.svn_path_canonicalize(path)
        return svn.client.url_from_path(canonical)
    except SubversionException:
        pass
    if not os.path.isdir(path):
        return path
    abspath = os.path.normpath(os.path.abspath(path))
    if os.name == 'nt':
        # Windows drive-letter paths need a leading slash in file:// URLs
        abspath = '/' + util.normpath(abspath)
    return 'file://%s' % abspath
59 59
def optrev(number):
    """Wrap a plain revision number in an svn_opt_revision_t object."""
    rev = svn.core.svn_opt_revision_t()
    rev.kind = svn.core.svn_opt_revision_number
    rev.value.number = number
    return rev
65 65
class changedpath(object):
    """Plain-data snapshot of an SVN changed-path entry.

    Copies the three fields we need off the binding object so the
    instance can outlive the SVN memory pool and be pickled across the
    log-reader subprocess boundary.
    """
    def __init__(self, p):
        for attr in ('copyfrom_path', 'copyfrom_rev', 'action'):
            setattr(self, attr, getattr(p, attr))
71 71
def get_log_child(fp, url, paths, start, end, limit=0, discover_changed_paths=True,
                  strict_node_history=False):
    """Fetch an SVN revision log and stream it, pickled, to *fp*.

    Runs in a child process (see get_log/debugsvnlog): each log entry is
    dumped as a (paths, revnum, author, date, message) tuple, followed
    by a final sentinel which is None on success or the SVN error number
    on failure.
    """
    protocol = -1   # pickle: always use the highest available protocol
    def receiver(orig_paths, revnum, author, date, message, pool):
        if orig_paths is not None:
            for k, v in orig_paths.iteritems():
                # copy into picklable objects; the binding objects are
                # tied to the (unpicklable) SVN pool
                orig_paths[k] = changedpath(v)
        pickle.dump((orig_paths, revnum, author, date, message),
                    fp, protocol)

    try:
        # Use an ra of our own so that our parent can consume
        # our results without confusing the server.
        t = transport.SvnRaTransport(url=url)
        svn.ra.get_log(t.ra, paths, start, end, limit,
                       discover_changed_paths,
                       strict_node_history,
                       receiver)
    except SubversionException, (inst, num):
        # forward the SVN error number that aborted the log walk
        pickle.dump(num, fp, protocol)
    except IOError:
        # Caller may interrupt the iteration
        pickle.dump(None, fp, protocol)
    else:
        pickle.dump(None, fp, protocol)
    fp.close()
98 98
def debugsvnlog(ui, **opts):
    """Fetch SVN log in a subprocess and channel them back to parent to
    avoid memory collection issues.
    """
    # the pickled stream must not be newline-translated on Windows
    for stream in (sys.stdin, sys.stdout):
        util.set_binary(stream)
    arguments = decodeargs(sys.stdin.read())
    get_log_child(sys.stdout, *arguments)
107 107
class logstream:
    """Interruptible iterator over pickled log entries from a child pipe.

    Entries are (paths, revnum, author, date, message) tuples; a final
    None sentinel ends the stream.  Iteration may be abandoned early and
    the pipe released via close().
    """
    def __init__(self, stdout):
        self._stdout = stdout

    def __iter__(self):
        while True:
            entry = pickle.load(self._stdout)
            if entry is None:
                # end-of-stream sentinel written by the child
                break
            try:
                orig_paths, revnum, author, date, message = entry
            except:
                # the child pickled an error value instead of a log tuple
                raise SubversionException("child raised exception", entry)
            yield entry

    def close(self):
        if self._stdout:
            self._stdout.close()
            self._stdout = None
128 128
def get_log(url, paths, start, end, limit=0, discover_changed_paths=True,
            strict_node_history=False):
    """Spawn 'hg debugsvnlog' and return a logstream over its output.

    The log is fetched in a child process so interrupting the iteration
    cannot corrupt this process's SVN state (see get_log_child).
    """
    svnargs = encodeargs([url, paths, start, end, limit,
                          discover_changed_paths, strict_node_history])
    cmd = '%s debugsvnlog' % util.shellquote(util.hgexecutable())
    stdin, stdout = os.popen2(cmd, 'b')
    stdin.write(svnargs)
    stdin.close()
    return logstream(stdout)
140 140
141 141 # SVN conversion code stolen from bzr-svn and tailor
142 142 #
143 143 # Subversion looks like a versioned filesystem, branches structures
144 144 # are defined by conventions and not enforced by the tool. First,
145 145 # we define the potential branches (modules) as "trunk" and "branches"
146 146 # children directories. Revisions are then identified by their
147 147 # module and revision number (and a repository identifier).
148 148 #
149 149 # The revision graph is really a tree (or a forest). By default, a
150 150 # revision parent is the previous revision in the same module. If the
151 151 # module directory is copied/moved from another module then the
152 152 # revision is the module root and its parent the source revision in
153 153 # the parent module. A revision has at most one parent.
154 154 #
class svn_source(converter_source):
    """Read revisions out of a Subversion repository for 'hg convert'."""
    def __init__(self, ui, url, rev=None):
        super(svn_source, self).__init__(ui, url, rev=rev)

        try:
            SubversionException
        except NameError:
            # the module-level 'import svn' is wrapped in try/except;
            # an undefined name here means the bindings are missing
            raise NoRepo('Subversion python bindings could not be loaded')

        self.encoding = locale.getpreferredencoding()
        self.lastrevs = {}

        latest = None
        try:
            # Support file://path@rev syntax. Useful e.g. to convert
            # deleted branches.
            at = url.rfind('@')
            if at >= 0:
                latest = int(url[at+1:])
                url = url[:at]
        except ValueError, e:
            pass
        self.url = geturl(url)
        self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
        try:
            self.transport = transport.SvnRaTransport(url=self.url)
            self.ra = self.transport.ra
            self.ctx = self.transport.client
            self.base = svn.ra.get_repos_root(self.ra)
            # path of the converted module relative to the repository root
            self.module = self.url[len(self.base):]
            self.commits = {}
            self.paths = {}
            self.uuid = svn.ra.get_uuid(self.ra).decode(self.encoding)
        except SubversionException, e:
            ui.print_exc()
            raise NoRepo("%s does not look like a Subversion repo" % self.url)

        if rev:
            try:
                latest = int(rev)
            except ValueError:
                raise util.Abort('svn: revision %s is not an integer' % rev)

        try:
            self.get_blacklist()
        except IOError, e:
            # no blacklist.txt file: nothing to skip
            pass

        self.last_changed = self.latest(self.module, latest)

        self.head = self.revid(self.last_changed)
        self._changescache = None

        # remember a working copy path so converted() can maintain a shamap
        if os.path.exists(os.path.join(url, '.svn/entries')):
            self.wc = url
        else:
            self.wc = None
        self.convertfp = None
213 213
214 214 def setrevmap(self, revmap):
215 215 lastrevs = {}
216 216 for revid in revmap.iterkeys():
217 217 uuid, module, revnum = self.revsplit(revid)
218 218 lastrevnum = lastrevs.setdefault(module, revnum)
219 219 if revnum > lastrevnum:
220 220 lastrevs[module] = revnum
221 221 self.lastrevs = lastrevs
222 222
223 223 def exists(self, path, optrev):
224 224 try:
225 225 svn.client.ls(self.url.rstrip('/') + '/' + path,
226 226 optrev, False, self.ctx)
227 227 return True
228 228 except SubversionException, err:
229 229 return False
230 230
    def getheads(self):
        """Return the revision ids conversion should start from.

        The module head comes first, followed by one head per branch
        found under the configured branches directory.  Also narrows
        self.module to the trunk and records the tags path.
        """

        def getcfgpath(name, rev):
            # resolve convert.svn.<name>, checking the path exists at rev
            cfgpath = self.ui.config('convert', 'svn.' + name)
            path = (cfgpath or name).strip('/')
            if not self.exists(path, rev):
                if cfgpath:
                    raise util.Abort(_('expected %s to be at %r, but not found')
                                 % (name, path))
                return None
            self.ui.note(_('found %s at %r\n') % (name, path))
            return path

        rev = optrev(self.last_changed)
        oldmodule = ''
        trunk = getcfgpath('trunk', rev)
        tags = getcfgpath('tags', rev)
        branches = getcfgpath('branches', rev)

        # If the project has a trunk or branches, we will extract heads
        # from them. We keep the project root otherwise.
        if trunk:
            oldmodule = self.module or ''
            self.module += '/' + trunk
            lt = self.latest(self.module, self.last_changed)
            self.head = self.revid(lt)

        # First head in the list is the module's head
        self.heads = [self.head]
        self.tags = '%s/%s' % (oldmodule , (tags or 'tags'))

        # Check if branches bring a few more heads to the list
        if branches:
            rpath = self.url.strip('/')
            branchnames = svn.client.ls(rpath + '/' + branches, rev, False,
                                        self.ctx)
            for branch in branchnames.keys():
                module = '%s/%s/%s' % (oldmodule, branches, branch)
                brevnum = self.latest(module, self.last_changed)
                brev = self.revid(brevnum, module)
                self.ui.note('found branch %s at %d\n' % (branch, brevnum))
                self.heads.append(brev)

        return self.heads
275 275
276 276 def getfile(self, file, rev):
277 277 data, mode = self._getfile(file, rev)
278 278 self.modecache[(file, rev)] = mode
279 279 return data
280 280
281 281 def getmode(self, file, rev):
282 282 return self.modecache[(file, rev)]
283 283
284 284 def getchanges(self, rev):
285 285 if self._changescache and self._changescache[0] == rev:
286 286 return self._changescache[1]
287 287 self._changescache = None
288 288 self.modecache = {}
289 289 (paths, parents) = self.paths[rev]
290 290 files, copies = self.expandpaths(rev, paths, parents)
291 291 files.sort()
292 292 files = zip(files, [rev] * len(files))
293 293
294 294 # caller caches the result, so free it here to release memory
295 295 del self.paths[rev]
296 296 return (files, copies)
297 297
298 298 def getchangedfiles(self, rev, i):
299 299 changes = self.getchanges(rev)
300 300 self._changescache = (rev, changes)
301 301 return [f[0] for f in changes[0]]
302 302
    def getcommit(self, rev):
        """Return the commit object for revision id *rev*, fetching a
        batch of revisions from the log on a cache miss.  The entry is
        removed from the cache before returning (callers cache it)."""
        if rev not in self.commits:
            uuid, module, revnum = self.revsplit(rev)
            self.module = module
            self.reparent(module)
            # We assume that:
            # - requests for revisions after "stop" come from the
            #   revision graph backward traversal. Cache all of them
            #   down to stop, they will be used eventually.
            # - requests for revisions before "stop" come to get
            #   isolated branches parents. Just fetch what is needed.
            stop = self.lastrevs.get(module, 0)
            if revnum < stop:
                stop = revnum + 1
            self._fetch_revisions(revnum, stop)
        commit = self.commits[rev]
        # caller caches the result, so free it here to release memory
        del self.commits[rev]
        return commit
322 322
    def gettags(self):
        """Scan the tags directory and return a {tag: revision id} map.

        Tags are directories copied under self.tags; each maps to the
        revision its content was copied from.
        """
        tags = {}
        start = self.revnum(self.head)
        try:
            for entry in get_log(self.url, [self.tags], 0, start):
                orig_paths, revnum, author, date, message = entry
                for path in orig_paths:
                    if not path.startswith(self.tags+'/'):
                        continue
                    ent = orig_paths[path]
                    source = ent.copyfrom_path
                    rev = ent.copyfrom_rev
                    tag = path.split('/')[-1]
                    tags[tag] = self.revid(rev, module=source)
        except SubversionException, (inst, num):
            # the tags path may not exist at all
            self.ui.note('no tags found at revision %d\n' % start)
        return tags
340 340
341 341 def converted(self, rev, destrev):
342 342 if not self.wc:
343 343 return
344 344 if self.convertfp is None:
345 345 self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
346 346 'a')
347 347 self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
348 348 self.convertfp.flush()
349 349
350 350 # -- helper functions --
351 351
352 352 def revid(self, revnum, module=None):
353 353 if not module:
354 354 module = self.module
355 355 return u"svn:%s%s@%s" % (self.uuid, module.decode(self.encoding),
356 356 revnum)
357 357
358 358 def revnum(self, rev):
359 359 return int(rev.split('@')[-1])
360 360
361 361 def revsplit(self, rev):
362 362 url, revnum = rev.encode(self.encoding).split('@', 1)
363 363 revnum = int(revnum)
364 364 parts = url.split('/', 1)
365 365 uuid = parts.pop(0)[4:]
366 366 mod = ''
367 367 if parts:
368 368 mod = '/' + parts[0]
369 369 return uuid, mod, revnum
370 370
371 371 def latest(self, path, stop=0):
372 372 'find the latest revision affecting path, up to stop'
373 373 if not stop:
374 374 stop = svn.ra.get_latest_revnum(self.ra)
375 375 try:
376 376 self.reparent('')
377 377 dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
378 378 self.reparent(self.module)
379 379 except SubversionException:
380 380 dirent = None
381 381 if not dirent:
382 382 raise util.Abort('%s not found up to revision %d' % (path, stop))
383 383
384 384 return dirent.created_rev
385 385
386 386 def get_blacklist(self):
387 387 """Avoid certain revision numbers.
388 388 It is not uncommon for two nearby revisions to cancel each other
389 389 out, e.g. 'I copied trunk into a subdirectory of itself instead
390 390 of making a branch'. The converted repository is significantly
391 391 smaller if we ignore such revisions."""
392 392 self.blacklist = util.set()
393 393 blacklist = self.blacklist
394 394 for line in file("blacklist.txt", "r"):
395 395 if not line.startswith("#"):
396 396 try:
397 397 svn_rev = int(line.strip())
398 398 blacklist.add(svn_rev)
399 399 except ValueError, e:
400 400 pass # not an integer or a comment
401 401
402 402 def is_blacklisted(self, svn_rev):
403 403 return svn_rev in self.blacklist
404 404
405 405 def reparent(self, module):
406 406 svn_url = self.base + module
407 407 self.ui.debug("reparent to %s\n" % svn_url.encode(self.encoding))
408 408 svn.ra.reparent(self.ra, svn_url.encode(self.encoding))
409 409
    def expandpaths(self, rev, paths, parents):
        """Turn the changed-path list of *rev* into ([files], {copies}).

        *paths* is the (path, changedpath) list from the SVN log and
        *parents* the revision ids of rev's parents.  Returns the files
        touched inside the current module (duplicates removed) and a
        destination -> source copy map.
        """
        def get_entry_from_path(path, module=self.module):
            # Given the repository url of this wc, say
            # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
            # extract the "entry" portion (a relative path) from what
            # svn log --xml says, ie
            # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
            # that is to say "tests/PloneTestCase.py"
            if path.startswith(module):
                relative = path[len(module):]
                if relative.startswith('/'):
                    return relative[1:]
                else:
                    return relative

            # The path is outside our tracked tree...
            self.ui.debug('%r is not under %r, ignoring\n' % (path, module))
            return None

        entries = []
        copyfrom = {} # Map of entrypath, revision for finding source of deleted revisions.
        copies = {}

        new_module, revnum = self.revsplit(rev)[1:]
        if new_module != self.module:
            self.module = new_module
            self.reparent(self.module)

        for path, ent in paths:
            entrypath = get_entry_from_path(path, module=self.module)
            entry = entrypath.decode(self.encoding)

            kind = svn.ra.check_path(self.ra, entrypath, revnum)
            if kind == svn.core.svn_node_file:
                if ent.copyfrom_path:
                    copyfrom_path = get_entry_from_path(ent.copyfrom_path)
                    if copyfrom_path:
                        self.ui.debug("Copied to %s from %s@%s\n" %
                                      (entrypath, copyfrom_path,
                                       ent.copyfrom_rev))
                        # It's probably important for hg that the source
                        # exists in the revision's parent, not just the
                        # ent.copyfrom_rev
                        fromkind = svn.ra.check_path(self.ra, copyfrom_path, ent.copyfrom_rev)
                        if fromkind != 0:
                            copies[self.recode(entry)] = self.recode(copyfrom_path)
                entries.append(self.recode(entry))
            elif kind == 0: # gone, but had better be a deleted *file*
                self.ui.debug("gone from %s\n" % ent.copyfrom_rev)

                # if a branch is created but entries are removed in the same
                # changeset, get the right fromrev
                # parents cannot be empty here, you cannot remove things from
                # a root revision.
                uuid, old_module, fromrev = self.revsplit(parents[0])

                basepath = old_module + "/" + get_entry_from_path(path, module=self.module)
                entrypath = old_module + "/" + get_entry_from_path(path, module=self.module)

                def lookup_parts(p):
                    # walk up p's ancestors looking for a recorded
                    # directory copy; the deepest match wins
                    rc = None
                    parts = p.split("/")
                    for i in range(len(parts)):
                        part = "/".join(parts[:i])
                        info = part, copyfrom.get(part, None)
                        if info[1] is not None:
                            self.ui.debug("Found parent directory %s\n" % info[1])
                            rc = info
                    return rc

                self.ui.debug("base, entry %s %s\n" % (basepath, entrypath))

                frompath, froment = lookup_parts(entrypath) or (None, revnum - 1)

                # need to remove fragment from lookup_parts and replace with copyfrom_path
                if frompath is not None:
                    self.ui.debug("munge-o-matic\n")
                    self.ui.debug(entrypath + '\n')
                    self.ui.debug(entrypath[len(frompath):] + '\n')
                    entrypath = froment.copyfrom_path + entrypath[len(frompath):]
                    fromrev = froment.copyfrom_rev
                    self.ui.debug("Info: %s %s %s %s\n" % (frompath, froment, ent, entrypath))

                # We can avoid the reparent calls if the module has not changed
                # but it probably does not worth the pain.
                self.reparent('')
                fromkind = svn.ra.check_path(self.ra, entrypath.strip('/'), fromrev)
                self.reparent(self.module)

                if fromkind == svn.core.svn_node_file:   # a deleted file
                    entries.append(self.recode(entry))
                elif fromkind == svn.core.svn_node_dir:
                    # print "Deleted/moved non-file:", revnum, path, ent
                    # children = self._find_children(path, revnum - 1)
                    # print "find children %s@%d from %d action %s" % (path, revnum, ent.copyfrom_rev, ent.action)
                    # Sometimes this is tricky. For example: in
                    # The Subversion Repository revision 6940 a dir
                    # was copied and one of its files was deleted
                    # from the new location in the same commit. This
                    # code can't deal with that yet.
                    if ent.action == 'C':
                        children = self._find_children(path, fromrev)
                    else:
                        oroot = entrypath.strip('/')
                        nroot = path.strip('/')
                        children = self._find_children(oroot, fromrev)
                        children = [s.replace(oroot,nroot) for s in children]
                    # Mark all [files, not directories] as deleted.
                    for child in children:
                        # Can we move a child directory and its
                        # parent in the same commit? (probably can). Could
                        # cause problems if instead of revnum -1,
                        # we have to look in (copyfrom_path, revnum - 1)
                        entrypath = get_entry_from_path("/" + child, module=old_module)
                        if entrypath:
                            entry = self.recode(entrypath.decode(self.encoding))
                            if entry in copies:
                                # deleted file within a copy
                                del copies[entry]
                            else:
                                entries.append(entry)
                else:
                    self.ui.debug('unknown path in revision %d: %s\n' % \
                                  (revnum, path))
            elif kind == svn.core.svn_node_dir:
                # Should probably synthesize normal file entries
                # and handle as above to clean up copy/rename handling.

                # If the directory just had a prop change,
                # then we shouldn't need to look for its children.
                if ent.action == 'M':
                    continue

                # Also this could create duplicate entries. Not sure
                # whether this will matter. Maybe should make entries a set.
                # print "Changed directory", revnum, path, ent.action, ent.copyfrom_path, ent.copyfrom_rev
                # This will fail if a directory was copied
                # from another branch and then some of its files
                # were deleted in the same transaction.
                children = self._find_children(path, revnum)
                children.sort()
                for child in children:
                    # Can we move a child directory and its
                    # parent in the same commit? (probably can). Could
                    # cause problems if instead of revnum -1,
                    # we have to look in (copyfrom_path, revnum - 1)
                    entrypath = get_entry_from_path("/" + child, module=self.module)
                    # print child, self.module, entrypath
                    if entrypath:
                        # Need to filter out directories here...
                        kind = svn.ra.check_path(self.ra, entrypath, revnum)
                        if kind != svn.core.svn_node_dir:
                            entries.append(self.recode(entrypath))

                # Copies here (must copy all from source)
                # Probably not a real problem for us if
                # source does not exist

                # Can do this with the copy command "hg copy"
                # if ent.copyfrom_path:
                #   copyfrom_entry = get_entry_from_path(ent.copyfrom_path.decode(self.encoding),
                #             module=self.module)
                #   copyto_entry = entrypath
                #
                #   print "copy directory", copyfrom_entry, 'to', copyto_entry
                #
                #   copies.append((copyfrom_entry, copyto_entry))

                if ent.copyfrom_path:
                    copyfrom_path = ent.copyfrom_path.decode(self.encoding)
                    copyfrom_entry = get_entry_from_path(copyfrom_path, module=self.module)
                    if copyfrom_entry:
                        copyfrom[path] = ent
                        self.ui.debug("mark %s came from %s\n" % (path, copyfrom[path]))

                        # Good, /probably/ a regular copy. Really should check
                        # to see whether the parent revision actually contains
                        # the directory in question.
                        children = self._find_children(self.recode(copyfrom_path), ent.copyfrom_rev)
                        children.sort()
                        for child in children:
                            entrypath = get_entry_from_path("/" + child, module=self.module)
                            if entrypath:
                                entry = entrypath.decode(self.encoding)
                                # print "COPY COPY From", copyfrom_entry, entry
                                copyto_path = path + entry[len(copyfrom_entry):]
                                copyto_entry = get_entry_from_path(copyto_path, module=self.module)
                                # print "COPY", entry, "COPY To", copyto_entry
                                copies[self.recode(copyto_entry)] = self.recode(entry)
                                # copy from quux splort/quuxfile

        return (util.unique(entries), copies)
602 602
    def _fetch_revisions(self, from_revnum, to_revnum):
        """Pull the SVN log for [to_revnum, from_revnum] and populate
        self.commits/self.paths with the parsed revisions."""
        if from_revnum < to_revnum:
            from_revnum, to_revnum = to_revnum, from_revnum

        self.child_cset = None
        def parselogentry(orig_paths, revnum, author, date, message):
            """Return the parsed commit object or None, and True if
            the revision is a branch root.
            """
            self.ui.debug("parsing revision %d (%d changes)\n" %
                          (revnum, len(orig_paths)))

            rev = self.revid(revnum)
            # branch log might return entries for a parent we already have

            if (rev in self.commits or revnum < to_revnum):
                return None, False

            parents = []
            # check whether this revision is the start of a branch
            if self.module in orig_paths:
                ent = orig_paths[self.module]
                if ent.copyfrom_path:
                    # ent.copyfrom_rev may not be the actual last revision
                    prev = self.latest(ent.copyfrom_path, ent.copyfrom_rev)
                    parents = [self.revid(prev, ent.copyfrom_path)]
                    self.ui.note('found parent of branch %s at %d: %s\n' % \
                                 (self.module, prev, ent.copyfrom_path))
                else:
                    self.ui.debug("No copyfrom path, don't know what to do.\n")

            orig_paths = orig_paths.items()
            orig_paths.sort()
            paths = []
            # filter out unrelated paths
            for path, ent in orig_paths:
                if not path.startswith(self.module):
                    self.ui.debug("boring@%s: %s\n" % (revnum, path))
                    continue
                paths.append((path, ent))

            # Example SVN datetime. Includes microseconds.
            # ISO-8601 conformant
            # '2007-01-04T17:35:00.902377Z'
            date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])

            log = message and self.recode(message)
            author = author and self.recode(author) or ''
            try:
                # last path component names the branch; trunk maps to the
                # default (unnamed) branch
                branch = self.module.split("/")[-1]
                if branch == 'trunk':
                    branch = ''
            except IndexError:
                branch = None

            cset = commit(author=author,
                          date=util.datestr(date),
                          desc=log,
                          parents=parents,
                          branch=branch,
                          rev=rev.encode('utf-8'))

            self.commits[rev] = cset
            # The parents list is *shared* among self.paths and the
            # commit object. Both will be updated below.
            self.paths[rev] = (paths, cset.parents)
            if self.child_cset and not self.child_cset.parents:
                self.child_cset.parents[:] = [rev]
            self.child_cset = cset
            return cset, len(parents) > 0

        self.ui.note('fetching revision log for "%s" from %d to %d\n' %
                     (self.module, from_revnum, to_revnum))

        try:
            firstcset = None
            stream = get_log(self.url, [self.module], from_revnum, to_revnum)
            try:
                for entry in stream:
                    paths, revnum, author, date, message = entry
                    if self.is_blacklisted(revnum):
                        self.ui.note('skipping blacklisted revision %d\n'
                                     % revnum)
                        continue
                    if paths is None:
                        self.ui.debug('revision %d has no entries\n' % revnum)
                        continue
                    cset, branched = parselogentry(paths, revnum, author,
                                                   date, message)
                    if cset:
                        firstcset = cset
                    if branched:
                        break
            finally:
                stream.close()

            if firstcset and not firstcset.parents:
                # The first revision of the sequence (the last fetched one)
                # has invalid parents if not a branch root. Find the parent
                # revision now, if any.
                try:
                    firstrevnum = self.revnum(firstcset.rev)
                    if firstrevnum > 1:
                        latest = self.latest(self.module, firstrevnum - 1)
                        firstcset.parents.append(self.revid(latest))
                except util.Abort:
                    pass
        except SubversionException, (inst, num):
            if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
                raise NoSuchRevision(branch=self,
                    revision="Revision number %d" % to_revnum)
            raise
715 715
    def _getfile(self, file, rev):
        """Fetch *file* at *rev* from the repository.

        Returns (data, mode) where mode is 'x' for executables, 'l' for
        symlinks (with the 'link ' payload prefix stripped) or ''.
        Raises IOError if the file does not exist at that revision.
        """
        io = StringIO()
        # TODO: ra.get_file transmits the whole file instead of diffs.
        mode = ''
        try:
            new_module, revnum = self.revsplit(rev)[1:]
            if self.module != new_module:
                self.module = new_module
                self.reparent(self.module)
            info = svn.ra.get_file(self.ra, file, revnum, io)
            if isinstance(info, list):
                info = info[-1]
            mode = ("svn:executable" in info) and 'x' or ''
            mode = ("svn:special" in info) and 'l' or mode
        except SubversionException, e:
            notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
                svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
            if e.apr_err in notfound: # File not found
                raise IOError()
            raise
        data = io.getvalue()
        if mode == 'l':
            # svn stores symlink targets as 'link <target>'
            link_prefix = "link "
            if data.startswith(link_prefix):
                data = data[len(link_prefix):]
        return data, mode
742 742
743 743 def _find_children(self, path, revnum):
744 744 path = path.strip('/')
745 745 pool = Pool()
746 746 rpath = '/'.join([self.base, path]).strip('/')
747 747 return ['%s/%s' % (path, x) for x in svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool).keys()]
748 748
# Shell hook installed into freshly created repositories (see svn_sink):
# Subversion refuses revision property changes unless a
# pre-revprop-change hook allows them; this one permits only svn:log
# edits and our own hg:convert-* bookkeeping properties.
pre_revprop_change = '''#!/bin/sh

REPOS="$1"
REV="$2"
USER="$3"
PROPNAME="$4"
ACTION="$5"

if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi

echo "Changing prohibited revision property" >&2
exit 1
'''
764 764
class svn_sink(converter_sink, commandline):
    """Write converted revisions into a Subversion working copy."""
    # matches the revision number in 'svn commit' output
    commit_re = re.compile(r'Committed revision (\d+).', re.M)

    def prerun(self):
        # the run* helpers chdir into the working copy first...
        if self.wc:
            os.chdir(self.wc)

    def postrun(self):
        # ...and restore the original directory afterwards
        if self.wc:
            os.chdir(self.cwd)

    def join(self, name):
        # path of an administrative file inside the wc's .svn directory
        return os.path.join(self.wc, '.svn', name)

    def revmapfile(self):
        return self.join('hg-shamap')

    def authorfile(self):
        return self.join('hg-authormap')
784 784
    def __init__(self, ui, path):
        """Open (or create) a Subversion target at *path*.

        *path* may be an existing working copy, an existing repository,
        or a location where a new repository is created; a checkout is
        made next to the target when needed.
        """
        converter_sink.__init__(self, ui, path)
        commandline.__init__(self, ui, 'svn')
        self.delete = []
        self.setexec = []
        self.delexec = []
        self.copies = []
        self.wc = None
        self.cwd = os.getcwd()

        path = os.path.realpath(path)

        created = False
        if os.path.isfile(os.path.join(path, '.svn', 'entries')):
            # path is already a working copy: just bring it up to date
            self.wc = path
            self.run0('update')
        else:
            wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')

            if os.path.isdir(os.path.dirname(path)):
                if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
                    ui.status(_('initializing svn repo %r\n') %
                              os.path.basename(path))
                    commandline(ui, 'svnadmin').run0('create', path)
                    created = path
                path = util.normpath(path)
                if not path.startswith('/'):
                    path = '/' + path
                path = 'file://' + path

            ui.status(_('initializing svn wc %r\n') % os.path.basename(wcpath))
            self.run0('checkout', path, wcpath)

            self.wc = wcpath
        self.opener = util.opener(self.wc)
        self.wopener = util.opener(self.wc)
        self.childmap = mapfile(ui, self.join('hg-childmap'))
        # on exec-less filesystems is_exec stays None (see putfile)
        self.is_exec = util.checkexec(self.wc) and util.is_exec or None

        if created:
            # install a pre-revprop-change hook so we may set our
            # hg:convert-* revision properties on the fresh repository
            hook = os.path.join(created, 'hooks', 'pre-revprop-change')
            fp = open(hook, 'w')
            fp.write(pre_revprop_change)
            fp.close()
            util.set_flags(hook, "x")

        xport = transport.SvnRaTransport(url=geturl(path))
        self.uuid = svn.ra.get_uuid(xport.ra)
833 833
834 834 def wjoin(self, *names):
835 835 return os.path.join(self.wc, *names)
836 836
    def putfile(self, filename, flags, data):
        """Write *data* to *filename* in the working copy and queue any
        svn:executable property change implied by *flags* ('x', 'l')."""
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            try:
                # replacing a symlink with a regular file: drop the link
                if os.path.islink(self.wjoin(filename)):
                    os.unlink(filename)
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)

        if self.is_exec:
            was_exec = self.is_exec(self.wjoin(filename))
        else:
            # On filesystems not supporting execute-bit, there is no way
            # to know if it is set but asking subversion. Setting it
            # systematically is just as expensive and much simpler.
            was_exec = 'x' not in flags

        util.set_flags(self.wjoin(filename), flags)
        # record exec-bit transitions so putcommit() can adjust the
        # svn:executable property accordingly
        if was_exec:
            if 'x' not in flags:
                self.delexec.append(filename)
        else:
            if 'x' in flags:
                self.setexec.append(filename)
863 863
864 864 def delfile(self, name):
865 865 self.delete.append(name)
866 866
867 867 def copyfile(self, source, dest):
868 868 self.copies.append([source, dest])
869 869
    def _copyfile(self, source, dest):
        # SVN's copy command pukes if the destination file exists, but
        # our copyfile method expects to record a copy that has
        # already occurred. Cross the semantic gap.
        wdest = self.wjoin(dest)
        exists = os.path.exists(wdest)
        if exists:
            # move the existing content aside so 'svn copy' can run,
            # then restore it over whatever the copy produced
            fd, tempname = tempfile.mkstemp(
                prefix='hg-copy-', dir=os.path.dirname(wdest))
            os.close(fd)
            os.unlink(tempname)
            os.rename(wdest, tempname)
        try:
            self.run0('copy', source, dest)
        finally:
            if exists:
                try:
                    os.unlink(wdest)
                except OSError:
                    pass
                os.rename(tempname, wdest)
891 891
892 892 def dirs_of(self, files):
893 893 dirs = set()
894 894 for f in files:
895 895 if os.path.isdir(self.wjoin(f)):
896 896 dirs.add(f)
897 897 for i in strutil.rfindall(f, '/'):
898 898 dirs.add(f[:i])
899 899 return dirs
900 900
901 901 def add_dirs(self, files):
902 902 add_dirs = [d for d in self.dirs_of(files)
903 903 if not os.path.exists(self.wjoin(d, '.svn', 'entries'))]
904 904 if add_dirs:
905 905 add_dirs.sort()
906 906 self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
907 907 return add_dirs
908 908
909 909 def add_files(self, files):
910 910 if files:
911 911 self.xargs(files, 'add', quiet=True)
912 912 return files
913 913
914 914 def tidy_dirs(self, names):
915 915 dirs = list(self.dirs_of(names))
916 916 dirs.sort(reverse=True)
917 917 deleted = []
918 918 for d in dirs:
919 919 wd = self.wjoin(d)
920 920 if os.listdir(wd) == '.svn':
921 921 self.run0('delete', d)
922 922 deleted.append(d)
923 923 return deleted
924 924
925 925 def addchild(self, parent, child):
926 926 self.childmap[parent] = child
927 927
928 928 def revid(self, rev):
929 929 return u"svn:%s@%s" % (self.uuid, rev)
930 930
    def putcommit(self, files, parents, commit):
        """Commit *files* with *commit*'s metadata and return the new
        revision id.  If any parent was already converted, reuse its
        recorded child revision instead of committing again."""
        for parent in parents:
            try:
                return self.revid(self.childmap[parent])
            except KeyError:
                pass
        entries = set(self.delete)
        files = util.frozenset(files)
        entries.update(self.add_dirs(files.difference(entries)))
        if self.copies:
            for s, d in self.copies:
                self._copyfile(s, d)
            self.copies = []
        if self.delete:
            self.xargs(self.delete, 'delete')
            self.delete = []
        entries.update(self.add_files(files.difference(entries)))
        entries.update(self.tidy_dirs(entries))
        if self.delexec:
            self.xargs(self.delexec, 'propdel', 'svn:executable')
            self.delexec = []
        if self.setexec:
            self.xargs(self.setexec, 'propset', 'svn:executable', '*')
            self.setexec = []

        # pass the commit message through a temporary file to avoid
        # command-line quoting issues
        fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
        fp = os.fdopen(fd, 'w')
        fp.write(commit.desc)
        fp.close()
        try:
            output = self.run0('commit',
                               username=util.shortuser(commit.author),
                               file=messagefile,
                               encoding='utf-8')
            try:
                # extract the new revision number from svn's output
                rev = self.commit_re.search(output).group(1)
            except AttributeError:
                self.ui.warn(_('unexpected svn output:\n'))
                self.ui.warn(output)
                raise util.Abort(_('unable to cope with svn output'))
            if commit.rev:
                self.run('propset', 'hg:convert-rev', commit.rev,
                         revprop=True, revision=rev)
            if commit.branch and commit.branch != 'default':
                self.run('propset', 'hg:convert-branch', commit.branch,
                         revprop=True, revision=rev)
            for parent in parents:
                self.addchild(parent, rev)
            return self.revid(rev)
        finally:
            os.unlink(messagefile)
982 982
983 983 def puttags(self, tags):
984 984 self.ui.warn(_('XXX TAGS NOT IMPLEMENTED YET\n'))
@@ -1,2059 +1,2061 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import re, lock, transaction, tempfile, stat, errno, ui
13 13 import os, revlog, time, util, extensions, hook
14 14
15 15 class localrepository(repo.repository):
    # protocol capabilities this repository advertises (presumably consulted
    # by the wire-protocol code — not used within this chunk)
    capabilities = util.set(('lookup', 'changegroupsubset'))
    # on-disk format requirements this class can read (checked in __init__)
    supported = ('revlogv1', 'store')
18 18
    def __init__(self, parentui, path=None, create=0):
        """Open the repository at path, or initialize it when create is set.

        Raises repo.RepoError if the repository is missing (without
        create), already exists (with create), or declares an unsupported
        format requirement.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                # a missing requires file means an old, requirement-less repo
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: with the "store" requirement, store files live under
        # .hg/store with encoded filenames; otherwise directly under .hg
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath),
                                          self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        # lazily-populated caches (see tags(), branchtags(), nodetags())
        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        # weak references to the active transaction and locks
        self._transref = self._lockref = self._wlockref = None
87 87
    def __getattr__(self, name):
        """Create the changelog, manifest and dirstate lazily on first use."""
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # touch the changelog first so sopener.defversion is set
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name
102 102
    def url(self):
        """Return this repository's URL (always a local 'file:' URL)."""
        return 'file:' + self.root
105 105
    def hook(self, name, throw=False, **args):
        """Run the configured hooks for name; throw and args are forwarded
        to the hook module."""
        return hook.hook(self.ui, self, name, throw, **args)
108 108
    # characters that may not appear in a tag name
    tag_disallowed = ':\r\n'

    def _tag(self, name, node, message, local, user, date, parent=None,
             extra={}):
        """Record tag name for node.

        With local, append to the uncommitted 'localtags' file; otherwise
        append to .hgtags and commit it.  parent selects the .hgtags base
        revision for rawcommit-style tagging; when None the working
        directory is used.  Returns the tagging changeset node (or None
        for local tags).
        """
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        def writetag(fp, name, munge, prevtags):
            # append one "<hex node> <name>" entry, making sure the
            # previous contents end with a newline
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetag(fp, name, None, prevtags)
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            # base .hgtags on the given parent revision's copy
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetag(fp, name, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
169 169
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse to tag while .hgtags is dirty in the working directory
        # (modified/added/removed/deleted/unknown)
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))


        self._tag(name, node, message, local, user, date)
195 195
    def tags(self):
        '''return a mapping of tag to node'''
        # the cache always contains at least 'tip', so a truthy check works
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            # parse one tags file into globaltags/tagtypes; fn only
            # appears in warning messages
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # h accumulates earlier (superseded) nodes for this key;
                # later entries in the same file win
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            # tags pointing at nullid are deletions and are dropped
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
280 280
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        # calling tags() populates _tagstypecache as a side effect
        self.tags()

        return self._tagstypecache.get(tagname)
293 293
    def _hgtagsnodes(self):
        """Return (rev, node, fnode) for the .hgtags filenode of each head.

        Heads without a .hgtags file are skipped; when several heads share
        the same .hgtags filenode, only the last one seen is kept.
        """
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # same .hgtags content seen earlier: drop the earlier entry
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
311 311
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]
323 323
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            # build the inverse of the tags() mapping once and cache it
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])
331 331
    def _branchtags(self):
        """Return the branch -> tipmost node map, refreshing and rewriting
        the on-disk branch cache when it lags behind the changelog tip."""
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
341 341
    def branchtags(self):
        """Return a cached branch -> node map, keys in the local charset."""
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache
354 354
    def _readbranchcache(self):
        """Parse branch.cache; return (branch->node map, tip node, tip rev).

        Returns ({}, nullid, nullrev) when the cache is missing, unreadable
        or fails the tip sanity check — it can always be rebuilt.
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            # remaining lines: "<node hex> <branch label>"
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # any parse error just invalidates the cache
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
382 382
    def _writebranchcache(self, branches, tip, tiprev):
        """Atomically write branches to branch.cache; write errors are
        ignored because the cache is purely an optimization."""
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass
392 392
    def _updatebranchcache(self, partial, start, end):
        """Fold revisions [start, end) into partial so every branch maps
        to the newest node seen on it."""
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            partial[b] = c.node()
398 398
    def lookup(self, key):
        """Resolve key to a changelog node.

        Accepted keys, in resolution order: '.' (first dirstate parent),
        'null', anything the changelog matches directly, a tag name, a
        branch name, or an unambiguous hash prefix.  Raises RepoError
        when nothing matches.
        """
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            # a binary node was passed: show it in hex in the error
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)
425 425
    def dev(self):
        """Return the device number of the .hg directory."""
        return os.lstat(self.path).st_dev
428 428
    def local(self):
        """True: this repository is accessed via the local filesystem."""
        return True
431 431
    def join(self, f):
        """Return f joined with the .hg directory path."""
        return os.path.join(self.path, f)
434 434
    def sjoin(self, f):
        """Return f joined with the store path, with filename encoding."""
        f = self.encodefn(f)
        return os.path.join(self.spath, f)
438 438
    def wjoin(self, f):
        """Return f joined with the working directory root."""
        return os.path.join(self.root, f)
441 441
    def file(self, f):
        """Return the filelog for tracked file f (a leading '/' is
        stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
446 446
    def changectx(self, changeid=None):
        """Return a changectx for changeid."""
        return context.changectx(self, changeid)
449 449
    def workingctx(self):
        """Return a context for the working directory."""
        return context.workingctx(self)
452 452
    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        # a null second parent is omitted from the result
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]
465 465
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
470 470
    def getcwd(self):
        """Delegate to dirstate.getcwd()."""
        return self.dirstate.getcwd()
473 473
    def pathto(self, f, cwd=None):
        """Delegate to dirstate.pathto(f, cwd)."""
        return self.dirstate.pathto(f, cwd)
476 476
    def wfile(self, f, mode='r'):
        """Open file f relative to the working directory."""
        return self.wopener(f, mode)
479 479
    def _link(self, f):
        """True if working-directory file f is a symbolic link."""
        return os.path.islink(self.wjoin(f))
482 482
    def _filter(self, filter, filename, data):
        """Pipe data through the first [encode]/[decode] command whose
        pattern matches filename; compiled matchers are cached per
        section in self.filterpats."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.filterpats[filter] = l

        for mf, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data
498 498
    def wread(self, filename):
        """Read filename from the working directory (the link target for
        symlinks) and apply the 'encode' filters."""
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)
505 505
    def wwrite(self, filename, data, flags):
        """Write data to working-directory file filename after applying
        the 'decode' filters, then set its flags (exec/symlink)."""
        data = self._filter("decode", filename, data)
        # drop any existing file (e.g. a symlink) before writing anew
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        self.wopener(filename, 'w').write(data)
        util.set_flags(self.wjoin(filename), flags)
514 514
    def wwritedata(self, filename, data):
        """Apply the 'decode' filters to data for filename, without
        writing anything to disk."""
        return self._filter("decode", filename, data)
517 517
    def transaction(self):
        """Return a new store transaction, nesting into the active one
        if a live transaction already exists."""
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # after a successful close, the journal files become the undo
        # files used by rollback()
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        # keep only a weak reference: the caller owns the transaction
        self._transref = weakref.ref(tr)
        return tr
542 542
    def recover(self):
        """Roll back an interrupted transaction (the leftover journal).

        Returns True if a transaction was recovered, False otherwise.
        """
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l
556 556
    def rollback(self):
        """Undo the last committed transaction, restoring the dirstate
        and branch recorded in the undo files."""
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                branch = self.opener("undo.branch").read()
                self.dirstate.setbranch(branch)
                # drop all in-memory caches made stale by the rollback
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock
574 574
    def invalidate(self):
        """Drop the cached changelog/manifest (recreated lazily by
        __getattr__) and the tag caches, forcing a reread from disk."""
        for a in "changelog manifest".split():
            if hasattr(self, a):
                self.__delattr__(a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
582 582
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire lockname.  Without wait, LockHeld propagates; with
        wait, warn about the holder and retry with the configured
        ui.timeout.  acquirefn (if any) runs once the lock is held."""
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
597 597
    def lock(self, wait=True):
        """Return the store lock, reusing the live one if still held."""
        if self._lockref and self._lockref():
            return self._lockref()

        # invalidate caches on acquisition: another process may have
        # changed the store while we were unlocked
        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
606 606
    def wlock(self, wait=True):
        """Return the working directory lock; the dirstate is invalidated
        on acquisition and written back on release."""
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
616 616
    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fn is committed against parent filenodes taken from manifest1
        and manifest2; fn is appended to changelist when a new filelog
        entry is created.  Returns the (new or existing) filenode.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)
678 678
    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        """Commit files against explicitly given parents (legacy
        interface); defaults to the dirstate parents when p1 is None."""
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)
684 684
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        """Create a new changeset and return its node.

        With p1 given (rawcommit mode) files are committed as-is against
        the explicit parents; otherwise files (or, when empty, the whole
        dirstate status) are checked against the dirstate.  An editor is
        invoked when no text is supplied (unless empty_ok) or when
        force_editor is set.  Returns None when there is nothing to
        commit.
        """
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        if files:
            # drop duplicates so no file is committed twice
            files = util.unique(files)
        try:
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            if use_dirstate:
                if files:
                    # classify the named files by their dirstate state
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    # round-trip to validate the branch name is UTF-8
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            wlock = self.wlock()
            lock = self.lock()
            tr = self.transaction()
            # hand revlogs a weak proxy so they cannot keep the
            # transaction alive past this method
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # rawcommit: an unreadable file becomes a removal
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            if use_dirstate:
                # strip trailing whitespace and leading blank lines;
                # refuse an entirely empty message
                lines = [line.rstrip() for line in text.rstrip().splitlines()]
                while lines and not lines[0]:
                    del lines[0]
                if not lines:
                    raise util.Abort(_("empty commit message"))
                text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache and "branch" in extra:
                self.branchcache[util.tolocal(extra["branch"])] = n

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr, lock, wlock
862 864
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever remains in fdict was not found in the manifest
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
904 906
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted name lists: (modified, added,
        removed, deleted, unknown, ignored, clean); ignored and clean
        are only populated when the corresponding list_* flag is set.
        """

        def fcmp(fn, getnode):
            # compare working-directory contents with the stored revision
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            # manifest of node restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                # opportunistic: skip the fixup rather than
                                # block waiting for the wlock
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # an empty mf2 entry marks a working-dir file whose
                    # content must be compared explicitly (fcmp)
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only in node1: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
1016 1018
1017 1019 def add(self, list):
1018 1020 wlock = self.wlock()
1019 1021 try:
1020 1022 rejected = []
1021 1023 for f in list:
1022 1024 p = self.wjoin(f)
1023 1025 try:
1024 1026 st = os.lstat(p)
1025 1027 except:
1026 1028 self.ui.warn(_("%s does not exist!\n") % f)
1027 1029 rejected.append(f)
1028 1030 continue
1029 1031 if st.st_size > 10000000:
1030 1032 self.ui.warn(_("%s: files over 10MB may cause memory and"
1031 1033 " performance problems\n"
1032 1034 "(use 'hg revert %s' to unadd the file)\n")
1033 1035 % (f, f))
1034 1036 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1035 1037 self.ui.warn(_("%s not added: only files and symlinks "
1036 1038 "supported currently\n") % f)
1037 1039 rejected.append(p)
1038 1040 elif self.dirstate[f] in 'amn':
1039 1041 self.ui.warn(_("%s already tracked!\n") % f)
1040 1042 elif self.dirstate[f] == 'r':
1041 1043 self.dirstate.normallookup(f)
1042 1044 else:
1043 1045 self.dirstate.add(f)
1044 1046 return rejected
1045 1047 finally:
1046 1048 del wlock
1047 1049
1048 1050 def forget(self, list):
1049 1051 wlock = self.wlock()
1050 1052 try:
1051 1053 for f in list:
1052 1054 if self.dirstate[f] != 'a':
1053 1055 self.ui.warn(_("%s not added!\n") % f)
1054 1056 else:
1055 1057 self.dirstate.forget(f)
1056 1058 finally:
1057 1059 del wlock
1058 1060
1059 1061 def remove(self, list, unlink=False):
1060 1062 wlock = None
1061 1063 try:
1062 1064 if unlink:
1063 1065 for f in list:
1064 1066 try:
1065 1067 util.unlink(self.wjoin(f))
1066 1068 except OSError, inst:
1067 1069 if inst.errno != errno.ENOENT:
1068 1070 raise
1069 1071 wlock = self.wlock()
1070 1072 for f in list:
1071 1073 if unlink and os.path.exists(self.wjoin(f)):
1072 1074 self.ui.warn(_("%s still exists!\n") % f)
1073 1075 elif self.dirstate[f] == 'a':
1074 1076 self.dirstate.forget(f)
1075 1077 elif f not in self.dirstate:
1076 1078 self.ui.warn(_("%s not tracked!\n") % f)
1077 1079 else:
1078 1080 self.dirstate.remove(f)
1079 1081 finally:
1080 1082 del wlock
1081 1083
1082 1084 def undelete(self, list):
1083 1085 wlock = None
1084 1086 try:
1085 1087 manifests = [self.manifest.read(self.changelog.read(p)[0])
1086 1088 for p in self.dirstate.parents() if p != nullid]
1087 1089 wlock = self.wlock()
1088 1090 for f in list:
1089 1091 if self.dirstate[f] != 'r':
1090 1092 self.ui.warn("%s not removed!\n" % f)
1091 1093 else:
1092 1094 m = f in manifests[0] and manifests[0] or manifests[1]
1093 1095 t = self.file(f).read(m[f])
1094 1096 self.wwrite(f, t, m.flags(f))
1095 1097 self.dirstate.normal(f)
1096 1098 finally:
1097 1099 del wlock
1098 1100
1099 1101 def copy(self, source, dest):
1100 1102 wlock = None
1101 1103 try:
1102 1104 p = self.wjoin(dest)
1103 1105 if not (os.path.exists(p) or os.path.islink(p)):
1104 1106 self.ui.warn(_("%s does not exist!\n") % dest)
1105 1107 elif not (os.path.isfile(p) or os.path.islink(p)):
1106 1108 self.ui.warn(_("copy failed: %s is not a file or a "
1107 1109 "symbolic link\n") % dest)
1108 1110 else:
1109 1111 wlock = self.wlock()
1110 1112 if dest not in self.dirstate:
1111 1113 self.dirstate.add(dest)
1112 1114 self.dirstate.copy(source, dest)
1113 1115 finally:
1114 1116 del wlock
1115 1117
1116 1118 def heads(self, start=None):
1117 1119 heads = self.changelog.heads(start)
1118 1120 # sort the output in rev descending order
1119 1121 heads = [(-self.changelog.rev(h), h) for h in heads]
1120 1122 heads.sort()
1121 1123 return [n for (r, n) in heads]
1122 1124
    def branchheads(self, branch, start=None):
        """Return the list of head nodes belonging to the named branch.

        Returns [] for an unknown branch.  If start is given, only
        heads reachable from start are returned (via nodesbetween).
        """
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1168 1170
1169 1171 def branches(self, nodes):
1170 1172 if not nodes:
1171 1173 nodes = [self.changelog.tip()]
1172 1174 b = []
1173 1175 for n in nodes:
1174 1176 t = n
1175 1177 while 1:
1176 1178 p = self.changelog.parents(n)
1177 1179 if p[1] != nullid or p[0] == nullid:
1178 1180 b.append((t, n, p[0], p[1]))
1179 1181 break
1180 1182 n = p[0]
1181 1183 return b
1182 1184
1183 1185 def between(self, pairs):
1184 1186 r = []
1185 1187
1186 1188 for top, bottom in pairs:
1187 1189 n, l, i = top, [], 0
1188 1190 f = 1
1189 1191
1190 1192 while n != bottom:
1191 1193 p = self.changelog.parents(n)[0]
1192 1194 if i == f:
1193 1195 l.append(n)
1194 1196 f = f * 2
1195 1197 n = p
1196 1198 i += 1
1197 1199
1198 1200 r.append(l)
1199 1201
1200 1202 return r
1201 1203
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        # local repository is empty: everything remote has is missing
        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        # req tracks nodes we have already queued for querying the remote
        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                # batch follow-up branch queries in groups of ten
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        # only the null node in common: the repositories share no history
        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1342 1344
1343 1345 def findoutgoing(self, remote, base=None, heads=None, force=False):
1344 1346 """Return list of nodes that are roots of subsets not in remote
1345 1347
1346 1348 If base dict is specified, assume that these nodes and their parents
1347 1349 exist on the remote side.
1348 1350 If a list of heads is specified, return only nodes which are heads
1349 1351 or ancestors of these heads, and return a second element which
1350 1352 contains all remote heads which get new children.
1351 1353 """
1352 1354 if base == None:
1353 1355 base = {}
1354 1356 self.findincoming(remote, base, heads, force=force)
1355 1357
1356 1358 self.ui.debug(_("common changesets up to ")
1357 1359 + " ".join(map(short, base.keys())) + "\n")
1358 1360
1359 1361 remain = dict.fromkeys(self.changelog.nodemap)
1360 1362
1361 1363 # prune everything remote has from the tree
1362 1364 del remain[nullid]
1363 1365 remove = base.keys()
1364 1366 while remove:
1365 1367 n = remove.pop(0)
1366 1368 if n in remain:
1367 1369 del remain[n]
1368 1370 for p in self.changelog.parents(n):
1369 1371 remove.append(p)
1370 1372
1371 1373 # find every node whose parents have been pruned
1372 1374 subset = []
1373 1375 # find every remote head that will get new children
1374 1376 updated_heads = {}
1375 1377 for n in remain:
1376 1378 p1, p2 = self.changelog.parents(n)
1377 1379 if p1 not in remain and p2 not in remain:
1378 1380 subset.append(n)
1379 1381 if heads:
1380 1382 if p1 in heads:
1381 1383 updated_heads[p1] = True
1382 1384 if p2 in heads:
1383 1385 updated_heads[p2] = True
1384 1386
1385 1387 # this is the set of all roots we have to push
1386 1388 if heads:
1387 1389 return subset, updated_heads.keys()
1388 1390 else:
1389 1391 return subset
1390 1392
1391 1393 def pull(self, remote, heads=None, force=False):
1392 1394 lock = self.lock()
1393 1395 try:
1394 1396 fetch = self.findincoming(remote, heads=heads, force=force)
1395 1397 if fetch == [nullid]:
1396 1398 self.ui.status(_("requesting all changes\n"))
1397 1399
1398 1400 if not fetch:
1399 1401 self.ui.status(_("no changes found\n"))
1400 1402 return 0
1401 1403
1402 1404 if heads is None:
1403 1405 cg = remote.changegroup(fetch, 'pull')
1404 1406 else:
1405 1407 if 'changegroupsubset' not in remote.capabilities:
1406 1408 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1407 1409 cg = remote.changegroupsubset(fetch, heads, 'pull')
1408 1410 return self.addchangegroup(cg, 'pull', remote.url())
1409 1411 finally:
1410 1412 del lock
1411 1413
1412 1414 def push(self, remote, force=False, revs=None):
1413 1415 # there are two ways to push to remote repo:
1414 1416 #
1415 1417 # addchangegroup assumes local user can lock remote
1416 1418 # repo (local filesystem, old ssh servers).
1417 1419 #
1418 1420 # unbundle assumes local user cannot lock remote repo (new ssh
1419 1421 # servers, http servers).
1420 1422
1421 1423 if remote.capable('unbundle'):
1422 1424 return self.push_unbundle(remote, force, revs)
1423 1425 return self.push_addchangegroup(remote, force, revs)
1424 1426
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push and vet the remote heads.

        Returns a (changegroup, remote_heads) pair, or (None, 1) when
        there is nothing to push or when the push would create new
        remote heads and force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # empty remote repo: any push is fine
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1480 1482
1481 1483 def push_addchangegroup(self, remote, force, revs):
1482 1484 lock = remote.lock()
1483 1485 try:
1484 1486 ret = self.prepush(remote, force, revs)
1485 1487 if ret[0] is not None:
1486 1488 cg, remote_heads = ret
1487 1489 return remote.addchangegroup(cg, 'push', self.url())
1488 1490 return ret[1]
1489 1491 finally:
1490 1492 del lock
1491 1493
1492 1494 def push_unbundle(self, remote, force, revs):
1493 1495 # local repo finds heads on server, finds out what revs it
1494 1496 # must push. once revs transferred, if server finds it has
1495 1497 # different heads (someone else won commit/push race), server
1496 1498 # aborts.
1497 1499
1498 1500 ret = self.prepush(remote, force, revs)
1499 1501 if ret[0] is not None:
1500 1502 cg, remote_heads = ret
1501 1503 if force: remote_heads = ['force']
1502 1504 return remote.unbundle(cg, remote_heads, 'push')
1503 1505 return ret[1]
1504 1506
1505 1507 def changegroupinfo(self, nodes, source):
1506 1508 if self.ui.verbose or source == 'bundle':
1507 1509 self.ui.status(_("%d changesets found\n") % len(nodes))
1508 1510 if self.ui.debugflag:
1509 1511 self.ui.debug(_("List of changesets:\n"))
1510 1512 for node in nodes:
1511 1513 self.ui.debug("%s\n" % hex(node))
1512 1514
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.

        The result is a util.chunkbuffer over a lazily-evaluated
        generator (gengroup below).
        """

        # preoutgoing hook may abort the whole operation (throw=True)
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1809 1811
1810 1812 def changegroup(self, basenodes, source):
1811 1813 """Generate a changegroup of all nodes that we have that a recipient
1812 1814 doesn't.
1813 1815
1814 1816 This is much easier than the previous function as we can assume that
1815 1817 the recipient has any changenode we aren't sending them."""
1816 1818
1817 1819 self.hook('preoutgoing', throw=True, source=source)
1818 1820
# Collect the outgoing changelog nodes and the set of their revision
# numbers; revset decides which manifest/file revisions to include.
1819 1821 cl = self.changelog
1820 1822 nodes = cl.nodesbetween(basenodes, None)[0]
1821 1823 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1822 1824 self.changegroupinfo(nodes, source)
1823 1825
# Linkrev-lookup callback for cl.group(): a changelog node is its own
# link target, so the identity mapping suffices.
1824 1826 def identity(x):
1825 1827 return x
1826 1828
# Yield every node of the given revlog whose linkrev falls inside the
# set of outgoing changesets.
1827 1829 def gennodelst(revlog):
1828 1830 for r in xrange(0, revlog.count()):
1829 1831 n = revlog.node(r)
1830 1832 if revlog.linkrev(n) in revset:
1831 1833 yield n
1832 1834
# Side-effecting collector: while the changelog group is streamed,
# record every file name touched by each outgoing changeset.
1833 1835 def changed_file_collector(changedfileset):
1834 1836 def collect_changed_files(clnode):
1835 1837 c = cl.read(clnode)
# c[3] is the list of files modified by this changeset.
1836 1838 for fname in c[3]:
1837 1839 changedfileset[fname] = 1
1838 1840 return collect_changed_files
1839 1841
# Map a manifest/file node back to the changelog node that introduced
# it (via linkrev), as the changegroup wire format requires.
1840 1842 def lookuprevlink_func(revlog):
1841 1843 def lookuprevlink(n):
1842 1844 return cl.node(revlog.linkrev(n))
1843 1845 return lookuprevlink
1844 1846
# Generator producing the raw changegroup byte stream: changelog
# chunks, then manifest chunks, then one group per changed file,
# terminated by an empty closing chunk.
1845 1847 def gengroup():
1846 1848 # construct a list of all changed files
1847 1849 changedfiles = {}
1848 1850
1849 1851 for chnk in cl.group(nodes, identity,
1850 1852 changed_file_collector(changedfiles)):
1851 1853 yield chnk
1852 1854 changedfiles = changedfiles.keys()
1853 1855 changedfiles.sort()
1854 1856
1855 1857 mnfst = self.manifest
1856 1858 nodeiter = gennodelst(mnfst)
1857 1859 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1858 1860 yield chnk
1859 1861
1860 1862 for fname in changedfiles:
1861 1863 filerevlog = self.file(fname)
1862 1864 if filerevlog.count() == 0:
1863 1865 raise util.Abort(_("empty or missing revlog for %s") % fname)
# Materialize the node list so emptiness can be tested before
# emitting the per-file header chunk.
1864 1866 nodeiter = gennodelst(filerevlog)
1865 1867 nodeiter = list(nodeiter)
1866 1868 if nodeiter:
1867 1869 yield changegroup.chunkheader(len(fname))
1868 1870 yield fname
1869 1871 lookup = lookuprevlink_func(filerevlog)
1870 1872 for chnk in filerevlog.group(nodeiter, lookup):
1871 1873 yield chnk
1872 1874
# An empty chunk signals the end of the stream.
1873 1875 yield changegroup.closechunk()
1874 1876
1875 1877 if nodes:
1876 1878 self.hook('outgoing', node=hex(nodes[0]), source=source)
1877 1879
1878 1880 return util.chunkbuffer(gengroup())
1879 1881
1880 1882 def addchangegroup(self, source, srctype, url, emptyok=False):
1881 1883 """add changegroup to repo.
1882 1884
1883 1885 return values:
1884 1886 - nothing changed or no source: 0
1885 1887 - more heads than before: 1+added heads (2..n)
1886 1888 - less heads than before: -1-removed heads (-2..-n)
1887 1889 - number of heads stays the same: 1
1888 1890 """
# Per-changeset callback for cl.addgroup(): logs the node being added
# and returns the revision number to use as its linkrev.
1889 1891 def csmap(x):
1890 1892 self.ui.debug(_("add changeset %s\n") % short(x))
1891 1893 return cl.count()
1892 1894
# Map a changelog node to its revision number; used as the linkrev
# lookup when adding manifest and filelog groups.
1893 1895 def revmap(x):
1894 1896 return cl.rev(x)
1895 1897
1896 1898 if not source:
1897 1899 return 0
1898 1900
1899 1901 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1900 1902
1901 1903 changesets = files = revisions = 0
1902 1904
1903 1905 # write changelog data to temp files so concurrent readers will not see
1904 1906 # inconsistent view
1905 1907 cl = self.changelog
1906 1908 cl.delayupdate()
1907 1909 oldheads = len(cl.heads())
1908 1910
1909 1911 tr = self.transaction()
1910 1912 try:
# A weakref proxy avoids a reference cycle so the transaction's
# destructor (rollback on error) runs when 'del tr' drops the
# last strong reference below.
1911 1913 trp = weakref.proxy(tr)
1912 1914 # pull off the changeset group
1913 1915 self.ui.status(_("adding changesets\n"))
# cor/cnr: last changelog revision before/after the group; used
# below to fire per-changeset hooks for the new range.
1914 1916 cor = cl.count() - 1
1915 1917 chunkiter = changegroup.chunkiter(source)
1916 1918 if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
1917 1919 raise util.Abort(_("received changelog group is empty"))
1918 1920 cnr = cl.count() - 1
1919 1921 changesets = cnr - cor
1920 1922
1921 1923 # pull off the manifest group
1922 1924 self.ui.status(_("adding manifests\n"))
1923 1925 chunkiter = changegroup.chunkiter(source)
1924 1926 # no need to check for empty manifest group here:
1925 1927 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1926 1928 # no new manifest will be created and the manifest group will
1927 1929 # be empty during the pull
1928 1930 self.manifest.addgroup(chunkiter, revmap, trp)
1929 1931
1930 1932 # process the files
1931 1933 self.ui.status(_("adding file changes\n"))
# File groups follow until an empty chunk (falsy name) marks the
# end of the changegroup stream.
1932 1934 while 1:
1933 1935 f = changegroup.getchunk(source)
1934 1936 if not f:
1935 1937 break
1936 1938 self.ui.debug(_("adding %s revisions\n") % f)
1937 1939 fl = self.file(f)
1938 1940 o = fl.count()
1939 1941 chunkiter = changegroup.chunkiter(source)
1940 1942 if fl.addgroup(chunkiter, revmap, trp) is None:
1941 1943 raise util.Abort(_("received file revlog group is empty"))
1942 1944 revisions += fl.count() - o
1943 1945 files += 1
1944 1946
1945 1947 # make changelog see real files again
1946 1948 cl.finalize(trp)
1947 1949
1948 1950 newheads = len(self.changelog.heads())
1949 1951 heads = ""
1950 1952 if oldheads and newheads != oldheads:
1951 1953 heads = _(" (%+d heads)") % (newheads - oldheads)
1952 1954
1953 1955 self.ui.status(_("added %d changesets"
1954 1956 " with %d changes to %d files%s\n")
1955 1957 % (changesets, revisions, files, heads))
1956 1958
1957 1959 if changesets > 0:
# pretxnchangegroup runs while the transaction is still open, so
# a hook failure aborts and rolls everything back.
1958 1960 self.hook('pretxnchangegroup', throw=True,
1959 1961 node=hex(self.changelog.node(cor+1)), source=srctype,
1960 1962 url=url)
1961 1963
1962 1964 tr.close()
1963 1965 finally:
# Drop the last reference: an uncommitted transaction is rolled
# back by its destructor.
1964 1966 del tr
1965 1967
1966 1968 if changesets > 0:
1967 1969 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1968 1970 source=srctype, url=url)
1969 1971
1970 1972 for i in xrange(cor + 1, cnr + 1):
1971 1973 self.hook("incoming", node=hex(self.changelog.node(i)),
1972 1974 source=srctype, url=url)
1973 1975
1974 1976 # never return 0 here:
1975 1977 if newheads < oldheads:
1976 1978 return newheads - oldheads - 1
1977 1979 else:
1978 1980 return newheads - oldheads + 1
1979 1981
1980 1982
def stream_in(self, remote):
    """Clone by copying raw store files streamed from the remote.

    Consumes the 'stream_out' wire-protocol response: an integer status
    line, then a "<filecount> <bytecount>" summary line, then for each
    file a "<name>\\0<size>" header followed by exactly <size> bytes of
    revlog data written verbatim into the local store via self.sopener.

    Returns len(self.heads()) + 1 so callers treat the operation as
    having added heads (see clone()).

    Raises util.Abort on a server-reported error status and
    util.UnexpectedOutput on a malformed response line.
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    # Bug fix: "except ValueError, TypeError:" caught only ValueError
    # and rebound it to the name TypeError; a tuple catches both.
    except (ValueError, TypeError):
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        # Same fix as above: catch both exception types, not one.
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        ofp = self.sopener(name, 'w')
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    if elapsed <= 0:
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    self.invalidate()
    return len(self.heads()) + 1
2027 2029
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # Bug fix: the default was a shared mutable list (heads=[]); use a
    # None sentinel and normalize so pull() still receives a list.
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads and remote.capable('stream'):
        return self.stream_in(remote)
    return self.pull(remote, heads)
2046 2048
2047 2049 # used to avoid circular references so destructors work
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs each (src, dest) rename in *files*.

    The pairs are copied into plain tuples up front so the returned
    closure holds no reference back to the transaction object.
    """
    pairs = [tuple(item) for item in files]
    def renameall():
        for source_path, dest_path in pairs:
            util.rename(source_path, dest_path)
    return renameall
2054 2056
def instance(ui, path, create):
    """Build a localrepository for *path*, stripping any 'file' scheme."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2057 2059
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,101 +1,101 b''
1 1 % initial svn import
2 2 Adding projA/trunk
3 3 Adding projA/branches
4 4 Adding projA/tags
5 5
6 6 Committed revision 1.
7 7 % update svn repository
8 8 A A/trunk
9 9 A A/branches
10 10 A A/tags
11 11 Checked out revision 1.
12 12 A trunk/letter.txt
13 13 A trunk/letter2.txt
14 14 A trunk/letter3.txt
15 15 Adding trunk/letter.txt
16 16 Adding trunk/letter2.txt
17 17 Adding trunk/letter3.txt
18 18 Transmitting file data ...
19 19 Committed revision 2.
20 20 % branch to old letters
21 21 A branches/old
22 22 D branches/old/letter3.txt
23 23 Adding branches/old
24 24 Adding branches/old/letter.txt
25 25 Adding branches/old/letter2.txt
26 26 Deleting branches/old/letter3.txt
27 27
28 28 Committed revision 3.
29 29 At revision 3.
30 30 % update trunk
31 31 Sending trunk/letter.txt
32 32 Transmitting file data .
33 33 Committed revision 4.
34 34 % update old branch
35 35 Sending branches/old/letter2.txt
36 36 Transmitting file data .
37 37 Committed revision 5.
38 38 % create a cross-branch revision
39 39 A branches/old/letter3.txt
40 40 D trunk/letter2.txt
41 41 Adding branches/old/letter3.txt
42 42 Deleting trunk/letter2.txt
43 43 Transmitting file data .
44 44 Committed revision 6.
45 45 % update old branch again
46 46 Sending branches/old/letter2.txt
47 47 Transmitting file data .
48 48 Committed revision 7.
49 49 % update trunk again
50 50 Sending trunk/letter.txt
51 51 Transmitting file data .
52 52 Committed revision 8.
53 53 % convert trunk and branches
54 54 initializing destination A-hg repository
55 55 scanning source...
56 56 sorting...
57 57 converting...
58 58 8 init projA
59 59 7 hello
60 60 6 branch trunk, remove letter3
61 61 5 change letter
62 62 4 change letter2
63 63 3 move and update letter3.txt
64 64 2 move and update letter3.txt
65 65 1 change letter2 again
66 66 0 last change to letter
67 67 % branch again from a converted revision
68 68 Checked out revision 1.
69 69 A branches/old2
70 70 Adding branches/old2
71 71
72 72 Committed revision 9.
73 73 % convert again
74 74 scanning source...
75 75 sorting...
76 76 converting...
77 77 0 branch trunk@1 into old2
78 78 o 9 branch trunk@1 into old2 files:
79 79 |
80 80 | o 8 last change to letter files: letter.txt
81 81 | |
82 82 | | o 7 change letter2 again files: letter2.txt
83 83 | | |
84 84 | o | 6 move and update letter3.txt files: letter2.txt
85 85 | | |
86 86 | | o 5 move and update letter3.txt files: letter3.txt
87 87 | | |
88 88 | | o 4 change letter2 files: letter2.txt
89 89 | | |
90 90 | o | 3 change letter files: letter.txt
91 91 | | |
92 +---o 2 branch trunk, remove letter3 files: letter.txt letter.txt letter2.txt letter2.txt
92 +---o 2 branch trunk, remove letter3 files: letter.txt letter2.txt
93 93 | |
94 94 | o 1 hello files: letter.txt letter2.txt letter3.txt
95 95 |/
96 96 o 0 init projA files:
97 97
98 98 old2 9:
99 99 default 8:
100 100 old 7:
101 101 tip
@@ -1,177 +1,180 b''
1 1 #!/bin/sh
2 2
3 3 "$TESTDIR/hghave" svn svn-bindings || exit 80
4 4
5 5 fix_path()
6 6 {
7 7 tr '\\' /
8 8 }
9 9
10 10 echo "[extensions]" >> $HGRCPATH
11 11 echo "convert = " >> $HGRCPATH
12 12
13 13 svnadmin create svn-repo
14 14
15 15 echo % initial svn import
16 16 mkdir t
17 17 cd t
18 18 echo a > a
19 19 cd ..
20 20
21 21 svnpath=`pwd | fix_path`
22 22 # SVN wants all paths to start with a slash. Unfortunately,
23 23 # Windows ones don't. Handle that.
24 24 expr $svnpath : "\/" > /dev/null
25 25 if [ $? -ne 0 ]; then
26 26 svnpath='/'$svnpath
27 27 fi
28 28
29 29 svnurl=file://$svnpath/svn-repo/trunk/test
30 30 svn import -m init t $svnurl | fix_path
31 31
32 32 echo % update svn repository
33 33 svn co $svnurl t2 | fix_path
34 34 cd t2
35 35 echo b >> a
36 36 echo b > b
37 37 svn add b
38 38 svn ci -m changea
39 39 cd ..
40 40
41 41 echo % convert to hg once
42 42 hg convert $svnurl
43 43
44 44 echo % update svn repository again
45 45 cd t2
46 46 echo c >> a
47 47 echo c >> b
48 48 svn ci -m changeb
49 49 cd ..
50 50
51 51 echo % test incremental conversion
52 52 hg convert $svnurl
53 53
54 54 echo % test filemap
55 55 echo 'include b' > filemap
56 56 hg convert --filemap filemap $svnurl fmap
57 57 echo '[extensions]' >> $HGRCPATH
58 58 echo 'hgext.graphlog =' >> $HGRCPATH
59 59 hg glog -R fmap --template '#rev# #desc|firstline# files: #files#\n'
60 60
61 61 echo % test stop revision
62 62 hg convert --rev 1 $svnurl stoprev
63 63 # Check convert_revision extra-records.
64 64 # This is also the only place testing more than one extra field
65 65 # in a revision.
66 66 hg --cwd stoprev tip --debug | grep extra | sed 's/=.*/=/'
67 67
68 68 ########################################
69 69
70 70 echo "# now tests that it works with trunk/branches/tags layout"
71 71 echo
72 72 echo % initial svn import
73 73 mkdir projA
74 74 cd projA
75 75 mkdir trunk
76 76 mkdir branches
77 77 mkdir tags
78 78 cd ..
79 79
80 80 svnurl=file://$svnpath/svn-repo/projA
81 81 svn import -m "init projA" projA $svnurl | fix_path
82 82
83 83
84 84 echo % update svn repository
85 85 svn co $svnurl/trunk A | fix_path
86 86 cd A
87 87 echo hello > letter.txt
88 88 svn add letter.txt
89 89 svn ci -m hello
90 90
91 91 echo world >> letter.txt
92 92 svn ci -m world
93 93
94 94 svn copy -m "tag v0.1" $svnurl/trunk $svnurl/tags/v0.1
95 95
96 96 echo 'nice day today!' >> letter.txt
97 97 svn ci -m "nice day"
98 98 cd ..
99 99
100 100 echo % convert to hg once
101 101 hg convert $svnurl A-hg
102 102
103 103 echo % update svn repository again
104 104 cd A
105 105 echo "see second letter" >> letter.txt
106 echo "nice to meet you" > letter2.txt
107 svn add letter2.txt
106 # Put it in a subdirectory to test duplicate file records
107 # from svn source (issue 714)
108 mkdir todo
109 echo "nice to meet you" > todo/letter2.txt
110 svn add todo
108 111 svn ci -m "second letter"
109 112
110 113 svn copy -m "tag v0.2" $svnurl/trunk $svnurl/tags/v0.2
111 114
112 echo "blah-blah-blah" >> letter2.txt
115 echo "blah-blah-blah" >> todo/letter2.txt
113 116 svn ci -m "work in progress"
114 117 cd ..
115 118
116 119 echo % test incremental conversion
117 120 hg convert $svnurl A-hg
118 121
119 122 cd A-hg
120 123 hg glog --template '#rev# #desc|firstline# files: #files#\n'
121 124 hg tags -q
122 125 cd ..
123 126
124 127 ########################################
125 128
126 129 echo "# now tests that it works with trunk/tags layout, but no branches yet"
127 130 echo
128 131 echo % initial svn import
129 132 mkdir projB
130 133 cd projB
131 134 mkdir trunk
132 135 mkdir tags
133 136 cd ..
134 137
135 138 svnurl=file://$svnpath/svn-repo/projB
136 139 svn import -m "init projB" projB $svnurl | fix_path
137 140
138 141
139 142 echo % update svn repository
140 143 svn co $svnurl/trunk B | fix_path
141 144 cd B
142 145 echo hello > letter.txt
143 146 svn add letter.txt
144 147 svn ci -m hello
145 148
146 149 echo world >> letter.txt
147 150 svn ci -m world
148 151
149 152 svn copy -m "tag v0.1" $svnurl/trunk $svnurl/tags/v0.1
150 153
151 154 echo 'nice day today!' >> letter.txt
152 155 svn ci -m "nice day"
153 156 cd ..
154 157
155 158 echo % convert to hg once
156 159 hg convert $svnurl B-hg
157 160
158 161 echo % update svn repository again
159 162 cd B
160 163 echo "see second letter" >> letter.txt
161 164 echo "nice to meet you" > letter2.txt
162 165 svn add letter2.txt
163 166 svn ci -m "second letter"
164 167
165 168 svn copy -m "tag v0.2" $svnurl/trunk $svnurl/tags/v0.2
166 169
167 170 echo "blah-blah-blah" >> letter2.txt
168 171 svn ci -m "work in progress"
169 172 cd ..
170 173
171 174 echo % test incremental conversion
172 175 hg convert $svnurl B-hg
173 176
174 177 cd B-hg
175 178 hg glog --template '#rev# #desc|firstline# files: #files#\n'
176 179 hg tags -q
177 180 cd ..
@@ -1,188 +1,190 b''
1 1 % initial svn import
2 2 Adding t/a
3 3
4 4 Committed revision 1.
5 5 % update svn repository
6 6 A t2/a
7 7 Checked out revision 1.
8 8 A b
9 9 Sending a
10 10 Adding b
11 11 Transmitting file data ..
12 12 Committed revision 2.
13 13 % convert to hg once
14 14 assuming destination test-hg
15 15 initializing destination test-hg repository
16 16 scanning source...
17 17 sorting...
18 18 converting...
19 19 1 init
20 20 0 changea
21 21 % update svn repository again
22 22 Sending a
23 23 Sending b
24 24 Transmitting file data ..
25 25 Committed revision 3.
26 26 % test incremental conversion
27 27 assuming destination test-hg
28 28 scanning source...
29 29 sorting...
30 30 converting...
31 31 0 changeb
32 32 % test filemap
33 33 initializing destination fmap repository
34 34 scanning source...
35 35 sorting...
36 36 converting...
37 37 2 init
38 38 1 changea
39 39 0 changeb
40 40 o 1 changeb files: b
41 41 |
42 42 o 0 changea files: b
43 43
44 44 % test stop revision
45 45 initializing destination stoprev repository
46 46 scanning source...
47 47 sorting...
48 48 converting...
49 49 0 init
50 50 extra: branch=
51 51 extra: convert_revision=
52 52 # now tests that it works with trunk/branches/tags layout
53 53
54 54 % initial svn import
55 55 Adding projA/trunk
56 56 Adding projA/branches
57 57 Adding projA/tags
58 58
59 59 Committed revision 4.
60 60 % update svn repository
61 61 Checked out revision 4.
62 62 A letter.txt
63 63 Adding letter.txt
64 64 Transmitting file data .
65 65 Committed revision 5.
66 66 Sending letter.txt
67 67 Transmitting file data .
68 68 Committed revision 6.
69 69
70 70 Committed revision 7.
71 71 Sending letter.txt
72 72 Transmitting file data .
73 73 Committed revision 8.
74 74 % convert to hg once
75 75 initializing destination A-hg repository
76 76 scanning source...
77 77 sorting...
78 78 converting...
79 79 3 init projA
80 80 2 hello
81 81 1 world
82 82 0 nice day
83 83 updating tags
84 84 % update svn repository again
85 A letter2.txt
85 A todo
86 A todo/letter2.txt
86 87 Sending letter.txt
87 Adding letter2.txt
88 Adding todo
89 Adding todo/letter2.txt
88 90 Transmitting file data ..
89 91 Committed revision 9.
90 92
91 93 Committed revision 10.
92 Sending letter2.txt
94 Sending todo/letter2.txt
93 95 Transmitting file data .
94 96 Committed revision 11.
95 97 % test incremental conversion
96 98 scanning source...
97 99 sorting...
98 100 converting...
99 101 1 second letter
100 102 0 work in progress
101 103 updating tags
102 104 o 7 update tags files: .hgtags
103 105 |
104 o 6 work in progress files: letter2.txt
106 o 6 work in progress files: todo/letter2.txt
105 107 |
106 o 5 second letter files: letter.txt letter2.txt
108 o 5 second letter files: letter.txt todo/letter2.txt
107 109 |
108 110 o 4 update tags files: .hgtags
109 111 |
110 112 o 3 nice day files: letter.txt
111 113 |
112 114 o 2 world files: letter.txt
113 115 |
114 116 o 1 hello files: letter.txt
115 117 |
116 118 o 0 init projA files:
117 119
118 120 tip
119 121 v0.2
120 122 v0.1
121 123 # now tests that it works with trunk/tags layout, but no branches yet
122 124
123 125 % initial svn import
124 126 Adding projB/trunk
125 127 Adding projB/tags
126 128
127 129 Committed revision 12.
128 130 % update svn repository
129 131 Checked out revision 12.
130 132 A letter.txt
131 133 Adding letter.txt
132 134 Transmitting file data .
133 135 Committed revision 13.
134 136 Sending letter.txt
135 137 Transmitting file data .
136 138 Committed revision 14.
137 139
138 140 Committed revision 15.
139 141 Sending letter.txt
140 142 Transmitting file data .
141 143 Committed revision 16.
142 144 % convert to hg once
143 145 initializing destination B-hg repository
144 146 scanning source...
145 147 sorting...
146 148 converting...
147 149 3 init projB
148 150 2 hello
149 151 1 world
150 152 0 nice day
151 153 updating tags
152 154 % update svn repository again
153 155 A letter2.txt
154 156 Sending letter.txt
155 157 Adding letter2.txt
156 158 Transmitting file data ..
157 159 Committed revision 17.
158 160
159 161 Committed revision 18.
160 162 Sending letter2.txt
161 163 Transmitting file data .
162 164 Committed revision 19.
163 165 % test incremental conversion
164 166 scanning source...
165 167 sorting...
166 168 converting...
167 169 1 second letter
168 170 0 work in progress
169 171 updating tags
170 172 o 7 update tags files: .hgtags
171 173 |
172 174 o 6 work in progress files: letter2.txt
173 175 |
174 176 o 5 second letter files: letter.txt letter2.txt
175 177 |
176 178 o 4 update tags files: .hgtags
177 179 |
178 180 o 3 nice day files: letter.txt
179 181 |
180 182 o 2 world files: letter.txt
181 183 |
182 184 o 1 hello files: letter.txt
183 185 |
184 186 o 0 init projB files:
185 187
186 188 tip
187 189 v0.2
188 190 v0.1
General Comments 0
You need to be logged in to leave comments. Login now