##// END OF EJS Templates
Provide better context for custom Python encode/decode filters....
Jesse Glick -
r5967:f8ad3b76 default
parent child Browse files
Show More
@@ -1,108 +1,107 b''
1 1 # win32text.py - LF <-> CRLF translation utilities for Windows users
2 2 #
3 3 # This software may be used and distributed according to the terms
4 4 # of the GNU General Public License, incorporated herein by reference.
5 5 #
6 6 # To perform automatic newline conversion, use:
7 7 #
8 8 # [extensions]
9 9 # hgext.win32text =
10 10 # [encode]
11 11 # ** = cleverencode:
12 12 # [decode]
13 13 # ** = cleverdecode:
14 14 #
15 15 # If not doing conversion, to make sure you do not commit CRLF by accident:
16 16 #
17 17 # [hooks]
18 18 # pretxncommit.crlf = python:hgext.win32text.forbidcrlf
19 19 #
20 20 # To do the same check on a server to prevent CRLF from being pushed or pulled:
21 21 #
22 22 # [hooks]
23 23 # pretxnchangegroup.crlf = python:hgext.win32text.forbidcrlf
24 24
25 25 from mercurial import util, ui
26 26 from mercurial.i18n import gettext as _
27 27 from mercurial.node import *
28 28 import re
29 29
30 30 # regexp for single LF without CR preceding.
31 31 re_single_lf = re.compile('(^|[^\r])\n', re.MULTILINE)
32 32
33 def dumbdecode(s, cmd):
33 def dumbdecode(s, cmd, ui=None, repo=None, filename=None, **kwargs):
34 34 # warn if already has CRLF in repository.
35 35 # it might cause unexpected eol conversion.
36 36 # see issue 302:
37 37 # http://www.selenic.com/mercurial/bts/issue302
38 if '\r\n' in s:
39 u = ui.ui()
40 u.warn(_('WARNING: file in repository already has CRLF line ending \n'
41 ' which does not need eol conversion by win32text plugin.\n'
42 ' Please reconsider encode/decode setting in'
43 ' mercurial.ini or .hg/hgrc\n'
44 ' before next commit.\n'))
38 if '\r\n' in s and ui and filename and repo:
39 ui.warn(_('WARNING: %s already has CRLF line endings\n'
40 'and does not need EOL conversion by the win32text plugin.\n'
41 'Before your next commit, please reconsider your '
42 'encode/decode settings in \nMercurial.ini or %s.\n') %
43 (filename, repo.join('hgrc')))
45 44 # replace single LF to CRLF
46 45 return re_single_lf.sub('\\1\r\n', s)
47 46
48 47 def dumbencode(s, cmd):
49 48 return s.replace('\r\n', '\n')
50 49
51 50 def clevertest(s, cmd):
52 51 if '\0' in s: return False
53 52 return True
54 53
55 def cleverdecode(s, cmd):
54 def cleverdecode(s, cmd, **kwargs):
56 55 if clevertest(s, cmd):
57 return dumbdecode(s, cmd)
56 return dumbdecode(s, cmd, **kwargs)
58 57 return s
59 58
60 59 def cleverencode(s, cmd):
61 60 if clevertest(s, cmd):
62 61 return dumbencode(s, cmd)
63 62 return s
64 63
65 64 _filters = {
66 65 'dumbdecode:': dumbdecode,
67 66 'dumbencode:': dumbencode,
68 67 'cleverdecode:': cleverdecode,
69 68 'cleverencode:': cleverencode,
70 69 }
71 70
72 71 def forbidcrlf(ui, repo, hooktype, node, **kwargs):
73 72 halt = False
74 73 for rev in xrange(repo.changelog.rev(bin(node)), repo.changelog.count()):
75 74 c = repo.changectx(rev)
76 75 for f in c.files():
77 76 if f not in c:
78 77 continue
79 78 data = c[f].data()
80 79 if '\0' not in data and '\r\n' in data:
81 80 if not halt:
82 81 ui.warn(_('Attempt to commit or push text file(s) '
83 82 'using CRLF line endings\n'))
84 83 ui.warn(_('in %s: %s\n') % (short(c.node()), f))
85 84 halt = True
86 85 if halt and hooktype == 'pretxnchangegroup':
87 86 ui.warn(_('\nTo prevent this mistake in your local repository,\n'
88 87 'add to Mercurial.ini or .hg/hgrc:\n'
89 88 '\n'
90 89 '[hooks]\n'
91 90 'pretxncommit.crlf = python:hgext.win32text.forbidcrlf\n'
92 91 '\n'
93 92 'and also consider adding:\n'
94 93 '\n'
95 94 '[extensions]\n'
96 95 'hgext.win32text =\n'
97 96 '[encode]\n'
98 97 '** = cleverencode:\n'
99 98 '[decode]\n'
100 99 '** = cleverdecode:\n'))
101 100 return halt
102 101
103 102 def reposetup(ui, repo):
104 103 if not repo.local():
105 104 return
106 105 for name, fn in _filters.iteritems():
107 106 repo.adddatafilter(name, fn)
108 107
@@ -1,2072 +1,2076 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import re, lock, transaction, tempfile, stat, errno, ui
13 import os, revlog, time, util, extensions, hook
13 import os, revlog, time, util, extensions, hook, inspect
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __init__(self, parentui, path=None, create=0):
20 20 repo.repository.__init__(self)
21 21 self.root = os.path.realpath(path)
22 22 self.path = os.path.join(self.root, ".hg")
23 23 self.origroot = path
24 24 self.opener = util.opener(self.path)
25 25 self.wopener = util.opener(self.root)
26 26
27 27 if not os.path.isdir(self.path):
28 28 if create:
29 29 if not os.path.exists(path):
30 30 os.mkdir(path)
31 31 os.mkdir(self.path)
32 32 requirements = ["revlogv1"]
33 33 if parentui.configbool('format', 'usestore', True):
34 34 os.mkdir(os.path.join(self.path, "store"))
35 35 requirements.append("store")
36 36 # create an invalid changelog
37 37 self.opener("00changelog.i", "a").write(
38 38 '\0\0\0\2' # represents revlogv2
39 39 ' dummy changelog to prevent using the old repo layout'
40 40 )
41 41 reqfile = self.opener("requires", "w")
42 42 for r in requirements:
43 43 reqfile.write("%s\n" % r)
44 44 reqfile.close()
45 45 else:
46 46 raise repo.RepoError(_("repository %s not found") % path)
47 47 elif create:
48 48 raise repo.RepoError(_("repository %s already exists") % path)
49 49 else:
50 50 # find requirements
51 51 try:
52 52 requirements = self.opener("requires").read().splitlines()
53 53 except IOError, inst:
54 54 if inst.errno != errno.ENOENT:
55 55 raise
56 56 requirements = []
57 57 # check them
58 58 for r in requirements:
59 59 if r not in self.supported:
60 60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 61
62 62 # setup store
63 63 if "store" in requirements:
64 64 self.encodefn = util.encodefilename
65 65 self.decodefn = util.decodefilename
66 66 self.spath = os.path.join(self.path, "store")
67 67 else:
68 68 self.encodefn = lambda x: x
69 69 self.decodefn = lambda x: x
70 70 self.spath = self.path
71 71 self.sopener = util.encodedopener(util.opener(self.spath),
72 72 self.encodefn)
73 73
74 74 self.ui = ui.ui(parentui=parentui)
75 75 try:
76 76 self.ui.readconfig(self.join("hgrc"), self.root)
77 77 extensions.loadall(self.ui)
78 78 except IOError:
79 79 pass
80 80
81 81 self.tagscache = None
82 82 self._tagstypecache = None
83 83 self.branchcache = None
84 84 self.nodetagscache = None
85 85 self.filterpats = {}
86 86 self._datafilters = {}
87 87 self._transref = self._lockref = self._wlockref = None
88 88
89 89 def __getattr__(self, name):
90 90 if name == 'changelog':
91 91 self.changelog = changelog.changelog(self.sopener)
92 92 self.sopener.defversion = self.changelog.version
93 93 return self.changelog
94 94 if name == 'manifest':
95 95 self.changelog
96 96 self.manifest = manifest.manifest(self.sopener)
97 97 return self.manifest
98 98 if name == 'dirstate':
99 99 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
100 100 return self.dirstate
101 101 else:
102 102 raise AttributeError, name
103 103
104 104 def url(self):
105 105 return 'file:' + self.root
106 106
107 107 def hook(self, name, throw=False, **args):
108 108 return hook.hook(self.ui, self, name, throw, **args)
109 109
110 110 tag_disallowed = ':\r\n'
111 111
112 112 def _tag(self, name, node, message, local, user, date, parent=None,
113 113 extra={}):
114 114 use_dirstate = parent is None
115 115
116 116 for c in self.tag_disallowed:
117 117 if c in name:
118 118 raise util.Abort(_('%r cannot be used in a tag name') % c)
119 119
120 120 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
121 121
122 122 def writetag(fp, name, munge, prevtags):
123 123 if prevtags and prevtags[-1] != '\n':
124 124 fp.write('\n')
125 125 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
126 126 fp.close()
127 127
128 128 prevtags = ''
129 129 if local:
130 130 try:
131 131 fp = self.opener('localtags', 'r+')
132 132 except IOError, err:
133 133 fp = self.opener('localtags', 'a')
134 134 else:
135 135 prevtags = fp.read()
136 136
137 137 # local tags are stored in the current charset
138 138 writetag(fp, name, None, prevtags)
139 139 self.hook('tag', node=hex(node), tag=name, local=local)
140 140 return
141 141
142 142 if use_dirstate:
143 143 try:
144 144 fp = self.wfile('.hgtags', 'rb+')
145 145 except IOError, err:
146 146 fp = self.wfile('.hgtags', 'ab')
147 147 else:
148 148 prevtags = fp.read()
149 149 else:
150 150 try:
151 151 prevtags = self.filectx('.hgtags', parent).data()
152 152 except revlog.LookupError:
153 153 pass
154 154 fp = self.wfile('.hgtags', 'wb')
155 155 if prevtags:
156 156 fp.write(prevtags)
157 157
158 158 # committed tags are stored in UTF-8
159 159 writetag(fp, name, util.fromlocal, prevtags)
160 160
161 161 if use_dirstate and '.hgtags' not in self.dirstate:
162 162 self.add(['.hgtags'])
163 163
164 164 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
165 165 extra=extra)
166 166
167 167 self.hook('tag', node=hex(node), tag=name, local=local)
168 168
169 169 return tagnode
170 170
171 171 def tag(self, name, node, message, local, user, date):
172 172 '''tag a revision with a symbolic name.
173 173
174 174 if local is True, the tag is stored in a per-repository file.
175 175 otherwise, it is stored in the .hgtags file, and a new
176 176 changeset is committed with the change.
177 177
178 178 keyword arguments:
179 179
180 180 local: whether to store tag in non-version-controlled file
181 181 (default False)
182 182
183 183 message: commit message to use if committing
184 184
185 185 user: name of user to use if committing
186 186
187 187 date: date tuple to use if committing'''
188 188
189 189 for x in self.status()[:5]:
190 190 if '.hgtags' in x:
191 191 raise util.Abort(_('working copy of .hgtags is changed '
192 192 '(please commit .hgtags manually)'))
193 193
194 194
195 195 self._tag(name, node, message, local, user, date)
196 196
197 197 def tags(self):
198 198 '''return a mapping of tag to node'''
199 199 if self.tagscache:
200 200 return self.tagscache
201 201
202 202 globaltags = {}
203 203 tagtypes = {}
204 204
205 205 def readtags(lines, fn, tagtype):
206 206 filetags = {}
207 207 count = 0
208 208
209 209 def warn(msg):
210 210 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
211 211
212 212 for l in lines:
213 213 count += 1
214 214 if not l:
215 215 continue
216 216 s = l.split(" ", 1)
217 217 if len(s) != 2:
218 218 warn(_("cannot parse entry"))
219 219 continue
220 220 node, key = s
221 221 key = util.tolocal(key.strip()) # stored in UTF-8
222 222 try:
223 223 bin_n = bin(node)
224 224 except TypeError:
225 225 warn(_("node '%s' is not well formed") % node)
226 226 continue
227 227 if bin_n not in self.changelog.nodemap:
228 228 warn(_("tag '%s' refers to unknown node") % key)
229 229 continue
230 230
231 231 h = []
232 232 if key in filetags:
233 233 n, h = filetags[key]
234 234 h.append(n)
235 235 filetags[key] = (bin_n, h)
236 236
237 237 for k, nh in filetags.items():
238 238 if k not in globaltags:
239 239 globaltags[k] = nh
240 240 tagtypes[k] = tagtype
241 241 continue
242 242
243 243 # we prefer the global tag if:
244 244 # it supersedes us OR
245 245 # mutual supersedes and it has a higher rank
246 246 # otherwise we win because we're tip-most
247 247 an, ah = nh
248 248 bn, bh = globaltags[k]
249 249 if (bn != an and an in bh and
250 250 (bn not in ah or len(bh) > len(ah))):
251 251 an = bn
252 252 ah.extend([n for n in bh if n not in ah])
253 253 globaltags[k] = an, ah
254 254 tagtypes[k] = tagtype
255 255
256 256 # read the tags file from each head, ending with the tip
257 257 f = None
258 258 for rev, node, fnode in self._hgtagsnodes():
259 259 f = (f and f.filectx(fnode) or
260 260 self.filectx('.hgtags', fileid=fnode))
261 261 readtags(f.data().splitlines(), f, "global")
262 262
263 263 try:
264 264 data = util.fromlocal(self.opener("localtags").read())
265 265 # localtags are stored in the local character set
266 266 # while the internal tag table is stored in UTF-8
267 267 readtags(data.splitlines(), "localtags", "local")
268 268 except IOError:
269 269 pass
270 270
271 271 self.tagscache = {}
272 272 self._tagstypecache = {}
273 273 for k,nh in globaltags.items():
274 274 n = nh[0]
275 275 if n != nullid:
276 276 self.tagscache[k] = n
277 277 self._tagstypecache[k] = tagtypes[k]
278 278 self.tagscache['tip'] = self.changelog.tip()
279 279
280 280 return self.tagscache
281 281
282 282 def tagtype(self, tagname):
283 283 '''
284 284 return the type of the given tag. result can be:
285 285
286 286 'local' : a local tag
287 287 'global' : a global tag
288 288 None : tag does not exist
289 289 '''
290 290
291 291 self.tags()
292 292
293 293 return self._tagstypecache.get(tagname)
294 294
295 295 def _hgtagsnodes(self):
296 296 heads = self.heads()
297 297 heads.reverse()
298 298 last = {}
299 299 ret = []
300 300 for node in heads:
301 301 c = self.changectx(node)
302 302 rev = c.rev()
303 303 try:
304 304 fnode = c.filenode('.hgtags')
305 305 except revlog.LookupError:
306 306 continue
307 307 ret.append((rev, node, fnode))
308 308 if fnode in last:
309 309 ret[last[fnode]] = None
310 310 last[fnode] = len(ret) - 1
311 311 return [item for item in ret if item]
312 312
313 313 def tagslist(self):
314 314 '''return a list of tags ordered by revision'''
315 315 l = []
316 316 for t, n in self.tags().items():
317 317 try:
318 318 r = self.changelog.rev(n)
319 319 except:
320 320 r = -2 # sort to the beginning of the list if unknown
321 321 l.append((r, t, n))
322 322 l.sort()
323 323 return [(t, n) for r, t, n in l]
324 324
325 325 def nodetags(self, node):
326 326 '''return the tags associated with a node'''
327 327 if not self.nodetagscache:
328 328 self.nodetagscache = {}
329 329 for t, n in self.tags().items():
330 330 self.nodetagscache.setdefault(n, []).append(t)
331 331 return self.nodetagscache.get(node, [])
332 332
333 333 def _branchtags(self):
334 334 partial, last, lrev = self._readbranchcache()
335 335
336 336 tiprev = self.changelog.count() - 1
337 337 if lrev != tiprev:
338 338 self._updatebranchcache(partial, lrev+1, tiprev+1)
339 339 self._writebranchcache(partial, self.changelog.tip(), tiprev)
340 340
341 341 return partial
342 342
343 343 def branchtags(self):
344 344 if self.branchcache is not None:
345 345 return self.branchcache
346 346
347 347 self.branchcache = {} # avoid recursion in changectx
348 348 partial = self._branchtags()
349 349
350 350 # the branch cache is stored on disk as UTF-8, but in the local
351 351 # charset internally
352 352 for k, v in partial.items():
353 353 self.branchcache[util.tolocal(k)] = v
354 354 return self.branchcache
355 355
356 356 def _readbranchcache(self):
357 357 partial = {}
358 358 try:
359 359 f = self.opener("branch.cache")
360 360 lines = f.read().split('\n')
361 361 f.close()
362 362 except (IOError, OSError):
363 363 return {}, nullid, nullrev
364 364
365 365 try:
366 366 last, lrev = lines.pop(0).split(" ", 1)
367 367 last, lrev = bin(last), int(lrev)
368 368 if not (lrev < self.changelog.count() and
369 369 self.changelog.node(lrev) == last): # sanity check
370 370 # invalidate the cache
371 371 raise ValueError('Invalid branch cache: unknown tip')
372 372 for l in lines:
373 373 if not l: continue
374 374 node, label = l.split(" ", 1)
375 375 partial[label.strip()] = bin(node)
376 376 except (KeyboardInterrupt, util.SignalInterrupt):
377 377 raise
378 378 except Exception, inst:
379 379 if self.ui.debugflag:
380 380 self.ui.warn(str(inst), '\n')
381 381 partial, last, lrev = {}, nullid, nullrev
382 382 return partial, last, lrev
383 383
384 384 def _writebranchcache(self, branches, tip, tiprev):
385 385 try:
386 386 f = self.opener("branch.cache", "w", atomictemp=True)
387 387 f.write("%s %s\n" % (hex(tip), tiprev))
388 388 for label, node in branches.iteritems():
389 389 f.write("%s %s\n" % (hex(node), label))
390 390 f.rename()
391 391 except (IOError, OSError):
392 392 pass
393 393
394 394 def _updatebranchcache(self, partial, start, end):
395 395 for r in xrange(start, end):
396 396 c = self.changectx(r)
397 397 b = c.branch()
398 398 partial[b] = c.node()
399 399
400 400 def lookup(self, key):
401 401 if key == '.':
402 402 key, second = self.dirstate.parents()
403 403 if key == nullid:
404 404 raise repo.RepoError(_("no revision checked out"))
405 405 if second != nullid:
406 406 self.ui.warn(_("warning: working directory has two parents, "
407 407 "tag '.' uses the first\n"))
408 408 elif key == 'null':
409 409 return nullid
410 410 n = self.changelog._match(key)
411 411 if n:
412 412 return n
413 413 if key in self.tags():
414 414 return self.tags()[key]
415 415 if key in self.branchtags():
416 416 return self.branchtags()[key]
417 417 n = self.changelog._partialmatch(key)
418 418 if n:
419 419 return n
420 420 try:
421 421 if len(key) == 20:
422 422 key = hex(key)
423 423 except:
424 424 pass
425 425 raise repo.RepoError(_("unknown revision '%s'") % key)
426 426
427 427 def dev(self):
428 428 return os.lstat(self.path).st_dev
429 429
430 430 def local(self):
431 431 return True
432 432
433 433 def join(self, f):
434 434 return os.path.join(self.path, f)
435 435
436 436 def sjoin(self, f):
437 437 f = self.encodefn(f)
438 438 return os.path.join(self.spath, f)
439 439
440 440 def wjoin(self, f):
441 441 return os.path.join(self.root, f)
442 442
443 443 def file(self, f):
444 444 if f[0] == '/':
445 445 f = f[1:]
446 446 return filelog.filelog(self.sopener, f)
447 447
448 448 def changectx(self, changeid=None):
449 449 return context.changectx(self, changeid)
450 450
451 451 def workingctx(self):
452 452 return context.workingctx(self)
453 453
454 454 def parents(self, changeid=None):
455 455 '''
456 456 get list of changectxs for parents of changeid or working directory
457 457 '''
458 458 if changeid is None:
459 459 pl = self.dirstate.parents()
460 460 else:
461 461 n = self.changelog.lookup(changeid)
462 462 pl = self.changelog.parents(n)
463 463 if pl[1] == nullid:
464 464 return [self.changectx(pl[0])]
465 465 return [self.changectx(pl[0]), self.changectx(pl[1])]
466 466
467 467 def filectx(self, path, changeid=None, fileid=None):
468 468 """changeid can be a changeset revision, node, or tag.
469 469 fileid can be a file revision or node."""
470 470 return context.filectx(self, path, changeid, fileid)
471 471
472 472 def getcwd(self):
473 473 return self.dirstate.getcwd()
474 474
475 475 def pathto(self, f, cwd=None):
476 476 return self.dirstate.pathto(f, cwd)
477 477
478 478 def wfile(self, f, mode='r'):
479 479 return self.wopener(f, mode)
480 480
481 481 def _link(self, f):
482 482 return os.path.islink(self.wjoin(f))
483 483
484 484 def _filter(self, filter, filename, data):
485 485 if filter not in self.filterpats:
486 486 l = []
487 487 for pat, cmd in self.ui.configitems(filter):
488 488 mf = util.matcher(self.root, "", [pat], [], [])[1]
489 489 fn = None
490 490 for name, filterfn in self._datafilters.iteritems():
491 491 if cmd.startswith(name):
492 492 fn = filterfn
493 493 break
494 494 if not fn:
495 fn = lambda s, c: util.filter(s, c)
495 fn = lambda s, c, **kwargs: util.filter(s, c)
496 # Wrap old filters not supporting keyword arguments
497 if not inspect.getargspec(fn)[2]:
498 oldfn = fn
499 fn = lambda s, c, **kwargs: oldfn(s, c)
496 500 l.append((mf, fn, cmd))
497 501 self.filterpats[filter] = l
498 502
499 503 for mf, fn, cmd in self.filterpats[filter]:
500 504 if mf(filename):
501 505 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
502 data = fn(data, cmd)
506 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
503 507 break
504 508
505 509 return data
506 510
507 511 def adddatafilter(self, name, filter):
508 512 self._datafilters[name] = filter
509 513
510 514 def wread(self, filename):
511 515 if self._link(filename):
512 516 data = os.readlink(self.wjoin(filename))
513 517 else:
514 518 data = self.wopener(filename, 'r').read()
515 519 return self._filter("encode", filename, data)
516 520
517 521 def wwrite(self, filename, data, flags):
518 522 data = self._filter("decode", filename, data)
519 523 try:
520 524 os.unlink(self.wjoin(filename))
521 525 except OSError:
522 526 pass
523 527 self.wopener(filename, 'w').write(data)
524 528 util.set_flags(self.wjoin(filename), flags)
525 529
526 530 def wwritedata(self, filename, data):
527 531 return self._filter("decode", filename, data)
528 532
529 533 def transaction(self):
530 534 if self._transref and self._transref():
531 535 return self._transref().nest()
532 536
533 537 # abort here if the journal already exists
534 538 if os.path.exists(self.sjoin("journal")):
535 539 raise repo.RepoError(_("journal already exists - run hg recover"))
536 540
537 541 # save dirstate for rollback
538 542 try:
539 543 ds = self.opener("dirstate").read()
540 544 except IOError:
541 545 ds = ""
542 546 self.opener("journal.dirstate", "w").write(ds)
543 547 self.opener("journal.branch", "w").write(self.dirstate.branch())
544 548
545 549 renames = [(self.sjoin("journal"), self.sjoin("undo")),
546 550 (self.join("journal.dirstate"), self.join("undo.dirstate")),
547 551 (self.join("journal.branch"), self.join("undo.branch"))]
548 552 tr = transaction.transaction(self.ui.warn, self.sopener,
549 553 self.sjoin("journal"),
550 554 aftertrans(renames))
551 555 self._transref = weakref.ref(tr)
552 556 return tr
553 557
554 558 def recover(self):
555 559 l = self.lock()
556 560 try:
557 561 if os.path.exists(self.sjoin("journal")):
558 562 self.ui.status(_("rolling back interrupted transaction\n"))
559 563 transaction.rollback(self.sopener, self.sjoin("journal"))
560 564 self.invalidate()
561 565 return True
562 566 else:
563 567 self.ui.warn(_("no interrupted transaction available\n"))
564 568 return False
565 569 finally:
566 570 del l
567 571
568 572 def rollback(self):
569 573 wlock = lock = None
570 574 try:
571 575 wlock = self.wlock()
572 576 lock = self.lock()
573 577 if os.path.exists(self.sjoin("undo")):
574 578 self.ui.status(_("rolling back last transaction\n"))
575 579 transaction.rollback(self.sopener, self.sjoin("undo"))
576 580 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
577 581 branch = self.opener("undo.branch").read()
578 582 self.dirstate.setbranch(branch)
579 583 self.invalidate()
580 584 self.dirstate.invalidate()
581 585 else:
582 586 self.ui.warn(_("no rollback information available\n"))
583 587 finally:
584 588 del lock, wlock
585 589
586 590 def invalidate(self):
587 591 for a in "changelog manifest".split():
588 592 if hasattr(self, a):
589 593 self.__delattr__(a)
590 594 self.tagscache = None
591 595 self._tagstypecache = None
592 596 self.nodetagscache = None
593 597
594 598 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
595 599 try:
596 600 l = lock.lock(lockname, 0, releasefn, desc=desc)
597 601 except lock.LockHeld, inst:
598 602 if not wait:
599 603 raise
600 604 self.ui.warn(_("waiting for lock on %s held by %r\n") %
601 605 (desc, inst.locker))
602 606 # default to 600 seconds timeout
603 607 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
604 608 releasefn, desc=desc)
605 609 if acquirefn:
606 610 acquirefn()
607 611 return l
608 612
609 613 def lock(self, wait=True):
610 614 if self._lockref and self._lockref():
611 615 return self._lockref()
612 616
613 617 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
614 618 _('repository %s') % self.origroot)
615 619 self._lockref = weakref.ref(l)
616 620 return l
617 621
618 622 def wlock(self, wait=True):
619 623 if self._wlockref and self._wlockref():
620 624 return self._wlockref()
621 625
622 626 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
623 627 self.dirstate.invalidate, _('working directory of %s') %
624 628 self.origroot)
625 629 self._wlockref = weakref.ref(l)
626 630 return l
627 631
628 632 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
629 633 """
630 634 commit an individual file as part of a larger transaction
631 635 """
632 636
633 637 t = self.wread(fn)
634 638 fl = self.file(fn)
635 639 fp1 = manifest1.get(fn, nullid)
636 640 fp2 = manifest2.get(fn, nullid)
637 641
638 642 meta = {}
639 643 cp = self.dirstate.copied(fn)
640 644 if cp:
641 645 # Mark the new revision of this file as a copy of another
642 646 # file. This copy data will effectively act as a parent
643 647 # of this new revision. If this is a merge, the first
644 648 # parent will be the nullid (meaning "look up the copy data")
645 649 # and the second one will be the other parent. For example:
646 650 #
647 651 # 0 --- 1 --- 3 rev1 changes file foo
648 652 # \ / rev2 renames foo to bar and changes it
649 653 # \- 2 -/ rev3 should have bar with all changes and
650 654 # should record that bar descends from
651 655 # bar in rev2 and foo in rev1
652 656 #
653 657 # this allows this merge to succeed:
654 658 #
655 659 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
656 660 # \ / merging rev3 and rev4 should use bar@rev2
657 661 # \- 2 --- 4 as the merge base
658 662 #
659 663 meta["copy"] = cp
660 664 if not manifest2: # not a branch merge
661 665 meta["copyrev"] = hex(manifest1.get(cp, nullid))
662 666 fp2 = nullid
663 667 elif fp2 != nullid: # copied on remote side
664 668 meta["copyrev"] = hex(manifest1.get(cp, nullid))
665 669 elif fp1 != nullid: # copied on local side, reversed
666 670 meta["copyrev"] = hex(manifest2.get(cp))
667 671 fp2 = fp1
668 672 elif cp in manifest2: # directory rename on local side
669 673 meta["copyrev"] = hex(manifest2[cp])
670 674 else: # directory rename on remote side
671 675 meta["copyrev"] = hex(manifest1.get(cp, nullid))
672 676 self.ui.debug(_(" %s: copy %s:%s\n") %
673 677 (fn, cp, meta["copyrev"]))
674 678 fp1 = nullid
675 679 elif fp2 != nullid:
676 680 # is one parent an ancestor of the other?
677 681 fpa = fl.ancestor(fp1, fp2)
678 682 if fpa == fp1:
679 683 fp1, fp2 = fp2, nullid
680 684 elif fpa == fp2:
681 685 fp2 = nullid
682 686
683 687 # is the file unmodified from the parent? report existing entry
684 688 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
685 689 return fp1
686 690
687 691 changelist.append(fn)
688 692 return fl.add(t, meta, tr, linkrev, fp1, fp2)
689 693
690 694 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
691 695 if p1 is None:
692 696 p1, p2 = self.dirstate.parents()
693 697 return self.commit(files=files, text=text, user=user, date=date,
694 698 p1=p1, p2=p2, extra=extra, empty_ok=True)
695 699
696 700 def commit(self, files=None, text="", user=None, date=None,
697 701 match=util.always, force=False, force_editor=False,
698 702 p1=None, p2=None, extra={}, empty_ok=False):
699 703 wlock = lock = tr = None
700 704 valid = 0 # don't save the dirstate if this isn't set
701 705 if files:
702 706 files = util.unique(files)
703 707 try:
704 708 commit = []
705 709 remove = []
706 710 changed = []
707 711 use_dirstate = (p1 is None) # not rawcommit
708 712 extra = extra.copy()
709 713
710 714 if use_dirstate:
711 715 if files:
712 716 for f in files:
713 717 s = self.dirstate[f]
714 718 if s in 'nma':
715 719 commit.append(f)
716 720 elif s == 'r':
717 721 remove.append(f)
718 722 else:
719 723 self.ui.warn(_("%s not tracked!\n") % f)
720 724 else:
721 725 changes = self.status(match=match)[:5]
722 726 modified, added, removed, deleted, unknown = changes
723 727 commit = modified + added
724 728 remove = removed
725 729 else:
726 730 commit = files
727 731
728 732 if use_dirstate:
729 733 p1, p2 = self.dirstate.parents()
730 734 update_dirstate = True
731 735 else:
732 736 p1, p2 = p1, p2 or nullid
733 737 update_dirstate = (self.dirstate.parents()[0] == p1)
734 738
735 739 c1 = self.changelog.read(p1)
736 740 c2 = self.changelog.read(p2)
737 741 m1 = self.manifest.read(c1[0]).copy()
738 742 m2 = self.manifest.read(c2[0])
739 743
740 744 if use_dirstate:
741 745 branchname = self.workingctx().branch()
742 746 try:
743 747 branchname = branchname.decode('UTF-8').encode('UTF-8')
744 748 except UnicodeDecodeError:
745 749 raise util.Abort(_('branch name not in UTF-8!'))
746 750 else:
747 751 branchname = ""
748 752
749 753 if use_dirstate:
750 754 oldname = c1[5].get("branch") # stored in UTF-8
751 755 if (not commit and not remove and not force and p2 == nullid
752 756 and branchname == oldname):
753 757 self.ui.status(_("nothing changed\n"))
754 758 return None
755 759
756 760 xp1 = hex(p1)
757 761 if p2 == nullid: xp2 = ''
758 762 else: xp2 = hex(p2)
759 763
760 764 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
761 765
762 766 wlock = self.wlock()
763 767 lock = self.lock()
764 768 tr = self.transaction()
765 769 trp = weakref.proxy(tr)
766 770
767 771 # check in files
768 772 new = {}
769 773 linkrev = self.changelog.count()
770 774 commit.sort()
771 775 is_exec = util.execfunc(self.root, m1.execf)
772 776 is_link = util.linkfunc(self.root, m1.linkf)
773 777 for f in commit:
774 778 self.ui.note(f + "\n")
775 779 try:
776 780 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
777 781 new_exec = is_exec(f)
778 782 new_link = is_link(f)
779 783 if ((not changed or changed[-1] != f) and
780 784 m2.get(f) != new[f]):
781 785 # mention the file in the changelog if some
782 786 # flag changed, even if there was no content
783 787 # change.
784 788 old_exec = m1.execf(f)
785 789 old_link = m1.linkf(f)
786 790 if old_exec != new_exec or old_link != new_link:
787 791 changed.append(f)
788 792 m1.set(f, new_exec, new_link)
789 793 if use_dirstate:
790 794 self.dirstate.normal(f)
791 795
792 796 except (OSError, IOError):
793 797 if use_dirstate:
794 798 self.ui.warn(_("trouble committing %s!\n") % f)
795 799 raise
796 800 else:
797 801 remove.append(f)
798 802
799 803 # update manifest
800 804 m1.update(new)
801 805 remove.sort()
802 806 removed = []
803 807
804 808 for f in remove:
805 809 if f in m1:
806 810 del m1[f]
807 811 removed.append(f)
808 812 elif f in m2:
809 813 removed.append(f)
810 814 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
811 815 (new, removed))
812 816
813 817 # add changeset
814 818 new = new.keys()
815 819 new.sort()
816 820
817 821 user = user or self.ui.username()
818 822 if (not empty_ok and not text) or force_editor:
819 823 edittext = []
820 824 if text:
821 825 edittext.append(text)
822 826 edittext.append("")
823 827 edittext.append(_("HG: Enter commit message."
824 828 " Lines beginning with 'HG:' are removed."))
825 829 edittext.append("HG: --")
826 830 edittext.append("HG: user: %s" % user)
827 831 if p2 != nullid:
828 832 edittext.append("HG: branch merge")
829 833 if branchname:
830 834 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
831 835 edittext.extend(["HG: changed %s" % f for f in changed])
832 836 edittext.extend(["HG: removed %s" % f for f in removed])
833 837 if not changed and not remove:
834 838 edittext.append("HG: no files changed")
835 839 edittext.append("")
836 840 # run editor in the repository root
837 841 olddir = os.getcwd()
838 842 os.chdir(self.root)
839 843 text = self.ui.edit("\n".join(edittext), user)
840 844 os.chdir(olddir)
841 845
842 846 if branchname:
843 847 extra["branch"] = branchname
844 848
845 849 if use_dirstate:
846 850 lines = [line.rstrip() for line in text.rstrip().splitlines()]
847 851 while lines and not lines[0]:
848 852 del lines[0]
849 853 if not lines:
850 854 raise util.Abort(_("empty commit message"))
851 855 text = '\n'.join(lines)
852 856
853 857 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
854 858 user, date, extra)
855 859 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
856 860 parent2=xp2)
857 861 tr.close()
858 862
859 863 if self.branchcache and "branch" in extra:
860 864 self.branchcache[util.tolocal(extra["branch"])] = n
861 865
862 866 if use_dirstate or update_dirstate:
863 867 self.dirstate.setparents(n)
864 868 if use_dirstate:
865 869 for f in removed:
866 870 self.dirstate.forget(f)
867 871 valid = 1 # our dirstate updates are complete
868 872
869 873 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
870 874 return n
871 875 finally:
872 876 if not valid: # don't save our updated dirstate
873 877 self.dirstate.invalidate()
874 878 del tr, lock, wlock
875 879
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch

        If node is given, walk that changeset's manifest instead of the
        working directory.  NOTE(review): the files=[] default is mutable
        but is only read here, never mutated, so it is harmless.
        '''

        if node:
            # walk a committed revision: files acts as a filter of
            # exact names or directory prefixes over the manifest
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict was requested but absent from the
            # manifest: report via badmatch if provided, else warn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            # working directory: delegate entirely to the dirstate walker
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
917 921
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean);
        ignored and clean are only populated when list_ignored /
        list_clean are set.
        """

        def fcmp(fn, getnode):
            # compare the working copy of fn against its stored revision
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        # fast path: comparing the working directory with its own parent
        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                # best-effort only: skip the fixup rather
                                # than block waiting for the lock
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    # "" marks entries whose content must be compared on
                    # disk (see the fcmp call below)
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    # changed if flags differ, or node ids differ and
                    # (committed on both sides, or on-disk content differs)
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # anything left in mf1 was not in mf2, hence removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
1029 1033
1030 1034 def add(self, list):
1031 1035 wlock = self.wlock()
1032 1036 try:
1033 1037 rejected = []
1034 1038 for f in list:
1035 1039 p = self.wjoin(f)
1036 1040 try:
1037 1041 st = os.lstat(p)
1038 1042 except:
1039 1043 self.ui.warn(_("%s does not exist!\n") % f)
1040 1044 rejected.append(f)
1041 1045 continue
1042 1046 if st.st_size > 10000000:
1043 1047 self.ui.warn(_("%s: files over 10MB may cause memory and"
1044 1048 " performance problems\n"
1045 1049 "(use 'hg revert %s' to unadd the file)\n")
1046 1050 % (f, f))
1047 1051 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1048 1052 self.ui.warn(_("%s not added: only files and symlinks "
1049 1053 "supported currently\n") % f)
1050 1054 rejected.append(p)
1051 1055 elif self.dirstate[f] in 'amn':
1052 1056 self.ui.warn(_("%s already tracked!\n") % f)
1053 1057 elif self.dirstate[f] == 'r':
1054 1058 self.dirstate.normallookup(f)
1055 1059 else:
1056 1060 self.dirstate.add(f)
1057 1061 return rejected
1058 1062 finally:
1059 1063 del wlock
1060 1064
1061 1065 def forget(self, list):
1062 1066 wlock = self.wlock()
1063 1067 try:
1064 1068 for f in list:
1065 1069 if self.dirstate[f] != 'a':
1066 1070 self.ui.warn(_("%s not added!\n") % f)
1067 1071 else:
1068 1072 self.dirstate.forget(f)
1069 1073 finally:
1070 1074 del wlock
1071 1075
    def remove(self, list, unlink=False):
        """Schedule the files in list for removal at the next commit.

        If unlink is true, also delete the working copies from disk.
        """
        wlock = None
        try:
            if unlink:
                # delete working copies first; a missing file is fine
                # (makes the operation idempotent), anything else raises
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    # still present after the unlink attempt: refuse to
                    # mark it removed
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    # never committed: just drop the pending add
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock
1094 1098
1095 1099 def undelete(self, list):
1096 1100 wlock = None
1097 1101 try:
1098 1102 manifests = [self.manifest.read(self.changelog.read(p)[0])
1099 1103 for p in self.dirstate.parents() if p != nullid]
1100 1104 wlock = self.wlock()
1101 1105 for f in list:
1102 1106 if self.dirstate[f] != 'r':
1103 1107 self.ui.warn("%s not removed!\n" % f)
1104 1108 else:
1105 1109 m = f in manifests[0] and manifests[0] or manifests[1]
1106 1110 t = self.file(f).read(m[f])
1107 1111 self.wwrite(f, t, m.flags(f))
1108 1112 self.dirstate.normal(f)
1109 1113 finally:
1110 1114 del wlock
1111 1115
1112 1116 def copy(self, source, dest):
1113 1117 wlock = None
1114 1118 try:
1115 1119 p = self.wjoin(dest)
1116 1120 if not (os.path.exists(p) or os.path.islink(p)):
1117 1121 self.ui.warn(_("%s does not exist!\n") % dest)
1118 1122 elif not (os.path.isfile(p) or os.path.islink(p)):
1119 1123 self.ui.warn(_("copy failed: %s is not a file or a "
1120 1124 "symbolic link\n") % dest)
1121 1125 else:
1122 1126 wlock = self.wlock()
1123 1127 if dest not in self.dirstate:
1124 1128 self.dirstate.add(dest)
1125 1129 self.dirstate.copy(source, dest)
1126 1130 finally:
1127 1131 del wlock
1128 1132
1129 1133 def heads(self, start=None):
1130 1134 heads = self.changelog.heads(start)
1131 1135 # sort the output in rev descending order
1132 1136 heads = [(-self.changelog.rev(h), h) for h in heads]
1133 1137 heads.sort()
1134 1138 return [n for (r, n) in heads]
1135 1139
    def branchheads(self, branch, start=None):
        """Return the heads of the named branch as a list of nodes,
        optionally restricted to descendants of start."""
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        # util.set rather than the builtin, for compatibility with older
        # Python versions (shadows the builtin name inside this method)
        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1181 1185
1182 1186 def branches(self, nodes):
1183 1187 if not nodes:
1184 1188 nodes = [self.changelog.tip()]
1185 1189 b = []
1186 1190 for n in nodes:
1187 1191 t = n
1188 1192 while 1:
1189 1193 p = self.changelog.parents(n)
1190 1194 if p[1] != nullid or p[0] == nullid:
1191 1195 b.append((t, n, p[0], p[1]))
1192 1196 break
1193 1197 n = p[0]
1194 1198 return b
1195 1199
1196 1200 def between(self, pairs):
1197 1201 r = []
1198 1202
1199 1203 for top, bottom in pairs:
1200 1204 n, l, i = top, [], 0
1201 1205 f = 1
1202 1206
1203 1207 while n != bottom:
1204 1208 p = self.changelog.parents(n)[0]
1205 1209 if i == f:
1206 1210 l.append(n)
1207 1211 f = f * 2
1208 1212 n = p
1209 1213 i += 1
1210 1214
1211 1215 r.append(l)
1212 1216
1213 1217 return r
1214 1218
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}          # earliest-unknown nodes, used as a set
        seen = {}           # branch heads already examined
        seenbranch = {}     # full branch tuples already scheduled
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # empty local repo: everything on remote is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            # all remote heads are known locally: nothing incoming
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    # queue unknown parents for the next batched request
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                # ask the remote about the queued parents, 10 at a time
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # interval narrowed to a single edge: p is the
                        # earliest unknown node on this branch
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                # NOTE(review): short() elsewhere receives a full node;
                # f[:4] truncates to 4 bytes first — confirm whether
                # short(f) was intended here.
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1355 1359
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no base supplied: discover the common nodes ourselves
            # (findincoming populates base as a side effect)
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        # start from every local node, then prune what remote has
        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1403 1407
    def pull(self, remote, heads=None, force=False):
        """Pull changes from remote into this repository.

        Returns 0 when there are no changes, otherwise the result of
        addchangegroup for the received changegroup.
        """
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                # local repo is empty: everything will be transferred
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                # a partial pull needs server-side changegroupsubset support
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock
1424 1428
1425 1429 def push(self, remote, force=False, revs=None):
1426 1430 # there are two ways to push to remote repo:
1427 1431 #
1428 1432 # addchangegroup assumes local user can lock remote
1429 1433 # repo (local filesystem, old ssh servers).
1430 1434 #
1431 1435 # unbundle assumes local user cannot lock remote repo (new ssh
1432 1436 # servers, http servers).
1433 1437
1434 1438 if remote.capable('unbundle'):
1435 1439 return self.push_unbundle(remote, force, revs)
1436 1440 return self.push_addchangegroup(remote, force, revs)
1437 1441
    def prepush(self, remote, force, revs):
        """Analyse a push and build the changegroup to transfer.

        Returns (changegroup, remote_heads) when there is something to
        push, or (None, 1) when there is nothing to send or the push
        would create new remote heads without force.
        """
        base = {}
        remote_heads = remote.heads()
        # populates base with common nodes; inc is non-empty if the
        # remote has changes we don't
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # limit the outgoing set to ancestors of the requested revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # empty remote: any heads we push are fine
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # remote head known locally: it stays a head only
                        # if no outgoing head descends from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            # forced push over unsynced remote changes: warn but proceed
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1493 1497
1494 1498 def push_addchangegroup(self, remote, force, revs):
1495 1499 lock = remote.lock()
1496 1500 try:
1497 1501 ret = self.prepush(remote, force, revs)
1498 1502 if ret[0] is not None:
1499 1503 cg, remote_heads = ret
1500 1504 return remote.addchangegroup(cg, 'push', self.url())
1501 1505 return ret[1]
1502 1506 finally:
1503 1507 del lock
1504 1508
1505 1509 def push_unbundle(self, remote, force, revs):
1506 1510 # local repo finds heads on server, finds out what revs it
1507 1511 # must push. once revs transferred, if server finds it has
1508 1512 # different heads (someone else won commit/push race), server
1509 1513 # aborts.
1510 1514
1511 1515 ret = self.prepush(remote, force, revs)
1512 1516 if ret[0] is not None:
1513 1517 cg, remote_heads = ret
1514 1518 if force: remote_heads = ['force']
1515 1519 return remote.unbundle(cg, remote_heads, 'push')
1516 1520 return ret[1]
1517 1521
1518 1522 def changegroupinfo(self, nodes, source):
1519 1523 if self.ui.verbose or source == 'bundle':
1520 1524 self.ui.status(_("%d changesets found\n") % len(nodes))
1521 1525 if self.ui.debugflag:
1522 1526 self.ui.debug(_("List of changesets:\n"))
1523 1527 for node in nodes:
1524 1528 self.ui.debug("%s\n" % hex(node))
1525 1529
1526 1530 def changegroupsubset(self, bases, heads, source, extranodes=None):
1527 1531 """This function generates a changegroup consisting of all the nodes
1528 1532 that are descendents of any of the bases, and ancestors of any of
1529 1533 the heads.
1530 1534
1531 1535 It is fairly complex as determining which filenodes and which
1532 1536 manifest nodes need to be included for the changeset to be complete
1533 1537 is non-trivial.
1534 1538
1535 1539 Another wrinkle is doing the reverse, figuring out which changeset in
1536 1540 the changegroup a particular filenode or manifestnode belongs to.
1537 1541
1538 1542 The caller can specify some nodes that must be included in the
1539 1543 changegroup using the extranodes argument. It should be a dict
1540 1544 where the keys are the filenames (or 1 for the manifest), and the
1541 1545 values are lists of (node, linknode) tuples, where node is a wanted
1542 1546 node and linknode is the changelog node that should be transmitted as
1543 1547 the linkrev.
1544 1548 """
1545 1549
1546 1550 self.hook('preoutgoing', throw=True, source=source)
1547 1551
1548 1552 # Set up some initial variables
1549 1553 # Make it easy to refer to self.changelog
1550 1554 cl = self.changelog
1551 1555 # msng is short for missing - compute the list of changesets in this
1552 1556 # changegroup.
1553 1557 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1554 1558 self.changegroupinfo(msng_cl_lst, source)
1555 1559 # Some bases may turn out to be superfluous, and some heads may be
1556 1560 # too. nodesbetween will return the minimal set of bases and heads
1557 1561 # necessary to re-create the changegroup.
1558 1562
1559 1563 # Known heads are the list of heads that it is assumed the recipient
1560 1564 # of this changegroup will know about.
1561 1565 knownheads = {}
1562 1566 # We assume that all parents of bases are known heads.
1563 1567 for n in bases:
1564 1568 for p in cl.parents(n):
1565 1569 if p != nullid:
1566 1570 knownheads[p] = 1
1567 1571 knownheads = knownheads.keys()
1568 1572 if knownheads:
1569 1573 # Now that we know what heads are known, we can compute which
1570 1574 # changesets are known. The recipient must know about all
1571 1575 # changesets required to reach the known heads from the null
1572 1576 # changeset.
1573 1577 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1574 1578 junk = None
1575 1579 # Transform the list into an ersatz set.
1576 1580 has_cl_set = dict.fromkeys(has_cl_set)
1577 1581 else:
1578 1582 # If there were no known heads, the recipient cannot be assumed to
1579 1583 # know about any changesets.
1580 1584 has_cl_set = {}
1581 1585
1582 1586 # Make it easy to refer to self.manifest
1583 1587 mnfst = self.manifest
1584 1588 # We don't know which manifests are missing yet
1585 1589 msng_mnfst_set = {}
1586 1590 # Nor do we know which filenodes are missing.
1587 1591 msng_filenode_set = {}
1588 1592
1589 1593 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1590 1594 junk = None
1591 1595
1592 1596 # A changeset always belongs to itself, so the changenode lookup
1593 1597 # function for a changenode is identity.
1594 1598 def identity(x):
1595 1599 return x
1596 1600
1597 1601 # A function generating function. Sets up an environment for the
1598 1602 # inner function.
1599 1603 def cmp_by_rev_func(revlog):
1600 1604 # Compare two nodes by their revision number in the environment's
1601 1605 # revision history. Since the revision number both represents the
1602 1606 # most efficient order to read the nodes in, and represents a
1603 1607 # topological sorting of the nodes, this function is often useful.
1604 1608 def cmp_by_rev(a, b):
1605 1609 return cmp(revlog.rev(a), revlog.rev(b))
1606 1610 return cmp_by_rev
1607 1611
1608 1612 # If we determine that a particular file or manifest node must be a
1609 1613 # node that the recipient of the changegroup will already have, we can
1610 1614 # also assume the recipient will have all the parents. This function
1611 1615 # prunes them from the set of missing nodes.
1612 1616 def prune_parents(revlog, hasset, msngset):
1613 1617 haslst = hasset.keys()
1614 1618 haslst.sort(cmp_by_rev_func(revlog))
1615 1619 for node in haslst:
1616 1620 parentlst = [p for p in revlog.parents(node) if p != nullid]
1617 1621 while parentlst:
1618 1622 n = parentlst.pop()
1619 1623 if n not in hasset:
1620 1624 hasset[n] = 1
1621 1625 p = [p for p in revlog.parents(n) if p != nullid]
1622 1626 parentlst.extend(p)
1623 1627 for n in hasset:
1624 1628 msngset.pop(n, None)
1625 1629
1626 1630 # This is a function generating function used to set up an environment
1627 1631 # for the inner function to execute in.
1628 1632 def manifest_and_file_collector(changedfileset):
1629 1633 # This is an information gathering function that gathers
1630 1634 # information from each changeset node that goes out as part of
1631 1635 # the changegroup. The information gathered is a list of which
1632 1636 # manifest nodes are potentially required (the recipient may
1633 1637 # already have them) and total list of all files which were
1634 1638 # changed in any changeset in the changegroup.
1635 1639 #
1636 1640 # We also remember the first changenode we saw any manifest
1637 1641 # referenced by so we can later determine which changenode 'owns'
1638 1642 # the manifest.
1639 1643 def collect_manifests_and_files(clnode):
1640 1644 c = cl.read(clnode)
1641 1645 for f in c[3]:
1642 1646 # This is to make sure we only have one instance of each
1643 1647 # filename string for each filename.
1644 1648 changedfileset.setdefault(f, f)
1645 1649 msng_mnfst_set.setdefault(c[0], clnode)
1646 1650 return collect_manifests_and_files
1647 1651
1648 1652 # Figure out which manifest nodes (of the ones we think might be part
1649 1653 # of the changegroup) the recipient must know about and remove them
1650 1654 # from the changegroup.
1651 1655 def prune_manifests():
1652 1656 has_mnfst_set = {}
1653 1657 for n in msng_mnfst_set:
1654 1658 # If a 'missing' manifest thinks it belongs to a changenode
1655 1659 # the recipient is assumed to have, obviously the recipient
1656 1660 # must have that manifest.
1657 1661 linknode = cl.node(mnfst.linkrev(n))
1658 1662 if linknode in has_cl_set:
1659 1663 has_mnfst_set[n] = 1
1660 1664 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1661 1665
1662 1666 # Use the information collected in collect_manifests_and_files to say
1663 1667 # which changenode any manifestnode belongs to.
1664 1668 def lookup_manifest_link(mnfstnode):
1665 1669 return msng_mnfst_set[mnfstnode]
1666 1670
1667 1671 # A function generating function that sets up the initial environment
1668 1672 # the inner function.
1669 1673 def filenode_collector(changedfiles):
1670 1674 next_rev = [0]
1671 1675 # This gathers information from each manifestnode included in the
1672 1676 # changegroup about which filenodes the manifest node references
1673 1677 # so we can include those in the changegroup too.
1674 1678 #
1675 1679 # It also remembers which changenode each filenode belongs to. It
1676 1680 # does this by assuming the a filenode belongs to the changenode
1677 1681 # the first manifest that references it belongs to.
1678 1682 def collect_msng_filenodes(mnfstnode):
1679 1683 r = mnfst.rev(mnfstnode)
1680 1684 if r == next_rev[0]:
1681 1685 # If the last rev we looked at was the one just previous,
1682 1686 # we only need to see a diff.
1683 1687 deltamf = mnfst.readdelta(mnfstnode)
1684 1688 # For each line in the delta
1685 1689 for f, fnode in deltamf.items():
1686 1690 f = changedfiles.get(f, None)
1687 1691 # And if the file is in the list of files we care
1688 1692 # about.
1689 1693 if f is not None:
1690 1694 # Get the changenode this manifest belongs to
1691 1695 clnode = msng_mnfst_set[mnfstnode]
1692 1696 # Create the set of filenodes for the file if
1693 1697 # there isn't one already.
1694 1698 ndset = msng_filenode_set.setdefault(f, {})
1695 1699 # And set the filenode's changelog node to the
1696 1700 # manifest's if it hasn't been set already.
1697 1701 ndset.setdefault(fnode, clnode)
1698 1702 else:
1699 1703 # Otherwise we need a full manifest.
1700 1704 m = mnfst.read(mnfstnode)
1701 1705 # For every file in we care about.
1702 1706 for f in changedfiles:
1703 1707 fnode = m.get(f, None)
1704 1708 # If it's in the manifest
1705 1709 if fnode is not None:
1706 1710 # See comments above.
1707 1711 clnode = msng_mnfst_set[mnfstnode]
1708 1712 ndset = msng_filenode_set.setdefault(f, {})
1709 1713 ndset.setdefault(fnode, clnode)
1710 1714 # Remember the revision we hope to see next.
1711 1715 next_rev[0] = r + 1
1712 1716 return collect_msng_filenodes
1713 1717
1714 1718 # We have a list of filenodes we think we need for a file, lets remove
1715 1719 # all those we now the recipient must have.
1716 1720 def prune_filenodes(f, filerevlog):
1717 1721 msngset = msng_filenode_set[f]
1718 1722 hasset = {}
1719 1723 # If a 'missing' filenode thinks it belongs to a changenode we
1720 1724 # assume the recipient must have, then the recipient must have
1721 1725 # that filenode.
1722 1726 for n in msngset:
1723 1727 clnode = cl.node(filerevlog.linkrev(n))
1724 1728 if clnode in has_cl_set:
1725 1729 hasset[n] = 1
1726 1730 prune_parents(filerevlog, hasset, msngset)
1727 1731
1728 1732 # A function generator function that sets up the a context for the
1729 1733 # inner function.
1730 1734 def lookup_filenode_link_func(fname):
1731 1735 msngset = msng_filenode_set[fname]
1732 1736 # Lookup the changenode the filenode belongs to.
1733 1737 def lookup_filenode_link(fnode):
1734 1738 return msngset[fnode]
1735 1739 return lookup_filenode_link
1736 1740
1737 1741 # Add the nodes that were explicitly requested.
1738 1742 def add_extra_nodes(name, nodes):
1739 1743 if not extranodes or name not in extranodes:
1740 1744 return
1741 1745
1742 1746 for node, linknode in extranodes[name]:
1743 1747 if node not in nodes:
1744 1748 nodes[node] = linknode
1745 1749
1746 1750 # Now that we have all theses utility functions to help out and
1747 1751 # logically divide up the task, generate the group.
1748 1752 def gengroup():
1749 1753 # The set of changed files starts empty.
1750 1754 changedfiles = {}
1751 1755 # Create a changenode group generator that will call our functions
1752 1756 # back to lookup the owning changenode and collect information.
1753 1757 group = cl.group(msng_cl_lst, identity,
1754 1758 manifest_and_file_collector(changedfiles))
1755 1759 for chnk in group:
1756 1760 yield chnk
1757 1761
1758 1762 # The list of manifests has been collected by the generator
1759 1763 # calling our functions back.
1760 1764 prune_manifests()
1761 1765 add_extra_nodes(1, msng_mnfst_set)
1762 1766 msng_mnfst_lst = msng_mnfst_set.keys()
1763 1767 # Sort the manifestnodes by revision number.
1764 1768 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1765 1769 # Create a generator for the manifestnodes that calls our lookup
1766 1770 # and data collection functions back.
1767 1771 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1768 1772 filenode_collector(changedfiles))
1769 1773 for chnk in group:
1770 1774 yield chnk
1771 1775
1772 1776 # These are no longer needed, dereference and toss the memory for
1773 1777 # them.
1774 1778 msng_mnfst_lst = None
1775 1779 msng_mnfst_set.clear()
1776 1780
1777 1781 if extranodes:
1778 1782 for fname in extranodes:
1779 1783 if isinstance(fname, int):
1780 1784 continue
1781 1785 add_extra_nodes(fname,
1782 1786 msng_filenode_set.setdefault(fname, {}))
1783 1787 changedfiles[fname] = 1
1784 1788 changedfiles = changedfiles.keys()
1785 1789 changedfiles.sort()
1786 1790 # Go through all our files in order sorted by name.
1787 1791 for fname in changedfiles:
1788 1792 filerevlog = self.file(fname)
1789 1793 if filerevlog.count() == 0:
1790 1794 raise util.Abort(_("empty or missing revlog for %s") % fname)
1791 1795 # Toss out the filenodes that the recipient isn't really
1792 1796 # missing.
1793 1797 if fname in msng_filenode_set:
1794 1798 prune_filenodes(fname, filerevlog)
1795 1799 msng_filenode_lst = msng_filenode_set[fname].keys()
1796 1800 else:
1797 1801 msng_filenode_lst = []
1798 1802 # If any filenodes are left, generate the group for them,
1799 1803 # otherwise don't bother.
1800 1804 if len(msng_filenode_lst) > 0:
1801 1805 yield changegroup.chunkheader(len(fname))
1802 1806 yield fname
1803 1807 # Sort the filenodes by their revision #
1804 1808 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1805 1809 # Create a group generator and only pass in a changenode
1806 1810 # lookup function as we need to collect no information
1807 1811 # from filenodes.
1808 1812 group = filerevlog.group(msng_filenode_lst,
1809 1813 lookup_filenode_link_func(fname))
1810 1814 for chnk in group:
1811 1815 yield chnk
1812 1816 if fname in msng_filenode_set:
1813 1817 # Don't need this anymore, toss it to free memory.
1814 1818 del msng_filenode_set[fname]
1815 1819 # Signal that no more groups are left.
1816 1820 yield changegroup.closechunk()
1817 1821
1818 1822 if msng_cl_lst:
1819 1823 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1820 1824
1821 1825 return util.chunkbuffer(gengroup())
1822 1826
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every changeset descending from any of basenodes is outgoing
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            # changelog nodes are their own link nodes
            return x

        def gennodelst(revlog):
            # yield the nodes of revlog whose linked changeset revision is
            # part of the outgoing set
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # as changelog chunks are generated, record every file touched
            # by each outgoing changeset into changedfileset
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a node of revlog back to the changeset node it belongs to
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changelog chunks come first; the collector side-effect fills
            # in changedfiles while they stream out
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # then the manifest chunks
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # finally one group per changed file: a filename header chunk,
            # then its revision chunks.  NOTE: 'changegroup' below is the
            # changegroup module, not this method (methods are not in scope
            # as bare names).
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # an empty chunk signals the end of the stream
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1892 1896
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # link-revision callback for the changelog: each incoming
            # changeset links to the next changelog revision
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # link-revision callback for manifests/filelogs: map a
            # changeset node to its (already added) changelog revision
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            # weak proxy avoids a reference cycle between the transaction
            # and the revlogs holding on to it
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: last changelog revision before/after the group
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # an empty chunk terminates the list of file groups
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # run pre-transaction hook while a rollback is still possible
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            # drop the transaction reference; its destructor rolls back if
            # tr.close() was not reached
            del tr

        if changesets > 0:
            # post-transaction notification hooks (failures no longer abort)
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1992 1996
1993 1997
1994 1998 def stream_in(self, remote):
1995 1999 fp = remote.stream_out()
1996 2000 l = fp.readline()
1997 2001 try:
1998 2002 resp = int(l)
1999 2003 except ValueError:
2000 2004 raise util.UnexpectedOutput(
2001 2005 _('Unexpected response from remote server:'), l)
2002 2006 if resp == 1:
2003 2007 raise util.Abort(_('operation forbidden by server'))
2004 2008 elif resp == 2:
2005 2009 raise util.Abort(_('locking the remote repository failed'))
2006 2010 elif resp != 0:
2007 2011 raise util.Abort(_('the server sent an unknown error code'))
2008 2012 self.ui.status(_('streaming all changes\n'))
2009 2013 l = fp.readline()
2010 2014 try:
2011 2015 total_files, total_bytes = map(int, l.split(' ', 1))
2012 2016 except ValueError, TypeError:
2013 2017 raise util.UnexpectedOutput(
2014 2018 _('Unexpected response from remote server:'), l)
2015 2019 self.ui.status(_('%d files to transfer, %s of data\n') %
2016 2020 (total_files, util.bytecount(total_bytes)))
2017 2021 start = time.time()
2018 2022 for i in xrange(total_files):
2019 2023 # XXX doesn't support '\n' or '\r' in filenames
2020 2024 l = fp.readline()
2021 2025 try:
2022 2026 name, size = l.split('\0', 1)
2023 2027 size = int(size)
2024 2028 except ValueError, TypeError:
2025 2029 raise util.UnexpectedOutput(
2026 2030 _('Unexpected response from remote server:'), l)
2027 2031 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2028 2032 ofp = self.sopener(name, 'w')
2029 2033 for chunk in util.filechunkiter(fp, limit=size):
2030 2034 ofp.write(chunk)
2031 2035 ofp.close()
2032 2036 elapsed = time.time() - start
2033 2037 if elapsed <= 0:
2034 2038 elapsed = 0.001
2035 2039 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2036 2040 (util.bytecount(total_bytes), elapsed,
2037 2041 util.bytecount(total_bytes / elapsed)))
2038 2042 self.invalidate()
2039 2043 return len(self.heads()) + 1
2040 2044
2041 2045 def clone(self, remote, heads=[], stream=False):
2042 2046 '''clone remote repository.
2043 2047
2044 2048 keyword arguments:
2045 2049 heads: list of revs to clone (forces use of pull)
2046 2050 stream: use streaming clone if possible'''
2047 2051
2048 2052 # now, all clients that can request uncompressed clones can
2049 2053 # read repo formats supported by all servers that can serve
2050 2054 # them.
2051 2055
2052 2056 # if revlog format changes, client will have to check version
2053 2057 # and format flags on "stream" capability, and use
2054 2058 # uncompressed only if compatible.
2055 2059
2056 2060 if stream and not heads and remote.capable('stream'):
2057 2061 return self.stream_in(remote)
2058 2062 return self.pull(remote, heads)
2059 2063
2060 2064 # used to avoid circular references so destructors work
def aftertrans(files):
    # Return a closure that performs the queued renames.  Copying the
    # pairs into plain tuples keeps the closure from holding references
    # back into the transaction, so destructors can still run.
    renamefiles = [tuple(pair) for pair in files]
    def a():
        for src_path, dst_path in renamefiles:
            util.rename(src_path, dst_path)
    return a
2067 2071
def instance(ui, path, create):
    """Open (or create) the local repository at path, stripping any
    'file:' scheme prefix first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2070 2074
def islocal(path):
    """Repositories handled by this module always live on local disk."""
    return True
@@ -1,65 +1,101 b''
1 1 #!/bin/sh
2 2
3 3 cat > unix2dos.py <<EOF
4 4 import sys
5 5
6 6 for path in sys.argv[1:]:
7 7 data = file(path, 'rb').read()
8 8 data = data.replace('\n', '\r\n')
9 9 file(path, 'wb').write(data)
10 10 EOF
11 11
12 cat > print.py <<EOF
13 import sys
14 print(sys.stdin.read().replace('\n', '<LF>').replace('\r', '<CR>').replace('\0', '<NUL>'))
15 EOF
16
12 17 hg init
13 18 echo '[hooks]' >> .hg/hgrc
14 19 echo 'pretxncommit.crlf = python:hgext.win32text.forbidcrlf' >> .hg/hgrc
15 20 echo 'pretxnchangegroup.crlf = python:hgext.win32text.forbidcrlf' >> .hg/hgrc
16 21 cat .hg/hgrc
17 22 echo
18 23
19 24 echo hello > f
20 25 hg add f
21 26 hg ci -m 1 -d'0 0'
22 27 echo
23 28
24 29 python unix2dos.py f
25 30 hg ci -m 2 -d'0 0'
26 31 hg revert -a
27 32 echo
28 33
29 34 mkdir d
30 35 echo hello > d/f2
31 36 python unix2dos.py d/f2
32 37 hg add d/f2
33 38 hg ci -m 3 -d'0 0'
34 39 hg revert -a
35 40 rm d/f2
36 41 echo
37 42
38 43 hg rem f
39 44 hg ci -m 4 -d'0 0'
40 45 echo
41 46
42 47 python -c 'file("bin", "wb").write("hello\x00\x0D\x0A")'
43 48 hg add bin
44 49 hg ci -m 5 -d'0 0'
45 50 hg log -v
46 51 echo
47 52
48 53 hg clone . dupe
49 54 echo
50 55 for x in a b c d; do echo content > dupe/$x; done
51 56 hg -R dupe add
52 57 python unix2dos.py dupe/b dupe/c dupe/d
53 58 hg -R dupe ci -m a -d'0 0' dupe/a
54 59 hg -R dupe ci -m b/c -d'0 0' dupe/[bc]
55 60 hg -R dupe ci -m d -d'0 0' dupe/d
56 61 hg -R dupe log -v
57 62 echo
58 63
59 64 hg pull dupe
60 65 echo
61 66
62 67 hg log -v
63 68 echo
64 69
65 # XXX missing tests for encode/decode hooks
70 rm .hg/hgrc
71 (echo some; echo text) > f3
72 python -c 'file("f4.bat", "wb").write("rem empty\x0D\x0A")'
73 hg add f3 f4.bat
74 hg ci -m 6 -d'0 0'
75
76 python print.py < bin
77 python print.py < f3
78 python print.py < f4.bat
79 echo
80
81 echo '[extensions]' >> .hg/hgrc
82 echo 'win32text = ' >> .hg/hgrc
83 echo '[decode]' >> .hg/hgrc
84 echo '** = cleverdecode:' >> .hg/hgrc
85 echo '[encode]' >> .hg/hgrc
86 echo '** = cleverencode:' >> .hg/hgrc
87 cat .hg/hgrc
88 echo
89
90 rm f3 f4.bat bin
91 hg co 2>&1 | python -c 'import sys, os; sys.stdout.write(sys.stdin.read().replace(os.getcwd(), "...."))'
92 python print.py < bin
93 python print.py < f3
94 python print.py < f4.bat
95 echo
96
97 python -c 'file("f5.sh", "wb").write("# empty\x0D\x0A")'
98 hg add f5.sh
99 hg ci -m 7 -d'0 0'
100 python print.py < f5.sh
101 hg cat f5.sh | python print.py
@@ -1,157 +1,179 b''
1 1 [hooks]
2 2 pretxncommit.crlf = python:hgext.win32text.forbidcrlf
3 3 pretxnchangegroup.crlf = python:hgext.win32text.forbidcrlf
4 4
5 5
6 6 Attempt to commit or push text file(s) using CRLF line endings
7 7 in b1aa5cde7ff4: f
8 8 transaction abort!
9 9 rollback completed
10 10 abort: pretxncommit.crlf hook failed
11 11 reverting f
12 12
13 13 Attempt to commit or push text file(s) using CRLF line endings
14 14 in 88b17af74937: d/f2
15 15 transaction abort!
16 16 rollback completed
17 17 abort: pretxncommit.crlf hook failed
18 18 forgetting d/f2
19 19
20 20
21 21 changeset: 2:b67b2dae057a
22 22 tag: tip
23 23 user: test
24 24 date: Thu Jan 01 00:00:00 1970 +0000
25 25 files: bin
26 26 description:
27 27 5
28 28
29 29
30 30 changeset: 1:c72a7d1d0907
31 31 user: test
32 32 date: Thu Jan 01 00:00:00 1970 +0000
33 33 files: f
34 34 description:
35 35 4
36 36
37 37
38 38 changeset: 0:fcf06d5c4e1d
39 39 user: test
40 40 date: Thu Jan 01 00:00:00 1970 +0000
41 41 files: f
42 42 description:
43 43 1
44 44
45 45
46 46
47 47 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
48 48
49 49 adding dupe/a
50 50 adding dupe/b
51 51 adding dupe/c
52 52 adding dupe/d
53 53 changeset: 5:6e8a7629ff5b
54 54 tag: tip
55 55 user: test
56 56 date: Thu Jan 01 00:00:00 1970 +0000
57 57 files: d
58 58 description:
59 59 d
60 60
61 61
62 62 changeset: 4:ac30a42ce8bc
63 63 user: test
64 64 date: Thu Jan 01 00:00:00 1970 +0000
65 65 files: b c
66 66 description:
67 67 b/c
68 68
69 69
70 70 changeset: 3:a73b85ef1fb7
71 71 user: test
72 72 date: Thu Jan 01 00:00:00 1970 +0000
73 73 files: a
74 74 description:
75 75 a
76 76
77 77
78 78 changeset: 2:b67b2dae057a
79 79 user: test
80 80 date: Thu Jan 01 00:00:00 1970 +0000
81 81 files: bin
82 82 description:
83 83 5
84 84
85 85
86 86 changeset: 1:c72a7d1d0907
87 87 user: test
88 88 date: Thu Jan 01 00:00:00 1970 +0000
89 89 files: f
90 90 description:
91 91 4
92 92
93 93
94 94 changeset: 0:fcf06d5c4e1d
95 95 user: test
96 96 date: Thu Jan 01 00:00:00 1970 +0000
97 97 files: f
98 98 description:
99 99 1
100 100
101 101
102 102
103 103 pulling from dupe
104 104 searching for changes
105 105 adding changesets
106 106 adding manifests
107 107 adding file changes
108 108 added 3 changesets with 4 changes to 4 files
109 109 Attempt to commit or push text file(s) using CRLF line endings
110 110 in ac30a42ce8bc: b
111 111 in ac30a42ce8bc: c
112 112 in 6e8a7629ff5b: d
113 113
114 114 To prevent this mistake in your local repository,
115 115 add to Mercurial.ini or .hg/hgrc:
116 116
117 117 [hooks]
118 118 pretxncommit.crlf = python:hgext.win32text.forbidcrlf
119 119
120 120 and also consider adding:
121 121
122 122 [extensions]
123 123 hgext.win32text =
124 124 [encode]
125 125 ** = cleverencode:
126 126 [decode]
127 127 ** = cleverdecode:
128 128 transaction abort!
129 129 rollback completed
130 130 abort: pretxnchangegroup.crlf hook failed
131 131
132 132 changeset: 2:b67b2dae057a
133 133 tag: tip
134 134 user: test
135 135 date: Thu Jan 01 00:00:00 1970 +0000
136 136 files: bin
137 137 description:
138 138 5
139 139
140 140
141 141 changeset: 1:c72a7d1d0907
142 142 user: test
143 143 date: Thu Jan 01 00:00:00 1970 +0000
144 144 files: f
145 145 description:
146 146 4
147 147
148 148
149 149 changeset: 0:fcf06d5c4e1d
150 150 user: test
151 151 date: Thu Jan 01 00:00:00 1970 +0000
152 152 files: f
153 153 description:
154 154 1
155 155
156 156
157 157
158 hello<NUL><CR><LF>
159 some<LF>text<LF>
160 rem empty<CR><LF>
161
162 [extensions]
163 win32text =
164 [decode]
165 ** = cleverdecode:
166 [encode]
167 ** = cleverencode:
168
169 WARNING: f4.bat already has CRLF line endings
170 and does not need EOL conversion by the win32text plugin.
171 Before your next commit, please reconsider your encode/decode settings in
172 Mercurial.ini or ..../.hg/hgrc.
173 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
174 hello<NUL><CR><LF>
175 some<CR><LF>text<CR><LF>
176 rem empty<CR><LF>
177
178 # empty<CR><LF>
179 # empty<LF>
General Comments 0
You need to be logged in to leave comments. Login now