##// END OF EJS Templates
symlinks: use is_link wherever is_exec is used
Matt Mackall -
r4002:d7b9ec58 default
parent child Browse files
Show More
@@ -1,517 +1,518 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import ancestor, bdiff, repo, revlog, util, os, errno
11 11
class changectx(object):
    """A changecontext object makes access to data related to a particular
    changeset convenient."""
    def __init__(self, repo, changeid=None):
        """changeid is a revision number, node, or tag"""
        self._repo = repo

        # no changeid given (but 0 is a valid revision): default to the
        # working directory's first parent; an empty repo (rev -1) falls
        # through to 'tip'
        if not changeid and changeid != 0:
            p1, p2 = self._repo.dirstate.parents()
            self._rev = self._repo.changelog.rev(p1)
            if self._rev == -1:
                changeid = 'tip'
            else:
                self._node = p1
                return

        self._node = self._repo.lookup(changeid)
        self._rev = self._repo.changelog.rev(self._node)

    def __str__(self):
        return short(self.node())

    def __repr__(self):
        return "<changectx %s>" % str(self)

    def __eq__(self, other):
        try:
            return self._rev == other._rev
        except AttributeError:
            return False

    def __nonzero__(self):
        return self._rev != nullrev

    def __getattr__(self, name):
        # compute and cache expensive attributes on first access
        if name == '_changeset':
            self._changeset = self._repo.changelog.read(self.node())
            return self._changeset
        elif name == '_manifest':
            self._manifest = self._repo.manifest.read(self._changeset[0])
            return self._manifest
        elif name == '_manifestdelta':
            md = self._repo.manifest.readdelta(self._changeset[0])
            self._manifestdelta = md
            return self._manifestdelta
        else:
            raise AttributeError, name

    def changeset(self): return self._changeset
    def manifest(self): return self._manifest

    # accessors into the changeset tuple:
    # [0] manifest node, [1] user, [2] date, [3] files,
    # [4] description, [5] extra dict
    def rev(self): return self._rev
    def node(self): return self._node
    def user(self): return self._changeset[1]
    def date(self): return self._changeset[2]
    def files(self): return self._changeset[3]
    def description(self): return self._changeset[4]
    def branch(self): return self._changeset[5].get("branch", "")

    def parents(self):
        """return contexts for each parent changeset"""
        p = self._repo.changelog.parents(self._node)
        return [changectx(self._repo, x) for x in p]

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def filenode(self, path):
        """return the filenode for path in this changeset's manifest,
        raising revlog.LookupError if absent"""
        # use the full manifest if it is already cached
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path]
            except KeyError:
                raise revlog.LookupError(_("'%s' not found in manifest") % path)
        # otherwise try the cheaper manifest delta before a full find
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return self._manifestdelta[path]
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise revlog.LookupError(_("'%s' not found in manifest") % path)

        return node

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def filectxs(self):
        """generate a file context for each file in this changeset's
        manifest"""
        mf = self.manifest()
        m = mf.keys()
        m.sort()
        for f in m:
            yield self.filectx(f, fileid=mf[f])

    def ancestor(self, c2):
        """
        return the ancestor context of self and c2
        """
        n = self._repo.changelog.ancestor(self._node, c2._node)
        return changectx(self._repo, n)
118 118
class filectx(object):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one handle on the revision must be supplied
        assert (changeid is not None
                or fileid is not None
                or changectx is not None)

        if filelog:
            self._filelog = filelog

        # remember whichever handle we were given; everything else is
        # derived lazily in __getattr__
        if fileid is None:
            if changectx is None:
                self._changeid = changeid
            else:
                self._changectx = changectx
        else:
            self._fileid = fileid

    def __getattr__(self, name):
        # compute and cache derived attributes on first access
        if name == '_changectx':
            self._changectx = changectx(self._repo, self._changeid)
            return self._changectx
        elif name == '_filelog':
            self._filelog = self._repo.file(self._path)
            return self._filelog
        elif name == '_changeid':
            self._changeid = self._filelog.linkrev(self._filenode)
            return self._changeid
        elif name == '_filenode':
            if '_fileid' in self.__dict__:
                self._filenode = self._filelog.lookup(self._fileid)
            else:
                self._filenode = self._changectx.filenode(self._path)
            return self._filenode
        elif name == '_filerev':
            self._filerev = self._filelog.rev(self._filenode)
            return self._filerev
        else:
            raise AttributeError, name

    def __nonzero__(self):
        try:
            n = self._filenode
            return True
        except revlog.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), short(self.node()))

    def __repr__(self):
        return "<filectx %s>" % str(self)

    def __eq__(self, other):
        try:
            return (self._path == other._path
                    and self._changeid == other._changeid)
        except AttributeError:
            return False

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    # simple accessors for the lazily-computed state
    def filerev(self): return self._filerev
    def filenode(self): return self._filenode
    def filelog(self): return self._filelog

    def rev(self):
        # prefer the changectx's revision when one exists; fall back to
        # the filelog linkrev
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        return self._filelog.linkrev(self._filenode)

    # changeset-level queries are delegated to the associated changectx
    def node(self): return self._changectx.node()
    def user(self): return self._changectx.user()
    def date(self): return self._changectx.date()
    def files(self): return self._changectx.files()
    def description(self): return self._changectx.description()
    def branch(self): return self._changectx.branch()
    def manifest(self): return self._changectx.manifest()
    def changectx(self): return self._changectx

    def data(self): return self._filelog.read(self._filenode)
    def renamed(self): return self._filelog.renamed(self._filenode)
    def path(self): return self._path
    def size(self): return self._filelog.size(self._filerev)

    def cmp(self, text): return self._filelog.cmp(self._filenode, text)

    def parents(self):
        """return the parent filectxs, following the copy source for a
        copied/renamed revision; null parents are omitted"""
        p = self._path
        fl = self._filelog
        pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]

        # a rename replaces the first parent with the copy source, which
        # lives in another filelog (pass None and let filectx open it)
        r = self.renamed()
        if r:
            pl[0] = (r[0], r[1], None)

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p,n,l in pl if n != nullid]

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

    def annotate(self, follow=False):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed'''

        def decorate(text, rev):
            # tag every line of text with the given (file)ctx
            return ([rev] * len(text.splitlines()), text)

        def pair(parent, child):
            # lines unchanged relative to the parent inherit its tags
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.cachefunc(lambda x: self._repo.file(x))
        def getctx(path, fileid):
            log = path == self._path and self._filelog or getlog(path)
            return filectx(self._repo, path, fileid=fileid, filelog=log)
        getctx = util.cachefunc(getctx)

        def parents(f):
            # we want to reuse filectx objects as much as possible
            p = f._path
            if f._filerev is None: # working dir
                pl = [(n.path(), n.filerev()) for n in f.parents()]
            else:
                pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]

            if follow:
                r = f.renamed()
                if r:
                    pl[0] = (r[0], getlog(r[0]).rev(r[1]))

            return [getctx(p, n) for p, n in pl if n != nullrev]

        # use linkrev to find the first changeset where self appeared
        if self.rev() != self._filelog.linkrev(self._filenode):
            base = self.filectx(self.filerev())
        else:
            base = self

        # find all ancestors
        needed = {base: 1}
        visit = [base]
        files = [base._path]
        while visit:
            f = visit.pop(0)
            for p in parents(f):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                    if p._path not in files:
                        files.append(p._path)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision (per file) which is a topological order
        visit = []
        files.reverse()
        for f in files:
            fn = [(n._filerev, n) for n in needed.keys() if n._path == f]
            fn.sort()
            visit.extend(fn)
        hist = {}

        for r, f in visit:
            curr = decorate(f.data(), f)
            for p in parents(f):
                # NOTE(review): p is a filectx while nullid is a node, so
                # this comparison looks always-true -- confirm intent
                if p != nullid:
                    curr = pair(hist[p], curr)
                    # trim the history of unneeded revs
                    needed[p] -= 1
                    if not needed[p]:
                        del hist[p]
            hist[f] = curr

        return zip(hist[f][0], hist[f][1].splitlines(1))

    def ancestor(self, fc2):
        """
        find the common ancestor file context, if any, of self, and fc2
        """

        acache = {}

        # prime the ancestor cache for the working directory
        for c in (self, fc2):
            if c._filerev == None:
                pl = [(n.path(), n.filenode()) for n in c.parents()]
                acache[(c._path, None)] = pl

        flcache = {self._path:self._filelog, fc2._path:fc2._filelog}
        def parents(vertex):
            # vertex is a (path, filenode) pair; results are memoized
            if vertex in acache:
                return acache[vertex]
            f, n = vertex
            if f not in flcache:
                flcache[f] = self._repo.file(f)
            fl = flcache[f]
            pl = [(f, p) for p in fl.parents(n) if p != nullid]
            # follow copies: the copy source is an extra parent
            re = fl.renamed(n)
            if re:
                pl.append(re)
            acache[vertex] = pl
            return pl

        a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
        v = ancestor.ancestor(a, b, parents)
        if v:
            f, n = v
            return filectx(self._repo, f, fileid=n, filelog=flcache[f])

        return None
349 349
class workingctx(changectx):
    """A workingctx object makes access to data related to
    the current working directory convenient."""
    def __init__(self, repo):
        self._repo = repo
        # the working directory has no committed revision/node of its own
        self._rev = None
        self._node = None

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def __getattr__(self, name):
        # compute and cache derived attributes on first access
        if name == '_parents':
            self._parents = self._repo.parents()
            return self._parents
        if name == '_status':
            self._status = self._repo.status()
            return self._status
        if name == '_manifest':
            self._buildmanifest()
            return self._manifest
        else:
            raise AttributeError, name

    def _buildmanifest(self):
        """generate a manifest corresponding to the working directory"""

        # start from the first parent's manifest and overlay the
        # working-directory state
        man = self._parents[0].manifest().copy()
        is_exec = util.execfunc(self._repo.root, man.execf)
        is_link = util.linkfunc(self._repo.root, man.linkf)
        copied = self._repo.dirstate.copies()
        modified, added, removed, deleted, unknown = self._status[:5]
        for i, l in (("a", added), ("m", modified), ("u", unknown)):
            for f in l:
                # mark the entry with its working-dir state suffix
                man[f] = man.get(copied.get(f, f), nullid) + i
                try:
                    man.set(f, is_exec(f), is_link(f))
                except OSError:
                    # file disappeared from disk: keep inherited flags
                    pass

        for f in deleted + removed:
            if f in man:
                del man[f]

        self._manifest = man

    def manifest(self): return self._manifest

    def user(self): return self._repo.ui.username()
    def date(self): return util.makedate()
    def description(self): return ""
    def files(self):
        f = self.modified() + self.added() + self.removed()
        f.sort()
        return f

    # accessors into the cached repo.status() tuple
    def modified(self): return self._status[0]
    def added(self): return self._status[1]
    def removed(self): return self._status[2]
    def deleted(self): return self._status[3]
    def unknown(self): return self._status[4]
    def clean(self): return self._status[5]
    def branch(self):
        try:
            return self._repo.opener("branch").read().strip()
        except IOError:
            # no branch file: return the empty name
            return ""

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def children(self):
        return []

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def ancestor(self, c2):
        """return the ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now
435 436
436 437 class workingfilectx(filectx):
437 438 """A workingfilectx object makes access to data related to a particular
438 439 file in the working directory convenient."""
439 440 def __init__(self, repo, path, filelog=None, workingctx=None):
440 441 """changeid can be a changeset revision, node, or tag.
441 442 fileid can be a file revision or node."""
442 443 self._repo = repo
443 444 self._path = path
444 445 self._changeid = None
445 446 self._filerev = self._filenode = None
446 447
447 448 if filelog:
448 449 self._filelog = filelog
449 450 if workingctx:
450 451 self._changectx = workingctx
451 452
452 453 def __getattr__(self, name):
453 454 if name == '_changectx':
454 455 self._changectx = workingctx(repo)
455 456 return self._changectx
456 457 elif name == '_repopath':
457 458 self._repopath = (self._repo.dirstate.copied(self._path)
458 459 or self._path)
459 460 return self._repopath
460 461 elif name == '_filelog':
461 462 self._filelog = self._repo.file(self._repopath)
462 463 return self._filelog
463 464 else:
464 465 raise AttributeError, name
465 466
466 467 def __nonzero__(self):
467 468 return True
468 469
469 470 def __str__(self):
470 471 return "%s@%s" % (self.path(), self._changectx)
471 472
472 473 def filectx(self, fileid):
473 474 '''opens an arbitrary revision of the file without
474 475 opening a new filelog'''
475 476 return filectx(self._repo, self._repopath, fileid=fileid,
476 477 filelog=self._filelog)
477 478
478 479 def rev(self):
479 480 if '_changectx' in self.__dict__:
480 481 return self._changectx.rev()
481 482 return self._filelog.linkrev(self._filenode)
482 483
483 484 def data(self): return self._repo.wread(self._path)
484 485 def renamed(self):
485 486 rp = self._repopath
486 487 if rp == self._path:
487 488 return None
488 489 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
489 490
490 491 def parents(self):
491 492 '''return parent filectxs, following copies if necessary'''
492 493 p = self._path
493 494 rp = self._repopath
494 495 pcl = self._changectx._parents
495 496 fl = self._filelog
496 497 pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
497 498 if len(pcl) > 1:
498 499 if rp != p:
499 500 fl = None
500 501 pl.append((p, pcl[1]._manifest.get(p, nullid), fl))
501 502
502 503 return [filectx(self._repo, p, fileid=n, filelog=l)
503 504 for p,n,l in pl if n != nullid]
504 505
505 506 def children(self):
506 507 return []
507 508
508 509 def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
509 510 def date(self):
510 511 t, tz = self._changectx.date()
511 512 try:
512 513 return (os.lstat(repo.wjoin(self._path)).st_mtime, tz)
513 514 except OSError, err:
514 515 if err.errno != errno.ENOENT: raise
515 516 return (t, tz)
516 517
517 518 def cmp(self, text): return self._repo.wread(self._path) == text
@@ -1,1867 +1,1869 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, appendfile, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = ('lookup', 'changegroupsubset')
17 17 supported = ('revlogv1', 'store')
18 18
    def __del__(self):
        # drop any pending transaction handle on teardown
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """open (or, with create=1, initialize) the repository at path.

        If path is None, walk upward from the cwd until a .hg
        directory is found."""
        repo.repository.__init__(self)
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.path = os.path.join(path, ".hg")
        self.root = os.path.realpath(path)
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                # initialize a new repository with the modern layout
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(os.path.join(self.path, "store"))
                requirements = ("revlogv1", "store")
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # missing requires file: an older repository layout
                requirements = []
        # check them
        for r in requirements:
            if r not in self.supported:
                raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            # legacy layout: store files live directly in .hg, unencoded
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repository without an hgrc is fine
            pass

        # derive the revlog version and flags from configuration
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
125 125
    def url(self):
        # repository URL using the 'file:' scheme
        return 'file:' + self.root
128 128
    def hook(self, name, throw=False, **args):
        """run every configured hook matching 'name'; return a truthy
        value if any hook reported failure (or raise if throw=True)"""
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the remaining dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: run the command with HG_* environment variables
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # both 'name' and 'name.suffix' entries match; run in sorted order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
209 209
    # characters that may not appear in a tag name
    tag_disallowed = ':\r\n'

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to commit over uncommitted .hgtags changes
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        self.wfile('.hgtags', 'ab').write(line)
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
255 255
256 256 def tags(self):
257 257 '''return a mapping of tag to node'''
258 258 if not self.tagscache:
259 259 self.tagscache = {}
260 260
261 261 def parsetag(line, context):
262 262 if not line:
263 263 return
264 264 s = l.split(" ", 1)
265 265 if len(s) != 2:
266 266 self.ui.warn(_("%s: cannot parse entry\n") % context)
267 267 return
268 268 node, key = s
269 269 key = util.tolocal(key.strip()) # stored in UTF-8
270 270 try:
271 271 bin_n = bin(node)
272 272 except TypeError:
273 273 self.ui.warn(_("%s: node '%s' is not well formed\n") %
274 274 (context, node))
275 275 return
276 276 if bin_n not in self.changelog.nodemap:
277 277 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
278 278 (context, key))
279 279 return
280 280 self.tagscache[key] = bin_n
281 281
282 282 # read the tags file from each head, ending with the tip,
283 283 # and add each tag found to the map, with "newer" ones
284 284 # taking precedence
285 285 f = None
286 286 for rev, node, fnode in self._hgtagsnodes():
287 287 f = (f and f.filectx(fnode) or
288 288 self.filectx('.hgtags', fileid=fnode))
289 289 count = 0
290 290 for l in f.data().splitlines():
291 291 count += 1
292 292 parsetag(l, _("%s, line %d") % (str(f), count))
293 293
294 294 try:
295 295 f = self.opener("localtags")
296 296 count = 0
297 297 for l in f:
298 298 # localtags are stored in the local character set
299 299 # while the internal tag table is stored in UTF-8
300 300 l = util.fromlocal(l)
301 301 count += 1
302 302 parsetag(l, _("localtags, line %d") % count)
303 303 except IOError:
304 304 pass
305 305
306 306 self.tagscache['tip'] = self.changelog.tip()
307 307
308 308 return self.tagscache
309 309
    def _hgtagsnodes(self):
        """return (rev, node, fnode) of the .hgtags file for each head
        that has one, deduplicated by .hgtags filenode"""
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # a later head has the same .hgtags: drop the earlier entry
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
327 327
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            # NOTE(review): bare except -- consider narrowing to the
            # specific lookup error
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]
339 339
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            # build the reverse (node -> tag list) mapping once
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])
347 347
    def _branchtags(self):
        """return the branch->node map, updating and rewriting the
        on-disk cache if it lags behind the changelog"""
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
357 357
    def branchtags(self):
        """return the branch->node map in the local charset, cached
        in memory after the first call"""
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache
370 370
    def _readbranchcache(self):
        """read branches.cache; return (branch map, tip node, tip rev).

        Any read/parse problem or a stale tip invalidates the cache
        and yields an empty map."""
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # first line records the tip as '<hex node> <rev>'
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.rstrip().split(" ", 1)
                partial[label] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # a broken cache is not fatal: report in debug mode and
            # fall back to rebuilding from scratch
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
394 394
    def _writebranchcache(self, branches, tip, tiprev):
        # best-effort write; an unwritable cache file is not an error
        try:
            f = self.opener("branches.cache", "w")
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
        except IOError:
            pass
403 403
    def _updatebranchcache(self, partial, start, end):
        """scan revisions [start, end) and record, per branch, the
        latest changeset seen"""
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            if b:
                partial[b] = c.node()
410 410
    def lookup(self, key):
        """resolve key ('.', 'null', a rev/node/tag/branch name, or a
        node prefix) to a changelog node; raise RepoError if unknown"""
        if key == '.':
            # '.' means the working directory's first parent
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        # last resort: unambiguous node-prefix match
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
429 429
430 430 def dev(self):
431 431 return os.lstat(self.path).st_dev
432 432
433 433 def local(self):
434 434 return True
435 435
436 436 def join(self, f):
437 437 return os.path.join(self.path, f)
438 438
439 439 def sjoin(self, f):
440 440 f = self.encodefn(f)
441 441 return os.path.join(self.spath, f)
442 442
443 443 def wjoin(self, f):
444 444 return os.path.join(self.root, f)
445 445
446 446 def file(self, f):
447 447 if f[0] == '/':
448 448 f = f[1:]
449 449 return filelog.filelog(self.sopener, f, self.revlogversion)
450 450
    def changectx(self, changeid=None):
        """Return a changectx for changeid (None selects the default
        handled by context.changectx)."""
        return context.changectx(self, changeid)
453 453
    def workingctx(self):
        """Return a context object for the working directory."""
        return context.workingctx(self)
456 456
    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        # a null second parent means there is only one real parent
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]
469 469
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        # NOTE(review): presumably at most one of changeid/fileid is
        # supplied at a time — confirm against context.filectx
        return context.filectx(self, path, changeid, fileid)
474 474
475 475 def getcwd(self):
476 476 return self.dirstate.getcwd()
477 477
478 478 def wfile(self, f, mode='r'):
479 479 return self.wopener(f, mode)
480 480
481 481 def wread(self, filename):
482 482 if self.encodepats == None:
483 483 l = []
484 484 for pat, cmd in self.ui.configitems("encode"):
485 485 mf = util.matcher(self.root, "", [pat], [], [])[1]
486 486 l.append((mf, cmd))
487 487 self.encodepats = l
488 488
489 489 data = self.wopener(filename, 'r').read()
490 490
491 491 for mf, cmd in self.encodepats:
492 492 if mf(filename):
493 493 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
494 494 data = util.filter(data, cmd)
495 495 break
496 496
497 497 return data
498 498
499 499 def wwrite(self, filename, data, fd=None):
500 500 if self.decodepats == None:
501 501 l = []
502 502 for pat, cmd in self.ui.configitems("decode"):
503 503 mf = util.matcher(self.root, "", [pat], [], [])[1]
504 504 l.append((mf, cmd))
505 505 self.decodepats = l
506 506
507 507 for mf, cmd in self.decodepats:
508 508 if mf(filename):
509 509 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
510 510 data = util.filter(data, cmd)
511 511 break
512 512
513 513 if fd:
514 514 return fd.write(data)
515 515 return self.wopener(filename, 'w').write(data)
516 516
    def transaction(self):
        """Open (or nest into) a transaction on the store.

        The current dirstate is journalled so rollback can restore it;
        on successful close the journal files are renamed to undo files
        (see aftertrans), enabling a later rollback().
        """
        tr = self.transhandle
        if tr != None and tr.running():
            # a transaction is already open: hand out a nested handle
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
536 536
    def recover(self):
        """Roll back an interrupted transaction, if a journal exists.

        Returns True when a journal was found and rolled back, False
        otherwise.
        """
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # refresh in-memory state from the restored files
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
547 547
    def rollback(self, wlock=None):
        """Undo the last committed transaction by replaying the undo
        files written when it closed (see transaction())."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # refresh both store and working-dir state from disk
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
560 560
561 561 def wreload(self):
562 562 self.dirstate.read()
563 563
564 564 def reload(self):
565 565 self.changelog.load()
566 566 self.manifest.load()
567 567 self.tagscache = None
568 568 self.nodetagscache = None
569 569
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file lockname.

        With wait false, LockHeld propagates immediately; otherwise a
        second attempt waits up to the configured ui.timeout (default
        600 seconds).  acquirefn, if given, runs once the lock is held.
        Returns the lock object.
        """
        try:
            # first try: no timeout, fail fast
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
585 585
    def lock(self, wait=1):
        """Acquire the store lock; caches are reloaded on acquisition."""
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
589 589
    def wlock(self, wait=1):
        """Acquire the working directory lock; the dirstate is written
        back on release and re-read on acquisition."""
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
594 594
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the (possibly pre-existing) filelog node for fn and, if
        the file actually changed, appends fn to changelist.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        # parent filenodes from each manifest (null when absent)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # fn is a copy/rename of cp: record copy metadata and decide
            # which parent filenode (if any) survives
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            # copy source is carried in meta, not as a filenode parent
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
636 636
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
        """Commit with explicitly supplied parents (import/convert path);
        falls back to the dirstate parents when p1 is not given."""
        # NOTE(review): the mutable default extra={} is never mutated here
        # and commit() copies it, so it is safe in practice
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock, extra=extra)
642 642
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset from the working directory, or from
        explicit parents p1/p2 when called via rawcommit.

        Returns the new changeset node, or None when nothing changed or
        the commit message was left empty.
        """
        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        # build the lists of files to check in / remove
        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to validate the branch name is UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            # a commit that changes nothing (not even the branch) is a no-op
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                # refresh exec/symlink flags from the working copy
                m1.set(f, is_exec(f), is_link(f))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit tolerates unreadable files: drop them
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build the editor template and let the user edit the message
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip leading blank lines; an empty message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
785 786
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walking a committed revision: use its manifest
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # safe during iteration: break follows immediately
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict was not found in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            # walking the working directory: delegate to the dirstate
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
819 820
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown, ignored,
        clean), each a sorted list of file names.
        """

        def fcmp(fn, mf):
            # full content comparison of working file vs manifest revision
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            # try to take the wlock so clean-file results can be written
            # back into the dirstate; fall back to read-only on contention
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # cache the result in the dirstate
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    # empty hash forces a content compare in the loop below
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # anything left in mf1 is gone from mf2: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
917 919
    def add(self, list, wlock=None):
        """Schedule the given files for addition; warns (rather than
        aborting) on missing, non-regular, or already-tracked entries."""
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")
932 934
933 935 def forget(self, list, wlock=None):
934 936 if not wlock:
935 937 wlock = self.wlock()
936 938 for f in list:
937 939 if self.dirstate.state(f) not in 'ai':
938 940 self.ui.warn(_("%s not added!\n") % f)
939 941 else:
940 942 self.dirstate.forget([f])
941 943
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal; with unlink=True, delete them
        from the working directory first."""
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # file already gone is fine
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just cancel the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
962 964
    def undelete(self, list, wlock=None):
        """Restore files scheduled for removal ('r') from the first
        parent: recreate contents and exec bit, mark them normal."""
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), m.execf(f))
                self.dirstate.update([f], "n")
977 979
    def copy(self, source, dest, wlock=None):
        """Record dest as a copy of source in the dirstate, scheduling
        dest for add first if it is untracked."""
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)
990 992
991 993 def heads(self, start=None):
992 994 heads = self.changelog.heads(start)
993 995 # sort the output in rev descending order
994 996 heads = [(-self.changelog.rev(h), h) for h in heads]
995 997 heads.sort()
996 998 return [n for (r, n) in heads]
997 999
    def branches(self, nodes):
        """For each node, walk first parents back to the start of its
        linear segment.

        Returns (head, root, parent1, parent2) tuples; the walk stops at
        a merge (two parents) or a root (null first parent).  Defaults
        to the changelog tip when nodes is empty.
        """
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
1011 1013
    def between(self, pairs):
        """For each (top, bottom) pair, sample nodes along the
        first-parent chain from top towards bottom at exponentially
        growing distances (1, 2, 4, ...); used by discovery's binary
        search over linear branch segments."""
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
1030 1032
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything on remote is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    # queue unknown parents for the next batched request
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # ask the remote in batches of 10 to bound request size
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1171 1173
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no common-node info supplied: run discovery to populate it
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
                if heads:
                    if p1 in heads:
                        updated_heads[p1] = True
                    if p2 in heads:
                        updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1219 1221
    def pull(self, remote, heads=None, force=False, lock=None):
        """Pull missing changesets from remote (optionally limited to
        ancestors of heads); returns the addchangegroup result, or 0
        when there is nothing to pull."""
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                # local repo is empty: this is an initial clone-style pull
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            # only release the lock if we acquired it ourselves
            if mylock:
                lock.release()
1245 1247
1246 1248 def push(self, remote, force=False, revs=None):
1247 1249 # there are two ways to push to remote repo:
1248 1250 #
1249 1251 # addchangegroup assumes local user can lock remote
1250 1252 # repo (local filesystem, old ssh servers).
1251 1253 #
1252 1254 # unbundle assumes local user cannot lock remote repo (new ssh
1253 1255 # servers, http servers).
1254 1256
1255 1257 if remote.capable('unbundle'):
1256 1258 return self.push_unbundle(remote, force, revs)
1257 1259 return self.push_addchangegroup(remote, force, revs)
1258 1260
    def prepush(self, remote, force, revs):
        """Shared push preparation: discover outgoing changesets and
        refuse to create new remote heads unless forced.

        Returns (changegroup, remote_heads) on success, or (None, code)
        when there is nothing to push or the push would add heads.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote is empty: nothing can become a "new" head
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1314 1316
    def push_addchangegroup(self, remote, force, revs):
        """Push by locking the remote repo and adding the changegroup
        directly (local filesystem / old ssh servers)."""
        lock = remote.lock()

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        # prepush declined: propagate its error code
        return ret[1]
1323 1325
    def push_unbundle(self, remote, force, revs):
        """Push without taking a remote lock (new ssh/http servers)."""
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        # prepush declined: propagate its error code
        return ret[1]
1336 1338
    def changegroupinfo(self, nodes):
        """Report how many (and, with --debug, which) changesets are
        about to be bundled."""
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))
1343 1345
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases  - changenodes assumed to be known by the recipient
        heads  - changenodes to transfer up to
        source - opaque tag passed through to the preoutgoing/outgoing hooks

        Returns a util.chunkbuffer that streams the group lazily; the real
        work happens inside the nested gengroup() generator as it is read.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            # next_rev is a one-element list so the closure can mutate it
            # (no 'nonlocal' in this Python version)
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1615 1617
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes - changenodes assumed to be present on the recipient
        source    - opaque tag passed to the preoutgoing/outgoing hooks

        Returns a util.chunkbuffer streaming the group lazily via the
        nested gengroup() generator.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every descendant of basenodes is outgoing
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of outgoing changelog revision numbers; used below to
        # decide which manifest/file revisions belong in the group
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        # a changenode 'owns' itself, so its lookup function is identity
        def identity(x):
            return x

        # yield the nodes of revlog whose linked changeset is outgoing,
        # in storage (revision-number) order
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # side-effect collector: records every file touched by an
        # outgoing changeset while the changelog group streams out
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a manifest/file node back to its owning changenode
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # a filename chunk announces the following file group
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1682 1684
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source  - stream of changegroup chunk data
        srctype - origin tag ('push', 'pull', ...) passed through to hooks
        url     - where the data came from, passed through to hooks

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            # the linkrev a newly added changeset gets is the current count
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1   # last rev before the group arrives
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1   # last rev after the group is applied
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # an empty chunk is the end-of-stream marker
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # cor+1 is the first newly added revision
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1789 1791
1790 1792
1791 1793 def stream_in(self, remote):
1792 1794 fp = remote.stream_out()
1793 1795 l = fp.readline()
1794 1796 try:
1795 1797 resp = int(l)
1796 1798 except ValueError:
1797 1799 raise util.UnexpectedOutput(
1798 1800 _('Unexpected response from remote server:'), l)
1799 1801 if resp == 1:
1800 1802 raise util.Abort(_('operation forbidden by server'))
1801 1803 elif resp == 2:
1802 1804 raise util.Abort(_('locking the remote repository failed'))
1803 1805 elif resp != 0:
1804 1806 raise util.Abort(_('the server sent an unknown error code'))
1805 1807 self.ui.status(_('streaming all changes\n'))
1806 1808 l = fp.readline()
1807 1809 try:
1808 1810 total_files, total_bytes = map(int, l.split(' ', 1))
1809 1811 except ValueError, TypeError:
1810 1812 raise util.UnexpectedOutput(
1811 1813 _('Unexpected response from remote server:'), l)
1812 1814 self.ui.status(_('%d files to transfer, %s of data\n') %
1813 1815 (total_files, util.bytecount(total_bytes)))
1814 1816 start = time.time()
1815 1817 for i in xrange(total_files):
1816 1818 # XXX doesn't support '\n' or '\r' in filenames
1817 1819 l = fp.readline()
1818 1820 try:
1819 1821 name, size = l.split('\0', 1)
1820 1822 size = int(size)
1821 1823 except ValueError, TypeError:
1822 1824 raise util.UnexpectedOutput(
1823 1825 _('Unexpected response from remote server:'), l)
1824 1826 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1825 1827 ofp = self.sopener(name, 'w')
1826 1828 for chunk in util.filechunkiter(fp, limit=size):
1827 1829 ofp.write(chunk)
1828 1830 ofp.close()
1829 1831 elapsed = time.time() - start
1830 1832 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1831 1833 (util.bytecount(total_bytes), elapsed,
1832 1834 util.bytecount(total_bytes / elapsed)))
1833 1835 self.reload()
1834 1836 return len(self.heads()) + 1
1835 1837
1836 1838 def clone(self, remote, heads=[], stream=False):
1837 1839 '''clone remote repository.
1838 1840
1839 1841 keyword arguments:
1840 1842 heads: list of revs to clone (forces use of pull)
1841 1843 stream: use streaming clone if possible'''
1842 1844
1843 1845 # now, all clients that can request uncompressed clones can
1844 1846 # read repo formats supported by all servers that can serve
1845 1847 # them.
1846 1848
1847 1849 # if revlog format changes, client will have to check version
1848 1850 # and format flags on "stream" capability, and use
1849 1851 # uncompressed only if compatible.
1850 1852
1851 1853 if stream and not heads and remote.capable('stream'):
1852 1854 return self.stream_in(remote)
1853 1855 return self.pull(remote, heads)
1854 1856
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    A plain closure is used (rather than a bound method) to avoid
    circular references so destructors work.
    """
    pending = [tuple(entry) for entry in files]
    def rename_all():
        for source, target in pending:
            util.rename(source, target)
    return rename_all
1862 1864
def instance(ui, path, create):
    """Instantiate a localrepository for *path*, which may be a plain
    filesystem path or carry a 'file:' scheme prefix."""
    return localrepository(ui, util.drop_scheme('file', path), create)
1865 1867
def islocal(path):
    """Repositories handled by this module are local by definition."""
    return True
@@ -1,1386 +1,1386 b''
1 1 """
2 2 util.py - Mercurial utility functions and platform specfic implementations
3 3
4 4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 5 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7 7
8 8 This software may be used and distributed according to the terms
9 9 of the GNU General Public License, incorporated herein by reference.
10 10
11 11 This contains helper routines that are independent of the SCM core and hide
12 12 platform-specific details from the core.
13 13 """
14 14
15 15 from i18n import _
16 16 import cStringIO, errno, getpass, popen2, re, shutil, sys, tempfile
17 17 import os, threading, time, calendar, ConfigParser, locale
18 18
19 19 _encoding = os.environ.get("HGENCODING") or locale.getpreferredencoding() \
20 20 or "ascii"
21 21 _encodingmode = os.environ.get("HGENCODINGMODE", "strict")
22 22 _fallbackencoding = 'ISO-8859-1'
23 23
24 24 def tolocal(s):
25 25 """
26 26 Convert a string from internal UTF-8 to local encoding
27 27
28 28 All internal strings should be UTF-8 but some repos before the
29 29 implementation of locale support may contain latin1 or possibly
30 30 other character sets. We attempt to decode everything strictly
31 31 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
32 32 replace unknown characters.
33 33 """
34 34 for e in ('UTF-8', _fallbackencoding):
35 35 try:
36 36 u = s.decode(e) # attempt strict decoding
37 37 return u.encode(_encoding, "replace")
38 38 except LookupError, k:
39 39 raise Abort(_("%s, please check your locale settings") % k)
40 40 except UnicodeDecodeError:
41 41 pass
42 42 u = s.decode("utf-8", "replace") # last ditch
43 43 return u.encode(_encoding, "replace")
44 44
45 45 def fromlocal(s):
46 46 """
47 47 Convert a string from the local character encoding to UTF-8
48 48
49 49 We attempt to decode strings using the encoding mode set by
50 50 HG_ENCODINGMODE, which defaults to 'strict'. In this mode, unknown
51 51 characters will cause an error message. Other modes include
52 52 'replace', which replaces unknown characters with a special
53 53 Unicode character, and 'ignore', which drops the character.
54 54 """
55 55 try:
56 56 return s.decode(_encoding, _encodingmode).encode("utf-8")
57 57 except UnicodeDecodeError, inst:
58 58 sub = s[max(0, inst.start-10):inst.start+10]
59 59 raise Abort("decoding near '%s': %s!" % (sub, inst))
60 60 except LookupError, k:
61 61 raise Abort(_("%s, please check your locale settings") % k)
62 62
def locallen(s):
    """Find the length in characters of a local string"""
    decoded = s.decode(_encoding, "replace")
    return len(decoded)
66 66
67 67 def localsub(s, a, b=None):
68 68 try:
69 69 u = s.decode(_encoding, _encodingmode)
70 70 if b is not None:
71 71 u = u[a:b]
72 72 else:
73 73 u = u[:a]
74 74 return u.encode(_encoding, _encodingmode)
75 75 except UnicodeDecodeError, inst:
76 76 sub = s[max(0, inst.start-10), inst.start+10]
77 77 raise Abort(_("decoding near '%s': %s!\n") % (sub, inst))
78 78
# used by parsedate
# time.strptime format strings tried in order when parsing a date typed
# by the user; 24-hour (%H) and 12-hour (%I ... %p) variants are paired.
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    # fix: was '%I:%M:%SP' -- the bare trailing 'P' was a typo for the
    # %p (AM/PM) directive used by every other 12-hour format here
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
    )

# coarser formats (bare year or month) additionally accepted where a
# fuzzy date is usable -- presumably the endpoints of a date range;
# TODO confirm against the callers of parsedate
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
112 112
# termination signals are converted into this exception so that normal
# exception handling and cleanup get a chance to run (the handlers that
# raise it are installed elsewhere -- not visible in this file)
class SignalInterrupt(Exception):
    """Exception raised on SIGTERM and SIGHUP."""
115 115
# like SafeConfigParser but with case-sensitive keys
class configparser(ConfigParser.SafeConfigParser):
    def optionxform(self, optionstr):
        # SafeConfigParser lower-cases option names through optionxform
        # by default; returning the name unchanged keeps keys
        # case-sensitive
        return optionstr
120 120
def cachefunc(func):
    '''cache the result of function calls

    Memoizes on positional arguments, which must be hashable; the
    cache is unbounded for the lifetime of the returned wrapper.
    '''
    # XXX doesn't handle keywords args
    cache = {}
    # func_code is the Python 2 spelling of func.__code__
    if func.func_code.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
139 139
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    (pout, pin) = popen2.popen2(cmd, -1, 'b')
    # feed the input from a second thread so a filter that writes a lot
    # of output before reading all its input cannot deadlock both pipes
    def writer():
        try:
            pin.write(s)
            pin.close()
        except IOError, inst:
            # a filter may legitimately exit before consuming all input;
            # only EPIPE is expected, anything else is a real error
            if inst.errno != errno.EPIPE:
                raise
    # we should use select instead on UNIX, but this will work on most
    # systems, including Windows
    w = threading.Thread(target=writer)
    w.start()
    f = pout.read()
    pout.close()
    w.join()
    return f
159 159
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explain_exit(code)))
        return open(outname, 'rb').read()
    finally:
        # best-effort cleanup of the temp files: only swallow filesystem
        # errors -- the old bare 'except: pass' also hid signals and
        # programming errors
        try:
            if inname: os.unlink(inname)
        except OSError: pass
        try:
            if outname: os.unlink(outname)
        except OSError: pass
186 186
# maps a filter-spec prefix to its implementation; filter() dispatches
# on these prefixes and otherwise falls back to a plain pipe
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
191 191
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, impl in filtertable.iteritems():
        if not cmd.startswith(prefix):
            continue
        # strip the dispatch prefix and any whitespace after it
        return impl(s, cmd[len(prefix):].lstrip())
    # no recognized prefix: treat cmd as a plain pipe command
    return pipefilter(s, cmd)
198 198
def find_in_path(name, path, default=None):
    '''find name in search path. path can be string (will be split
    with os.pathsep), or iterable thing that returns strings. if name
    found, return path to name. else return default.'''
    if isinstance(path, str):
        path = path.split(os.pathsep)
    for directory in path:
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return candidate
    return default
210 210
def binary(s):
    """return true if a string is binary data using diff's heuristic"""
    # diff's heuristic: any NUL byte within the first 4k marks the
    # content as binary
    return bool(s) and '\0' in s[:4096]
216 216
def unique(g):
    """return the uniq elements of iterable g"""
    seen = {}
    result = []
    for item in g:
        # keep only the first occurrence, preserving order
        if item in seen:
            continue
        seen[item] = 1
        result.append(item)
    return result
226 226
class Abort(Exception):
    """Raised if a command needs to print an error and exit."""

class UnexpectedOutput(Abort):
    """Raised to print an error with part of output and exit."""
    # callers pass two args: a message and the offending output
    # fragment (see the protocol-parsing raise sites)
232 232
def always(fn):
    """Constant-true predicate: accept any name."""
    return True

def never(fn):
    """Constant-false predicate: reject any name."""
    return False
235 235
def patkind(name, dflt_pat='glob'):
    """Split a string into an optional pattern kind prefix and the
    actual pattern."""
    for kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre'):
        if name.startswith(kind + ':'):
            # NB: prefixed names yield a two-element *list* (split),
            # unprefixed ones a tuple -- callers unpack either way
            return name.split(':', 1)
    return dflt_pat, name
242 242
def globre(pat, head='^', tail='$'):
    """convert a glob pattern into a regexp

    head and tail are prepended/appended to the generated expression
    ('^'/'$' by default, giving a fully anchored pattern).
    """
    i, n = 0, len(pat)
    res = ''
    group = False
    def peek(): return i < n and pat[i]
    while i < n:
        c = pat[i]
        i = i+1
        if c == '*':
            if peek() == '*':
                # '**' crosses path separators
                i += 1
                res += '.*'
            else:
                # plain '*' stops at a '/'
                res += '[^/]*'
        elif c == '?':
            res += '.'
        elif c == '[':
            # scan ahead for the closing ']'; a leading '!' or ']'
            # belongs to the class body
            j = i
            if j < n and pat[j] in '!]':
                j += 1
            while j < n and pat[j] != ']':
                j += 1
            if j >= n:
                # unterminated class: treat '[' as a literal
                res += '\\['
            else:
                stuff = pat[i:j].replace('\\','\\\\')
                i = j + 1
                if stuff[0] == '!':
                    # glob negation -> regex class complement
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    # escape a literal leading '^'
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        elif c == '{':
            group = True
            res += '(?:'
        elif c == '}' and group:
            res += ')'
            group = False
        elif c == ',' and group:
            # '{a,b}' alternation
            res += '|'
        elif c == '\\':
            # backslash escapes the following character, if any
            p = peek()
            if p:
                i += 1
                res += re.escape(p)
            else:
                res += re.escape(c)
        else:
            res += re.escape(c)
    return head + res + tail
294 294
# characters whose presence marks a name as a glob pattern rather than
# a literal path (used via contains_glob in the matcher machinery)
_globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
296 296
def pathto(n1, n2):
    '''return the relative path from one place to another.
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.
    '''
    if not n1:
        return localpath(n2)
    src = n1.split(os.sep)
    dst = n2.split('/')
    # find the length of the shared leading component run
    common = 0
    limit = min(len(src), len(dst))
    while common < limit and src[common] == dst[common]:
        common += 1
    # climb out of what remains of src, then descend into dst
    climb = ['..'] * (len(src) - common)
    return os.sep.join(climb + dst[common:])
312 312
def canonpath(root, cwd, myname):
    """return the canonical path of myname, given cwd and root

    The result is relative to root and '/'-separated (pconvert), and is
    audited (audit_path) against escaping the repository.  Raises Abort
    when myname lies outside root.
    """
    # normalize root so it always ends with exactly one separator
    if root == os.sep:
        rootsep = os.sep
    elif root.endswith(os.sep):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if name != rootsep and name.startswith(rootsep):
        # fast path: textual prefix match against root
        name = name[len(rootsep):]
        audit_path(name)
        return pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows).  For each
        # `name', compare dev/inode numbers.  If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                break
            if samestat(name_st, root_st):
                rel.reverse()
                name = os.path.join(*rel)
                audit_path(name)
                return pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                # reached the filesystem root without meeting repo root
                break
            name = dirname

        raise Abort('%s not under root' % myname)
357 357
def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
    """Build a match 3-tuple (see _matcher) using 'glob' as the default
    pattern kind."""
    return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
360 360
def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
    """Like matcher(), but pick the default pattern kind appropriate
    for command-line arguments: 'glob' on Windows, 'relpath' elsewhere."""
    dflt_pat = 'relpath'
    if os.name == 'nt':
        dflt_pat = 'glob'
    return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
367 367
def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
    """build a function to match a set of file patterns

    arguments:
    canonroot - the canonical root of the tree you're matching against
    cwd - the current working directory, if relevant
    names - patterns to find
    inc - patterns to include
    exc - patterns to exclude
    head - a regex to prepend to patterns to control whether a match is rooted

    a pattern is one of:
    'glob:<rooted glob>'
    're:<rooted regexp>'
    'path:<rooted path>'
    'relglob:<relative glob>'
    'relpath:<relative path>'
    'relre:<relative regexp>'
    '<rooted path or regexp>'

    returns:
    a 3-tuple containing
    - list of explicit non-pattern names passed in
    - a bool match(filename) function
    - a bool indicating if any patterns were passed in

    todo:
    make head regex a rooted bool
    """

    def contains_glob(name):
        # True if any glob metacharacter appears in name
        for c in name:
            if c in _globchars: return True
        return False

    def regex(kind, name, tail):
        '''convert a pattern into a regular expression'''
        if kind == 're':
            return name
        elif kind == 'path':
            # exact path or anything beneath it
            return '^' + re.escape(name) + '(?:/|$)'
        elif kind == 'relglob':
            return head + globre(name, '(?:|.*/)', tail)
        elif kind == 'relpath':
            return head + re.escape(name) + tail
        elif kind == 'relre':
            if name.startswith('^'):
                return name
            return '.*' + name
        return head + globre(name, '', tail)

    def matchfn(pats, tail):
        """build a matching function from a set of patterns"""
        if not pats:
            return
        matches = []
        for k, p in pats:
            try:
                pat = '(?:%s)' % regex(k, p, tail)
                matches.append(re.compile(pat).match)
            except re.error:
                if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
                else: raise Abort("invalid pattern (%s): %s" % (k, p))

        def buildfn(text):
            # first successful match wins; returns the match object
            for m in matches:
                r = m(text)
                if r:
                    return r

        return buildfn

    def globprefix(pat):
        '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
        root = []
        for p in pat.split(os.sep):
            if contains_glob(p): break
            root.append(p)
        return '/'.join(root)

    pats = []
    files = []
    roots = []
    for kind, name in [patkind(p, dflt_pat) for p in names]:
        if kind in ('glob', 'relpath'):
            name = canonpath(canonroot, cwd, name)
            if name == '':
                # pattern resolves to the repo root: match everything
                kind, name = 'glob', '**'
        if kind in ('glob', 'path', 're'):
            pats.append((kind, name))
        if kind == 'glob':
            root = globprefix(name)
            if root: roots.append(root)
        elif kind == 'relpath':
            files.append((kind, name))
            roots.append(name)

    patmatch = matchfn(pats, '$') or always
    filematch = matchfn(files, '(?:/|$)') or always
    incmatch = always
    if inc:
        inckinds = [patkind(canonpath(canonroot, cwd, i)) for i in inc]
        incmatch = matchfn(inckinds, '(?:/|$)')
    excmatch = lambda fn: False
    if exc:
        exckinds = [patkind(canonpath(canonroot, cwd, x)) for x in exc]
        excmatch = matchfn(exckinds, '(?:/|$)')

    return (roots,
            lambda fn: (incmatch(fn) and not excmatch(fn) and
                        (fn.endswith('/') or
                         (not pats and not files) or
                         (pats and patmatch(fn)) or
                         (files and filematch(fn)))),
            (inc or exc or (pats and pats != [('glob', '**')])) and True)
483 483
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.'''
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val in (None, False):
            return '0'
        if val == True:
            return '1'
        return str(val)
    # remember current values of the overridden env vars (None = unset)
    oldenv = {}
    for k in environ:
        oldenv[k] = os.environ.get(k)
    if cwd is not None:
        oldcwd = os.getcwd()
    origcmd = cmd
    if os.name == 'nt':
        cmd = '"%s"' % cmd
    try:
        for k, v in environ.iteritems():
            os.environ[k] = py2shell(v)
        if cwd is not None and oldcwd != cwd:
            os.chdir(cwd)
        rc = os.system(cmd)
        if rc and onerr:
            errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                                explain_exit(rc)[0])
            if errprefix:
                errmsg = '%s: %s' % (errprefix, errmsg)
            # duck-type: a ui object has warn(); anything else is
            # treated as an exception class to raise
            try:
                onerr.warn(errmsg + '\n')
            except AttributeError:
                raise onerr(errmsg)
        return rc
    finally:
        # always restore environment and working directory
        for k, v in oldenv.iteritems():
            if v is None:
                del os.environ[k]
            else:
                os.environ[k] = v
        if cwd is not None and oldcwd != cwd:
            os.chdir(oldcwd)
530 530
def rename(src, dst):
    """forcibly rename a file, replacing dst if it exists"""
    try:
        os.rename(src, dst)
    except OSError, err:
        # on windows, rename to existing file is not allowed, so we
        # must delete destination first. but if file is open, unlink
        # schedules it for delete but does not delete it. rename
        # happens immediately even for open files, so we create
        # temporary file, delete it, rename destination to that name,
        # then delete that. then rename is safe to do.
        fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
        os.close(fd)
        os.unlink(temp)
        os.rename(dst, temp)
        os.unlink(temp)
        os.rename(src, dst)
548 548
def unlink(f):
    """unlink and remove the directory if it is empty"""
    os.unlink(f)
    parent = os.path.dirname(f)
    # removedirs() prunes empty parent directories upward; it stops
    # (raising OSError) at the first non-empty one, which we ignore
    try:
        os.removedirs(parent)
    except OSError:
        pass
557 557
def copyfile(src, dest):
    """copy a file, preserving mode

    Raises Abort if shutil reports a copy error.
    """
    try:
        shutil.copyfile(src, dest)
        shutil.copymode(src, dest)
    except shutil.Error:
        # BUG FIX: this module *is* util -- the original raised
        # util.Abort, a NameError at runtime.  Raise the local Abort.
        # (sys.exc_info() keeps the except clause valid on both
        # Python 2 and 3.)
        inst = sys.exc_info()[1]
        raise Abort(str(inst))
565 565
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""

    # default: hardlink only when src and dst live on the same device
    if hardlink is None:
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    if os.path.isdir(src):
        os.mkdir(dst)
        for name in os.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            copyfiles(srcname, dstname, hardlink)
    else:
        if hardlink:
            try:
                os_link(src, dst)
            except (IOError, OSError):
                # linking failed (e.g. cross-device): fall back to a
                # plain copy for this and all remaining files
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
588 588
def audit_path(path):
    """Abort if path contains dangerous components

    Rejects absolute/drive-letter paths, paths starting with '.hg' or
    an empty component, and any '..' component.
    """
    parts = os.path.normcase(path).split(os.sep)
    if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
        or os.pardir in parts):
        raise Abort(_("path contains illegal component: %s\n") % path)
595 595
def _makelock_file(info, pathname):
    """portable lock fallback: exclusively create pathname holding info

    O_EXCL makes creation atomic; fails with EEXIST if the lock exists.
    """
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
600 600
def _readlock_file(pathname):
    """read lock info from a regular lock file (non-symlink fallback)"""
    return posixfile(pathname).read()
603 603
def nlinks(pathname):
    """Return number of hardlinks for the given file."""
    # lstat: report on the entry itself, never a symlink target
    st = os.lstat(pathname)
    return st.st_nlink
607 607
# os.link is missing on some platforms (e.g. old Windows); provide a
# stub that always fails so callers can fall back to copying
if hasattr(os, 'link'):
    os_link = os.link
else:
    def os_link(src, dst):
        raise OSError(0, _("Hardlinks not supported"))
613 613
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # fake/wrapped file object: fall back to stat'ing by name
        return os.stat(fp.name)
    return os.fstat(fd)
620 620
621 621 posixfile = file
622 622
def is_win_9x():
    '''return true if run on windows 95, 98 or me.'''
    try:
        # getwindowsversion()[3] is the platform field
        return sys.getwindowsversion()[3] == 1
    except AttributeError:
        # non-Windows Python has no getwindowsversion(); heuristic:
        # the 9x family used command.com as its shell
        return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
629 629
# optional hook (set by platform code) used when getpass.getuser fails
getuser_fallback = None

def getuser():
    '''return name of current user'''
    try:
        return getpass.getuser()
    except ImportError:
        # import of pwd will fail on windows - try fallback
        if getuser_fallback:
            return getuser_fallback()
        # raised if win32api not available
        raise Abort(_('user name not available - set USERNAME '
                      'environment variable'))
643 643
def username(uid=None):
    """Return the name of the user with the given uid.

    If uid is None, return the name of the current user.
    Returns the numeric uid as a string for unknown uids, and None on
    platforms without the pwd module (e.g. Windows).
    """
    try:
        import pwd
    except ImportError:
        return None
    if uid is None:
        uid = os.getuid()
    try:
        return pwd.getpwuid(uid)[0]
    except KeyError:
        return str(uid)
658 658
def groupname(gid=None):
    """Return the name of the group with the given gid.

    If gid is None, return the name of the current group.
    Returns the numeric gid as a string for unknown gids, and None on
    platforms without the grp module (e.g. Windows).
    """
    try:
        import grp
    except ImportError:
        return None
    if gid is None:
        gid = os.getgid()
    try:
        return grp.getgrgid(gid)[0]
    except KeyError:
        return str(gid)
673 673
674 674 # File system features
675 675
def checkfolding(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.  Returns True when the filesystem is
    case-sensitive.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    p2 = os.path.join(d, b.upper())
    if path == p2:
        p2 = os.path.join(d, b.lower())
    try:
        s2 = os.stat(p2)
        # same stat result => the case-folded name reaches the same
        # file, so the filesystem folds case
        if s2 == s1:
            return False
        return True
    except:
        # stat of the folded name failed: treat as case-sensitive
        # (deliberately broad: any failure means "not the same file")
        return True
695 695
def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)
    """
    fh, fn = tempfile.mkstemp("", "", path)
    os.close(fh)
    m = os.stat(fn).st_mode
    # flip the exec bits and see whether the change sticks
    os.chmod(fn, m ^ 0111)
    r = (os.stat(fn).st_mode != m)
    os.unlink(fn)
    return r
709 709
def execfunc(path, fallback):
    '''return an is_exec() function with default to fallback'''
    # if the filesystem cannot track exec bits, use the caller's
    # fallback (typically manifest-based) instead of stat'ing files
    if not checkexec(path):
        return fallback
    return lambda x: is_exec(os.path.join(path, x))
715 715
def checklink(path):
    """check whether the given path is on a symlink-capable filesystem"""
    # mktemp is not racy because symlink creation will fail if the
    # file already exists
    probe = tempfile.mktemp(dir=path)
    try:
        os.symlink(".", probe)
        os.unlink(probe)
    except OSError:
        return False
    return True
727 727
def linkfunc(path, fallback):
    '''return an is_link() function with default to fallback'''
    # mirror execfunc: only stat for symlinks on filesystems that
    # can actually hold them
    if not checklink(path):
        return fallback
    return lambda x: is_link(os.path.join(path, x))
733 733
734 734 # Platform specific variants
735 735 if os.name == 'nt':
736 736 import msvcrt
737 737 nulldev = 'NUL:'
738 738
    class winstdout:
        '''stdout on windows misbehaves if sent through a pipe'''

        def __init__(self, fp):
            # wrapped real stdout
            self.fp = fp

        def __getattr__(self, key):
            # delegate everything else to the wrapped file
            return getattr(self.fp, key)

        def close(self):
            try:
                self.fp.close()
            except: pass

        def write(self, s):
            try:
                return self.fp.write(s)
            except IOError, inst:
                # NOTE(review): errno 0 appears to be how a closed pipe
                # surfaces on windows here -- translate it to EPIPE
                if inst.errno != 0: raise
                self.close()
                raise IOError(errno.EPIPE, 'Broken pipe')
760 760
761 761 sys.stdout = winstdout(sys.stdout)
762 762
763 763 def system_rcpath():
764 764 try:
765 765 return system_rcpath_win32()
766 766 except:
767 767 return [r'c:\mercurial\mercurial.ini']
768 768
769 769 def os_rcpath():
770 770 '''return default os-specific hgrc search path'''
771 771 path = system_rcpath()
772 772 path.append(user_rcpath())
773 773 userprofile = os.environ.get('USERPROFILE')
774 774 if userprofile:
775 775 path.append(os.path.join(userprofile, 'mercurial.ini'))
776 776 return path
777 777
778 778 def user_rcpath():
779 779 '''return os-specific hgrc search path to the user dir'''
780 780 return os.path.join(os.path.expanduser('~'), 'mercurial.ini')
781 781
782 782 def parse_patch_output(output_line):
783 783 """parses the output produced by patch and returns the file name"""
784 784 pf = output_line[14:]
785 785 if pf[0] == '`':
786 786 pf = pf[1:-1] # Remove the quotes
787 787 return pf
788 788
789 789 def testpid(pid):
790 790 '''return False if pid dead, True if running or not known'''
791 791 return True
792 792
793 793 def set_exec(f, mode):
794 794 pass
795 795
796 796 def set_link(f, mode):
797 797 pass
798 798
799 799 def set_binary(fd):
800 800 msvcrt.setmode(fd.fileno(), os.O_BINARY)
801 801
802 802 def pconvert(path):
803 803 return path.replace("\\", "/")
804 804
805 805 def localpath(path):
806 806 return path.replace('/', '\\')
807 807
808 808 def normpath(path):
809 809 return pconvert(os.path.normpath(path))
810 810
811 811 makelock = _makelock_file
812 812 readlock = _readlock_file
813 813
814 814 def samestat(s1, s2):
815 815 return False
816 816
817 817 def shellquote(s):
818 818 return '"%s"' % s.replace('"', '\\"')
819 819
820 820 def explain_exit(code):
821 821 return _("exited with status %d") % code, code
822 822
823 823 # if you change this stub into a real check, please try to implement the
824 824 # username and groupname functions above, too.
825 825 def isowner(fp, st=None):
826 826 return True
827 827
828 828 try:
829 829 # override functions with win32 versions if possible
830 830 from util_win32 import *
831 831 if not is_win_9x():
832 832 posixfile = posixfile_nt
833 833 except ImportError:
834 834 pass
835 835
836 836 else:
837 837 nulldev = '/dev/null'
838 838 _umask = os.umask(0)
839 839 os.umask(_umask)
840 840
    def rcfiles(path):
        """return path/hgrc plus any *.rc files under path/hgrc.d"""
        rcs = [os.path.join(path, 'hgrc')]
        rcdir = os.path.join(path, 'hgrc.d')
        try:
            rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
                        if f.endswith(".rc")])
        except OSError:
            # hgrc.d missing or unreadable: just the single hgrc
            pass
        return rcs
850 850
    def os_rcpath():
        '''return default os-specific hgrc search path'''
        path = []
        # old mod_python does not set sys.argv
        if len(getattr(sys, 'argv', [])) > 0:
            # installation-relative etc/mercurial next to the binary
            path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
                                '/../etc/mercurial'))
        path.extend(rcfiles('/etc/mercurial'))
        path.append(os.path.expanduser('~/.hgrc'))
        path = [os.path.normpath(f) for f in path]
        return path
862 862
    def parse_patch_output(output_line):
        """parses the output produced by patch and returns the file name"""
        # skip the fixed "patching file " prefix (14 characters)
        pf = output_line[14:]
        if pf.startswith("'") and pf.endswith("'") and " " in pf:
            pf = pf[1:-1] # Remove the quotes
        return pf
869 869
    def is_exec(f):
        """check whether a file is executable"""
        # 0100 is the owner-execute bit (S_IXUSR); lstat so symlinks
        # are judged by their own mode
        return (os.lstat(f).st_mode & 0100 != 0)
873 873
    def set_exec(f, mode):
        """set (mode true) or clear (mode false) a file's exec bits"""
        s = os.lstat(f).st_mode
        # already in the requested state: avoid a needless chmod
        if (s & 0100 != 0) == mode:
            return
        if mode:
            # Turn on +x for every +r bit when making a file executable
            # and obey umask.
            os.chmod(f, s | (s & 0444) >> 2 & ~_umask)
        else:
            os.chmod(f, s & 0666)
884 884
    def is_link(f):
        """check whether a file is a symlink"""
        # 0120000 is S_IFLNK; equality with the full mask identifies
        # the symlink file type
        return (os.lstat(f).st_mode & 0120000 == 0120000)
888 888
    def set_link(f, mode):
        """make a file a symbolic link/regular file

        if a file is changed to a link, its contents become the link data
        if a link is changed to a file, its link data become its contents
        """

        m = is_link(f)
        # already the requested kind: nothing to do
        if m == bool(mode):
            return

        if mode: # switch file to link
            data = file(f).read()
            os.unlink(f)
            os.symlink(data, f)
        else:
            data = os.readlink(f)
            os.unlink(f)
            file(f, "w").write(data)
908 908
909 909 def set_binary(fd):
910 910 pass
911 911
912 912 def pconvert(path):
913 913 return path
914 914
915 915 def localpath(path):
916 916 return path
917 917
918 918 normpath = os.path.normpath
919 919 samestat = os.path.samestat
920 920
    def makelock(info, pathname):
        """take a lock by symlinking info to pathname

        Falls back to an exclusive regular file when the filesystem
        does not support symlinks; EEXIST (lock already held) always
        propagates.
        """
        try:
            os.symlink(info, pathname)
        except OSError, why:
            if why.errno == errno.EEXIST:
                raise
            else:
                _makelock_file(info, pathname)
929 929
    def readlock(pathname):
        """read lock info; handles both symlink and file-based locks"""
        try:
            return os.readlink(pathname)
        except OSError, why:
            # EINVAL: pathname is not a symlink, so it must be the
            # regular-file fallback written by _makelock_file
            if why.errno == errno.EINVAL:
                return _readlock_file(pathname)
            else:
                raise
938 938
939 939 def shellquote(s):
940 940 return "'%s'" % s.replace("'", "'\\''")
941 941
942 942 def testpid(pid):
943 943 '''return False if pid dead, True if running or not sure'''
944 944 try:
945 945 os.kill(pid, 0)
946 946 return True
947 947 except OSError, inst:
948 948 return inst.errno != errno.ESRCH
949 949
    def explain_exit(code):
        """return a 2-tuple (desc, code) describing a process's status"""
        # decode the raw os.system()/wait() status word
        if os.WIFEXITED(code):
            val = os.WEXITSTATUS(code)
            return _("exited with status %d") % val, val
        elif os.WIFSIGNALED(code):
            val = os.WTERMSIG(code)
            return _("killed by signal %d") % val, val
        elif os.WIFSTOPPED(code):
            val = os.WSTOPSIG(code)
            return _("stopped by signal %d") % val, val
        raise ValueError(_("invalid exit code"))
962 962
    def isowner(fp, st=None):
        """Return True if the file object f belongs to the current user.

        The return value of a util.fstat(f) may be passed as the st argument.
        """
        if st is None:
            st = fstat(fp)
        return st.st_uid == os.getuid()
971 971
def _buildencodefun():
    """build (encode, decode) functions for store filenames

    Escapes Windows-reserved and non-printable characters as ~XX hex
    pairs and uppercase letters as '_<lower>' (with '_' itself doubled
    via the same scheme), so encoded names are safe on case-folding,
    Windows-restricted filesystems.
    """
    e = '_'
    win_reserved = [ord(x) for x in '\\:*?"<>|']
    cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
    for x in (range(32) + range(126, 256) + win_reserved):
        cmap[chr(x)] = "~%02x" % x
    for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
        cmap[chr(x)] = e + chr(x).lower()
    # dmap inverts cmap; encoded tokens are 1-3 characters long
    dmap = {}
    for k, v in cmap.iteritems():
        dmap[v] = k
    def decode(s):
        i = 0
        while i < len(s):
            # try the shortest token first; for/else raises on
            # undecodable input
            for l in xrange(1, 4):
                try:
                    yield dmap[s[i:i+l]]
                    i += l
                    break
                except KeyError:
                    pass
            else:
                raise KeyError
    return (lambda s: "".join([cmap[c] for c in s]),
            lambda s: "".join(list(decode(s))))
997 997
998 998 encodefilename, decodefilename = _buildencodefun()
999 999
def encodedopener(openerfn, fn):
    """wrap an opener so every path is passed through fn first"""
    def wrapped(path, *args, **kw):
        return openerfn(fn(path), *args, **kw)
    return wrapped
1004 1004
def opener(base, audit=True):
    """
    return a function that opens files relative to base

    this function is used to hide the details of COW semantics and
    remote file access from higher level code.
    """
    p = base
    audit_p = audit

    def mktempcopy(name):
        # copy `name' to a temp file in the same directory, preserving
        # its mode; used to break hardlinks before writing
        d, fn = os.path.split(name)
        fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
        os.close(fd)
        ofp = posixfile(temp, "wb")
        try:
            try:
                ifp = posixfile(name, "rb")
            except IOError, inst:
                # ensure the error names the file that failed to open
                if not getattr(inst, 'filename', None):
                    inst.filename = name
                raise
            for chunk in filechunkiter(ifp):
                ofp.write(chunk)
            ifp.close()
            ofp.close()
        except:
            # best effort: do not leave the temp file behind on failure
            try: os.unlink(temp)
            except: pass
            raise
        st = os.lstat(name)
        os.chmod(temp, st.st_mode)
        return temp

    class atomictempfile(posixfile):
        """the file will only be copied when rename is called"""
        def __init__(self, name, mode):
            self.__name = name
            self.temp = mktempcopy(name)
            posixfile.__init__(self, self.temp, mode)
        def rename(self):
            if not self.closed:
                posixfile.close(self)
                rename(self.temp, localpath(self.__name))
        def __del__(self):
            # discarded without rename(): throw the temp copy away
            if not self.closed:
                try:
                    os.unlink(self.temp)
                except: pass
                posixfile.close(self)

    class atomicfile(atomictempfile):
        """the file will only be copied on close"""
        def __init__(self, name, mode):
            atomictempfile.__init__(self, name, mode)
        def close(self):
            self.rename()
        def __del__(self):
            self.rename()

    def o(path, mode="r", text=False, atomic=False, atomictemp=False):
        if audit_p:
            audit_path(path)
        f = os.path.join(p, path)

        if not text:
            mode += "b" # for that other OS

        if mode[0] != "r":
            try:
                nlink = nlinks(f)
            except OSError:
                # file does not exist yet: just make sure its
                # directory does
                d = os.path.dirname(f)
                if not os.path.isdir(d):
                    os.makedirs(d)
            else:
                if atomic:
                    return atomicfile(f, mode)
                elif atomictemp:
                    return atomictempfile(f, mode)
                if nlink > 1:
                    # breaking a hardlink gives copy-on-write behavior
                    rename(mktempcopy(f), f)
        return posixfile(f, mode)

    return o
1090 1090
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter, targetsize = 2**16):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        self.in_iter = iter(in_iter)
        # buf holds data already pulled from in_iter but not yet read
        self.buf = ''
        self.targetsize = int(targetsize)
        if self.targetsize <= 0:
            raise ValueError(_("targetsize must be greater than 0, was %d") %
                             targetsize)
        # set once in_iter is exhausted
        self.iterempty = False

    def fillbuf(self):
        """Ignore target size; read every chunk from iterator until empty."""
        if not self.iterempty:
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            for ch in self.in_iter:
                collector.write(ch)
            self.buf = collector.getvalue()
            self.iterempty = True

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        if l > len(self.buf) and not self.iterempty:
            # Clamp to a multiple of self.targetsize
            targetsize = self.targetsize * ((l // self.targetsize) + 1)
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            collected = len(self.buf)
            for chunk in self.in_iter:
                collector.write(chunk)
                collected += len(chunk)
                if collected >= targetsize:
                    break
            if collected < targetsize:
                self.iterempty = True
            self.buf = collector.getvalue()
        # buffer() avoids copying the unread remainder
        s, self.buf = self.buf[:l], buffer(self.buf, l)
        return s
1135 1135
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data).  Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        # stop when the limit is used up or the file runs dry
        if not nbytes:
            break
        s = f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1152 1152
def makedate():
    """return the current time as a (unixtime, tzoffset) tuple"""
    lt = time.localtime()
    # lt[8] is the DST flag; pick the matching zone offset
    if lt[8] == 1 and time.daylight:
        tz = time.altzone
    else:
        tz = time.timezone
    return time.mktime(lt), tz
1160 1160
def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    # render in the stored zone by shifting the UTC time by -tz
    s = time.strftime(format, time.gmtime(float(t) - tz))
    if timezone:
        s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
    return s
1171 1171
def strdate(string, format, defaults):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    def timezone(string):
        # recognize a trailing "+HHMM"/"-HHMM", "GMT" or "UTC" token;
        # returns the offset in seconds, or None if absent
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            tz = int(tz)
            offset = - 3600 * (tz / 100) - 60 * (tz % 100)
            return offset
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset != None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    for part in defaults:
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1206 1206
def parsedate(string, formats=None, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    The date may be a "unixtime offset" string or in one of the specified
    formats."""
    if not string:
        return 0, 0
    if not formats:
        formats = defaultdateformats
    string = string.strip()
    try:
        # internal "unixtime offset" form
        when, offset = map(int, string.split(' '))
    except ValueError:
        # fill out defaults
        if not defaults:
            defaults = {}
        now = makedate()
        # each entry names the strptime letters it can satisfy,
        # e.g. "mb" = month as number or name
        for part in "d mb yY HI M S".split():
            if part not in defaults:
                if part[0] in "HMS":
                    defaults[part] = "00"
                elif part[0] in "dm":
                    defaults[part] = "1"
                else:
                    defaults[part] = datestr(now, "%" + part[0], False)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(string, format, defaults)
            except ValueError:
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r ') % string)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1250 1250
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    """

    def lower(date):
        # earliest timestamp the (possibly partial) date can mean
        return parsedate(date, extendeddateformats)[0]

    def upper(date):
        # latest timestamp: fill missing fields with their maxima,
        # trying month lengths from 31 down to handle short months
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in "31 30 29".split():
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    if date[0] == "<":
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match anywhere within its span of accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1298 1298
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop an email domain
    user = user.split('@', 1)[0]
    # keep what follows '<' in "Real Name <addr>" style strings
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    # truncate at the first space, then at the first dot
    user = user.split(' ', 1)[0]
    user = user.split('.', 1)[0]
    return user
1314 1314
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    if len(text) > maxlength:
        # reserve three characters for the trailing "..."
        return text[:maxlength - 3] + "..."
    return text
1321 1321
def walkrepos(path):
    '''yield every hg repository under path, recursively.'''
    def raise_on_root(err):
        # an error on path itself is fatal; deeper errors are skipped
        if err.filename == path:
            raise err

    for root, dirs, files in os.walk(path, onerror=raise_on_root):
        if '.hg' in dirs:
            yield root
            # found a repo: do not descend any further below it
            dirs[:] = []
1334 1334
# cached result of rcpath(); computed once per process
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p: continue
                if os.path.isdir(p):
                    # directories contribute all their *.rc files
                    for f in os.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = os_rcpath()
    return _rcpath
1358 1358
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    # (threshold multiplier, divisor, format): the first row whose
    # multiplier * divisor the value reaches decides the precision,
    # so e.g. 100+ GB shows no decimals, 1-10 GB shows two
    units = (
        (100, 1<<30, _('%.0f GB')),
        (10, 1<<30, _('%.1f GB')),
        (1, 1<<30, _('%.2f GB')),
        (100, 1<<20, _('%.0f MB')),
        (10, 1<<20, _('%.1f MB')),
        (1, 1<<20, _('%.2f MB')),
        (100, 1<<10, _('%.0f KB')),
        (10, 1<<10, _('%.1f KB')),
        (1, 1<<10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
        )

    for multiplier, divisor, format in units:
        if nbytes >= divisor * multiplier:
            return format % (nbytes / float(divisor))
    return units[-1][2] % nbytes
1379 1379
def drop_scheme(scheme, path):
    """strip a leading 'scheme:' (and an optional '//') from path"""
    prefix = scheme + ':'
    if path.startswith(prefix):
        path = path[len(prefix):]
        if path.startswith('//'):
            path = path[2:]
    return path
@@ -1,67 +1,66 b''
1 1 % commit date test
2 2 abort: impossible time zone offset: 4444444
3 3 transaction abort!
4 4 rollback completed
5 5 abort: invalid date: '1\t15.1'
6 6 transaction abort!
7 7 rollback completed
8 8 abort: invalid date: 'foo bar'
9 9 transaction abort!
10 10 rollback completed
11 11 nothing changed
12 12 % partial commit test
13 13 trouble committing bar!
14 14 abort: No such file or directory: .../test/bar
15 15 adding dir/file
16 16 dir/file
17 17 adding dir.file
18 18 abort: no match under directory .../test/dir!
19 19 abort: no match under directory .../test/bleh!
20 20 abort: no match under directory .../test/dir2!
21 21 dir/file
22 22 does-not-exist: No such file or directory
23 23 abort: file .../test/does-not-exist not found!
24 baz: unsupported file type (type is symbolic link)
25 abort: can't commit .../test/baz: unsupported file type!
24 abort: file .../test/baz not tracked!
26 25 abort: file .../test/quux not tracked!
27 26 dir/file
28 27 % partial subdir commit test
29 28 adding bar/bar
30 29 adding foo/foo
31 30 % subdir log 1
32 31 changeset: 0:6ef3cb06bb80
33 32 user: test
34 33 date: Mon Jan 12 13:46:40 1970 +0000
35 34 files: foo/foo
36 35 description:
37 36 commit-subdir-1
38 37
39 38
40 39 % subdir log 2
41 40 changeset: 1:f2e51572cf5a
42 41 tag: tip
43 42 user: test
44 43 date: Mon Jan 12 13:46:41 1970 +0000
45 44 files: bar/bar
46 45 description:
47 46 commit-subdir-2
48 47
49 48
50 49 % full log
51 50 changeset: 1:f2e51572cf5a
52 51 tag: tip
53 52 user: test
54 53 date: Mon Jan 12 13:46:41 1970 +0000
55 54 files: bar/bar
56 55 description:
57 56 commit-subdir-2
58 57
59 58
60 59 changeset: 0:6ef3cb06bb80
61 60 user: test
62 61 date: Mon Jan 12 13:46:40 1970 +0000
63 62 files: foo/foo
64 63 description:
65 64 commit-subdir-1
66 65
67 66
@@ -1,15 +1,16 b''
1 adding bar
1 2 adding foo
2 3 adding bomb
3 4 adding a.c
4 5 adding dir/a.o
5 6 adding dir/b.o
7 M dir/b.o
6 8 ! a.c
7 9 ! dir/a.o
8 ! dir/b.o
9 10 ? .hgignore
10 11 a.c: unsupported file type (type is fifo)
11 12 ! a.c
12 13 # test absolute path through symlink outside repo
13 14 A f
14 15 # try symlink outside repo to file inside
15 16 abort: ../z not under root
General Comments 0
You need to be logged in to leave comments. Login now