py3: replace os.environ with encoding.environ (part 1 of 5)...
Pulkit Goyal
r30634:ad15646d default
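Background for the series: under Python 3, os.environ maps str to str, while Mercurial handles paths and environment values internally as bytes, so byte-string lookups such as 'HG_PENDING' in os.environ would no longer behave as they did on Python 2. encoding.environ gives the code a bytes-keyed view of the environment on both major versions. As a rough sketch only (the real definition lives in mercurial/encoding.py; the branches below are a simplification assumed for illustration, not the exact code):

    # Hypothetical simplification of what encoding.environ provides.
    import os
    import sys

    if sys.version_info[0] < 3:
        environ = os.environ     # Python 2: the environment is already bytes
    elif os.supports_bytes_environ:
        environ = os.environb    # Python 3 on POSIX: bytes view of the environment
    else:
        # Python 3 on Windows: no os.environb, so re-encode the str environment
        environ = dict((k.encode('utf-8'), v.encode('utf-8'))
                       for k, v in os.environ.items())

With such a mapping in place, the two call sites changed below can keep using byte strings ('HG_PENDING') as keys unchanged.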
@@ -1,608 +1,607 @@
 # Mercurial bookmark support code
 #
 # Copyright 2008 David Soria Parra <dsp@php.net>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
-import os

 from .i18n import _
 from .node import (
     bin,
     hex,
 )
 from . import (
     encoding,
     error,
     lock as lockmod,
     obsolete,
     util,
 )

 def _getbkfile(repo):
     """Hook so that extensions that mess with the store can hook bm storage.

     For core, this just handles whether we should see pending
     bookmarks or the committed ones. Other extensions (like share)
     may need to tweak this behavior further.
     """
     bkfile = None
-    if 'HG_PENDING' in os.environ:
+    if 'HG_PENDING' in encoding.environ:
         try:
             bkfile = repo.vfs('bookmarks.pending')
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
     if bkfile is None:
         bkfile = repo.vfs('bookmarks')
     return bkfile


 class bmstore(dict):
     """Storage for bookmarks.

     This object should do all bookmark-related reads and writes, so
     that it's fairly simple to replace the storage underlying
     bookmarks without having to clone the logic surrounding
     bookmarks. This type also should manage the active bookmark, if
     any.

     This particular bmstore implementation stores bookmarks as
     {hash}\s{name}\n (the same format as localtags) in
     .hg/bookmarks. The mapping is stored as {name: nodeid}.
     """

     def __init__(self, repo):
         dict.__init__(self)
         self._repo = repo
         try:
             bkfile = _getbkfile(repo)
             for line in bkfile:
                 line = line.strip()
                 if not line:
                     continue
                 if ' ' not in line:
                     repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
                                  % line)
                     continue
                 sha, refspec = line.split(' ', 1)
                 refspec = encoding.tolocal(refspec)
                 try:
                     self[refspec] = repo.changelog.lookup(sha)
                 except LookupError:
                     pass
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
         self._clean = True
         self._active = _readactive(repo, self)
         self._aclean = True

     @property
     def active(self):
         return self._active

     @active.setter
     def active(self, mark):
         if mark is not None and mark not in self:
             raise AssertionError('bookmark %s does not exist!' % mark)

         self._active = mark
         self._aclean = False

     def __setitem__(self, *args, **kwargs):
         self._clean = False
         return dict.__setitem__(self, *args, **kwargs)

     def __delitem__(self, key):
         self._clean = False
         return dict.__delitem__(self, key)

     def recordchange(self, tr):
         """record that bookmarks have been changed in a transaction

         The transaction is then responsible for updating the file content."""
         tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
                             location='plain')
         tr.hookargs['bookmark_moved'] = '1'

     def _writerepo(self, repo):
         """Factored out for extensibility"""
         rbm = repo._bookmarks
         if rbm.active not in self:
             rbm.active = None
             rbm._writeactive()

         with repo.wlock():
             file_ = repo.vfs('bookmarks', 'w', atomictemp=True,
                              checkambig=True)
             try:
                 self._write(file_)
             except: # re-raises
                 file_.discard()
                 raise
             finally:
                 file_.close()

     def _writeactive(self):
         if self._aclean:
             return
         with self._repo.wlock():
             if self._active is not None:
                 f = self._repo.vfs('bookmarks.current', 'w', atomictemp=True,
                                    checkambig=True)
                 try:
                     f.write(encoding.fromlocal(self._active))
                 finally:
                     f.close()
             else:
                 try:
                     self._repo.vfs.unlink('bookmarks.current')
                 except OSError as inst:
                     if inst.errno != errno.ENOENT:
                         raise
         self._aclean = True

     def _write(self, fp):
         for name, node in self.iteritems():
             fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
         self._clean = True
         self._repo.invalidatevolatilesets()

     def expandname(self, bname):
         if bname == '.':
             if self.active:
                 return self.active
             else:
                 raise error.Abort(_("no active bookmark"))
         return bname

 def _readactive(repo, marks):
     """
     Get the active bookmark. We can have an active bookmark that updates
     itself as we commit. This function returns the name of that bookmark.
     It is stored in .hg/bookmarks.current
     """
     mark = None
     try:
         file = repo.vfs('bookmarks.current')
     except IOError as inst:
         if inst.errno != errno.ENOENT:
             raise
         return None
     try:
         # No readline() in osutil.posixfile, reading everything is
         # cheap.
         # Note that it's possible for readlines() here to raise
         # IOError, since we might be reading the active mark over
         # static-http which only tries to load the file when we try
         # to read from it.
         mark = encoding.tolocal((file.readlines() or [''])[0])
         if mark == '' or mark not in marks:
             mark = None
     except IOError as inst:
         if inst.errno != errno.ENOENT:
             raise
         return None
     finally:
         file.close()
     return mark

 def activate(repo, mark):
     """
     Set the given bookmark to be 'active', meaning that this bookmark will
     follow new commits that are made.
     The name is recorded in .hg/bookmarks.current
     """
     repo._bookmarks.active = mark
     repo._bookmarks._writeactive()

 def deactivate(repo):
     """
     Unset the active bookmark in this repository.
     """
     repo._bookmarks.active = None
     repo._bookmarks._writeactive()

 def isactivewdirparent(repo):
     """
     Tell whether the 'active' bookmark (the one that follows new commits)
     points to one of the parents of the current working directory (wdir).

     While this is normally the case, it can on occasion be false; for example,
     immediately after a pull, the active bookmark can be moved to point
     to a place different than the wdir. This is solved by running `hg update`.
     """
     mark = repo._activebookmark
     marks = repo._bookmarks
     parents = [p.node() for p in repo[None].parents()]
     return (mark in marks and marks[mark] in parents)

 def deletedivergent(repo, deletefrom, bm):
     '''Delete divergent versions of bm on nodes in deletefrom.

     Return True if at least one bookmark was deleted, False otherwise.'''
     deleted = False
     marks = repo._bookmarks
     divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]]
     for mark in divergent:
         if mark == '@' or '@' not in mark:
             # can't be divergent by definition
             continue
         if mark and marks[mark] in deletefrom:
             if mark != bm:
                 del marks[mark]
                 deleted = True
     return deleted

 def calculateupdate(ui, repo, checkout):
     '''Return a tuple (targetrev, movemarkfrom) indicating the rev to
     check out and where to move the active bookmark from, if needed.'''
     movemarkfrom = None
     if checkout is None:
         activemark = repo._activebookmark
         if isactivewdirparent(repo):
             movemarkfrom = repo['.'].node()
         elif activemark:
             ui.status(_("updating to active bookmark %s\n") % activemark)
             checkout = activemark
     return (checkout, movemarkfrom)

 def update(repo, parents, node):
     deletefrom = parents
     marks = repo._bookmarks
     update = False
     active = marks.active
     if not active:
         return False

     if marks[active] in parents:
         new = repo[node]
         divs = [repo[b] for b in marks
                 if b.split('@', 1)[0] == active.split('@', 1)[0]]
         anc = repo.changelog.ancestors([new.rev()])
         deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
         if validdest(repo, repo[marks[active]], new):
             marks[active] = new.node()
             update = True

     if deletedivergent(repo, deletefrom, active):
         update = True

     if update:
         lock = tr = None
         try:
             lock = repo.lock()
             tr = repo.transaction('bookmark')
             marks.recordchange(tr)
             tr.close()
         finally:
             lockmod.release(tr, lock)
     return update

 def listbinbookmarks(repo):
     # We may try to list bookmarks on a repo type that does not
     # support it (e.g., statichttprepository).
     marks = getattr(repo, '_bookmarks', {})

     hasnode = repo.changelog.hasnode
     for k, v in marks.iteritems():
         # don't expose local divergent bookmarks
         if hasnode(v) and ('@' not in k or k.endswith('@')):
             yield k, v

 def listbookmarks(repo):
     d = {}
     for book, node in listbinbookmarks(repo):
         d[book] = hex(node)
     return d

 def pushbookmark(repo, key, old, new):
     w = l = tr = None
     try:
         w = repo.wlock()
         l = repo.lock()
         tr = repo.transaction('bookmarks')
         marks = repo._bookmarks
         existing = hex(marks.get(key, ''))
         if existing != old and existing != new:
             return False
         if new == '':
             del marks[key]
         else:
             if new not in repo:
                 return False
             marks[key] = repo[new].node()
         marks.recordchange(tr)
         tr.close()
         return True
     finally:
         lockmod.release(tr, l, w)

 def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
     '''Compare bookmarks between srcmarks and dstmarks

     This returns a tuple "(addsrc, adddst, advsrc, advdst, diverge,
     differ, invalid, same)"; each is a list of bookmarks as below:

     :addsrc: added on src side (removed on dst side, perhaps)
     :adddst: added on dst side (removed on src side, perhaps)
     :advsrc: advanced on src side
     :advdst: advanced on dst side
     :diverge: diverge
     :differ: changed, but changeset referred on src is unknown on dst
     :invalid: unknown on both sides
     :same: same on both sides

     Each element of the lists in the result tuple is a tuple "(bookmark
     name, changeset ID on source side, changeset ID on destination
     side)". Each changeset ID is a 40-character hexadecimal string or
     None.

     Changeset IDs of tuples in "addsrc", "adddst", "differ" or
     "invalid" list may be unknown for repo.

     If "targets" is specified, only bookmarks listed in it are
     examined.
     '''

     if targets:
         bset = set(targets)
     else:
         srcmarkset = set(srcmarks)
         dstmarkset = set(dstmarks)
         bset = srcmarkset | dstmarkset

     results = ([], [], [], [], [], [], [], [])
     addsrc = results[0].append
     adddst = results[1].append
     advsrc = results[2].append
     advdst = results[3].append
     diverge = results[4].append
     differ = results[5].append
     invalid = results[6].append
     same = results[7].append

     for b in sorted(bset):
         if b not in srcmarks:
             if b in dstmarks:
                 adddst((b, None, dstmarks[b]))
             else:
                 invalid((b, None, None))
         elif b not in dstmarks:
             addsrc((b, srcmarks[b], None))
         else:
             scid = srcmarks[b]
             dcid = dstmarks[b]
             if scid == dcid:
                 same((b, scid, dcid))
             elif scid in repo and dcid in repo:
                 sctx = repo[scid]
                 dctx = repo[dcid]
                 if sctx.rev() < dctx.rev():
                     if validdest(repo, sctx, dctx):
                         advdst((b, scid, dcid))
                     else:
                         diverge((b, scid, dcid))
                 else:
                     if validdest(repo, dctx, sctx):
                         advsrc((b, scid, dcid))
                     else:
                         diverge((b, scid, dcid))
             else:
                 # it is too expensive to examine in detail, in this case
                 differ((b, scid, dcid))

     return results

 def _diverge(ui, b, path, localmarks, remotenode):
     '''Return appropriate diverged bookmark for specified ``path``

     This returns None if it fails to assign any divergent
     bookmark name.

     This reuses an already existing one with the "@number" suffix, if
     it refers to ``remotenode``.
     '''
     if b == '@':
         b = ''
     # try to use an @pathalias suffix
     # if an @pathalias already exists, we overwrite (update) it
     if path.startswith("file:"):
         path = util.url(path).path
     for p, u in ui.configitems("paths"):
         if u.startswith("file:"):
             u = util.url(u).path
         if path == u:
             return '%s@%s' % (b, p)

     # assign a unique "@number" suffix newly
     for x in range(1, 100):
         n = '%s@%d' % (b, x)
         if n not in localmarks or localmarks[n] == remotenode:
             return n

     return None

 def unhexlifybookmarks(marks):
     binremotemarks = {}
     for name, node in marks.items():
         binremotemarks[name] = bin(node)
     return binremotemarks

 def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
     ui.debug("checking for updated bookmarks\n")
     localmarks = repo._bookmarks
     (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
      ) = comparebookmarks(repo, remotemarks, localmarks)

     status = ui.status
     warn = ui.warn
     if ui.configbool('ui', 'quietbookmarkmove', False):
         status = warn = ui.debug

     explicit = set(explicit)
     changed = []
     for b, scid, dcid in addsrc:
         if scid in repo: # add remote bookmarks for changes we already have
             changed.append((b, scid, status,
                             _("adding remote bookmark %s\n") % (b)))
         elif b in explicit:
             explicit.remove(b)
             ui.warn(_("remote bookmark %s points to locally missing %s\n")
                     % (b, hex(scid)[:12]))

     for b, scid, dcid in advsrc:
         changed.append((b, scid, status,
                         _("updating bookmark %s\n") % (b)))
     # remove normal movement from explicit set
     explicit.difference_update(d[0] for d in changed)

     for b, scid, dcid in diverge:
         if b in explicit:
             explicit.discard(b)
             changed.append((b, scid, status,
                             _("importing bookmark %s\n") % (b)))
         else:
             db = _diverge(ui, b, path, localmarks, scid)
             if db:
                 changed.append((db, scid, warn,
                                 _("divergent bookmark %s stored as %s\n") %
                                 (b, db)))
             else:
                 warn(_("warning: failed to assign numbered name "
                        "to divergent bookmark %s\n") % (b))
     for b, scid, dcid in adddst + advdst:
         if b in explicit:
             explicit.discard(b)
             changed.append((b, scid, status,
                             _("importing bookmark %s\n") % (b)))
     for b, scid, dcid in differ:
         if b in explicit:
             explicit.remove(b)
             ui.warn(_("remote bookmark %s points to locally missing %s\n")
                     % (b, hex(scid)[:12]))

     if changed:
         tr = trfunc()
         for b, node, writer, msg in sorted(changed):
             localmarks[b] = node
             writer(msg)
         localmarks.recordchange(tr)

 def incoming(ui, repo, other):
     '''Show bookmarks incoming from other to repo
     '''
     ui.status(_("searching for changed bookmarks\n"))

     remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
     r = comparebookmarks(repo, remotemarks, repo._bookmarks)
     addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r

     incomings = []
     if ui.debugflag:
         getid = lambda id: id
     else:
         getid = lambda id: id[:12]
     if ui.verbose:
         def add(b, id, st):
             incomings.append("   %-25s %s %s\n" % (b, getid(id), st))
     else:
         def add(b, id, st):
             incomings.append("   %-25s %s\n" % (b, getid(id)))
     for b, scid, dcid in addsrc:
         # i18n: "added" refers to a bookmark
         add(b, hex(scid), _('added'))
     for b, scid, dcid in advsrc:
         # i18n: "advanced" refers to a bookmark
         add(b, hex(scid), _('advanced'))
     for b, scid, dcid in diverge:
         # i18n: "diverged" refers to a bookmark
         add(b, hex(scid), _('diverged'))
     for b, scid, dcid in differ:
         # i18n: "changed" refers to a bookmark
         add(b, hex(scid), _('changed'))

     if not incomings:
         ui.status(_("no changed bookmarks found\n"))
         return 1

     for s in sorted(incomings):
         ui.write(s)

     return 0

 def outgoing(ui, repo, other):
     '''Show bookmarks outgoing from repo to other
     '''
     ui.status(_("searching for changed bookmarks\n"))

     remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
     r = comparebookmarks(repo, repo._bookmarks, remotemarks)
     addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r

     outgoings = []
     if ui.debugflag:
         getid = lambda id: id
     else:
         getid = lambda id: id[:12]
     if ui.verbose:
         def add(b, id, st):
             outgoings.append("   %-25s %s %s\n" % (b, getid(id), st))
     else:
         def add(b, id, st):
             outgoings.append("   %-25s %s\n" % (b, getid(id)))
     for b, scid, dcid in addsrc:
         # i18n: "added" refers to a bookmark
         add(b, hex(scid), _('added'))
     for b, scid, dcid in adddst:
         # i18n: "deleted" refers to a bookmark
         add(b, ' ' * 40, _('deleted'))
     for b, scid, dcid in advsrc:
         # i18n: "advanced" refers to a bookmark
         add(b, hex(scid), _('advanced'))
     for b, scid, dcid in diverge:
         # i18n: "diverged" refers to a bookmark
         add(b, hex(scid), _('diverged'))
     for b, scid, dcid in differ:
         # i18n: "changed" refers to a bookmark
         add(b, hex(scid), _('changed'))

     if not outgoings:
         ui.status(_("no changed bookmarks found\n"))
         return 1

     for s in sorted(outgoings):
         ui.write(s)

     return 0

 def summary(repo, other):
     '''Compare bookmarks between repo and other for "hg summary" output

     This returns "(# of incoming, # of outgoing)" tuple.
     '''
     remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
     r = comparebookmarks(repo, remotemarks, repo._bookmarks)
     addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
     return (len(addsrc), len(adddst))

 def validdest(repo, old, new):
     """Is the new bookmark destination a valid update from the old one"""
     repo = repo.unfiltered()
     if old == new:
         # Old == new -> nothing to update.
         return False
     elif not old:
         # old is nullrev, anything is valid.
         # (new != nullrev has been excluded by the previous check)
         return True
     elif repo.obsstore:
         return new.node() in obsolete.foreground(repo, [old.node()])
     else:
         # still an independent clause as it is lazier (and therefore faster)
         return old.descendant(new)
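The dirstate change below is the same migration applied to _trypending: the HG_PENDING lookup goes through encoding.environ so the comparison against the repository root stays bytes-on-bytes under Python 3. The pattern being migrated, as a standalone sketch (function name hypothetical, simplified from the diff below):

    import errno
    from mercurial import encoding

    def _readpending(vfs, root, filename):
        # Hooks running inside a pending transaction for this repository
        # root should see the transaction's '<filename>.pending' file.
        if encoding.environ.get('HG_PENDING') == root:
            try:
                return vfs('%s.pending' % filename)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise
        # no pending file was written; fall back to the committed one
        return vfs(filename)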
@@ -1,1260 +1,1260 @@
 # dirstate.py - working directory tracking for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import collections
 import errno
 import os
 import stat

 from .i18n import _
 from .node import nullid
 from . import (
     encoding,
     error,
     match as matchmod,
     osutil,
     parsers,
     pathutil,
     pycompat,
     scmutil,
     util,
 )

 propertycache = util.propertycache
 filecache = scmutil.filecache
 _rangemask = 0x7fffffff

 dirstatetuple = parsers.dirstatetuple

 class repocache(filecache):
     """filecache for files in .hg/"""
     def join(self, obj, fname):
         return obj._opener.join(fname)

 class rootcache(filecache):
     """filecache for files in the repository root"""
     def join(self, obj, fname):
         return obj._join(fname)

 def _getfsnow(vfs):
     '''Get "now" timestamp on filesystem'''
     tmpfd, tmpname = vfs.mkstemp()
     try:
         return os.fstat(tmpfd).st_mtime
     finally:
         os.close(tmpfd)
         vfs.unlink(tmpname)

 def nonnormalentries(dmap):
     '''Compute the nonnormal dirstate entries from the dmap'''
     try:
         return parsers.nonnormalentries(dmap)
     except AttributeError:
         return set(fname for fname, e in dmap.iteritems()
                    if e[0] != 'n' or e[3] == -1)

 def _trypending(root, vfs, filename):
     '''Open file to be read according to HG_PENDING environment variable

     This opens '.pending' of specified 'filename' only when HG_PENDING
     is equal to 'root'.

     This returns '(fp, is_pending_opened)' tuple.
     '''
-    if root == os.environ.get('HG_PENDING'):
+    if root == encoding.environ.get('HG_PENDING'):
         try:
             return (vfs('%s.pending' % filename), True)
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
     return (vfs(filename), False)

 class dirstate(object):

     def __init__(self, opener, ui, root, validate):
         '''Create a new dirstate object.

         opener is an open()-like callable that can be used to open the
         dirstate file; root is the root of the directory tracked by
         the dirstate.
         '''
         self._opener = opener
         self._validate = validate
         self._root = root
         # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
         # UNC path pointing to root share (issue4557)
         self._rootdir = pathutil.normasprefix(root)
         # internal config: ui.forcecwd
         forcecwd = ui.config('ui', 'forcecwd')
         if forcecwd:
             self._cwd = forcecwd
         self._dirty = False
         self._dirtypl = False
         self._lastnormaltime = 0
         self._ui = ui
         self._filecache = {}
         self._parentwriters = 0
         self._filename = 'dirstate'
         self._pendingfilename = '%s.pending' % self._filename
         self._plchangecallbacks = {}
         self._origpl = None

         # for consistent view between _pl() and _read() invocations
         self._pendingmode = None

     def beginparentchange(self):
         '''Marks the beginning of a set of changes that involve changing
         the dirstate parents. If there is an exception during this time,
         the dirstate will not be written when the wlock is released. This
         prevents writing an incoherent dirstate where the parent doesn't
         match the contents.
         '''
         self._parentwriters += 1

     def endparentchange(self):
         '''Marks the end of a set of changes that involve changing the
         dirstate parents. Once all parent changes have been marked done,
         the wlock will be free to write the dirstate on release.
         '''
         if self._parentwriters > 0:
             self._parentwriters -= 1

     def pendingparentchange(self):
         '''Returns true if the dirstate is in the middle of a set of changes
         that modify the dirstate parent.
         '''
         return self._parentwriters > 0

     @propertycache
     def _map(self):
         '''Return the dirstate contents as a map from filename to
         (state, mode, size, time).'''
         self._read()
         return self._map

     @propertycache
     def _copymap(self):
         self._read()
         return self._copymap

     @propertycache
     def _nonnormalset(self):
         return nonnormalentries(self._map)

     @propertycache
     def _filefoldmap(self):
         try:
             makefilefoldmap = parsers.make_file_foldmap
         except AttributeError:
             pass
         else:
             return makefilefoldmap(self._map, util.normcasespec,
                                    util.normcasefallback)

         f = {}
         normcase = util.normcase
         for name, s in self._map.iteritems():
             if s[0] != 'r':
                 f[normcase(name)] = name
         f['.'] = '.' # prevents useless util.fspath() invocation
         return f

     @propertycache
     def _dirfoldmap(self):
         f = {}
         normcase = util.normcase
         for name in self._dirs:
             f[normcase(name)] = name
         return f

     @repocache('branch')
     def _branch(self):
         try:
             return self._opener.read("branch").strip() or "default"
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
             return "default"

     @propertycache
     def _pl(self):
         try:
             fp = self._opendirstatefile()
             st = fp.read(40)
             fp.close()
             l = len(st)
             if l == 40:
                 return st[:20], st[20:40]
             elif l > 0 and l < 40:
                 raise error.Abort(_('working directory state appears damaged!'))
         except IOError as err:
             if err.errno != errno.ENOENT:
                 raise
         return [nullid, nullid]

     @propertycache
     def _dirs(self):
         return util.dirs(self._map, 'r')

     def dirs(self):
         return self._dirs

     @rootcache('.hgignore')
     def _ignore(self):
         files = self._ignorefiles()
         if not files:
             return util.never

         pats = ['include:%s' % f for f in files]
         return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)

     @propertycache
     def _slash(self):
         return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'

     @propertycache
     def _checklink(self):
         return util.checklink(self._root)

     @propertycache
     def _checkexec(self):
         return util.checkexec(self._root)

     @propertycache
     def _checkcase(self):
         return not util.fscasesensitive(self._join('.hg'))

     def _join(self, f):
         # much faster than os.path.join()
         # it's safe because f is always a relative path
         return self._rootdir + f

     def flagfunc(self, buildfallback):
         if self._checklink and self._checkexec:
             def f(x):
                 try:
                     st = os.lstat(self._join(x))
                     if util.statislink(st):
                         return 'l'
                     if util.statisexec(st):
                         return 'x'
                 except OSError:
                     pass
                 return ''
             return f

         fallback = buildfallback()
         if self._checklink:
             def f(x):
                 if os.path.islink(self._join(x)):
                     return 'l'
                 if 'x' in fallback(x):
                     return 'x'
                 return ''
             return f
         if self._checkexec:
             def f(x):
                 if 'l' in fallback(x):
                     return 'l'
                 if util.isexec(self._join(x)):
                     return 'x'
                 return ''
             return f
         else:
             return fallback

     @propertycache
     def _cwd(self):
         return pycompat.getcwd()

     def getcwd(self):
         '''Return the path from which a canonical path is calculated.

         This path should be used to resolve file patterns or to convert
         canonical paths back to file paths for display. It shouldn't be
         used to get real file paths. Use vfs functions instead.
         '''
         cwd = self._cwd
         if cwd == self._root:
             return ''
         # self._root ends with a path separator if self._root is '/' or 'C:\'
         rootsep = self._root
         if not util.endswithsep(rootsep):
             rootsep += pycompat.ossep
         if cwd.startswith(rootsep):
             return cwd[len(rootsep):]
         else:
             # we're outside the repo. return an absolute path.
             return cwd

     def pathto(self, f, cwd=None):
         if cwd is None:
             cwd = self.getcwd()
         path = util.pathto(self._root, cwd, f)
         if self._slash:
             return util.pconvert(path)
         return path

     def __getitem__(self, key):
         '''Return the current state of key (a filename) in the dirstate.

         States are:
           n  normal
           m  needs merging
           r  marked for removal
           a  marked for addition
           ?  not tracked
         '''
         return self._map.get(key, ("?",))[0]

     def __contains__(self, key):
         return key in self._map

     def __iter__(self):
         for x in sorted(self._map):
             yield x

     def iteritems(self):
         return self._map.iteritems()

     def parents(self):
         return [self._validate(p) for p in self._pl]

     def p1(self):
         return self._validate(self._pl[0])

     def p2(self):
         return self._validate(self._pl[1])

     def branch(self):
         return encoding.tolocal(self._branch)

     def setparents(self, p1, p2=nullid):
         """Set dirstate parents to p1 and p2.

         When moving from two parents to one, 'm' merged entries are
         adjusted to normal and previous copy records discarded and
         returned by the call.
344
344
345 See localrepo.setparents()
345 See localrepo.setparents()
346 """
346 """
347 if self._parentwriters == 0:
347 if self._parentwriters == 0:
348 raise ValueError("cannot set dirstate parent without "
348 raise ValueError("cannot set dirstate parent without "
349 "calling dirstate.beginparentchange")
349 "calling dirstate.beginparentchange")
350
350
351 self._dirty = self._dirtypl = True
351 self._dirty = self._dirtypl = True
352 oldp2 = self._pl[1]
352 oldp2 = self._pl[1]
353 if self._origpl is None:
353 if self._origpl is None:
354 self._origpl = self._pl
354 self._origpl = self._pl
355 self._pl = p1, p2
355 self._pl = p1, p2
356 copies = {}
356 copies = {}
357 if oldp2 != nullid and p2 == nullid:
357 if oldp2 != nullid and p2 == nullid:
358 for f, s in self._map.iteritems():
358 for f, s in self._map.iteritems():
359 # Discard 'm' markers when moving away from a merge state
359 # Discard 'm' markers when moving away from a merge state
360 if s[0] == 'm':
360 if s[0] == 'm':
361 if f in self._copymap:
361 if f in self._copymap:
362 copies[f] = self._copymap[f]
362 copies[f] = self._copymap[f]
363 self.normallookup(f)
363 self.normallookup(f)
364 # Also fix up otherparent markers
364 # Also fix up otherparent markers
365 elif s[0] == 'n' and s[2] == -2:
365 elif s[0] == 'n' and s[2] == -2:
366 if f in self._copymap:
366 if f in self._copymap:
367 copies[f] = self._copymap[f]
367 copies[f] = self._copymap[f]
368 self.add(f)
368 self.add(f)
369 return copies
369 return copies
370
370
371 def setbranch(self, branch):
371 def setbranch(self, branch):
372 self._branch = encoding.fromlocal(branch)
372 self._branch = encoding.fromlocal(branch)
373 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
373 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
374 try:
374 try:
375 f.write(self._branch + '\n')
375 f.write(self._branch + '\n')
376 f.close()
376 f.close()
377
377
378 # make sure filecache has the correct stat info for _branch after
378 # make sure filecache has the correct stat info for _branch after
379 # replacing the underlying file
379 # replacing the underlying file
380 ce = self._filecache['_branch']
380 ce = self._filecache['_branch']
381 if ce:
381 if ce:
382 ce.refresh()
382 ce.refresh()
383 except: # re-raises
383 except: # re-raises
384 f.discard()
384 f.discard()
385 raise
385 raise
386
386
387 def _opendirstatefile(self):
387 def _opendirstatefile(self):
388 fp, mode = _trypending(self._root, self._opener, self._filename)
388 fp, mode = _trypending(self._root, self._opener, self._filename)
389 if self._pendingmode is not None and self._pendingmode != mode:
389 if self._pendingmode is not None and self._pendingmode != mode:
390 fp.close()
390 fp.close()
391 raise error.Abort(_('working directory state may be '
391 raise error.Abort(_('working directory state may be '
392 'changed parallelly'))
392 'changed parallelly'))
393 self._pendingmode = mode
394 return fp
395
396 def _read(self):
397 self._map = {}
398 self._copymap = {}
399 try:
400 fp = self._opendirstatefile()
401 try:
402 st = fp.read()
403 finally:
404 fp.close()
405 except IOError as err:
406 if err.errno != errno.ENOENT:
407 raise
408 return
409 if not st:
410 return
411
412 if util.safehasattr(parsers, 'dict_new_presized'):
413 # Make an estimate of the number of files in the dirstate based on
414 # its size. From a linear regression on a set of real-world repos,
415 # all over 10,000 files, the size of a dirstate entry is 85
416 # bytes. The cost of resizing is significantly higher than the cost
417 # of filling in a larger presized dict, so subtract 20% from the
418 # size.
419 #
420 # This heuristic is imperfect in many ways, so in a future dirstate
421 # format update it makes sense to just record the number of entries
422 # on write.
423 self._map = parsers.dict_new_presized(len(st) / 71)
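A worked example of the heuristic above, with assumed (made-up) numbers; only the arithmetic is reproduced here, since dict_new_presized itself lives in the C parsers module:

st_len = 8500000              # size of the dirstate file in bytes
estimated = st_len // 85      # ~100000 entries per the regression above
presized = st_len // 71       # what the call above actually requests
print(estimated, presized)    # 100000 119718, i.e. roughly 20% of headroom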
424
425 # Python's garbage collector triggers a GC each time a certain number
426 # of container objects (the number being defined by
427 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
428 # for each file in the dirstate. The C version then immediately marks
429 # them as not to be tracked by the collector. However, this has no
430 # effect on when GCs are triggered, only on what objects the GC looks
431 # into. This means that O(number of files) GCs are unavoidable.
432 # Depending on when in the process's lifetime the dirstate is parsed,
433 # this can get very expensive. As a workaround, disable GC while
434 # parsing the dirstate.
435 #
436 # (we cannot decorate the function directly since it is in a C module)
437 parse_dirstate = util.nogc(parsers.parse_dirstate)
438 p = parse_dirstate(self._map, self._copymap, st)
439 if not self._dirtypl:
440 self._pl = p
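The util.nogc wrapper used above is, in spirit, a decorator that switches the cyclic collector off around one call. A minimal sketch of the idea follows; this is an assumption-level reimplementation, not Mercurial's actual util.nogc.

import functools
import gc

def nogc(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()                 # no GC passes while we allocate
        try:
            return func(*args, **kwargs)
        finally:
            if wasenabled:
                gc.enable()
    return wrapper

@nogc
def build_entries():
    # allocates many container objects without triggering collections
    return [(str(i), ('n', 0, -1, -1)) for i in range(100000)]

entries = build_entries()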
441
442 def invalidate(self):
443 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
444 "_pl", "_dirs", "_ignore", "_nonnormalset"):
445 if a in self.__dict__:
446 delattr(self, a)
447 self._lastnormaltime = 0
448 self._dirty = False
449 self._parentwriters = 0
450 self._origpl = None
451
452 def copy(self, source, dest):
453 """Mark dest as a copy of source. Unmark dest if source is None."""
454 if source == dest:
455 return
456 self._dirty = True
457 if source is not None:
458 self._copymap[dest] = source
459 elif dest in self._copymap:
460 del self._copymap[dest]
461
462 def copied(self, file):
463 return self._copymap.get(file, None)
464
465 def copies(self):
466 return self._copymap
467
468 def _droppath(self, f):
469 if self[f] not in "?r" and "_dirs" in self.__dict__:
470 self._dirs.delpath(f)
471
472 if "_filefoldmap" in self.__dict__:
473 normed = util.normcase(f)
474 if normed in self._filefoldmap:
475 del self._filefoldmap[normed]
476
477 def _addpath(self, f, state, mode, size, mtime):
478 oldstate = self[f]
479 if state == 'a' or oldstate == 'r':
480 scmutil.checkfilename(f)
481 if f in self._dirs:
482 raise error.Abort(_('directory %r already in dirstate') % f)
483 # shadows
484 for d in util.finddirs(f):
485 if d in self._dirs:
486 break
487 if d in self._map and self[d] != 'r':
488 raise error.Abort(
489 _('file %r in dirstate clashes with %r') % (d, f))
490 if oldstate in "?r" and "_dirs" in self.__dict__:
491 self._dirs.addpath(f)
492 self._dirty = True
493 self._map[f] = dirstatetuple(state, mode, size, mtime)
494 if state != 'n' or mtime == -1:
495 self._nonnormalset.add(f)
496
497 def normal(self, f):
498 '''Mark a file normal and clean.'''
499 s = os.lstat(self._join(f))
500 mtime = s.st_mtime
501 self._addpath(f, 'n', s.st_mode,
502 s.st_size & _rangemask, mtime & _rangemask)
503 if f in self._copymap:
504 del self._copymap[f]
505 if f in self._nonnormalset:
506 self._nonnormalset.remove(f)
507 if mtime > self._lastnormaltime:
508 # Remember the most recent modification timeslot for status(),
509 # to make sure we won't miss future size-preserving file content
510 # modifications that happen within the same timeslot.
511 self._lastnormaltime = mtime
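The race being guarded against can be demonstrated with plain files. In this hedged sketch (a throwaway temporary file; the outcome depends on the filesystem's timestamp granularity), two same-size writes can land in the same timestamp granule, in which case (size, mtime) alone cannot tell them apart:

import os
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)
with open(path, 'w') as f:
    f.write('aaaa')
st1 = os.stat(path)
with open(path, 'w') as f:
    f.write('bbbb')                  # same size, possibly same second
st2 = os.stat(path)
# On filesystems with coarse timestamps these can compare equal even
# though the content changed -- hence the _lastnormaltime re-check in
# status().
print(st1.st_size == st2.st_size, int(st1.st_mtime) == int(st2.st_mtime))
os.unlink(path)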
512
513 def normallookup(self, f):
514 '''Mark a file normal, but possibly dirty.'''
515 if self._pl[1] != nullid and f in self._map:
516 # if there is a merge going on and the file was either
517 # in state 'm' (-1) or coming from other parent (-2) before
518 # being removed, restore that state.
519 entry = self._map[f]
520 if entry[0] == 'r' and entry[2] in (-1, -2):
521 source = self._copymap.get(f)
522 if entry[2] == -1:
523 self.merge(f)
524 elif entry[2] == -2:
525 self.otherparent(f)
526 if source:
527 self.copy(source, f)
528 return
529 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
530 return
531 self._addpath(f, 'n', 0, -1, -1)
532 if f in self._copymap:
533 del self._copymap[f]
534 if f in self._nonnormalset:
535 self._nonnormalset.remove(f)
536
537 def otherparent(self, f):
538 '''Mark as coming from the other parent, always dirty.'''
539 if self._pl[1] == nullid:
540 raise error.Abort(_("setting %r to other parent "
541 "only allowed in merges") % f)
542 if f in self and self[f] == 'n':
543 # merge-like
544 self._addpath(f, 'm', 0, -2, -1)
545 else:
546 # add-like
547 self._addpath(f, 'n', 0, -2, -1)
548
549 if f in self._copymap:
550 del self._copymap[f]
551
552 def add(self, f):
553 '''Mark a file added.'''
554 self._addpath(f, 'a', 0, -1, -1)
555 if f in self._copymap:
556 del self._copymap[f]
557
558 def remove(self, f):
559 '''Mark a file removed.'''
560 self._dirty = True
561 self._droppath(f)
562 size = 0
563 if self._pl[1] != nullid and f in self._map:
564 # backup the previous state
565 entry = self._map[f]
566 if entry[0] == 'm': # merge
567 size = -1
568 elif entry[0] == 'n' and entry[2] == -2: # other parent
569 size = -2
570 self._map[f] = dirstatetuple('r', 0, size, 0)
571 self._nonnormalset.add(f)
572 if size == 0 and f in self._copymap:
573 del self._copymap[f]
574
575 def merge(self, f):
576 '''Mark a file merged.'''
577 if self._pl[1] == nullid:
578 return self.normallookup(f)
579 return self.otherparent(f)
580
581 def drop(self, f):
582 '''Drop a file from the dirstate'''
583 if f in self._map:
584 self._dirty = True
585 self._droppath(f)
586 del self._map[f]
587 if f in self._nonnormalset:
588 self._nonnormalset.remove(f)
589 if f in self._copymap:
590 del self._copymap[f]
591
592 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
593 if exists is None:
594 exists = os.path.lexists(os.path.join(self._root, path))
595 if not exists:
596 # Maybe a path component exists
597 if not ignoremissing and '/' in path:
598 d, f = path.rsplit('/', 1)
599 d = self._normalize(d, False, ignoremissing, None)
600 folded = d + "/" + f
601 else:
602 # No path components, preserve original case
603 folded = path
604 else:
605 # recursively normalize leading directory components
606 # against dirstate
607 if '/' in normed:
608 d, f = normed.rsplit('/', 1)
609 d = self._normalize(d, False, ignoremissing, True)
610 r = self._root + "/" + d
611 folded = d + "/" + util.fspath(f, r)
612 else:
613 folded = util.fspath(normed, self._root)
614 storemap[normed] = folded
615
616 return folded
617
618 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
619 normed = util.normcase(path)
620 folded = self._filefoldmap.get(normed, None)
621 if folded is None:
622 if isknown:
623 folded = path
624 else:
625 folded = self._discoverpath(path, normed, ignoremissing, exists,
626 self._filefoldmap)
627 return folded
628
629 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
630 normed = util.normcase(path)
631 folded = self._filefoldmap.get(normed, None)
632 if folded is None:
633 folded = self._dirfoldmap.get(normed, None)
634 if folded is None:
635 if isknown:
636 folded = path
637 else:
638 # store discovered result in dirfoldmap so that future
639 # normalizefile calls don't start matching directories
640 folded = self._discoverpath(path, normed, ignoremissing, exists,
641 self._dirfoldmap)
642 return folded
643
644 def normalize(self, path, isknown=False, ignoremissing=False):
645 '''
646 normalize the case of a pathname when on a casefolding filesystem
647
648 isknown specifies whether the filename came from walking the
649 disk, to avoid extra filesystem access.
650
651 If ignoremissing is True, missing paths are returned
652 unchanged. Otherwise, we try harder to normalize possibly
653 existing path components.
654
655 The normalized case is determined based on the following precedence:
656
657 - version of name already stored in the dirstate
658 - version of name stored on disk
659 - version provided via command arguments
660 '''
661
662 if self._checkcase:
663 return self._normalize(path, isknown, ignoremissing)
664 return path
665
666 def clear(self):
667 self._map = {}
668 self._nonnormalset = set()
669 if "_dirs" in self.__dict__:
670 delattr(self, "_dirs")
671 self._copymap = {}
672 self._pl = [nullid, nullid]
673 self._lastnormaltime = 0
674 self._dirty = True
675
676 def rebuild(self, parent, allfiles, changedfiles=None):
677 if changedfiles is None:
678 # Rebuild entire dirstate
679 changedfiles = allfiles
680 lastnormaltime = self._lastnormaltime
681 self.clear()
682 self._lastnormaltime = lastnormaltime
683
684 if self._origpl is None:
685 self._origpl = self._pl
686 self._pl = (parent, nullid)
687 for f in changedfiles:
688 if f in allfiles:
689 self.normallookup(f)
690 else:
691 self.drop(f)
692
693 self._dirty = True
694
695 def write(self, tr):
696 if not self._dirty:
697 return
698
699 filename = self._filename
700 if tr:
701 # 'dirstate.write()' is not only for writing in-memory
702 # changes out, but also for dropping ambiguous timestamps.
703 # Delayed writing would reintroduce the "ambiguous timestamp" issue.
704 # See also the wiki page below for detail:
705 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
706
707 # emulate dropping timestamp in 'parsers.pack_dirstate'
708 now = _getfsnow(self._opener)
709 dmap = self._map
710 for f, e in dmap.iteritems():
711 if e[0] == 'n' and e[3] == now:
712 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
713 self._nonnormalset.add(f)
714
715 # emulate that all 'dirstate.normal' results are written out
716 self._lastnormaltime = 0
717
718 # delay writing in-memory changes out
719 tr.addfilegenerator('dirstate', (self._filename,),
720 self._writedirstate, location='plain')
721 return
722
723 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
724 self._writedirstate(st)
725
726 def addparentchangecallback(self, category, callback):
727 """add a callback to be called when the wd parents are changed
728
729 Callback will be called with the following arguments:
730 dirstate, (oldp1, oldp2), (newp1, newp2)
731
732 Category is a unique identifier to allow overwriting an old callback
733 with a newer callback.
734 """
735 self._plchangecallbacks[category] = callback
736
737 def _writedirstate(self, st):
738 # notify callbacks about parents change
739 if self._origpl is not None and self._origpl != self._pl:
740 for c, callback in sorted(self._plchangecallbacks.iteritems()):
741 callback(self, self._origpl, self._pl)
742 self._origpl = None
743 # use the modification time of the newly created temporary file as the
744 # filesystem's notion of 'now'
745 now = util.fstat(st).st_mtime & _rangemask
746
747 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
748 # the timestamp of each entry in the dirstate, because of 'now > mtime'
749 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
750 if delaywrite > 0:
751 # do we have any files to delay for?
752 for f, e in self._map.iteritems():
753 if e[0] == 'n' and e[3] == now:
754 import time # to avoid useless import
755 # rather than sleep n seconds, sleep until the next
756 # multiple of n seconds
757 clock = time.time()
758 start = int(clock) - (int(clock) % delaywrite)
759 end = start + delaywrite
760 time.sleep(end - clock)
761 now = end # trust our estimate that the end is near now
762 break
763
764 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
765 self._nonnormalset = nonnormalentries(self._map)
766 st.close()
767 self._lastnormaltime = 0
768 self._dirty = self._dirtypl = False
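A worked example of the rounding in the delaywrite block above (the clock value is made up): instead of sleeping a fixed amount, the code sleeps to the next multiple of delaywrite seconds, so that 'now' ends up on a predictable boundary.

delaywrite = 2
clock = 103.7
start = int(clock) - (int(clock) % delaywrite)   # 102
end = start + delaywrite                         # 104
print(start, end, end - clock)                   # 102 104 ~0.3s of sleep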
769
770 def _dirignore(self, f):
771 if f == '.':
772 return False
773 if self._ignore(f):
774 return True
775 for p in util.finddirs(f):
776 if self._ignore(p):
777 return True
778 return False
779
780 def _ignorefiles(self):
781 files = []
782 if os.path.exists(self._join('.hgignore')):
783 files.append(self._join('.hgignore'))
784 for name, path in self._ui.configitems("ui"):
785 if name == 'ignore' or name.startswith('ignore.'):
786 # we need to use os.path.join here rather than self._join
787 # because path is arbitrary and user-specified
788 files.append(os.path.join(self._rootdir, util.expandpath(path)))
789 return files
790
791 def _ignorefileandline(self, f):
792 files = collections.deque(self._ignorefiles())
793 visited = set()
794 while files:
795 i = files.popleft()
796 patterns = matchmod.readpatternfile(i, self._ui.warn,
797 sourceinfo=True)
798 for pattern, lineno, line in patterns:
799 kind, p = matchmod._patsplit(pattern, 'glob')
800 if kind == "subinclude":
801 if p not in visited:
802 files.append(p)
803 continue
804 m = matchmod.match(self._root, '', [], [pattern],
805 warn=self._ui.warn)
806 if m(f):
807 return (i, lineno, line)
808 visited.add(i)
809 return (None, -1, "")
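The method above is a worklist traversal with a visited set, which keeps chains of subinclude files -- even circular ones -- from looping forever. A self-contained sketch of the same pattern (the includes mapping is a made-up stand-in for 'subinclude' patterns read from real ignore files, and an extra popleft-time check makes duplicate queue entries harmless):

import collections

includes = {'root': ['a', 'b'], 'a': ['b', 'root'], 'b': []}

def walk(start):
    files = collections.deque([start])
    visited = set()
    order = []
    while files:
        i = files.popleft()
        if i in visited:            # skip anything already processed
            continue
        order.append(i)
        for sub in includes[i]:     # ~ the 'subinclude' patterns in file i
            if sub not in visited:
                files.append(sub)
        visited.add(i)
    return order

print(walk('root'))  # ['root', 'a', 'b'] -- the cycle back to 'root' is skipped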
810
811 def _walkexplicit(self, match, subrepos):
812 '''Get stat data about the files explicitly specified by match.
813
814 Return a triple (results, dirsfound, dirsnotfound).
815 - results is a mapping from filename to stat result. It also contains
816 listings mapping subrepos and .hg to None.
817 - dirsfound is a list of files found to be directories.
818 - dirsnotfound is a list of files that the dirstate thinks are
819 directories and that were not found.'''
820
821 def badtype(mode):
822 kind = _('unknown')
823 if stat.S_ISCHR(mode):
824 kind = _('character device')
825 elif stat.S_ISBLK(mode):
826 kind = _('block device')
827 elif stat.S_ISFIFO(mode):
828 kind = _('fifo')
829 elif stat.S_ISSOCK(mode):
830 kind = _('socket')
831 elif stat.S_ISDIR(mode):
832 kind = _('directory')
833 return _('unsupported file type (type is %s)') % kind
834
835 matchedir = match.explicitdir
836 badfn = match.bad
837 dmap = self._map
838 lstat = os.lstat
839 getkind = stat.S_IFMT
840 dirkind = stat.S_IFDIR
841 regkind = stat.S_IFREG
842 lnkkind = stat.S_IFLNK
843 join = self._join
844 dirsfound = []
845 foundadd = dirsfound.append
846 dirsnotfound = []
847 notfoundadd = dirsnotfound.append
848
849 if not match.isexact() and self._checkcase:
850 normalize = self._normalize
851 else:
852 normalize = None
853
854 files = sorted(match.files())
855 subrepos.sort()
856 i, j = 0, 0
857 while i < len(files) and j < len(subrepos):
858 subpath = subrepos[j] + "/"
859 if files[i] < subpath:
860 i += 1
861 continue
862 while i < len(files) and files[i].startswith(subpath):
863 del files[i]
864 j += 1
865
866 if not files or '.' in files:
867 files = ['.']
868 results = dict.fromkeys(subrepos)
869 results['.hg'] = None
870
871 alldirs = None
872 for ff in files:
873 # constructing the foldmap is expensive, so don't do it for the
874 # common case where files is ['.']
875 if normalize and ff != '.':
876 nf = normalize(ff, False, True)
877 else:
878 nf = ff
879 if nf in results:
880 continue
881
882 try:
883 st = lstat(join(nf))
884 kind = getkind(st.st_mode)
885 if kind == dirkind:
886 if nf in dmap:
887 # file replaced by dir on disk but still in dirstate
888 results[nf] = None
889 if matchedir:
890 matchedir(nf)
891 foundadd((nf, ff))
892 elif kind == regkind or kind == lnkkind:
893 results[nf] = st
894 else:
895 badfn(ff, badtype(kind))
896 if nf in dmap:
897 results[nf] = None
898 except OSError as inst: # nf not found on disk - it is dirstate only
899 if nf in dmap: # does it exactly match a missing file?
900 results[nf] = None
901 else: # does it match a missing directory?
902 if alldirs is None:
903 alldirs = util.dirs(dmap)
904 if nf in alldirs:
905 if matchedir:
906 matchedir(nf)
907 notfoundadd(nf)
908 else:
909 badfn(ff, inst.strerror)
910
911 # Case insensitive filesystems cannot rely on lstat() failing to detect
912 # a case-only rename. Prune the stat object for any file that does not
913 # match the case in the filesystem, if there are multiple files that
914 # normalize to the same path.
915 if match.isexact() and self._checkcase:
916 normed = {}
917
918 for f, st in results.iteritems():
919 if st is None:
920 continue
921
922 nc = util.normcase(f)
923 paths = normed.get(nc)
924
925 if paths is None:
926 paths = set()
927 normed[nc] = paths
928
929 paths.add(f)
930
931 for norm, paths in normed.iteritems():
932 if len(paths) > 1:
933 for path in paths:
934 folded = self._discoverpath(path, norm, True, None,
935 self._dirfoldmap)
936 if path != folded:
937 results[path] = None
938
939 return results, dirsfound, dirsnotfound
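The collision pruning above boils down to grouping candidates by their case-normalized form and flagging groups with more than one spelling, since at most one of them can match the on-disk case. A standalone sketch (str.lower is a crude stand-in for util.normcase):

def collisions(paths, normcase=str.lower):
    normed = {}
    for f in paths:
        normed.setdefault(normcase(f), set()).add(f)
    return dict((nc, ps) for nc, ps in normed.items() if len(ps) > 1)

print(collisions(['README', 'readme', 'src/main.py']))
# {'readme': {'README', 'readme'}} -- only one spelling can exist on disk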
940
941 def walk(self, match, subrepos, unknown, ignored, full=True):
942 '''
943 Walk recursively through the directory tree, finding all files
944 matched by match.
945
946 If full is False, maybe skip some known-clean files.
947
948 Return a dict mapping filename to stat-like object (either
949 mercurial.osutil.stat instance or return value of os.stat()).
950
951 '''
952 # full is a flag that extensions that hook into walk can use -- this
953 # implementation doesn't use it at all. This satisfies the contract
954 # because we only guarantee a "maybe".
955
956 if ignored:
957 ignore = util.never
958 dirignore = util.never
959 elif unknown:
960 ignore = self._ignore
961 dirignore = self._dirignore
962 else:
963 # if not unknown and not ignored, drop dir recursion and step 2
964 ignore = util.always
965 dirignore = util.always
966
967 matchfn = match.matchfn
968 matchalways = match.always()
969 matchtdir = match.traversedir
970 dmap = self._map
971 listdir = osutil.listdir
972 lstat = os.lstat
973 dirkind = stat.S_IFDIR
974 regkind = stat.S_IFREG
975 lnkkind = stat.S_IFLNK
976 join = self._join
977
978 exact = skipstep3 = False
979 if match.isexact(): # match.exact
980 exact = True
981 dirignore = util.always # skip step 2
982 elif match.prefix(): # match.match, no patterns
983 skipstep3 = True
984
985 if not exact and self._checkcase:
986 normalize = self._normalize
987 normalizefile = self._normalizefile
988 skipstep3 = False
989 else:
990 normalize = self._normalize
991 normalizefile = None
992
993 # step 1: find all explicit files
994 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
995
996 skipstep3 = skipstep3 and not (work or dirsnotfound)
997 work = [d for d in work if not dirignore(d[0])]
998
999 # step 2: visit subdirectories
1000 def traverse(work, alreadynormed):
1001 wadd = work.append
1002 while work:
1003 nd = work.pop()
1004 skip = None
1005 if nd == '.':
1006 nd = ''
1007 else:
1008 skip = '.hg'
1009 try:
1010 entries = listdir(join(nd), stat=True, skip=skip)
1011 except OSError as inst:
1012 if inst.errno in (errno.EACCES, errno.ENOENT):
1013 match.bad(self.pathto(nd), inst.strerror)
1014 continue
1015 raise
1016 for f, kind, st in entries:
1017 if normalizefile:
1018 # even though f might be a directory, we're only
1019 # interested in comparing it to files currently in the
1020 # dmap -- therefore normalizefile is enough
1021 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1022 True)
1023 else:
1024 nf = nd and (nd + "/" + f) or f
1025 if nf not in results:
1026 if kind == dirkind:
1027 if not ignore(nf):
1028 if matchtdir:
1029 matchtdir(nf)
1030 wadd(nf)
1031 if nf in dmap and (matchalways or matchfn(nf)):
1032 results[nf] = None
1033 elif kind == regkind or kind == lnkkind:
1034 if nf in dmap:
1035 if matchalways or matchfn(nf):
1036 results[nf] = st
1037 elif ((matchalways or matchfn(nf))
1038 and not ignore(nf)):
1039 # unknown file -- normalize if necessary
1040 if not alreadynormed:
1041 nf = normalize(nf, False, True)
1042 results[nf] = st
1043 elif nf in dmap and (matchalways or matchfn(nf)):
1044 results[nf] = None
1045
1046 for nd, d in work:
1047 # alreadynormed means that processwork doesn't have to do any
1048 # expensive directory normalization
1049 alreadynormed = not normalize or nd == d
1050 traverse([d], alreadynormed)
1051
1052 for s in subrepos:
1053 del results[s]
1054 del results['.hg']
1055
1056 # step 3: visit remaining files from dmap
1057 if not skipstep3 and not exact:
1058 # If a dmap file is not in results yet, it was either
1059 # a) not matching matchfn b) ignored, c) missing, or d) under a
1060 # symlink directory.
1061 if not results and matchalways:
1062 visit = dmap.keys()
1063 else:
1064 visit = [f for f in dmap if f not in results and matchfn(f)]
1065 visit.sort()
1066
1067 if unknown:
1068 # unknown == True means we walked all dirs under the roots
1069 # that weren't ignored, and everything that matched was stat'ed
1070 # and is already in results.
1071 # The rest must thus be ignored or under a symlink.
1072 audit_path = pathutil.pathauditor(self._root)
1073
1074 for nf in iter(visit):
1075 # If a stat for the same file was already added with a
1076 # different case, don't add one for this, since that would
1077 # make it appear as if the file exists under both names
1078 # on disk.
1079 if (normalizefile and
1080 normalizefile(nf, True, True) in results):
1081 results[nf] = None
1082 # Report ignored items in the dmap as long as they are not
1083 # under a symlink directory.
1084 elif audit_path.check(nf):
1085 try:
1086 results[nf] = lstat(join(nf))
1087 # file was just ignored, no links, and exists
1088 except OSError:
1089 # file doesn't exist
1090 results[nf] = None
1091 else:
1092 # It's either missing or under a symlink directory
1093 # which we in this case report as missing
1094 results[nf] = None
1095 else:
1096 # We may not have walked the full directory tree above,
1097 # so stat and check everything we missed.
1098 nf = iter(visit).next
1099 for st in util.statfiles([join(i) for i in visit]):
1100 results[nf()] = st
1101 return results
1102
1103 def status(self, match, subrepos, ignored, clean, unknown):
1104 '''Determine the status of the working copy relative to the
1105 dirstate and return a pair of (unsure, status), where status is of type
1106 scmutil.status and:
1107
1108 unsure:
1109 files that might have been modified since the dirstate was
1110 written, but need to be read to be sure (size is the same
1111 but mtime differs)
1112 status.modified:
1113 files that have definitely been modified since the dirstate
1114 was written (different size or mode)
1115 status.clean:
1116 files that have definitely not been modified since the
1117 dirstate was written
1118 '''
1119 listignored, listclean, listunknown = ignored, clean, unknown
1120 lookup, modified, added, unknown, ignored = [], [], [], [], []
1121 removed, deleted, clean = [], [], []
1122
1123 dmap = self._map
1124 ladd = lookup.append # aka "unsure"
1125 madd = modified.append
1126 aadd = added.append
1127 uadd = unknown.append
1128 iadd = ignored.append
1129 radd = removed.append
1130 dadd = deleted.append
1131 cadd = clean.append
1132 mexact = match.exact
1133 dirignore = self._dirignore
1134 checkexec = self._checkexec
1135 copymap = self._copymap
1136 lastnormaltime = self._lastnormaltime
1137
1138 # We need to do full walks when either
1139 # - we're listing all clean files, or
1140 # - match.traversedir does something, because match.traversedir should
1141 # be called for every dir in the working dir
1142 full = listclean or match.traversedir is not None
1143 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1144 full=full).iteritems():
1145 if fn not in dmap:
1146 if (listignored or mexact(fn)) and dirignore(fn):
1147 if listignored:
1148 iadd(fn)
1149 else:
1150 uadd(fn)
1151 continue
1152
1153 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1154 # written like that for performance reasons. dmap[fn] is not a
1155 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1156 # opcode has fast paths when the value to be unpacked is a tuple or
1157 # a list, but falls back to creating a full-fledged iterator in
1158 # general. That is much slower than simply accessing and storing the
1159 # tuple members one by one.
1160 t = dmap[fn]
1161 state = t[0]
1162 mode = t[1]
1163 size = t[2]
1164 time = t[3]
1165
1166 if not st and state in "nma":
1167 dadd(fn)
1168 elif state == 'n':
1169 if (size >= 0 and
1170 ((size != st.st_size and size != st.st_size & _rangemask)
1171 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1172 or size == -2 # other parent
1173 or fn in copymap):
1174 madd(fn)
1175 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1176 ladd(fn)
1177 elif st.st_mtime == lastnormaltime:
1178 # fn may have just been marked as normal and it may have
1179 # changed in the same second without changing its size.
1180 # This can happen if we quickly do multiple commits.
1181 # Force lookup, so we don't miss such a racy file change.
1182 ladd(fn)
1183 elif listclean:
1184 cadd(fn)
1185 elif state == 'm':
1186 madd(fn)
1187 elif state == 'a':
1188 aadd(fn)
1189 elif state == 'r':
1190 radd(fn)
1191
1192 return (lookup, scmutil.status(modified, added, removed, deleted,
1193 unknown, ignored, clean))
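The unpacking comment inside status() can be checked with a micro-benchmark. In this sketch, entrytype is a made-up stand-in for the C-level dirstate entry (a sequence that is neither a tuple nor a list), and absolute timings will of course vary; the point is only that unpacking such an object has to build a generic iterator, while explicit indexing does not:

import timeit

class entrytype(object):            # sequence, but not a tuple or a list
    def __init__(self):
        self.data = ('n', 0o644, 12, 0)
    def __getitem__(self, i):
        return self.data[i]         # IndexError at i == 4 ends iteration

e = entrytype()
print(timeit.timeit('a, b, c, d = e', globals={'e': e}, number=100000))
print(timeit.timeit('a = e[0]; b = e[1]; c = e[2]; d = e[3]',
                    globals={'e': e}, number=100000))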
1194
1195 def matches(self, match):
1196 '''
1197 return files in the dirstate (in whatever state) filtered by match
1198 '''
1199 dmap = self._map
1200 if match.always():
1201 return dmap.keys()
1202 files = match.files()
1203 if match.isexact():
1204 # fast path -- filter the other way around, since typically files is
1205 # much smaller than dmap
1206 return [f for f in files if f in dmap]
1207 if match.prefix() and all(fn in dmap for fn in files):
1208 # fast path -- all the values are known to be files, so just return
1209 # that
1210 return list(files)
1211 return [f for f in dmap if match(f)]
1212
1213 def _actualfilename(self, tr):
1214 if tr:
1215 return self._pendingfilename
1216 else:
1217 return self._filename
1218
1219 def savebackup(self, tr, suffix='', prefix=''):
1220 '''Save current dirstate into backup file with suffix'''
1221 assert len(suffix) > 0 or len(prefix) > 0
1222 filename = self._actualfilename(tr)
1223
1224 # use '_writedirstate' instead of 'write' to make sure changes are
1225 # written out, because the latter skips writing when a transaction
1226 # is running. The output file is then used to back up the dirstate.
1227 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1228 checkambig=True))
1229
1230 if tr:
1231 # ensure that subsequent tr.writepending returns True for
1232 # changes written out above, even if dirstate is never
1233 # changed after this
1234 tr.addfilegenerator('dirstate', (self._filename,),
1235 self._writedirstate, location='plain')
1236
1237 # ensure that pending file written above is unlinked at
1238 # failure, even if tr.writepending isn't invoked until the
1239 # end of this transaction
1240 tr.registertmp(filename, location='plain')
1241
1242 self._opener.write(prefix + self._filename + suffix,
1243 self._opener.tryread(filename))
1244
1245 def restorebackup(self, tr, suffix='', prefix=''):
1246 '''Restore dirstate by backup file with suffix'''
1247 assert len(suffix) > 0 or len(prefix) > 0
1248 # this "invalidate()" prevents "wlock.release()" from writing
1249 # changes of dirstate out after restoring from backup file
1250 self.invalidate()
1251 filename = self._actualfilename(tr)
1252 # using self._filename to avoid having "pending" in the backup filename
1253 self._opener.rename(prefix + self._filename + suffix, filename,
1254 checkambig=True)
1255
1256 def clearbackup(self, tr, suffix='', prefix=''):
1257 '''Clear backup file with suffix'''
1258 assert len(suffix) > 0 or len(prefix) > 0
1259 # using self._filename to avoid having "pending" in the backup filename
1260 self._opener.unlink(prefix + self._filename + suffix)
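Taken together, savebackup/restorebackup/clearbackup implement a save-mutate-restore protocol around risky operations. A file-level sketch of the same flow (plain files and shutil instead of the repo opener and vfs; all names are made up):

import os
import shutil
import tempfile

d = tempfile.mkdtemp()
dirstate = os.path.join(d, 'dirstate')
backup = dirstate + '.backup.rebase'    # ~ prefix + self._filename + suffix
with open(dirstate, 'w') as f:
    f.write('state-1')
shutil.copyfile(dirstate, backup)       # ~ savebackup()
with open(dirstate, 'w') as f:
    f.write('state-2')                  # some operation that may fail
os.replace(backup, dirstate)            # ~ restorebackup(): rename over
with open(dirstate) as f:
    print(f.read())                     # state-1
shutil.rmtree(d)                        # success path would clearbackup()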
@@ -1,2004 +1,2004
1 # localrepo.py - read/write repository class for mercurial
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 import errno
11 import hashlib
12 import inspect
13 import os
14 import random
15 import time
16 import weakref
17
18 from .i18n import _
19 from .node import (
20 hex,
21 nullid,
22 short,
23 wdirrev,
24 )
25 from . import (
26 bookmarks,
27 branchmap,
28 bundle2,
29 changegroup,
30 changelog,
31 context,
32 dirstate,
33 dirstateguard,
34 encoding,
35 error,
36 exchange,
37 extensions,
38 filelog,
39 hook,
40 lock as lockmod,
41 manifest,
42 match as matchmod,
43 merge as mergemod,
44 mergeutil,
45 namespaces,
46 obsolete,
47 pathutil,
48 peer,
49 phases,
50 pushkey,
51 repoview,
52 revset,
53 scmutil,
54 store,
55 subrepo,
56 tags as tagsmod,
57 transaction,
58 util,
59 )
60
61 release = lockmod.release
62 urlerr = util.urlerr
63 urlreq = util.urlreq
64
65 class repofilecache(scmutil.filecache):
66 """All filecache usage on repo are done for logic that should be unfiltered
66 """All filecache usage on repo are done for logic that should be unfiltered
67 """
67 """
68
68
69 def __get__(self, repo, type=None):
69 def __get__(self, repo, type=None):
70 if repo is None:
70 if repo is None:
71 return self
71 return self
72 return super(repofilecache, self).__get__(repo.unfiltered(), type)
72 return super(repofilecache, self).__get__(repo.unfiltered(), type)
73 def __set__(self, repo, value):
73 def __set__(self, repo, value):
74 return super(repofilecache, self).__set__(repo.unfiltered(), value)
74 return super(repofilecache, self).__set__(repo.unfiltered(), value)
75 def __delete__(self, repo):
75 def __delete__(self, repo):
76 return super(repofilecache, self).__delete__(repo.unfiltered())
76 return super(repofilecache, self).__delete__(repo.unfiltered())
77
77
78 class storecache(repofilecache):
78 class storecache(repofilecache):
79 """filecache for files in the store"""
79 """filecache for files in the store"""
80 def join(self, obj, fname):
80 def join(self, obj, fname):
81 return obj.sjoin(fname)
81 return obj.sjoin(fname)
82
82
83 class unfilteredpropertycache(util.propertycache):
83 class unfilteredpropertycache(util.propertycache):
84 """propertycache that apply to unfiltered repo only"""
84 """propertycache that apply to unfiltered repo only"""
85
85
86 def __get__(self, repo, type=None):
86 def __get__(self, repo, type=None):
87 unfi = repo.unfiltered()
87 unfi = repo.unfiltered()
88 if unfi is repo:
88 if unfi is repo:
89 return super(unfilteredpropertycache, self).__get__(unfi)
89 return super(unfilteredpropertycache, self).__get__(unfi)
90 return getattr(unfi, self.name)
90 return getattr(unfi, self.name)
91
91
92 class filteredpropertycache(util.propertycache):
92 class filteredpropertycache(util.propertycache):
93 """propertycache that must take filtering in account"""
93 """propertycache that must take filtering in account"""
94
94
95 def cachevalue(self, obj, value):
95 def cachevalue(self, obj, value):
96 object.__setattr__(obj, self.name, value)
96 object.__setattr__(obj, self.name, value)
97
97
98
98
99 def hasunfilteredcache(repo, name):
99 def hasunfilteredcache(repo, name):
100 """check if a repo has an unfilteredpropertycache value for <name>"""
100 """check if a repo has an unfilteredpropertycache value for <name>"""
101 return name in vars(repo.unfiltered())
101 return name in vars(repo.unfiltered())
102
102
103 def unfilteredmethod(orig):
103 def unfilteredmethod(orig):
104 """decorate method that always need to be run on unfiltered version"""
104 """decorate method that always need to be run on unfiltered version"""
105 def wrapper(repo, *args, **kwargs):
105 def wrapper(repo, *args, **kwargs):
106 return orig(repo.unfiltered(), *args, **kwargs)
106 return orig(repo.unfiltered(), *args, **kwargs)
107 return wrapper
107 return wrapper
108
108
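As a usage sketch (the function name below is hypothetical), the decorator simply swaps the receiver for its unfiltered view before dispatching, so the wrapped code always sees every revision even when invoked through a filtered proxy:

    @unfilteredmethod
    def countall(repo):
        # runs on repo.unfiltered(), so hidden/filtered revisions are included
        return len(repo.changelog)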
109 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
109 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
110 'unbundle'))
110 'unbundle'))
111 legacycaps = moderncaps.union(set(['changegroupsubset']))
111 legacycaps = moderncaps.union(set(['changegroupsubset']))
112
112
113 class localpeer(peer.peerrepository):
113 class localpeer(peer.peerrepository):
114 '''peer for a local repo; reflects only the most recent API'''
114 '''peer for a local repo; reflects only the most recent API'''
115
115
116 def __init__(self, repo, caps=moderncaps):
116 def __init__(self, repo, caps=moderncaps):
117 peer.peerrepository.__init__(self)
117 peer.peerrepository.__init__(self)
118 self._repo = repo.filtered('served')
118 self._repo = repo.filtered('served')
119 self.ui = repo.ui
119 self.ui = repo.ui
120 self._caps = repo._restrictcapabilities(caps)
120 self._caps = repo._restrictcapabilities(caps)
121 self.requirements = repo.requirements
121 self.requirements = repo.requirements
122 self.supportedformats = repo.supportedformats
122 self.supportedformats = repo.supportedformats
123
123
124 def close(self):
124 def close(self):
125 self._repo.close()
125 self._repo.close()
126
126
127 def _capabilities(self):
127 def _capabilities(self):
128 return self._caps
128 return self._caps
129
129
130 def local(self):
130 def local(self):
131 return self._repo
131 return self._repo
132
132
133 def canpush(self):
133 def canpush(self):
134 return True
134 return True
135
135
136 def url(self):
136 def url(self):
137 return self._repo.url()
137 return self._repo.url()
138
138
139 def lookup(self, key):
139 def lookup(self, key):
140 return self._repo.lookup(key)
140 return self._repo.lookup(key)
141
141
142 def branchmap(self):
142 def branchmap(self):
143 return self._repo.branchmap()
143 return self._repo.branchmap()
144
144
145 def heads(self):
145 def heads(self):
146 return self._repo.heads()
146 return self._repo.heads()
147
147
148 def known(self, nodes):
148 def known(self, nodes):
149 return self._repo.known(nodes)
149 return self._repo.known(nodes)
150
150
151 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
151 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
152 **kwargs):
152 **kwargs):
153 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
153 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
154 common=common, bundlecaps=bundlecaps,
154 common=common, bundlecaps=bundlecaps,
155 **kwargs)
155 **kwargs)
156 cb = util.chunkbuffer(chunks)
156 cb = util.chunkbuffer(chunks)
157
157
158 if bundlecaps is not None and 'HG20' in bundlecaps:
158 if bundlecaps is not None and 'HG20' in bundlecaps:
159 # When requesting a bundle2, getbundle returns a stream to make the
159 # When requesting a bundle2, getbundle returns a stream to make the
160 # wire-level function happier. We need to build a proper object
160 # wire-level function happier. We need to build a proper object
161 # from it in the local peer.
161 # from it in the local peer.
162 return bundle2.getunbundler(self.ui, cb)
162 return bundle2.getunbundler(self.ui, cb)
163 else:
163 else:
164 return changegroup.getunbundler('01', cb, None)
164 return changegroup.getunbundler('01', cb, None)
165
165
166 # TODO We might want to move the next two calls into legacypeer and add
166 # TODO We might want to move the next two calls into legacypeer and add
167 # unbundle instead.
167 # unbundle instead.
168
168
169 def unbundle(self, cg, heads, url):
169 def unbundle(self, cg, heads, url):
170 """apply a bundle on a repo
170 """apply a bundle on a repo
171
171
172 This function handles the repo locking itself."""
172 This function handles the repo locking itself."""
173 try:
173 try:
174 try:
174 try:
175 cg = exchange.readbundle(self.ui, cg, None)
175 cg = exchange.readbundle(self.ui, cg, None)
176 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
176 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
177 if util.safehasattr(ret, 'getchunks'):
177 if util.safehasattr(ret, 'getchunks'):
178 # This is a bundle20 object, turn it into an unbundler.
178 # This is a bundle20 object, turn it into an unbundler.
179 # This little dance should be dropped eventually when the
179 # This little dance should be dropped eventually when the
180 # API is finally improved.
180 # API is finally improved.
181 stream = util.chunkbuffer(ret.getchunks())
181 stream = util.chunkbuffer(ret.getchunks())
182 ret = bundle2.getunbundler(self.ui, stream)
182 ret = bundle2.getunbundler(self.ui, stream)
183 return ret
183 return ret
184 except Exception as exc:
184 except Exception as exc:
185 # If the exception contains output salvaged from a bundle2
185 # If the exception contains output salvaged from a bundle2
186 # reply, we need to make sure it is printed before continuing
186 # reply, we need to make sure it is printed before continuing
187 # to fail. So we build a bundle2 with such output and consume
187 # to fail. So we build a bundle2 with such output and consume
188 # it directly.
188 # it directly.
189 #
189 #
190 # This is not very elegant but allows a "simple" solution for
190 # This is not very elegant but allows a "simple" solution for
191 # issue4594
191 # issue4594
192 output = getattr(exc, '_bundle2salvagedoutput', ())
192 output = getattr(exc, '_bundle2salvagedoutput', ())
193 if output:
193 if output:
194 bundler = bundle2.bundle20(self._repo.ui)
194 bundler = bundle2.bundle20(self._repo.ui)
195 for out in output:
195 for out in output:
196 bundler.addpart(out)
196 bundler.addpart(out)
197 stream = util.chunkbuffer(bundler.getchunks())
197 stream = util.chunkbuffer(bundler.getchunks())
198 b = bundle2.getunbundler(self.ui, stream)
198 b = bundle2.getunbundler(self.ui, stream)
199 bundle2.processbundle(self._repo, b)
199 bundle2.processbundle(self._repo, b)
200 raise
200 raise
201 except error.PushRaced as exc:
201 except error.PushRaced as exc:
202 raise error.ResponseError(_('push failed:'), str(exc))
202 raise error.ResponseError(_('push failed:'), str(exc))
203
203
204 def lock(self):
204 def lock(self):
205 return self._repo.lock()
205 return self._repo.lock()
206
206
207 def addchangegroup(self, cg, source, url):
207 def addchangegroup(self, cg, source, url):
208 return cg.apply(self._repo, source, url)
208 return cg.apply(self._repo, source, url)
209
209
210 def pushkey(self, namespace, key, old, new):
210 def pushkey(self, namespace, key, old, new):
211 return self._repo.pushkey(namespace, key, old, new)
211 return self._repo.pushkey(namespace, key, old, new)
212
212
213 def listkeys(self, namespace):
213 def listkeys(self, namespace):
214 return self._repo.listkeys(namespace)
214 return self._repo.listkeys(namespace)
215
215
216 def debugwireargs(self, one, two, three=None, four=None, five=None):
216 def debugwireargs(self, one, two, three=None, four=None, five=None):
217 '''used to test argument passing over the wire'''
217 '''used to test argument passing over the wire'''
218 return "%s %s %s %s %s" % (one, two, three, four, five)
218 return "%s %s %s %s %s" % (one, two, three, four, five)
219
219
220 class locallegacypeer(localpeer):
220 class locallegacypeer(localpeer):
221 '''peer extension which implements legacy methods too; used for tests with
221 '''peer extension which implements legacy methods too; used for tests with
222 restricted capabilities'''
222 restricted capabilities'''
223
223
224 def __init__(self, repo):
224 def __init__(self, repo):
225 localpeer.__init__(self, repo, caps=legacycaps)
225 localpeer.__init__(self, repo, caps=legacycaps)
226
226
227 def branches(self, nodes):
227 def branches(self, nodes):
228 return self._repo.branches(nodes)
228 return self._repo.branches(nodes)
229
229
230 def between(self, pairs):
230 def between(self, pairs):
231 return self._repo.between(pairs)
231 return self._repo.between(pairs)
232
232
233 def changegroup(self, basenodes, source):
233 def changegroup(self, basenodes, source):
234 return changegroup.changegroup(self._repo, basenodes, source)
234 return changegroup.changegroup(self._repo, basenodes, source)
235
235
236 def changegroupsubset(self, bases, heads, source):
236 def changegroupsubset(self, bases, heads, source):
237 return changegroup.changegroupsubset(self._repo, bases, heads, source)
237 return changegroup.changegroupsubset(self._repo, bases, heads, source)
238
238
239 class localrepository(object):
239 class localrepository(object):
240
240
241 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
241 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
242 'manifestv2'))
242 'manifestv2'))
243 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
243 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
244 'dotencode'))
244 'dotencode'))
245 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
245 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
246 filtername = None
246 filtername = None
247
247
248 # a list of (ui, featureset) functions.
248 # a list of (ui, featureset) functions.
249 # only functions defined in modules of enabled extensions are invoked
249 # only functions defined in modules of enabled extensions are invoked
250 featuresetupfuncs = set()
250 featuresetupfuncs = set()
251
251
252 def __init__(self, baseui, path, create=False):
252 def __init__(self, baseui, path, create=False):
253 self.requirements = set()
253 self.requirements = set()
254 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
254 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
255 self.wopener = self.wvfs
255 self.wopener = self.wvfs
256 self.root = self.wvfs.base
256 self.root = self.wvfs.base
257 self.path = self.wvfs.join(".hg")
257 self.path = self.wvfs.join(".hg")
258 self.origroot = path
258 self.origroot = path
259 self.auditor = pathutil.pathauditor(self.root, self._checknested)
259 self.auditor = pathutil.pathauditor(self.root, self._checknested)
260 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
260 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
261 realfs=False)
261 realfs=False)
262 self.vfs = scmutil.vfs(self.path)
262 self.vfs = scmutil.vfs(self.path)
263 self.opener = self.vfs
263 self.opener = self.vfs
264 self.baseui = baseui
264 self.baseui = baseui
265 self.ui = baseui.copy()
265 self.ui = baseui.copy()
266 self.ui.copy = baseui.copy # prevent copying repo configuration
266 self.ui.copy = baseui.copy # prevent copying repo configuration
267 # A list of callbacks to shape the phase if no data were found.
267 # A list of callbacks to shape the phase if no data were found.
268 # Callbacks are in the form: func(repo, roots) --> processed root.
268 # Callbacks are in the form: func(repo, roots) --> processed root.
269 # This list is to be filled by extensions during repo setup
269 # This list is to be filled by extensions during repo setup
270 self._phasedefaults = []
270 self._phasedefaults = []
271 try:
271 try:
272 self.ui.readconfig(self.join("hgrc"), self.root)
272 self.ui.readconfig(self.join("hgrc"), self.root)
273 extensions.loadall(self.ui)
273 extensions.loadall(self.ui)
274 except IOError:
274 except IOError:
275 pass
275 pass
276
276
277 if self.featuresetupfuncs:
277 if self.featuresetupfuncs:
278 self.supported = set(self._basesupported) # use private copy
278 self.supported = set(self._basesupported) # use private copy
279 extmods = set(m.__name__ for n, m
279 extmods = set(m.__name__ for n, m
280 in extensions.extensions(self.ui))
280 in extensions.extensions(self.ui))
281 for setupfunc in self.featuresetupfuncs:
281 for setupfunc in self.featuresetupfuncs:
282 if setupfunc.__module__ in extmods:
282 if setupfunc.__module__ in extmods:
283 setupfunc(self.ui, self.supported)
283 setupfunc(self.ui, self.supported)
284 else:
284 else:
285 self.supported = self._basesupported
285 self.supported = self._basesupported
286
286
287 if not self.vfs.isdir():
287 if not self.vfs.isdir():
288 if create:
288 if create:
289 self.requirements = newreporequirements(self)
289 self.requirements = newreporequirements(self)
290
290
291 if not self.wvfs.exists():
291 if not self.wvfs.exists():
292 self.wvfs.makedirs()
292 self.wvfs.makedirs()
293 self.vfs.makedir(notindexed=True)
293 self.vfs.makedir(notindexed=True)
294
294
295 if 'store' in self.requirements:
295 if 'store' in self.requirements:
296 self.vfs.mkdir("store")
296 self.vfs.mkdir("store")
297
297
298 # create an invalid changelog
298 # create an invalid changelog
299 self.vfs.append(
299 self.vfs.append(
300 "00changelog.i",
300 "00changelog.i",
301 '\0\0\0\2' # represents revlogv2
301 '\0\0\0\2' # represents revlogv2
302 ' dummy changelog to prevent using the old repo layout'
302 ' dummy changelog to prevent using the old repo layout'
303 )
303 )
304 else:
304 else:
305 raise error.RepoError(_("repository %s not found") % path)
305 raise error.RepoError(_("repository %s not found") % path)
306 elif create:
306 elif create:
307 raise error.RepoError(_("repository %s already exists") % path)
307 raise error.RepoError(_("repository %s already exists") % path)
308 else:
308 else:
309 try:
309 try:
310 self.requirements = scmutil.readrequires(
310 self.requirements = scmutil.readrequires(
311 self.vfs, self.supported)
311 self.vfs, self.supported)
312 except IOError as inst:
312 except IOError as inst:
313 if inst.errno != errno.ENOENT:
313 if inst.errno != errno.ENOENT:
314 raise
314 raise
315
315
316 self.sharedpath = self.path
316 self.sharedpath = self.path
317 try:
317 try:
318 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
318 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
319 realpath=True)
319 realpath=True)
320 s = vfs.base
320 s = vfs.base
321 if not vfs.exists():
321 if not vfs.exists():
322 raise error.RepoError(
322 raise error.RepoError(
323 _('.hg/sharedpath points to nonexistent directory %s') % s)
323 _('.hg/sharedpath points to nonexistent directory %s') % s)
324 self.sharedpath = s
324 self.sharedpath = s
325 except IOError as inst:
325 except IOError as inst:
326 if inst.errno != errno.ENOENT:
326 if inst.errno != errno.ENOENT:
327 raise
327 raise
328
328
329 self.store = store.store(
329 self.store = store.store(
330 self.requirements, self.sharedpath, scmutil.vfs)
330 self.requirements, self.sharedpath, scmutil.vfs)
331 self.spath = self.store.path
331 self.spath = self.store.path
332 self.svfs = self.store.vfs
332 self.svfs = self.store.vfs
333 self.sjoin = self.store.join
333 self.sjoin = self.store.join
334 self.vfs.createmode = self.store.createmode
334 self.vfs.createmode = self.store.createmode
335 self._applyopenerreqs()
335 self._applyopenerreqs()
336 if create:
336 if create:
337 self._writerequirements()
337 self._writerequirements()
338
338
339 self._dirstatevalidatewarned = False
339 self._dirstatevalidatewarned = False
340
340
341 self._branchcaches = {}
341 self._branchcaches = {}
342 self._revbranchcache = None
342 self._revbranchcache = None
343 self.filterpats = {}
343 self.filterpats = {}
344 self._datafilters = {}
344 self._datafilters = {}
345 self._transref = self._lockref = self._wlockref = None
345 self._transref = self._lockref = self._wlockref = None
346
346
347 # A cache for various files under .hg/ that tracks file changes,
347 # A cache for various files under .hg/ that tracks file changes,
348 # (used by the filecache decorator)
348 # (used by the filecache decorator)
349 #
349 #
350 # Maps a property name to its util.filecacheentry
350 # Maps a property name to its util.filecacheentry
351 self._filecache = {}
351 self._filecache = {}
352
352
353 # hold sets of revisions to be filtered
353 # hold sets of revisions to be filtered
354 # should be cleared when something might have changed the filter value:
354 # should be cleared when something might have changed the filter value:
355 # - new changesets,
355 # - new changesets,
356 # - phase change,
356 # - phase change,
357 # - new obsolescence marker,
357 # - new obsolescence marker,
358 # - working directory parent change,
358 # - working directory parent change,
359 # - bookmark changes
359 # - bookmark changes
360 self.filteredrevcache = {}
360 self.filteredrevcache = {}
361
361
362 # generic mapping between names and nodes
362 # generic mapping between names and nodes
363 self.names = namespaces.namespaces()
363 self.names = namespaces.namespaces()
364
364
365 def close(self):
365 def close(self):
366 self._writecaches()
366 self._writecaches()
367
367
368 def _writecaches(self):
368 def _writecaches(self):
369 if self._revbranchcache:
369 if self._revbranchcache:
370 self._revbranchcache.write()
370 self._revbranchcache.write()
371
371
372 def _restrictcapabilities(self, caps):
372 def _restrictcapabilities(self, caps):
373 if self.ui.configbool('experimental', 'bundle2-advertise', True):
373 if self.ui.configbool('experimental', 'bundle2-advertise', True):
374 caps = set(caps)
374 caps = set(caps)
375 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
375 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
376 caps.add('bundle2=' + urlreq.quote(capsblob))
376 caps.add('bundle2=' + urlreq.quote(capsblob))
377 return caps
377 return caps
378
378
379 def _applyopenerreqs(self):
379 def _applyopenerreqs(self):
380 self.svfs.options = dict((r, 1) for r in self.requirements
380 self.svfs.options = dict((r, 1) for r in self.requirements
381 if r in self.openerreqs)
381 if r in self.openerreqs)
382 # experimental config: format.chunkcachesize
382 # experimental config: format.chunkcachesize
383 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
383 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
384 if chunkcachesize is not None:
384 if chunkcachesize is not None:
385 self.svfs.options['chunkcachesize'] = chunkcachesize
385 self.svfs.options['chunkcachesize'] = chunkcachesize
386 # experimental config: format.maxchainlen
386 # experimental config: format.maxchainlen
387 maxchainlen = self.ui.configint('format', 'maxchainlen')
387 maxchainlen = self.ui.configint('format', 'maxchainlen')
388 if maxchainlen is not None:
388 if maxchainlen is not None:
389 self.svfs.options['maxchainlen'] = maxchainlen
389 self.svfs.options['maxchainlen'] = maxchainlen
390 # experimental config: format.manifestcachesize
390 # experimental config: format.manifestcachesize
391 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
391 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
392 if manifestcachesize is not None:
392 if manifestcachesize is not None:
393 self.svfs.options['manifestcachesize'] = manifestcachesize
393 self.svfs.options['manifestcachesize'] = manifestcachesize
394 # experimental config: format.aggressivemergedeltas
394 # experimental config: format.aggressivemergedeltas
395 aggressivemergedeltas = self.ui.configbool('format',
395 aggressivemergedeltas = self.ui.configbool('format',
396 'aggressivemergedeltas', False)
396 'aggressivemergedeltas', False)
397 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
397 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
398 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
398 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
399
399
400 def _writerequirements(self):
400 def _writerequirements(self):
401 scmutil.writerequires(self.vfs, self.requirements)
401 scmutil.writerequires(self.vfs, self.requirements)
402
402
403 def _checknested(self, path):
403 def _checknested(self, path):
404 """Determine if path is a legal nested repository."""
404 """Determine if path is a legal nested repository."""
405 if not path.startswith(self.root):
405 if not path.startswith(self.root):
406 return False
406 return False
407 subpath = path[len(self.root) + 1:]
407 subpath = path[len(self.root) + 1:]
408 normsubpath = util.pconvert(subpath)
408 normsubpath = util.pconvert(subpath)
409
409
410 # XXX: Checking against the current working copy is wrong in
410 # XXX: Checking against the current working copy is wrong in
411 # the sense that it can reject things like
411 # the sense that it can reject things like
412 #
412 #
413 # $ hg cat -r 10 sub/x.txt
413 # $ hg cat -r 10 sub/x.txt
414 #
414 #
415 # if sub/ is no longer a subrepository in the working copy
415 # if sub/ is no longer a subrepository in the working copy
416 # parent revision.
416 # parent revision.
417 #
417 #
418 # However, it can of course also allow things that would have
418 # However, it can of course also allow things that would have
419 # been rejected before, such as the above cat command if sub/
419 # been rejected before, such as the above cat command if sub/
420 # is a subrepository now, but was a normal directory before.
420 # is a subrepository now, but was a normal directory before.
421 # The old path auditor would have rejected by mistake since it
421 # The old path auditor would have rejected by mistake since it
422 # panics when it sees sub/.hg/.
422 # panics when it sees sub/.hg/.
423 #
423 #
424 # All in all, checking against the working copy seems sensible
424 # All in all, checking against the working copy seems sensible
425 # since we want to prevent access to nested repositories on
425 # since we want to prevent access to nested repositories on
426 # the filesystem *now*.
426 # the filesystem *now*.
427 ctx = self[None]
427 ctx = self[None]
428 parts = util.splitpath(subpath)
428 parts = util.splitpath(subpath)
429 while parts:
429 while parts:
430 prefix = '/'.join(parts)
430 prefix = '/'.join(parts)
431 if prefix in ctx.substate:
431 if prefix in ctx.substate:
432 if prefix == normsubpath:
432 if prefix == normsubpath:
433 return True
433 return True
434 else:
434 else:
435 sub = ctx.sub(prefix)
435 sub = ctx.sub(prefix)
436 return sub.checknested(subpath[len(prefix) + 1:])
436 return sub.checknested(subpath[len(prefix) + 1:])
437 else:
437 else:
438 parts.pop()
438 parts.pop()
439 return False
439 return False
440
440
441 def peer(self):
441 def peer(self):
442 return localpeer(self) # not cached to avoid reference cycle
442 return localpeer(self) # not cached to avoid reference cycle
443
443
444 def unfiltered(self):
444 def unfiltered(self):
445 """Return unfiltered version of the repository
445 """Return unfiltered version of the repository
446
446
447 Intended to be overwritten by filtered repo."""
447 Intended to be overwritten by filtered repo."""
448 return self
448 return self
449
449
450 def filtered(self, name):
450 def filtered(self, name):
451 """Return a filtered version of a repository"""
451 """Return a filtered version of a repository"""
452 # build a new class with the mixin and the current class
452 # build a new class with the mixin and the current class
453 # (possibly subclass of the repo)
453 # (possibly subclass of the repo)
454 class proxycls(repoview.repoview, self.unfiltered().__class__):
454 class proxycls(repoview.repoview, self.unfiltered().__class__):
455 pass
455 pass
456 return proxycls(self, name)
456 return proxycls(self, name)
457
457
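filtered() is an instance of the dynamic mix-in pattern: a throwaway subclass combines the view logic with whatever concrete repo class is actually in use, so isinstance checks against the original class keep working. A generic sketch of the same technique, with hypothetical names:

    class view(object):
        """mix-in that narrows what the wrapped object exposes"""
        def __init__(self, base, name):
            self._base = base
            self._name = name

    def makeproxy(obj, name):
        # build a one-off class so the proxy is also an instance of type(obj)
        class proxycls(view, obj.__class__):
            pass
        return proxycls(obj, name)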
458 @repofilecache('bookmarks', 'bookmarks.current')
458 @repofilecache('bookmarks', 'bookmarks.current')
459 def _bookmarks(self):
459 def _bookmarks(self):
460 return bookmarks.bmstore(self)
460 return bookmarks.bmstore(self)
461
461
462 @property
462 @property
463 def _activebookmark(self):
463 def _activebookmark(self):
464 return self._bookmarks.active
464 return self._bookmarks.active
465
465
466 def bookmarkheads(self, bookmark):
466 def bookmarkheads(self, bookmark):
467 name = bookmark.split('@', 1)[0]
467 name = bookmark.split('@', 1)[0]
468 heads = []
468 heads = []
469 for mark, n in self._bookmarks.iteritems():
469 for mark, n in self._bookmarks.iteritems():
470 if mark.split('@', 1)[0] == name:
470 if mark.split('@', 1)[0] == name:
471 heads.append(n)
471 heads.append(n)
472 return heads
472 return heads
473
473
474 # _phaserevs and _phasesets depend on changelog. What we need is to
474 # _phaserevs and _phasesets depend on changelog. What we need is to
475 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
475 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
476 # can't be easily expressed in the filecache mechanism.
476 # can't be easily expressed in the filecache mechanism.
477 @storecache('phaseroots', '00changelog.i')
477 @storecache('phaseroots', '00changelog.i')
478 def _phasecache(self):
478 def _phasecache(self):
479 return phases.phasecache(self, self._phasedefaults)
479 return phases.phasecache(self, self._phasedefaults)
480
480
481 @storecache('obsstore')
481 @storecache('obsstore')
482 def obsstore(self):
482 def obsstore(self):
483 # read default format for new obsstore.
483 # read default format for new obsstore.
484 # developer config: format.obsstore-version
484 # developer config: format.obsstore-version
485 defaultformat = self.ui.configint('format', 'obsstore-version', None)
485 defaultformat = self.ui.configint('format', 'obsstore-version', None)
486 # rely on obsstore class default when possible.
486 # rely on obsstore class default when possible.
487 kwargs = {}
487 kwargs = {}
488 if defaultformat is not None:
488 if defaultformat is not None:
489 kwargs['defaultformat'] = defaultformat
489 kwargs['defaultformat'] = defaultformat
490 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
490 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
491 store = obsolete.obsstore(self.svfs, readonly=readonly,
491 store = obsolete.obsstore(self.svfs, readonly=readonly,
492 **kwargs)
492 **kwargs)
493 if store and readonly:
493 if store and readonly:
494 self.ui.warn(
494 self.ui.warn(
495 _('obsolete feature not enabled but %i markers found!\n')
495 _('obsolete feature not enabled but %i markers found!\n')
496 % len(list(store)))
496 % len(list(store)))
497 return store
497 return store
498
498
499 @storecache('00changelog.i')
499 @storecache('00changelog.i')
500 def changelog(self):
500 def changelog(self):
501 c = changelog.changelog(self.svfs)
501 c = changelog.changelog(self.svfs)
502 if 'HG_PENDING' in os.environ:
502 if 'HG_PENDING' in encoding.environ:
503 p = os.environ['HG_PENDING']
503 p = encoding.environ['HG_PENDING']
504 if p.startswith(self.root):
504 if p.startswith(self.root):
505 c.readpending('00changelog.i.a')
505 c.readpending('00changelog.i.a')
506 return c
506 return c
507
507
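This hunk is one of the substantive changes in the series: on Python 3, os.environ holds unicode strings, so lookups move to the bytes-oriented encoding.environ mapping, which behaves the same on both Python versions. A minimal sketch of the lookup pattern the new code follows (the repo variable is assumed):

    from mercurial import encoding

    # encoding.environ is a dict-like view whose keys and values are bytes
    pending = encoding.environ.get('HG_PENDING')
    if pending is not None and pending.startswith(repo.root):
        # a pending transaction exists; also read the pending changelog data
        repo.changelog.readpending('00changelog.i.a')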
508 def _constructmanifest(self):
508 def _constructmanifest(self):
509 # This is a temporary function while we migrate from manifest to
509 # This is a temporary function while we migrate from manifest to
510 # manifestlog. It allows bundlerepo and unionrepo to intercept the
510 # manifestlog. It allows bundlerepo and unionrepo to intercept the
511 # manifest creation.
511 # manifest creation.
512 return manifest.manifestrevlog(self.svfs)
512 return manifest.manifestrevlog(self.svfs)
513
513
514 @storecache('00manifest.i')
514 @storecache('00manifest.i')
515 def manifestlog(self):
515 def manifestlog(self):
516 return manifest.manifestlog(self.svfs, self)
516 return manifest.manifestlog(self.svfs, self)
517
517
518 @repofilecache('dirstate')
518 @repofilecache('dirstate')
519 def dirstate(self):
519 def dirstate(self):
520 return dirstate.dirstate(self.vfs, self.ui, self.root,
520 return dirstate.dirstate(self.vfs, self.ui, self.root,
521 self._dirstatevalidate)
521 self._dirstatevalidate)
522
522
523 def _dirstatevalidate(self, node):
523 def _dirstatevalidate(self, node):
524 try:
524 try:
525 self.changelog.rev(node)
525 self.changelog.rev(node)
526 return node
526 return node
527 except error.LookupError:
527 except error.LookupError:
528 if not self._dirstatevalidatewarned:
528 if not self._dirstatevalidatewarned:
529 self._dirstatevalidatewarned = True
529 self._dirstatevalidatewarned = True
530 self.ui.warn(_("warning: ignoring unknown"
530 self.ui.warn(_("warning: ignoring unknown"
531 " working parent %s!\n") % short(node))
531 " working parent %s!\n") % short(node))
532 return nullid
532 return nullid
533
533
534 def __getitem__(self, changeid):
534 def __getitem__(self, changeid):
535 if changeid is None or changeid == wdirrev:
535 if changeid is None or changeid == wdirrev:
536 return context.workingctx(self)
536 return context.workingctx(self)
537 if isinstance(changeid, slice):
537 if isinstance(changeid, slice):
538 return [context.changectx(self, i)
538 return [context.changectx(self, i)
539 for i in xrange(*changeid.indices(len(self)))
539 for i in xrange(*changeid.indices(len(self)))
540 if i not in self.changelog.filteredrevs]
540 if i not in self.changelog.filteredrevs]
541 return context.changectx(self, changeid)
541 return context.changectx(self, changeid)
542
542
543 def __contains__(self, changeid):
543 def __contains__(self, changeid):
544 try:
544 try:
545 self[changeid]
545 self[changeid]
546 return True
546 return True
547 except error.RepoLookupError:
547 except error.RepoLookupError:
548 return False
548 return False
549
549
550 def __nonzero__(self):
550 def __nonzero__(self):
551 return True
551 return True
552
552
553 def __len__(self):
553 def __len__(self):
554 return len(self.changelog)
554 return len(self.changelog)
555
555
556 def __iter__(self):
556 def __iter__(self):
557 return iter(self.changelog)
557 return iter(self.changelog)
558
558
559 def revs(self, expr, *args):
559 def revs(self, expr, *args):
560 '''Find revisions matching a revset.
560 '''Find revisions matching a revset.
561
561
562 The revset is specified as a string ``expr`` that may contain
562 The revset is specified as a string ``expr`` that may contain
563 %-formatting to escape certain types. See ``revset.formatspec``.
563 %-formatting to escape certain types. See ``revset.formatspec``.
564
564
565 Revset aliases from the configuration are not expanded. To expand
565 Revset aliases from the configuration are not expanded. To expand
566 user aliases, consider calling ``scmutil.revrange()``.
566 user aliases, consider calling ``scmutil.revrange()``.
567
567
568 Returns a revset.abstractsmartset, which is a list-like interface
568 Returns a revset.abstractsmartset, which is a list-like interface
569 that contains integer revisions.
569 that contains integer revisions.
570 '''
570 '''
571 expr = revset.formatspec(expr, *args)
571 expr = revset.formatspec(expr, *args)
572 m = revset.match(None, expr)
572 m = revset.match(None, expr)
573 return m(self)
573 return m(self)
574
574
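The %-formatting mentioned in the docstring lets callers splice typed values into a revset safely instead of concatenating strings by hand. A small usage sketch (repo assumed; see revset.formatspec for the full escape table):

    # '%s' escapes a plain string argument, '%d' an integer revision
    for r in repo.revs('branch(%s) and not public()', 'default'):
        print(r)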
575 def set(self, expr, *args):
575 def set(self, expr, *args):
576 '''Find revisions matching a revset and emit changectx instances.
576 '''Find revisions matching a revset and emit changectx instances.
577
577
578 This is a convenience wrapper around ``revs()`` that iterates the
578 This is a convenience wrapper around ``revs()`` that iterates the
579 result and is a generator of changectx instances.
579 result and is a generator of changectx instances.
580
580
581 Revset aliases from the configuration are not expanded. To expand
581 Revset aliases from the configuration are not expanded. To expand
582 user aliases, consider calling ``scmutil.revrange()``.
582 user aliases, consider calling ``scmutil.revrange()``.
583 '''
583 '''
584 for r in self.revs(expr, *args):
584 for r in self.revs(expr, *args):
585 yield self[r]
585 yield self[r]
586
586
587 def url(self):
587 def url(self):
588 return 'file:' + self.root
588 return 'file:' + self.root
589
589
590 def hook(self, name, throw=False, **args):
590 def hook(self, name, throw=False, **args):
591 """Call a hook, passing this repo instance.
591 """Call a hook, passing this repo instance.
592
592
593 This is a convenience method to aid invoking hooks. Extensions likely
593 This is a convenience method to aid invoking hooks. Extensions likely
594 won't call this unless they have registered a custom hook or are
594 won't call this unless they have registered a custom hook or are
595 replacing code that is expected to call a hook.
595 replacing code that is expected to call a hook.
596 """
596 """
597 return hook.hook(self.ui, self, name, throw, **args)
597 return hook.hook(self.ui, self, name, throw, **args)
598
598
599 @unfilteredmethod
599 @unfilteredmethod
600 def _tag(self, names, node, message, local, user, date, extra=None,
600 def _tag(self, names, node, message, local, user, date, extra=None,
601 editor=False):
601 editor=False):
602 if isinstance(names, str):
602 if isinstance(names, str):
603 names = (names,)
603 names = (names,)
604
604
605 branches = self.branchmap()
605 branches = self.branchmap()
606 for name in names:
606 for name in names:
607 self.hook('pretag', throw=True, node=hex(node), tag=name,
607 self.hook('pretag', throw=True, node=hex(node), tag=name,
608 local=local)
608 local=local)
609 if name in branches:
609 if name in branches:
610 self.ui.warn(_("warning: tag %s conflicts with existing"
610 self.ui.warn(_("warning: tag %s conflicts with existing"
611 " branch name\n") % name)
611 " branch name\n") % name)
612
612
613 def writetags(fp, names, munge, prevtags):
613 def writetags(fp, names, munge, prevtags):
614 fp.seek(0, 2)
614 fp.seek(0, 2)
615 if prevtags and prevtags[-1] != '\n':
615 if prevtags and prevtags[-1] != '\n':
616 fp.write('\n')
616 fp.write('\n')
617 for name in names:
617 for name in names:
618 if munge:
618 if munge:
619 m = munge(name)
619 m = munge(name)
620 else:
620 else:
621 m = name
621 m = name
622
622
623 if (self._tagscache.tagtypes and
623 if (self._tagscache.tagtypes and
624 name in self._tagscache.tagtypes):
624 name in self._tagscache.tagtypes):
625 old = self.tags().get(name, nullid)
625 old = self.tags().get(name, nullid)
626 fp.write('%s %s\n' % (hex(old), m))
626 fp.write('%s %s\n' % (hex(old), m))
627 fp.write('%s %s\n' % (hex(node), m))
627 fp.write('%s %s\n' % (hex(node), m))
628 fp.close()
628 fp.close()
629
629
630 prevtags = ''
630 prevtags = ''
631 if local:
631 if local:
632 try:
632 try:
633 fp = self.vfs('localtags', 'r+')
633 fp = self.vfs('localtags', 'r+')
634 except IOError:
634 except IOError:
635 fp = self.vfs('localtags', 'a')
635 fp = self.vfs('localtags', 'a')
636 else:
636 else:
637 prevtags = fp.read()
637 prevtags = fp.read()
638
638
639 # local tags are stored in the current charset
639 # local tags are stored in the current charset
640 writetags(fp, names, None, prevtags)
640 writetags(fp, names, None, prevtags)
641 for name in names:
641 for name in names:
642 self.hook('tag', node=hex(node), tag=name, local=local)
642 self.hook('tag', node=hex(node), tag=name, local=local)
643 return
643 return
644
644
645 try:
645 try:
646 fp = self.wfile('.hgtags', 'rb+')
646 fp = self.wfile('.hgtags', 'rb+')
647 except IOError as e:
647 except IOError as e:
648 if e.errno != errno.ENOENT:
648 if e.errno != errno.ENOENT:
649 raise
649 raise
650 fp = self.wfile('.hgtags', 'ab')
650 fp = self.wfile('.hgtags', 'ab')
651 else:
651 else:
652 prevtags = fp.read()
652 prevtags = fp.read()
653
653
654 # committed tags are stored in UTF-8
654 # committed tags are stored in UTF-8
655 writetags(fp, names, encoding.fromlocal, prevtags)
655 writetags(fp, names, encoding.fromlocal, prevtags)
656
656
657 fp.close()
657 fp.close()
658
658
659 self.invalidatecaches()
659 self.invalidatecaches()
660
660
661 if '.hgtags' not in self.dirstate:
661 if '.hgtags' not in self.dirstate:
662 self[None].add(['.hgtags'])
662 self[None].add(['.hgtags'])
663
663
664 m = matchmod.exact(self.root, '', ['.hgtags'])
664 m = matchmod.exact(self.root, '', ['.hgtags'])
665 tagnode = self.commit(message, user, date, extra=extra, match=m,
665 tagnode = self.commit(message, user, date, extra=extra, match=m,
666 editor=editor)
666 editor=editor)
667
667
668 for name in names:
668 for name in names:
669 self.hook('tag', node=hex(node), tag=name, local=local)
669 self.hook('tag', node=hex(node), tag=name, local=local)
670
670
671 return tagnode
671 return tagnode
672
672
673 def tag(self, names, node, message, local, user, date, editor=False):
673 def tag(self, names, node, message, local, user, date, editor=False):
674 '''tag a revision with one or more symbolic names.
674 '''tag a revision with one or more symbolic names.
675
675
676 names is a list of strings or, when adding a single tag, names may be a
676 names is a list of strings or, when adding a single tag, names may be a
677 string.
677 string.
678
678
679 if local is True, the tags are stored in a per-repository file.
679 if local is True, the tags are stored in a per-repository file.
680 otherwise, they are stored in the .hgtags file, and a new
680 otherwise, they are stored in the .hgtags file, and a new
681 changeset is committed with the change.
681 changeset is committed with the change.
682
682
683 keyword arguments:
683 keyword arguments:
684
684
685 local: whether to store tags in non-version-controlled file
685 local: whether to store tags in non-version-controlled file
686 (default False)
686 (default False)
687
687
688 message: commit message to use if committing
688 message: commit message to use if committing
689
689
690 user: name of user to use if committing
690 user: name of user to use if committing
691
691
692 date: date tuple to use if committing'''
692 date: date tuple to use if committing'''
693
693
694 if not local:
694 if not local:
695 m = matchmod.exact(self.root, '', ['.hgtags'])
695 m = matchmod.exact(self.root, '', ['.hgtags'])
696 if any(self.status(match=m, unknown=True, ignored=True)):
696 if any(self.status(match=m, unknown=True, ignored=True)):
697 raise error.Abort(_('working copy of .hgtags is changed'),
697 raise error.Abort(_('working copy of .hgtags is changed'),
698 hint=_('please commit .hgtags manually'))
698 hint=_('please commit .hgtags manually'))
699
699
700 self.tags() # instantiate the cache
700 self.tags() # instantiate the cache
701 self._tag(names, node, message, local, user, date, editor=editor)
701 self._tag(names, node, message, local, user, date, editor=editor)
702
702
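As a usage sketch of this public entry point (argument values hypothetical), a non-local tag stages .hgtags and commits it in one step:

    # records 'node' under the name v1.0 in .hgtags and commits the change;
    # with local=True it would only append to .hg/localtags instead
    repo.tag(['v1.0'], node, 'Added tag v1.0', local=False,
             user='editor <editor@example.com>', date=None)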
703 @filteredpropertycache
703 @filteredpropertycache
704 def _tagscache(self):
704 def _tagscache(self):
705 '''Returns a tagscache object that contains various tags-related
705 '''Returns a tagscache object that contains various tags-related
706 caches.'''
706 caches.'''
707
707
708 # This simplifies its cache management by having one decorated
708 # This simplifies its cache management by having one decorated
709 # function (this one) and the rest simply fetch things from it.
709 # function (this one) and the rest simply fetch things from it.
710 class tagscache(object):
710 class tagscache(object):
711 def __init__(self):
711 def __init__(self):
712 # These two define the set of tags for this repository. tags
712 # These two define the set of tags for this repository. tags
713 # maps tag name to node; tagtypes maps tag name to 'global' or
713 # maps tag name to node; tagtypes maps tag name to 'global' or
714 # 'local'. (Global tags are defined by .hgtags across all
714 # 'local'. (Global tags are defined by .hgtags across all
715 # heads, and local tags are defined in .hg/localtags.)
715 # heads, and local tags are defined in .hg/localtags.)
716 # They constitute the in-memory cache of tags.
716 # They constitute the in-memory cache of tags.
717 self.tags = self.tagtypes = None
717 self.tags = self.tagtypes = None
718
718
719 self.nodetagscache = self.tagslist = None
719 self.nodetagscache = self.tagslist = None
720
720
721 cache = tagscache()
721 cache = tagscache()
722 cache.tags, cache.tagtypes = self._findtags()
722 cache.tags, cache.tagtypes = self._findtags()
723
723
724 return cache
724 return cache
725
725
726 def tags(self):
726 def tags(self):
727 '''return a mapping of tag to node'''
727 '''return a mapping of tag to node'''
728 t = {}
728 t = {}
729 if self.changelog.filteredrevs:
729 if self.changelog.filteredrevs:
730 tags, tt = self._findtags()
730 tags, tt = self._findtags()
731 else:
731 else:
732 tags = self._tagscache.tags
732 tags = self._tagscache.tags
733 for k, v in tags.iteritems():
733 for k, v in tags.iteritems():
734 try:
734 try:
735 # ignore tags to unknown nodes
735 # ignore tags to unknown nodes
736 self.changelog.rev(v)
736 self.changelog.rev(v)
737 t[k] = v
737 t[k] = v
738 except (error.LookupError, ValueError):
738 except (error.LookupError, ValueError):
739 pass
739 pass
740 return t
740 return t
741
741
742 def _findtags(self):
742 def _findtags(self):
743 '''Do the hard work of finding tags. Return a pair of dicts
743 '''Do the hard work of finding tags. Return a pair of dicts
744 (tags, tagtypes) where tags maps tag name to node, and tagtypes
744 (tags, tagtypes) where tags maps tag name to node, and tagtypes
745 maps tag name to a string like \'global\' or \'local\'.
745 maps tag name to a string like \'global\' or \'local\'.
746 Subclasses or extensions are free to add their own tags, but
746 Subclasses or extensions are free to add their own tags, but
747 should be aware that the returned dicts will be retained for the
747 should be aware that the returned dicts will be retained for the
748 duration of the localrepo object.'''
748 duration of the localrepo object.'''
749
749
750 # XXX what tagtype should subclasses/extensions use? Currently
750 # XXX what tagtype should subclasses/extensions use? Currently
751 # mq and bookmarks add tags, but do not set the tagtype at all.
751 # mq and bookmarks add tags, but do not set the tagtype at all.
752 # Should each extension invent its own tag type? Should there
752 # Should each extension invent its own tag type? Should there
753 # be one tagtype for all such "virtual" tags? Or is the status
753 # be one tagtype for all such "virtual" tags? Or is the status
754 # quo fine?
754 # quo fine?
755
755
756 alltags = {} # map tag name to (node, hist)
756 alltags = {} # map tag name to (node, hist)
757 tagtypes = {}
757 tagtypes = {}
758
758
759 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
759 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
760 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
760 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
761
761
762 # Build the return dicts. Have to re-encode tag names because
762 # Build the return dicts. Have to re-encode tag names because
763 # the tags module always uses UTF-8 (in order not to lose info
763 # the tags module always uses UTF-8 (in order not to lose info
764 # writing to the cache), but the rest of Mercurial wants them in
764 # writing to the cache), but the rest of Mercurial wants them in
765 # local encoding.
765 # local encoding.
766 tags = {}
766 tags = {}
767 for (name, (node, hist)) in alltags.iteritems():
767 for (name, (node, hist)) in alltags.iteritems():
768 if node != nullid:
768 if node != nullid:
769 tags[encoding.tolocal(name)] = node
769 tags[encoding.tolocal(name)] = node
770 tags['tip'] = self.changelog.tip()
770 tags['tip'] = self.changelog.tip()
771 tagtypes = dict([(encoding.tolocal(name), value)
771 tagtypes = dict([(encoding.tolocal(name), value)
772 for (name, value) in tagtypes.iteritems()])
772 for (name, value) in tagtypes.iteritems()])
773 return (tags, tagtypes)
773 return (tags, tagtypes)
774
774
775 def tagtype(self, tagname):
775 def tagtype(self, tagname):
776 '''
776 '''
777 return the type of the given tag. result can be:
777 return the type of the given tag. result can be:
778
778
779 'local' : a local tag
779 'local' : a local tag
780 'global' : a global tag
780 'global' : a global tag
781 None : tag does not exist
781 None : tag does not exist
782 '''
782 '''
783
783
784 return self._tagscache.tagtypes.get(tagname)
784 return self._tagscache.tagtypes.get(tagname)
785
785
786 def tagslist(self):
786 def tagslist(self):
787 '''return a list of tags ordered by revision'''
787 '''return a list of tags ordered by revision'''
788 if not self._tagscache.tagslist:
788 if not self._tagscache.tagslist:
789 l = []
789 l = []
790 for t, n in self.tags().iteritems():
790 for t, n in self.tags().iteritems():
791 l.append((self.changelog.rev(n), t, n))
791 l.append((self.changelog.rev(n), t, n))
792 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
792 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
793
793
794 return self._tagscache.tagslist
794 return self._tagscache.tagslist
795
795
796 def nodetags(self, node):
796 def nodetags(self, node):
797 '''return the tags associated with a node'''
797 '''return the tags associated with a node'''
798 if not self._tagscache.nodetagscache:
798 if not self._tagscache.nodetagscache:
799 nodetagscache = {}
799 nodetagscache = {}
800 for t, n in self._tagscache.tags.iteritems():
800 for t, n in self._tagscache.tags.iteritems():
801 nodetagscache.setdefault(n, []).append(t)
801 nodetagscache.setdefault(n, []).append(t)
802 for tags in nodetagscache.itervalues():
802 for tags in nodetagscache.itervalues():
803 tags.sort()
803 tags.sort()
804 self._tagscache.nodetagscache = nodetagscache
804 self._tagscache.nodetagscache = nodetagscache
805 return self._tagscache.nodetagscache.get(node, [])
805 return self._tagscache.nodetagscache.get(node, [])
806
806
807 def nodebookmarks(self, node):
807 def nodebookmarks(self, node):
808 """return the list of bookmarks pointing to the specified node"""
808 """return the list of bookmarks pointing to the specified node"""
809 marks = []
809 marks = []
810 for bookmark, n in self._bookmarks.iteritems():
810 for bookmark, n in self._bookmarks.iteritems():
811 if n == node:
811 if n == node:
812 marks.append(bookmark)
812 marks.append(bookmark)
813 return sorted(marks)
813 return sorted(marks)
814
814
815 def branchmap(self):
815 def branchmap(self):
816 '''returns a dictionary {branch: [branchheads]} with branchheads
816 '''returns a dictionary {branch: [branchheads]} with branchheads
817 ordered by increasing revision number'''
817 ordered by increasing revision number'''
818 branchmap.updatecache(self)
818 branchmap.updatecache(self)
819 return self._branchcaches[self.filtername]
819 return self._branchcaches[self.filtername]
820
820
821 @unfilteredmethod
821 @unfilteredmethod
822 def revbranchcache(self):
822 def revbranchcache(self):
823 if not self._revbranchcache:
823 if not self._revbranchcache:
824 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
824 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
825 return self._revbranchcache
825 return self._revbranchcache
826
826
827 def branchtip(self, branch, ignoremissing=False):
827 def branchtip(self, branch, ignoremissing=False):
828 '''return the tip node for a given branch
828 '''return the tip node for a given branch
829
829
830 If ignoremissing is True, then this method will not raise an error.
830 If ignoremissing is True, then this method will not raise an error.
831 This is helpful for callers that only expect None for a missing branch
831 This is helpful for callers that only expect None for a missing branch
832 (e.g. namespace).
832 (e.g. namespace).
833
833
834 '''
834 '''
835 try:
835 try:
836 return self.branchmap().branchtip(branch)
836 return self.branchmap().branchtip(branch)
837 except KeyError:
837 except KeyError:
838 if not ignoremissing:
838 if not ignoremissing:
839 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
839 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
840 else:
840 else:
841 pass
841 pass
842
842
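A small usage sketch of the ignoremissing contract (branch name hypothetical):

    # returns None instead of raising RepoLookupError for an unknown branch
    tip = repo.branchtip('maybe-missing', ignoremissing=True)
    if tip is not None:
        dosomething(tip)    # placeholder for the caller's work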
843 def lookup(self, key):
843 def lookup(self, key):
844 return self[key].node()
844 return self[key].node()
845
845
846 def lookupbranch(self, key, remote=None):
846 def lookupbranch(self, key, remote=None):
847 repo = remote or self
847 repo = remote or self
848 if key in repo.branchmap():
848 if key in repo.branchmap():
849 return key
849 return key
850
850
851 repo = (remote and remote.local()) and remote or self
851 repo = (remote and remote.local()) and remote or self
852 return repo[key].branch()
852 return repo[key].branch()
853
853
854 def known(self, nodes):
854 def known(self, nodes):
855 cl = self.changelog
855 cl = self.changelog
856 nm = cl.nodemap
856 nm = cl.nodemap
857 filtered = cl.filteredrevs
857 filtered = cl.filteredrevs
858 result = []
858 result = []
859 for n in nodes:
859 for n in nodes:
860 r = nm.get(n)
860 r = nm.get(n)
861 resp = not (r is None or r in filtered)
861 resp = not (r is None or r in filtered)
862 result.append(resp)
862 result.append(resp)
863 return result
863 return result
864
864
865 def local(self):
865 def local(self):
866 return self
866 return self
867
867
868 def publishing(self):
868 def publishing(self):
869 # it's safe (and desirable) to trust the publish flag unconditionally
869 # it's safe (and desirable) to trust the publish flag unconditionally
870 # so that we don't finalize changes shared between users via ssh or nfs
870 # so that we don't finalize changes shared between users via ssh or nfs
871 return self.ui.configbool('phases', 'publish', True, untrusted=True)
871 return self.ui.configbool('phases', 'publish', True, untrusted=True)
872
872
873 def cancopy(self):
873 def cancopy(self):
874 # so statichttprepo's override of local() works
874 # so statichttprepo's override of local() works
875 if not self.local():
875 if not self.local():
876 return False
876 return False
877 if not self.publishing():
877 if not self.publishing():
878 return True
878 return True
879 # if publishing we can't copy if there is filtered content
879 # if publishing we can't copy if there is filtered content
880 return not self.filtered('visible').changelog.filteredrevs
880 return not self.filtered('visible').changelog.filteredrevs
881
881
882 def shared(self):
882 def shared(self):
883 '''the type of shared repository (None if not shared)'''
883 '''the type of shared repository (None if not shared)'''
884 if self.sharedpath != self.path:
884 if self.sharedpath != self.path:
885 return 'store'
885 return 'store'
886 return None
886 return None
887
887
888 def join(self, f, *insidef):
888 def join(self, f, *insidef):
889 return self.vfs.join(os.path.join(f, *insidef))
889 return self.vfs.join(os.path.join(f, *insidef))
890
890
891 def wjoin(self, f, *insidef):
891 def wjoin(self, f, *insidef):
892 return self.vfs.reljoin(self.root, f, *insidef)
892 return self.vfs.reljoin(self.root, f, *insidef)
893
893
894 def file(self, f):
894 def file(self, f):
895 if f[0] == '/':
895 if f[0] == '/':
896 f = f[1:]
896 f = f[1:]
897 return filelog.filelog(self.svfs, f)
897 return filelog.filelog(self.svfs, f)
898
898
899 def changectx(self, changeid):
899 def changectx(self, changeid):
900 return self[changeid]
900 return self[changeid]
901
901
902 def setparents(self, p1, p2=nullid):
902 def setparents(self, p1, p2=nullid):
903 self.dirstate.beginparentchange()
903 self.dirstate.beginparentchange()
904 copies = self.dirstate.setparents(p1, p2)
904 copies = self.dirstate.setparents(p1, p2)
905 pctx = self[p1]
905 pctx = self[p1]
906 if copies:
906 if copies:
907 # Adjust copy records; the dirstate cannot do it, as it
907 # Adjust copy records; the dirstate cannot do it, as it
908 # requires access to the parents' manifests. Preserve them
908 # requires access to the parents' manifests. Preserve them
909 # only for entries added to the first parent.
909 # only for entries added to the first parent.
910 for f in copies:
910 for f in copies:
911 if f not in pctx and copies[f] in pctx:
911 if f not in pctx and copies[f] in pctx:
912 self.dirstate.copy(copies[f], f)
912 self.dirstate.copy(copies[f], f)
913 if p2 == nullid:
913 if p2 == nullid:
914 for f, s in sorted(self.dirstate.copies().items()):
914 for f, s in sorted(self.dirstate.copies().items()):
915 if f not in pctx and s not in pctx:
915 if f not in pctx and s not in pctx:
916 self.dirstate.copy(None, f)
916 self.dirstate.copy(None, f)
917 self.dirstate.endparentchange()
917 self.dirstate.endparentchange()
918
918
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

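    # Example (illustrative sketch; the pattern and command below are
    # hypothetical): filter patterns come from hgrc sections such as
    # [encode] and [decode], e.g.
    #
    #     [encode]
    #     *.txt = tr -d '\r'
    #
    # _loadfilter('encode') would then yield one (matcher, fn, params)
    # triple whose fn pipes file data through that command via util.filter.
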
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

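    # Example (illustrative sketch; 'foo.txt' is a hypothetical tracked
    # file): wread() returns working-directory data with [encode] filters
    # applied, while wwrite() applies [decode] filters and honors flags:
    #
    #     data = repo.wread('foo.txt')
    #     repo.wwrite('foo.txt', data, repo[None].flags('foo.txt'))
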
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

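    # Example (illustrative sketch; 'example' is a hypothetical transaction
    # name): callers take the store lock first, then open a transaction and
    # close/release it explicitly, mirroring the pattern used by commit()
    # and commitctx() below:
    #
    #     with repo.lock():
    #         tr = repo.transaction('example')
    #         try:
    #             ...          # write to the store through tr
    #             tr.close()   # make the changes permanent
    #         finally:
    #             tr.release() # rolls back if close() was never reached
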
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

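    # Sketch of the journal/undo naming implied above (assuming undoname()
    # maps 'journal*' names to 'undo*' names): while a transaction runs,
    # state is mirrored into 'journal.*' files; on success aftertrans()
    # renames them to 'undo.*', which rollback() later replays, e.g.
    #
    #     .hg/store/journal            -> .hg/store/undo
    #     .hg/journal.dirstate         -> .hg/undo.dirstate
    #     .hg/store/journal.phaseroots -> .hg/store/undo.phaseroots
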
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

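    # Example (illustrative sketch): an extension that changed files under
    # .hg outside this repo object would force a full reread with:
    #
    #     repo.invalidateall()
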
    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

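    # Example (illustrative sketch): deferring work until every lock is
    # dropped, as commit() below does for its 'commit' hook:
    #
    #     def notify():
    #         repo.ui.status('all locks released\n')
    #     repo._afterlock(notify)
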
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

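    # Example (illustrative sketch): honoring the documented lock order,
    # wlock before lock, as commit() does below:
    #
    #     with repo.wlock(), repo.lock():
    #         ...   # mutate working copy state and the store
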
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

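    # Sketch of the copy metadata recorded above (all values illustrative):
    # a rename of foo to bar stores, in bar's new filelog revision,
    #
    #     meta = {'copy': 'foo',
    #             'copyrev': '2ed2a3912a0b24502043eae84ee4b279c18b90dd'}
    #
    # with fparent1 set to nullid so readers know to follow the copy data.
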
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

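    # Example (illustrative sketch; the message and username are made up):
    # scripted callers typically pass an explicit message rather than an
    # editor callback:
    #
    #     node = repo.commit(text='fix frobnicator', user='alice <a@b.c>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
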
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

1777 @unfilteredmethod
1777 @unfilteredmethod
1778 def destroying(self):
1778 def destroying(self):
1779 '''Inform the repository that nodes are about to be destroyed.
1779 '''Inform the repository that nodes are about to be destroyed.
1780 Intended for use by strip and rollback, so there's a common
1780 Intended for use by strip and rollback, so there's a common
1781 place for anything that has to be done before destroying history.
1781 place for anything that has to be done before destroying history.
1782
1782
1783 This is mostly useful for saving state that is in memory and waiting
1783 This is mostly useful for saving state that is in memory and waiting
1784 to be flushed when the current lock is released. Because a call to
1784 to be flushed when the current lock is released. Because a call to
1785 destroyed is imminent, the repo will be invalidated causing those
1785 destroyed is imminent, the repo will be invalidated causing those
1786 changes to stay in memory (waiting for the next unlock), or vanish
1786 changes to stay in memory (waiting for the next unlock), or vanish
1787 completely.
1787 completely.
1788 '''
1788 '''
1789 # When using the same lock to commit and strip, the phasecache is left
1789 # When using the same lock to commit and strip, the phasecache is left
1790 # dirty after committing. Then when we strip, the repo is invalidated,
1790 # dirty after committing. Then when we strip, the repo is invalidated,
1791 # causing those changes to disappear.
1791 # causing those changes to disappear.
1792 if '_phasecache' in vars(self):
1792 if '_phasecache' in vars(self):
1793 self._phasecache.write()
1793 self._phasecache.write()
1794
1794
1795 @unfilteredmethod
1795 @unfilteredmethod
1796 def destroyed(self):
1796 def destroyed(self):
1797 '''Inform the repository that nodes have been destroyed.
1797 '''Inform the repository that nodes have been destroyed.
1798 Intended for use by strip and rollback, so there's a common
1798 Intended for use by strip and rollback, so there's a common
1799 place for anything that has to be done after destroying history.
1799 place for anything that has to be done after destroying history.
1800 '''
1800 '''
1801 # When one tries to:
1801 # When one tries to:
1802 # 1) destroy nodes thus calling this method (e.g. strip)
1802 # 1) destroy nodes thus calling this method (e.g. strip)
1803 # 2) use phasecache somewhere (e.g. commit)
1803 # 2) use phasecache somewhere (e.g. commit)
1804 #
1804 #
1805 # then 2) will fail because the phasecache contains nodes that were
1805 # then 2) will fail because the phasecache contains nodes that were
1806 # removed. We can either remove phasecache from the filecache,
1806 # removed. We can either remove phasecache from the filecache,
1807 # causing it to reload next time it is accessed, or simply filter
1807 # causing it to reload next time it is accessed, or simply filter
1808 # the removed nodes now and write the updated cache.
1808 # the removed nodes now and write the updated cache.
1809 self._phasecache.filterunknown(self)
1809 self._phasecache.filterunknown(self)
1810 self._phasecache.write()
1810 self._phasecache.write()
1811
1811
1812 # update the 'served' branch cache to help read only server process
1812 # update the 'served' branch cache to help read only server process
1813 # Thanks to branchcache collaboration this is done from the nearest
1813 # Thanks to branchcache collaboration this is done from the nearest
1814 # filtered subset and it is expected to be fast.
1814 # filtered subset and it is expected to be fast.
1815 branchmap.updatecache(self.filtered('served'))
1815 branchmap.updatecache(self.filtered('served'))
1816
1816
1817 # Ensure the persistent tag cache is updated. Doing it now
1817 # Ensure the persistent tag cache is updated. Doing it now
1818 # means that the tag cache only has to worry about destroyed
1818 # means that the tag cache only has to worry about destroyed
1819 # heads immediately after a strip/rollback. That in turn
1819 # heads immediately after a strip/rollback. That in turn
1820 # guarantees that "cachetip == currenttip" (comparing both rev
1820 # guarantees that "cachetip == currenttip" (comparing both rev
1821 # and node) always means no nodes have been added or destroyed.
1821 # and node) always means no nodes have been added or destroyed.
1822
1822
1823 # XXX this is suboptimal when qrefresh'ing: we strip the current
1823 # XXX this is suboptimal when qrefresh'ing: we strip the current
1824 # head, refresh the tag cache, then immediately add a new head.
1824 # head, refresh the tag cache, then immediately add a new head.
1825 # But I think doing it this way is necessary for the "instant
1825 # But I think doing it this way is necessary for the "instant
1826 # tag cache retrieval" case to work.
1826 # tag cache retrieval" case to work.
1827 self.invalidate()
1827 self.invalidate()
1828
1828
1829 def walk(self, match, node=None):
1829 def walk(self, match, node=None):
1830 '''
1830 '''
1831 walk recursively through the directory tree or a given
1831 walk recursively through the directory tree or a given
1832 changeset, finding all files matched by the match
1832 changeset, finding all files matched by the match
1833 function
1833 function
1834 '''
1834 '''
1835 return self[node].walk(match)
1835 return self[node].walk(match)
1836
1836
1837 def status(self, node1='.', node2=None, match=None,
1837 def status(self, node1='.', node2=None, match=None,
1838 ignored=False, clean=False, unknown=False,
1838 ignored=False, clean=False, unknown=False,
1839 listsubrepos=False):
1839 listsubrepos=False):
1840 '''a convenience method that calls node1.status(node2)'''
1840 '''a convenience method that calls node1.status(node2)'''
1841 return self[node1].status(node2, match, ignored, clean, unknown,
1841 return self[node1].status(node2, match, ignored, clean, unknown,
1842 listsubrepos)
1842 listsubrepos)
1843
1843
1844 def heads(self, start=None):
1844 def heads(self, start=None):
1845 heads = self.changelog.heads(start)
1845 heads = self.changelog.heads(start)
1846 # sort the output in rev descending order
1846 # sort the output in rev descending order
1847 return sorted(heads, key=self.changelog.rev, reverse=True)
1847 return sorted(heads, key=self.changelog.rev, reverse=True)
1848
1848
1849 def branchheads(self, branch=None, start=None, closed=False):
1849 def branchheads(self, branch=None, start=None, closed=False):
1850 '''return a (possibly filtered) list of heads for the given branch
1850 '''return a (possibly filtered) list of heads for the given branch
1851
1851
1852 Heads are returned in topological order, from newest to oldest.
1852 Heads are returned in topological order, from newest to oldest.
1853 If branch is None, use the dirstate branch.
1853 If branch is None, use the dirstate branch.
1854 If start is not None, return only heads reachable from start.
1854 If start is not None, return only heads reachable from start.
1855 If closed is True, return heads that are marked as closed as well.
1855 If closed is True, return heads that are marked as closed as well.
1856 '''
1856 '''
1857 if branch is None:
1857 if branch is None:
1858 branch = self[None].branch()
1858 branch = self[None].branch()
1859 branches = self.branchmap()
1859 branches = self.branchmap()
1860 if branch not in branches:
1860 if branch not in branches:
1861 return []
1861 return []
1862 # the cache returns heads ordered lowest to highest
1862 # the cache returns heads ordered lowest to highest
1863 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1863 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1864 if start is not None:
1864 if start is not None:
1865 # filter out the heads that cannot be reached from startrev
1865 # filter out the heads that cannot be reached from startrev
1866 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1866 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1867 bheads = [h for h in bheads if h in fbheads]
1867 bheads = [h for h in bheads if h in fbheads]
1868 return bheads
1868 return bheads
1869
1869
1870 def branches(self, nodes):
1870 def branches(self, nodes):
1871 if not nodes:
1871 if not nodes:
1872 nodes = [self.changelog.tip()]
1872 nodes = [self.changelog.tip()]
1873 b = []
1873 b = []
1874 for n in nodes:
1874 for n in nodes:
1875 t = n
1875 t = n
1876 while True:
1876 while True:
1877 p = self.changelog.parents(n)
1877 p = self.changelog.parents(n)
1878 if p[1] != nullid or p[0] == nullid:
1878 if p[1] != nullid or p[0] == nullid:
1879 b.append((t, n, p[0], p[1]))
1879 b.append((t, n, p[0], p[1]))
1880 break
1880 break
1881 n = p[0]
1881 n = p[0]
1882 return b
1882 return b
1883
1883
1884 def between(self, pairs):
1884 def between(self, pairs):
1885 r = []
1885 r = []
1886
1886
1887 for top, bottom in pairs:
1887 for top, bottom in pairs:
1888 n, l, i = top, [], 0
1888 n, l, i = top, [], 0
1889 f = 1
1889 f = 1
1890
1890
1891 while n != bottom and n != nullid:
1891 while n != bottom and n != nullid:
1892 p = self.changelog.parents(n)[0]
1892 p = self.changelog.parents(n)[0]
1893 if i == f:
1893 if i == f:
1894 l.append(n)
1894 l.append(n)
1895 f = f * 2
1895 f = f * 2
1896 n = p
1896 n = p
1897 i += 1
1897 i += 1
1898
1898
1899 r.append(l)
1899 r.append(l)
1900
1900
1901 return r
1901 return r
1902
1902
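The doubling of f in between() means each returned list samples ancestors at exponentially growing first-parent distances (1, 2, 4, 8, ...) from top; this sampling historically backed the legacy wire-protocol ancestor discovery. A standalone sketch of the same walk over a plain parent mapping (hypothetical helper, not Mercurial API):

    def sample_between(parents, top, bottom):
        # parents: {node: first parent or None}; mirrors the loop in between()
        l, n, i, f = [], top, 0, 1
        while n != bottom and n is not None:
            if i == f:             # keep nodes at distance 1, 2, 4, 8, ...
                l.append(n)
                f *= 2
            n = parents.get(n)     # first-parent chain only
            i += 1
        return l

    parents = {5: 4, 4: 3, 3: 2, 2: 1, 1: 0, 0: None}
    print(sample_between(parents, 5, 0))   # [4, 3, 1]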
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object; registered hooks are called with a
        pushop (carrying repo, remote and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()
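For illustration, this is how an extension might register such a hook; util.hooks.add takes a source name and a callable, and the calling convention sketched here (a single pushop argument) is an assumption based on the docstring, so treat it as a sketch rather than the exact API:

    def checkoutgoing(pushop):
        # pushop exposes .repo, .remote and .outgoing
        if len(pushop.outgoing.missing) > 100:
            pushop.repo.ui.warn('pushing more than 100 changesets\n')

    def reposetup(ui, repo):
        repo.prepushoutgoinghooks.add('myext', checkoutgoing)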
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
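aftertrans copies each entry into a plain tuple, so the closure it returns holds no reference back to the transaction or repository that supplied the list, and their destructors can run on schedule. A minimal, self-contained illustration (fakevfs is made up for the demo):

    class fakevfs(object):
        def rename(self, src, dest):
            print('rename %s -> %s' % (src, dest))

    cb = aftertrans([(fakevfs(), 'journal', 'undo')])
    cb()   # prints: rename journal -> undo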
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
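As the docstring invites, extensions customize this by wrapping the function. A sketch using extensions.wrapfunction (the helper is a real Mercurial API; the extension, config knob and requirement string below are hypothetical):

    from mercurial import extensions, localrepo

    def _newreporequirements(orig, repo):
        reqs = orig(repo)
        if repo.ui.configbool('myext', 'wantfeature', False):
            reqs.add('exp-myext-feature')  # hypothetical requirement string
        return reqs

    def extsetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements',
                                _newreporequirements)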
@@ -1,481 +1,481
1 """ Mercurial phases support code
1 """ Mercurial phases support code
2
2
3 ---
3 ---
4
4
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 Logilab SA <contact@logilab.fr>
6 Logilab SA <contact@logilab.fr>
7 Augie Fackler <durin42@gmail.com>
7 Augie Fackler <durin42@gmail.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License version 2 or any later version.
10 of the GNU General Public License version 2 or any later version.
11
11
12 ---
12 ---
13
13
14 This module implements most phase logic in mercurial.
14 This module implements most phase logic in mercurial.
15
15
16
16
17 Basic Concept
17 Basic Concept
18 =============
18 =============
19
19
20 A 'changeset phase' is an indicator that tells us how a changeset is
20 A 'changeset phase' is an indicator that tells us how a changeset is
21 manipulated and communicated. The details of each phase is described
21 manipulated and communicated. The details of each phase is described
22 below, here we describe the properties they have in common.
22 below, here we describe the properties they have in common.
23
23
24 Like bookmarks, phases are not stored in history and thus are not
24 Like bookmarks, phases are not stored in history and thus are not
25 permanent and leave no audit trail.
25 permanent and leave no audit trail.
26
26
27 First, no changeset can be in two phases at once. Phases are ordered,
27 First, no changeset can be in two phases at once. Phases are ordered,
28 so they can be considered from lowest to highest. The default, lowest
28 so they can be considered from lowest to highest. The default, lowest
29 phase is 'public' - this is the normal phase of existing changesets. A
29 phase is 'public' - this is the normal phase of existing changesets. A
30 child changeset can not be in a lower phase than its parents.
30 child changeset can not be in a lower phase than its parents.
31
31
32 These phases share a hierarchy of traits:
32 These phases share a hierarchy of traits:
33
33
34 immutable shared
34 immutable shared
35 public: X X
35 public: X X
36 draft: X
36 draft: X
37 secret:
37 secret:
38
38
39 Local commits are draft by default.
39 Local commits are draft by default.
40
40
41 Phase Movement and Exchange
41 Phase Movement and Exchange
42 ===========================
42 ===========================
43
43
44 Phase data is exchanged by pushkey on pull and push. Some servers have
44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 a publish option set, we call such a server a "publishing server".
45 a publish option set, we call such a server a "publishing server".
46 Pushing a draft changeset to a publishing server changes the phase to
46 Pushing a draft changeset to a publishing server changes the phase to
47 public.
47 public.
48
48
49 A small list of fact/rules define the exchange of phase:
49 A small list of fact/rules define the exchange of phase:
50
50
51 * old client never changes server states
51 * old client never changes server states
52 * pull never changes server states
52 * pull never changes server states
53 * publish and old server changesets are seen as public by client
53 * publish and old server changesets are seen as public by client
54 * any secret changeset seen in another repository is lowered to at
54 * any secret changeset seen in another repository is lowered to at
55 least draft
55 least draft
56
56
57 Here is the final table summing up the 49 possible use cases of phase
57 Here is the final table summing up the 49 possible use cases of phase
58 exchange:
58 exchange:
59
59
60 server
60 server
61 old publish non-publish
61 old publish non-publish
62 N X N D P N D P
62 N X N D P N D P
63 old client
63 old client
64 pull
64 pull
65 N - X/X - X/D X/P - X/D X/P
65 N - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
67 push
67 push
68 X X/X X/X X/P X/P X/P X/D X/D X/P
68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 new client
69 new client
70 pull
70 pull
71 N - P/X - P/D P/P - D/D P/P
71 N - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
73 P - P/X - P/D P/P - P/D P/P
73 P - P/X - P/D P/P - P/D P/P
74 push
74 push
75 D P/X P/X P/P P/P P/P D/D D/D P/P
75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
77
77
78 Legend:
78 Legend:
79
79
80 A/B = final state on client / state on server
80 A/B = final state on client / state on server
81
81
82 * N = new/not present,
82 * N = new/not present,
83 * P = public,
83 * P = public,
84 * D = draft,
84 * D = draft,
85 * X = not tracked (i.e., the old client or server has no internal
85 * X = not tracked (i.e., the old client or server has no internal
86 way of recording the phase.)
86 way of recording the phase.)
87
87
88 passive = only pushes
88 passive = only pushes
89
89
90
90
91 A cell here can be read like this:
91 A cell here can be read like this:
92
92
93 "When a new client pushes a draft changeset (D) to a publishing
93 "When a new client pushes a draft changeset (D) to a publishing
94 server where it's not present (N), it's marked public on both
94 server where it's not present (N), it's marked public on both
95 sides (P/P)."
95 sides (P/P)."
96
96
97 Note: old client behave as a publishing server with draft only content
97 Note: old client behave as a publishing server with draft only content
98 - other people see it as public
98 - other people see it as public
99 - content is pushed as draft
99 - content is pushed as draft
100
100
101 """
101 """
102
102
103 from __future__ import absolute_import
103 from __future__ import absolute_import
104
104
105 import errno
105 import errno
106 import os
107
106
108 from .i18n import _
107 from .i18n import _
109 from .node import (
108 from .node import (
110 bin,
109 bin,
111 hex,
110 hex,
112 nullid,
111 nullid,
113 nullrev,
112 nullrev,
114 short,
113 short,
115 )
114 )
116 from . import (
115 from . import (
116 encoding,
117 error,
117 error,
118 )
118 )
119
119
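This import swap is the point of the series: on Python 3, os.environ holds str (unicode) keys and values, while Mercurial's internals traffic in bytes, so modules read the environment through a bytes-level mirror in the encoding module instead. A rough sketch of how such a mirror can be built (simplified and hypothetical; the real mercurial/encoding.py is more careful about platform encodings):

    import os
    import sys

    if sys.version_info[0] >= 3:
        # rebuild the environment with bytes keys/values so callers can keep
        # using b'HG_PENDING'-style lookups (simplification: assumes UTF-8)
        environ = dict((k.encode('utf-8'), v.encode('utf-8'))
                       for k, v in os.environ.items())
    else:
        # on Python 2, os.environ is already a bytes-keyed mapping
        environ = os.environ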
allphases = public, draft, secret = range(3)
trackedphases = allphases[1:]
phasenames = ['public', 'draft', 'secret']

def _readroots(repo, phasedefaults=None):
    """Read phase roots from disk

    phasedefaults is a list of fn(repo, roots) callables, which are
    executed if the phase roots file does not exist. When phases are
    being initialized on an existing repository, this could be used to
    set the phase of selected changesets to something other than public.

    Return (roots, dirty) where dirty is true if roots differ from
    what is being stored.
    """
    repo = repo.unfiltered()
    dirty = False
    roots = [set() for i in allphases]
    try:
        f = None
-        if 'HG_PENDING' in os.environ:
+        if 'HG_PENDING' in encoding.environ:
            try:
                f = repo.svfs('phaseroots.pending')
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise
        if f is None:
            f = repo.svfs('phaseroots')
        try:
            for line in f:
                phase, nh = line.split()
                roots[int(phase)].add(bin(nh))
        finally:
            f.close()
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        if phasedefaults:
            for f in phasedefaults:
                roots = f(repo, roots)
        dirty = True
    return roots, dirty
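_readroots and phasecache._write (below) agree on a deliberately trivial on-disk format: one '<phase> <hex-node>' pair per line. A self-contained round-trip of that format, using Python 2 strings to match the surrounding code (helper names hypothetical):

    from binascii import hexlify, unhexlify

    def dumproots(roots):
        # mirrors phasecache._write: '%i %s\n' % (phase, hex(node))
        return ''.join('%i %s\n' % (phase, hexlify(n))
                       for phase, nodes in enumerate(roots) for n in nodes)

    def loadroots(data, nphases=3):
        # mirrors the parsing loop in _readroots
        roots = [set() for _ in range(nphases)]
        for line in data.splitlines():
            phase, nh = line.split()
            roots[int(phase)].add(unhexlify(nh))
        return roots

    sample = [set(), set(['\x12' * 20]), set()]   # one draft root
    assert loadroots(dumproots(sample)) == sample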
class phasecache(object):
    def __init__(self, repo, phasedefaults, _load=True):
        if _load:
            # Cheap trick to allow shallow-copy without copy module
            self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
            self._phaserevs = None
            self._phasesets = None
            self.filterunknown(repo)
            self.opener = repo.svfs

    def copy(self):
        # Shallow copy meant to ensure isolation in
        # advance/retractboundary(), nothing more.
        ph = self.__class__(None, None, _load=False)
        ph.phaseroots = self.phaseroots[:]
        ph.dirty = self.dirty
        ph.opener = self.opener
        ph._phaserevs = self._phaserevs
        ph._phasesets = self._phasesets
        return ph

    def replace(self, phcache):
        """replace all values in 'self' with content of phcache"""
        for a in ('phaseroots', 'dirty', 'opener', '_phaserevs', '_phasesets'):
            setattr(self, a, getattr(phcache, a))

    def _getphaserevsnative(self, repo):
        repo = repo.unfiltered()
        nativeroots = []
        for phase in trackedphases:
            nativeroots.append(map(repo.changelog.rev, self.phaseroots[phase]))
        return repo.changelog.computephases(nativeroots)

    def _computephaserevspure(self, repo):
        repo = repo.unfiltered()
        revs = [public] * len(repo.changelog)
        self._phaserevs = revs
        self._populatephaseroots(repo)
        for phase in trackedphases:
            roots = map(repo.changelog.rev, self.phaseroots[phase])
            if roots:
                for rev in roots:
                    revs[rev] = phase
                for rev in repo.changelog.descendants(roots):
                    revs[rev] = phase

    def loadphaserevs(self, repo):
        """ensure phase information is loaded in the object"""
        if self._phaserevs is None:
            try:
                if repo.ui.configbool('experimental',
                                      'nativephaseskillswitch'):
                    self._computephaserevspure(repo)
                else:
                    res = self._getphaserevsnative(repo)
                    self._phaserevs, self._phasesets = res
            except AttributeError:
                self._computephaserevspure(repo)

    def invalidate(self):
        self._phaserevs = None
        self._phasesets = None

    def _populatephaseroots(self, repo):
        """Fills the _phaserevs cache with phases for the roots.
        """
        cl = repo.changelog
        phaserevs = self._phaserevs
        for phase in trackedphases:
            roots = map(cl.rev, self.phaseroots[phase])
            for root in roots:
                phaserevs[root] = phase

    def phase(self, repo, rev):
        # We need a repo argument here to be able to build _phaserevs
        # if necessary. The repository instance is not stored in
        # phasecache to avoid reference cycles. The changelog instance
        # is not stored because it is a filecache() property and can
        # be replaced without us being notified.
        if rev == nullrev:
            return public
        if rev < nullrev:
            raise ValueError(_('cannot lookup negative revision'))
        if self._phaserevs is None or rev >= len(self._phaserevs):
            self.invalidate()
            self.loadphaserevs(repo)
        return self._phaserevs[rev]

    def write(self):
        if not self.dirty:
            return
        f = self.opener('phaseroots', 'w', atomictemp=True, checkambig=True)
        try:
            self._write(f)
        finally:
            f.close()

    def _write(self, fp):
        for phase, roots in enumerate(self.phaseroots):
            for h in roots:
                fp.write('%i %s\n' % (phase, hex(h)))
        self.dirty = False

    def _updateroots(self, phase, newroots, tr):
        self.phaseroots[phase] = newroots
        self.invalidate()
        self.dirty = True

        tr.addfilegenerator('phase', ('phaseroots',), self._write)
        tr.hookargs['phases_moved'] = '1'

    def advanceboundary(self, repo, tr, targetphase, nodes):
        # Be careful to preserve shallow-copied values: do not update
        # phaseroots values, replace them.

        repo = repo.unfiltered()
        delroots = [] # set of roots deleted by this path
        for phase in xrange(targetphase + 1, len(allphases)):
            # filter nodes that are not in a compatible phase already
            nodes = [n for n in nodes
                     if self.phase(repo, repo[n].rev()) >= phase]
            if not nodes:
                break # no roots to move anymore
            olds = self.phaseroots[phase]
            roots = set(ctx.node() for ctx in repo.set(
                'roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
            if olds != roots:
                self._updateroots(phase, roots, tr)
                # some roots may need to be declared for lower phases
                delroots.extend(olds - roots)
        # declare deleted roots in the target phase
        if targetphase != 0:
            self.retractboundary(repo, tr, targetphase, delroots)
        repo.invalidatevolatilesets()

    def retractboundary(self, repo, tr, targetphase, nodes):
        # Be careful to preserve shallow-copied values: do not update
        # phaseroots values, replace them.

        repo = repo.unfiltered()
        currentroots = self.phaseroots[targetphase]
        newroots = [n for n in nodes
                    if self.phase(repo, repo[n].rev()) < targetphase]
        if newroots:
            if nullid in newroots:
                raise error.Abort(_('cannot change null revision phase'))
            currentroots = currentroots.copy()
            currentroots.update(newroots)

            # Only compute new roots for revs above the roots that are being
            # retracted.
            minnewroot = min(repo[n].rev() for n in newroots)
            aboveroots = [n for n in currentroots
                          if repo[n].rev() >= minnewroot]
            updatedroots = repo.set('roots(%ln::)', aboveroots)

            finalroots = set(n for n in currentroots if repo[n].rev() <
                             minnewroot)
            finalroots.update(ctx.node() for ctx in updatedroots)

            self._updateroots(targetphase, finalroots, tr)
        repo.invalidatevolatilesets()

    def filterunknown(self, repo):
        """remove unknown nodes from the phase boundary

        Nothing is lost as unknown nodes only hold data for their descendants.
        """
        filtered = False
        nodemap = repo.changelog.nodemap # to filter unknown nodes
        for phase, nodes in enumerate(self.phaseroots):
            missing = sorted(node for node in nodes if node not in nodemap)
            if missing:
                for mnode in missing:
                    repo.ui.debug(
                        'removing unknown node %s from %i-phase boundary\n'
                        % (short(mnode), phase))
                nodes.symmetric_difference_update(missing)
                filtered = True
        if filtered:
            self.dirty = True
        # filterunknown is called by repo.destroyed; we may have no changes
        # in the roots, but the phaserevs contents are certainly invalid (or
        # at least we have no proper way to check that). Related to issue
        # 3858.
        #
        # The other caller is __init__, which has no _phaserevs initialized
        # anyway. If this changes, we should consider adding a dedicated
        # "destroyed" function to phasecache, or a proper cache key mechanism
        # (see the branchmap one)
        self.invalidate()

def advanceboundary(repo, tr, targetphase, nodes):
    """Add nodes to a phase, changing other nodes' phases if necessary.

    This function moves the boundary *forward*: all nodes are set to
    the target phase or kept in a *lower* phase.

    The boundary is simplified to contain phase roots only."""
    phcache = repo._phasecache.copy()
    phcache.advanceboundary(repo, tr, targetphase, nodes)
    repo._phasecache.replace(phcache)

def retractboundary(repo, tr, targetphase, nodes):
    """Set nodes back to a phase, changing other nodes' phases if
    necessary.

    This function moves the boundary *backward*: all nodes are set to
    the target phase or kept in a *higher* phase.

    The boundary is simplified to contain phase roots only."""
    phcache = repo._phasecache.copy()
    phcache.retractboundary(repo, tr, targetphase, nodes)
    repo._phasecache.replace(phcache)

def listphases(repo):
    """List phase roots for serialization over pushkey"""
    keys = {}
    value = '%i' % draft
    for root in repo._phasecache.phaseroots[draft]:
        keys[hex(root)] = value

    if repo.publishing():
        # Add extra data to let the remote know we are a publishing
        # repo. A publishing repo can't just pretend to be an old repo:
        # when pushing to a publishing repo, the client still needs to
        # push the phase boundary.
        #
        # A push does not only push changesets, it also pushes phase
        # data. New phase data may apply to common changesets which
        # won't be pushed (as they are common). Here is a very simple
        # example:
        #
        # 1) repo A pushes changeset X as draft to repo B
        # 2) repo B makes changeset X public
        # 3) repo B pushes to repo A. X is not pushed, but the data
        #    that X is now public should be.
        #
        # The server can't handle this on its own as it has no idea of
        # the client's phase data.
        keys['publishing'] = 'True'
    return keys
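So for a publishing repository with a single draft root, the mapping handed to pushkey looks like this (values illustrative):

    nhex = 'aa' * 20              # illustrative 40-digit draft root
    keys = {nhex: '1',            # '%i' % draft
            'publishing': 'True'} # marker consumed by new clients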
def pushphase(repo, nhex, oldphasestr, newphasestr):
    """Advance the phase of the node given by nhex over pushkey.

    Returns 1 on success, 0 on failure (e.g. a lost race)."""
    repo = repo.unfiltered()
    with repo.lock():
        currentphase = repo[nhex].phase()
        newphase = abs(int(newphasestr)) # let's avoid negative index surprise
        oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
        if currentphase == oldphase and newphase < oldphase:
            with repo.transaction('pushkey-phase') as tr:
                advanceboundary(repo, tr, newphase, [bin(nhex)])
            return 1
        elif currentphase == newphase:
            # raced, but got correct result
            return 1
        else:
            return 0
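Over the wire, the phases pushkey namespace delivers old and new phases as strings, and pushphase answers 1 or 0. An illustrative call advancing a node from draft to public (the node hash is made up; repo is assumed to be a local repository object):

    nhex = 'aa' * 20   # hypothetical 40-digit node hash known to the repo
    ok = pushphase(repo, nhex, '%i' % draft, '%i' % public)
    # ok == 1 if the boundary advanced (or another writer already did it),
    # ok == 0 if the expected old phase no longer matched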
def analyzeremotephases(repo, subset, roots):
    """Compute phase heads and roots in a subset of nodes from a root dict

    * subset is the heads of the subset
    * roots is a {<nodeid> => phase} mapping; keys and values are strings.

    Accepts unknown elements as input.
    """
    repo = repo.unfiltered()
    # build list from dictionary
    draftroots = []
    nodemap = repo.changelog.nodemap # to filter unknown nodes
    for nhex, phase in roots.iteritems():
        if nhex == 'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        phase = int(phase)
        if phase == public:
            if node != nullid:
                repo.ui.warn(_('ignoring inconsistent public root'
                               ' from remote: %s\n') % nhex)
        elif phase == draft:
            if node in nodemap:
                draftroots.append(node)
        else:
            repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
                         % (phase, nhex))
    # compute heads
    publicheads = newheads(repo, subset, draftroots)
    return publicheads, draftroots

def newheads(repo, heads, roots):
    """compute the new heads of a subset minus another

    * `heads`: defines the first subset
    * `roots`: defines the second, which we subtract from the first"""
    repo = repo.unfiltered()
    revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
                      heads, roots, roots, heads)
    return [c.node() for c in revset]


def newcommitphase(ui):
    """helper to get the target phase of a new commit

    Handles all possible values of the phases.new-commit option.

    """
    v = ui.config('phases', 'new-commit', draft)
    try:
        return phasenames.index(v)
    except ValueError:
        try:
            return int(v)
        except ValueError:
            msg = _("phases.new-commit: not a valid phase name ('%s')")
            raise error.ConfigError(msg % v)
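The lookup order means the option accepts either a phase name or a numeric index; an illustrative check, assuming a ui object (ui.setconfig used for the demo):

    ui.setconfig('phases', 'new-commit', 'secret')
    assert newcommitphase(ui) == secret   # phasenames.index('secret') == 2
    ui.setconfig('phases', 'new-commit', '1')
    assert newcommitphase(ui) == draft    # falls back to int(v)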
def hassecret(repo):
    """utility function that checks whether a repo has any secret changesets
    """
    return bool(repo._phasecache.phaseroots[2])
@@ -1,652 +1,652
# posix.py - Posix utility function implementations for Mercurial
#
# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import fcntl
import getpass
import grp
import os
import pwd
import re
import select
import stat
import sys
import tempfile
import unicodedata

from .i18n import _
from . import (
    encoding,
    pycompat,
)

posixfile = open
normpath = os.path.normpath
samestat = os.path.samestat
try:
    oslink = os.link
except AttributeError:
    # Some platforms build Python without os.link on systems that are
    # vaguely unix-like but don't have hardlink support. For those
    # poor souls, just say we tried and that it failed so we fall back
    # to copies.
    def oslink(src, dst):
        raise OSError(errno.EINVAL,
                      'hardlinks not supported: %s to %s' % (src, dst))
unlink = os.unlink
rename = os.rename
removedirs = os.removedirs
expandglobs = False

umask = os.umask(0)
os.umask(umask)

def split(p):
    '''Same as posixpath.split, but faster

    >>> import posixpath
    >>> for f in ['/absolute/path/to/file',
    ...           'relative/path/to/file',
    ...           'file_alone',
    ...           'path/to/directory/',
    ...           '/multiple/path//separators',
    ...           '/file_at_root',
    ...           '///multiple_leading_separators_at_root',
    ...           '']:
    ...     assert split(f) == posixpath.split(f), f
    '''
    ht = p.rsplit('/', 1)
    if len(ht) == 1:
        return '', p
    nh = ht[0].rstrip('/')
    if nh:
        return nh, ht[1]
    return ht[0] + '/', ht[1]

def openhardlinks():
    '''return true if it is safe to hold open file handles to hardlinks'''
    return True

def nlinks(name):
    '''return number of hardlinks for the given file'''
    return os.lstat(name).st_nlink

def parsepatchoutput(output_line):
    """parses the output produced by patch and returns the filename"""
    pf = output_line[14:]
    if os.sys.platform == 'OpenVMS':
        if pf[0] == '`':
            pf = pf[1:-1] # Remove the quotes
    else:
        if pf.startswith("'") and pf.endswith("'") and " " in pf:
            pf = pf[1:-1] # Remove the quotes
    return pf

def sshargs(sshcmd, host, user, port):
    '''Build argument list for ssh'''
    args = user and ("%s@%s" % (user, host)) or host
    return port and ("%s -p %s" % (args, port)) or args

def isexec(f):
    """check whether a file is executable"""
    return (os.lstat(f).st_mode & 0o100 != 0)

def setflags(f, l, x):
    s = os.lstat(f).st_mode
    if l:
        if not stat.S_ISLNK(s):
            # switch file to link
            fp = open(f)
            data = fp.read()
            fp.close()
            os.unlink(f)
            try:
                os.symlink(data, f)
            except OSError:
                # failed to make a link, rewrite file
                fp = open(f, "w")
                fp.write(data)
                fp.close()
        # no chmod needed at this point
        return
    if stat.S_ISLNK(s):
        # switch link to file
        data = os.readlink(f)
        os.unlink(f)
        fp = open(f, "w")
        fp.write(data)
        fp.close()
        s = 0o666 & ~umask # avoid restatting for chmod

    sx = s & 0o100
    if x and not sx:
        # Turn on +x for every +r bit when making a file executable
        # and obey umask.
        os.chmod(f, s | (s & 0o444) >> 2 & ~umask)
    elif not x and sx:
        # Turn off all +x bits
        os.chmod(f, s & 0o666)
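The chmod expression above relies on operator precedence: >> binds tighter than &, which binds tighter than |, so it reads s | (((s & 0o444) >> 2) & ~umask) - copy the read bits two places right onto the matching exec bits, filter through the umask, then OR them in. A quick worked check:

    s = 0o644                    # rw-r--r--
    x = (s & 0o444) >> 2         # 0o444 >> 2 == 0o111: +x wherever +r was set
    print(oct(s | x & ~0o022))   # 0o755 under a typical umask of 022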
136 def copymode(src, dst, mode=None):
136 def copymode(src, dst, mode=None):
137 '''Copy the file mode from the file at path src to dst.
137 '''Copy the file mode from the file at path src to dst.
138 If src doesn't exist, we're using mode instead. If mode is None, we're
138 If src doesn't exist, we're using mode instead. If mode is None, we're
139 using umask.'''
139 using umask.'''
140 try:
140 try:
141 st_mode = os.lstat(src).st_mode & 0o777
141 st_mode = os.lstat(src).st_mode & 0o777
142 except OSError as inst:
142 except OSError as inst:
143 if inst.errno != errno.ENOENT:
143 if inst.errno != errno.ENOENT:
144 raise
144 raise
145 st_mode = mode
145 st_mode = mode
146 if st_mode is None:
146 if st_mode is None:
147 st_mode = ~umask
147 st_mode = ~umask
148 st_mode &= 0o666
148 st_mode &= 0o666
149 os.chmod(dst, st_mode)
149 os.chmod(dst, st_mode)
150
150
def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)
    """

    # VFAT on some Linux versions can flip mode but it doesn't persist
    # across a FS remount. Frequently we can detect it if files are created
    # with exec bit on.

    try:
        EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        cachedir = os.path.join(path, '.hg', 'cache')
        if os.path.isdir(cachedir):
            checkisexec = os.path.join(cachedir, 'checkisexec')
            checknoexec = os.path.join(cachedir, 'checknoexec')

            try:
                m = os.stat(checkisexec).st_mode
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                # checkisexec does not exist - fall through ...
            else:
                # checkisexec exists, check if it actually is exec
                if m & EXECFLAGS != 0:
                    # ensure checknoexec exists, check it isn't exec
                    try:
                        m = os.stat(checknoexec).st_mode
                    except OSError as e:
                        if e.errno != errno.ENOENT:
                            raise
                        open(checknoexec, 'w').close() # might fail
                        m = os.stat(checknoexec).st_mode
                    if m & EXECFLAGS == 0:
                        # check-exec is exec and check-no-exec is not exec
                        return True
                    # checknoexec exists but is exec - delete it
                    os.unlink(checknoexec)
                # checkisexec exists but is not exec - delete it
                os.unlink(checkisexec)

            # check using one file, leave it as checkisexec
            checkdir = cachedir
        else:
            # check directly in path and don't leave checkisexec behind
            checkdir = path
            checkisexec = None
        fh, fn = tempfile.mkstemp(dir=checkdir, prefix='hg-checkexec-')
        try:
            os.close(fh)
            m = os.stat(fn).st_mode
            if m & EXECFLAGS == 0:
                os.chmod(fn, m & 0o777 | EXECFLAGS)
                if os.stat(fn).st_mode & EXECFLAGS != 0:
                    if checkisexec is not None:
                        os.rename(fn, checkisexec)
                        fn = None
                    return True
        finally:
            if fn is not None:
                os.unlink(fn)
    except (IOError, OSError):
        # we don't care, the user probably won't be able to commit anyway
        return False
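The cached dance above reduces to a simple probe; a cache-free sketch (my own reduction, not Mercurial's API): create a file, set the exec bits, and see whether the filesystem actually kept them.

    import os
    import stat
    import tempfile

    EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    fh, fn = tempfile.mkstemp(prefix='exec-probe-')
    try:
        os.close(fh)
        os.chmod(fn, os.stat(fn).st_mode | EXECFLAGS)
        # on VFAT-like filesystems the bits silently fail to stick
        supportsexec = os.stat(fn).st_mode & EXECFLAGS != 0
    finally:
        os.unlink(fn)
    print(supportsexec)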
def checklink(path):
    """check whether the given path is on a symlink-capable filesystem"""
    # mktemp is not racy because symlink creation will fail if the
    # file already exists
    while True:
        cachedir = os.path.join(path, '.hg', 'cache')
        checklink = os.path.join(cachedir, 'checklink')
        # try fast path, read only
        if os.path.islink(checklink):
            return True
        if os.path.isdir(cachedir):
            checkdir = cachedir
        else:
            checkdir = path
            cachedir = None
        name = tempfile.mktemp(dir=checkdir, prefix='checklink-')
        try:
            fd = None
            if cachedir is None:
                fd = tempfile.NamedTemporaryFile(dir=checkdir,
                                                 prefix='hg-checklink-')
                target = os.path.basename(fd.name)
            else:
                # create a fixed file to link to; doesn't matter if it
                # already exists.
                target = 'checklink-target'
                open(os.path.join(cachedir, target), 'w').close()
            try:
                os.symlink(target, name)
                if cachedir is None:
                    os.unlink(name)
                else:
                    try:
                        os.rename(name, checklink)
                    except OSError:
                        os.unlink(name)
                return True
            except OSError as inst:
                # link creation might race, try again
                if inst.errno == errno.EEXIST:
                    continue
                raise
            finally:
                if fd is not None:
                    fd.close()
        except AttributeError:
            return False
        except OSError as inst:
            # sshfs might report failure while successfully creating the link
            if inst.errno == errno.EIO and os.path.exists(name):
                os.unlink(name)
            return False
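Stripped of the caching and the retry loop, the capability test is just this sketch (assuming a failed or missing os.symlink means the filesystem cannot do symlinks):

    import os
    import tempfile

    d = tempfile.mkdtemp()
    open(os.path.join(d, 'target'), 'w').close()
    link = os.path.join(d, 'link')
    try:
        os.symlink('target', link)
        cansymlink = os.path.islink(link)
    except (OSError, AttributeError):
        # OSError: filesystem refused; AttributeError: no os.symlink at all
        cansymlink = False
    print(cansymlink)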
def checkosfilename(path):
    '''Check that the base-relative path is a valid filename on this platform.
    Returns None if the path is ok, or a UI string describing the problem.'''
    pass # on posix platforms, every path is ok

def setbinary(fd):
    pass

def pconvert(path):
    return path

def localpath(path):
    return path
def samefile(fpath1, fpath2):
    """Returns whether fpath1 and fpath2 refer to the same file. This is only
    guaranteed to work for files, not directories."""
    return os.path.samefile(fpath1, fpath2)

def samedevice(fpath1, fpath2):
    """Returns whether fpath1 and fpath2 are on the same device. This is only
    guaranteed to work for files, not directories."""
    st1 = os.lstat(fpath1)
    st2 = os.lstat(fpath2)
    return st1.st_dev == st2.st_dev
# os.path.normcase is a no-op, which doesn't help us on non-native filesystems
def normcase(path):
    return path.lower()

# what normcase does to ASCII strings
normcasespec = encoding.normcasespecs.lower
# fallback normcase function for non-ASCII strings
normcasefallback = normcase
if sys.platform == 'darwin':

    def normcase(path):
        '''
        Normalize a filename for OS X-compatible comparison:
        - escape-encode invalid characters
        - decompose to NFD
        - lowercase
        - omit ignored characters [200c-200f, 202a-202e, 206a-206f, feff]

        >>> normcase('UPPER')
        'upper'
        >>> normcase('Caf\xc3\xa9')
        'cafe\\xcc\\x81'
        >>> normcase('\xc3\x89')
        'e\\xcc\\x81'
        >>> normcase('\xb8\xca\xc3\xca\xbe\xc8.JPG') # issue3918
        '%b8%ca%c3\\xca\\xbe%c8.jpg'
        '''

        try:
            return encoding.asciilower(path) # exception for non-ASCII
        except UnicodeDecodeError:
            return normcasefallback(path)

    normcasespec = encoding.normcasespecs.lower

    def normcasefallback(path):
        try:
            u = path.decode('utf-8')
        except UnicodeDecodeError:
            # OS X percent-encodes any bytes that aren't valid utf-8
            s = ''
            pos = 0
            l = len(path)
            while pos < l:
                try:
                    c = encoding.getutf8char(path, pos)
                    pos += len(c)
                except ValueError:
                    c = '%%%02X' % ord(path[pos])
                    pos += 1
                s += c

            u = s.decode('utf-8')

        # Decompose then lowercase (HFS+ technote specifies lower)
        enc = unicodedata.normalize('NFD', u).lower().encode('utf-8')
        # drop HFS+ ignored characters
        return encoding.hfsignoreclean(enc)
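The decompose-then-lowercase step can be shown with the stdlib alone (encoding.hfsignoreclean is Mercurial-specific and omitted here):

    import unicodedata

    u = u'Caf\xe9'  # 'Café'
    nfd = unicodedata.normalize('NFD', u).lower()
    # 'cafe' followed by a combining acute accent, matching the doctests above
    print(nfd.encode('utf-8'))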
if sys.platform == 'cygwin':
    # workaround for cygwin, in which the mount point part of a path is
    # treated as case sensitive even though the underlying NTFS is case
    # insensitive.

    # default mount points
    cygwinmountpoints = sorted([
        "/usr/bin",
        "/usr/lib",
        "/cygdrive",
    ], reverse=True)

    # use uppercasing for normcase, matching the NTFS workaround
    def normcase(path):
        pathlen = len(path)
        if (pathlen == 0) or (path[0] != pycompat.ossep):
            # treat as relative
            return encoding.upper(path)

        # preserve the case of the mount point part
        for mp in cygwinmountpoints:
            if not path.startswith(mp):
                continue

            mplen = len(mp)
            if mplen == pathlen: # mount point itself
                return mp
            if path[mplen] == pycompat.ossep:
                return mp + encoding.upper(path[mplen:])

        return encoding.upper(path)
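A plain-str sketch of the mount-point-preserving rule, with str.upper() standing in for encoding.upper (which respects the local encoding):

    mounts = sorted(["/usr/bin", "/usr/lib", "/cygdrive"], reverse=True)

    def norm(path):
        for mp in mounts:
            if path == mp:
                return mp                           # the mount point itself
            if path.startswith(mp + '/'):
                return mp + path[len(mp):].upper()  # keep the mount point's case
        return path.upper()

    assert norm('/cygdrive/c/Work') == '/cygdrive/C/WORK'
    assert norm('relative/Path') == 'RELATIVE/PATH'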
    normcasespec = encoding.normcasespecs.other
    normcasefallback = normcase

    # Cygwin translates native ACLs to POSIX permissions,
    # but these translations are not supported by native
    # tools, so the exec bit tends to be set erroneously.
    # Therefore, disable executable bit access on Cygwin.
    def checkexec(path):
        return False

    # Similarly, Cygwin's symlink emulation is likely to create
    # problems when Mercurial is used from both Cygwin and native
    # Windows, with other native tools, or on shared volumes.
    def checklink(path):
        return False
_needsshellquote = None
def shellquote(s):
    if sys.platform == 'OpenVMS':
        return '"%s"' % s
    global _needsshellquote
    if _needsshellquote is None:
        _needsshellquote = re.compile(r'[^a-zA-Z0-9._/+-]').search
    if s and not _needsshellquote(s):
        # "s" shouldn't have to be quoted
        return s
    else:
        return "'%s'" % s.replace("'", "'\\''")
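What the quoting rule yields for a few inputs; a standalone restatement of the POSIX branch above:

    import re

    _needs = re.compile(r'[^a-zA-Z0-9._/+-]').search

    def quote(s):
        if s and not _needs(s):
            return s
        return "'%s'" % s.replace("'", "'\\''")

    assert quote('safe/path-1.txt') == 'safe/path-1.txt'
    assert quote("it's") == "'it'\\''s'"
    assert quote('') == "''"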
def quotecommand(cmd):
    return cmd

def popen(command, mode='r'):
    return os.popen(command, mode)
def testpid(pid):
    '''return False if pid dead, True if running or not sure'''
    if sys.platform == 'OpenVMS':
        return True
    try:
        os.kill(pid, 0)
        return True
    except OSError as inst:
        return inst.errno != errno.ESRCH
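Signal 0 performs the existence and permission checks without delivering anything; a self-contained restatement:

    import errno
    import os

    def alive(pid):
        try:
            os.kill(pid, 0)
            return True
        except OSError as e:
            # EPERM means the pid exists but belongs to another user
            return e.errno != errno.ESRCH

    assert alive(os.getpid())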
def explainexit(code):
    """return a 2-tuple (desc, code) describing a subprocess status
    (codes from kill are negative - not os.system/wait encoding)"""
    if code >= 0:
        return _("exited with status %d") % code, code
    return _("killed by signal %d") % -code, -code
def isowner(st):
    """Return True if the stat object st is from the current user."""
    return st.st_uid == os.getuid()
def findexe(command):
    '''Find the executable for command, searching the way which does.
    If command is a basename, PATH is searched for command.
    PATH isn't searched if command is an absolute or relative path.
    If command isn't found, None is returned.'''
    if sys.platform == 'OpenVMS':
        return command

    def findexisting(executable):
        'Return executable if it is an existing, executable file'
        if os.path.isfile(executable) and os.access(executable, os.X_OK):
            return executable
        return None

    if pycompat.ossep in command:
        return findexisting(command)

    if sys.platform == 'plan9':
        return findexisting(os.path.join('/bin', command))

    for path in encoding.environ.get('PATH', '').split(pycompat.ospathsep):
        executable = findexisting(os.path.join(path, command))
        if executable is not None:
            return executable
    return None
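Hypothetical usage, assuming this module's findexe is in scope and a typical POSIX PATH:

    print(findexe('sh'))             # e.g. '/bin/sh', or None if not on PATH
    print(findexe('./nonexistent'))  # None: paths with a separator skip PATH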
def setsignalhandler():
    pass

_wantedkinds = set([stat.S_IFREG, stat.S_IFLNK])
def statfiles(files):
    '''Stat each file in files. Yield each stat, or None if a file does not
    exist or has a type we don't care about.'''
    lstat = os.lstat
    getkind = stat.S_IFMT
    for nf in files:
        try:
            st = lstat(nf)
            if getkind(st.st_mode) not in _wantedkinds:
                st = None
        except OSError as err:
            if err.errno not in (errno.ENOENT, errno.ENOTDIR):
                raise
            st = None
        yield st
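Hypothetical usage with this module in scope; directories and other unwanted kinds come back as None, just like missing files:

    paths = ['/etc/hostname', '/no/such/file', '/tmp']  # example inputs
    for p, st in zip(paths, statfiles(paths)):
        print(p, st is not None)  # /tmp is a directory, so its entry is None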
def getuser():
    '''return name of current user'''
    return getpass.getuser()
def username(uid=None):
    """Return the name of the user with the given uid.

    If uid is None, return the name of the current user."""

    if uid is None:
        uid = os.getuid()
    try:
        return pwd.getpwuid(uid)[0]
    except KeyError:
        return str(uid)
def groupname(gid=None):
    """Return the name of the group with the given gid.

    If gid is None, return the name of the current group."""

    if gid is None:
        gid = os.getgid()
    try:
        return grp.getgrgid(gid)[0]
    except KeyError:
        return str(gid)
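Both helpers are thin wrappers over the pwd and grp databases; the same lookups, standalone:

    import grp
    import os
    import pwd

    print(pwd.getpwuid(os.getuid())[0])  # current user name
    print(grp.getgrgid(os.getgid())[0])  # current group name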
def groupmembers(name):
    """Return the list of members of the group with the given
    name; raises KeyError if the group does not exist.
    """
    return list(grp.getgrnam(name).gr_mem)
def spawndetached(args):
    return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
                      args[0], args)

def gethgcmd():
    return sys.argv[:1]

def makedir(path, notindexed):
    os.mkdir(path)
def unlinkpath(f, ignoremissing=False):
    """unlink and remove the directory if it is empty"""
    try:
        os.unlink(f)
    except OSError as e:
        if not (ignoremissing and e.errno == errno.ENOENT):
            raise
    # try removing directories that might now be empty
    try:
        os.removedirs(os.path.dirname(f))
    except OSError:
        pass
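The parent pruning relies on os.removedirs, which stops quietly at the first non-empty parent; shown standalone:

    import os
    import tempfile

    d = tempfile.mkdtemp()
    sub = os.path.join(d, 'a', 'b')
    os.makedirs(sub)
    f = os.path.join(sub, 'f')
    open(f, 'w').close()
    os.unlink(f)
    os.removedirs(os.path.dirname(f))  # removes b, then a, then d itself
    assert not os.path.exists(d)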
def lookupreg(key, name=None, scope=None):
    return None

def hidewindow():
    """Hide current shell window.

    Used to hide the window opened when starting asynchronous
    child process under Windows, unneeded on other systems.
    """
    pass
class cachestat(object):
    def __init__(self, path):
        self.stat = os.stat(path)

    def cacheable(self):
        return bool(self.stat.st_ino)

    __hash__ = object.__hash__

    def __eq__(self, other):
        try:
            # Only dev, ino, size, mtime and atime are likely to change. Out
            # of these, we shouldn't compare atime but should compare the
            # rest. However, one of the other fields changing indicates
            # something fishy going on, so return False if anything but atime
            # changes.
            return (self.stat.st_mode == other.stat.st_mode and
                    self.stat.st_ino == other.stat.st_ino and
                    self.stat.st_dev == other.stat.st_dev and
                    self.stat.st_nlink == other.stat.st_nlink and
                    self.stat.st_uid == other.stat.st_uid and
                    self.stat.st_gid == other.stat.st_gid and
                    self.stat.st_size == other.stat.st_size and
                    self.stat.st_mtime == other.stat.st_mtime and
                    self.stat.st_ctime == other.stat.st_ctime)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not self == other
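The equality rule is "everything except atime"; the same comparison demonstrated with two raw stats of an untouched file:

    import os
    import tempfile

    fh, fn = tempfile.mkstemp()
    os.close(fh)
    a, b = os.stat(fn), os.stat(fn)
    fields = ('st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid',
              'st_gid', 'st_size', 'st_mtime', 'st_ctime')
    # the fields __eq__ compares: everything except atime
    assert all(getattr(a, f) == getattr(b, f) for f in fields)
    os.unlink(fn)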
def executablepath():
    return None # available on Windows only
def statislink(st):
    '''check whether a stat result is a symlink'''
    return st and stat.S_ISLNK(st.st_mode)

def statisexec(st):
    '''check whether a stat result is an executable file'''
    return st and (st.st_mode & 0o100 != 0)
def poll(fds):
    """block until something happens on any file descriptor

    This is a generic helper that will check for any activity
    (read, write, exception) and return the list of touched
    file descriptors.

    In unsupported cases, it will raise a NotImplementedError"""
    try:
        res = select.select(fds, fds, fds)
    except ValueError: # out of range file descriptor
        raise NotImplementedError()
    return sorted(list(set(sum(res, []))))
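A pipe with pending data shows up in select's ready set; a minimal demonstration of the primitive poll() builds on:

    import os
    import select

    r, w = os.pipe()
    os.write(w, b'x')
    readable = select.select([r], [], [], 0)[0]
    assert readable == [r]
    os.close(r)
    os.close(w)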
def readpipe(pipe):
    """Read all available data from a pipe."""
    # We can't fstat() a pipe because Linux will always report 0.
    # So, we set the pipe to non-blocking mode and read everything
    # that's available.
    #
    # note: fcntl(F_SETFL) returns 0 on success, not the previous flags,
    # so the old flags must be saved before O_NONBLOCK is set
    oldflags = fcntl.fcntl(pipe, fcntl.F_GETFL)
    fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)

    try:
        chunks = []
        while True:
            try:
                s = pipe.read()
                if not s:
                    break
                chunks.append(s)
            except IOError:
                break

        return ''.join(chunks)
    finally:
        fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)
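Exercising readpipe with an ordinary pipe (a Python 2-flavoured sketch, matching this codebase, and assuming readpipe is in scope; closing the writer makes read() eventually return an empty string):

    import os

    r, w = os.pipe()
    os.write(w, 'all available data')
    os.close(w)
    print(readpipe(os.fdopen(r)))  # -> 'all available data'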
def bindunixsocket(sock, path):
    """Bind the UNIX domain socket to the specified path"""
    # use relative path instead of full path at bind() if possible, since
    # AF_UNIX path has very small length limit (107 chars) on common
    # platforms (see sys/un.h)
    dirname, basename = os.path.split(path)
    bakwdfd = None
    if dirname:
        bakwdfd = os.open('.', os.O_DIRECTORY)
        os.chdir(dirname)
    sock.bind(basename)
    if bakwdfd:
        os.fchdir(bakwdfd)
        os.close(bakwdfd)
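Hypothetical usage; the chdir dance keeps the name passed to bind() short even when the full path would overflow sun_path:

    import os
    import socket
    import tempfile

    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    path = os.path.join(tempfile.mkdtemp(), 'daemon.sock')
    bindunixsocket(sock, path)
    sock.listen(1)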