Merge with crew-stable
Martin Geisler
r9482:ca3390c1 merge default
@@ -1,2172 +1,2171 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as match_
import merge as merge_
import tags as tags_
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
    supported = set('revlogv1 store fncache shared'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.defversion = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

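    # Usage sketch for the container protocol above (hypothetical calls):
    #   repo[None]  -> workingctx for the working directory
    #   repo['tip'] -> changectx for the tip changeset
    #   repo[0]     -> changectx for revision 0
    #   len(repo)   -> number of revisions in the changelog
    #   for rev in repo: ...   # iterates over revision numbers
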
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        m = match_.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

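    # Usage sketch for tag() with hypothetical arguments -- tag one revision
    # and commit the resulting .hgtags change:
    #   repo.tag('v1.0', node, 'Added tag v1.0', local=False,
    #            user='user@example.com', date=None)
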
    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tags_.findglobaltags(self.ui, self, alltags, tagtypes)
        tags_.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchmap(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._ubranchcache = partial

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.iteritems():
            self.branchcache[encoding.tolocal(k)] = v
        return self.branchcache


    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            head = None
            for i in range(len(heads)-1, -1, -1):
                h = heads[i]
                if 'close' not in self.changelog.read(h)[5]:
                    head = h
                    break
            # no open heads were found
            if head is None:
                head = heads[-1]
            bt[bn] = head
        return bt


    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

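    # On-disk sketch of branchheads.cache as written by _writebranchcache
    # above (hashes abbreviated for illustration):
    #   <tip-node-hex> <tip-rev>         e.g. "ca3390c1... 9482"
    #   <head-node-hex> <branch-name>    one line per branch head
    # _readbranchcache discards the whole cache when the first line no
    # longer matches the current repository.
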
    def _updatebranchcache(self, partial, start, end):
        # collect new branch entries
        newbranches = {}
        for r in xrange(start, end):
            c = self[r]
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) < 2:
                continue
            newbheads = []
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                bheads = [b for b in bheads if b not in reachable]
                newbheads.insert(0, latest)
            bheads.extend(newbheads)
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

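    # lookup() above resolves keys in a fixed order: integer revision, the
    # special names '.', 'null' and 'tip', an exact changelog match, tag
    # names, branch names, and finally an unambiguous node prefix.
    # Illustrative calls: repo.lookup('.'), repo.lookup('v1.0'),
    # repo.lookup('ca3390c1').
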
    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = match_.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

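    # The filters consulted by _filter() come from hgrc sections named after
    # the filter: 'encode' is applied by wread() below when reading from the
    # working directory, 'decode' by wwrite() when writing to it. A minimal
    # sketch of such a configuration:
    #   [encode]
    #   *.gz = pipe: gunzip
    #   [decode]
    #   *.gz = pipe: gzip
    # A command of '!' disables a pattern, and a command starting with a name
    # registered via adddatafilter() below runs in-process instead of
    # shelling out through util.filter().
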
    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

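    # Journal flow sketch for transaction() above: while a transaction runs,
    # the store journal lives at .hg/store/journal, with journal.dirstate and
    # journal.branch under .hg; aftertrans(renames) turns these into undo,
    # undo.dirstate and undo.branch when the transaction closes, which is
    # exactly what rollback() below replays.
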
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

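    # Locking sketch following the pattern recover() and rollback() above use
    # (wlock before lock, released in reverse order):
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       ...  # modify the store and/or working directory
    #   finally:
    #       release(lock, wlock)
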
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

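    # _filecommit() hands back the node to record in the manifest: the newly
    # added filelog node when the file changed content, carries copy
    # metadata or has a second parent, and the unchanged first parent
    # otherwise.
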
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = match_.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()
            wctx = self[None]

            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            for s in wctx.substate:
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if subs and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)

            # commit subs
            if subs:
                state = wctx.substate.copy()
                for s in subs:
                    self.ui.status(_('committing subrepository %s\n') % s)
                    sr = wctx.sub(s).commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            ret = self.commitctx(cctx, True)

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()

            return ret

        finally:
            wlock.release()

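    # Usage sketch for commit() with hypothetical arguments:
    #   node = repo.commit(text='fix typo', user='user@example.com')
    # This commits every changed file in the working directory and returns
    # the new changeset node, or None when there is nothing to commit.
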
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self.branchcache:
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr
            lock.release()

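    # Hook ordering in commitctx() above: 'precommit' fires before the lock
    # and transaction are taken, 'pretxncommit' fires with the new node while
    # the transaction can still abort the commit, and 'commit' fires only
    # after the transaction has closed.
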
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        tags_.findglobaltags(self.ui, self, {}, {})

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
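        # Illustrative usage (a sketch; assumes 'repo' is a localrepository
        # instance; match_.always is the same matcher status() uses below):
        #
        #   all_files = list(repo.walk(match_.always(repo.root, repo.getcwd())))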
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        for l in r:
            l.sort()
        return r

    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
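                # dirstate single-letter states used below (assumed meanings):
                # 'n' normal, 'a' added, 'r' removed, 'm' merged, '?' untracked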
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
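        # (illustrative: heads at revs [3, 7, 5] become [(-3, n), (-7, n),
        # (-5, n)]; sorting ascending on the negated rev yields 7, 5, 3)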
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
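        # Illustrative usage (a sketch; assumes 'repo' is a localrepository):
        #
        #   repo.branchheads('default')              # open heads, newest first
        #   repo.branchheads('default', closed=True) # include closed heads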
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
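        # For each (top, bottom) pair, walk first parents from top towards
        # bottom and record the nodes whose distance from top is a power of
        # two (1, 2, 4, 8, ...). These exponentially spaced samples are what
        # the binary search in findcommonincoming narrows via remote.between().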
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in self and remote but have no children in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in self and remote but have no children in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
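        # (illustrative: for a linear segment tip..root, branches() returns
        # the tuple (head, root, root's first parent, root's second parent);
        # see the branches() method above)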
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n"
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug("request %d: %s\n" %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug("found new branch changeset %s\n" %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug("narrowed branch search to %s:%s\n"
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug("found new changesets starting at " +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug("%d total queries\n" % reqcnt)

        return base.keys(), list(fetch), heads

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        '''Analyze the local and remote repositories and determine which
        changesets need to be pushed to the remote. Return a tuple
        (changegroup, remoteheads). changegroup is a readable file-like
        object whose read() returns successive changegroup chunks ready to
        be sent over the wire. remoteheads is the list of remote heads.
        '''
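        # Return contract, as implemented below: (None, 1) when there is
        # nothing to push, (None, 0) when the push would create new remote
        # heads or branches without --force, and (changegroup, remoteheads)
        # otherwise.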
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        def checkbranch(lheads, rheads, updatelb):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelb: outgoing local branch bases
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                # add local heads involved in the push
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelb]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                # add heads we don't have or that are not involved in the push
                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                 self[updatelb[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelb = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelb:
                            continue
                        if not checkbranch(lheads, rheads, updatelb):
                            return None, 0
            else:
                if not checkbranch(heads, remote_heads, update):
                    return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        if extranodes is None:
            # can we go through the fast path?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(key=revlog.rev)
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

1807 # We have a list of filenodes we think we need for a file, lets remove
1806 # We have a list of filenodes we think we need for a file, lets remove
1808 # all those we know the recipient must have.
1807 # all those we know the recipient must have.
1809 def prune_filenodes(f, filerevlog):
1808 def prune_filenodes(f, filerevlog):
1810 msngset = msng_filenode_set[f]
1809 msngset = msng_filenode_set[f]
1811 hasset = set()
1810 hasset = set()
1812 # If a 'missing' filenode thinks it belongs to a changenode we
1811 # If a 'missing' filenode thinks it belongs to a changenode we
1813 # assume the recipient must have, then the recipient must have
1812 # assume the recipient must have, then the recipient must have
1814 # that filenode.
1813 # that filenode.
1815 for n in msngset:
1814 for n in msngset:
1816 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1815 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1817 if clnode in has_cl_set:
1816 if clnode in has_cl_set:
1818 hasset.add(n)
1817 hasset.add(n)
1819 prune_parents(filerevlog, hasset, msngset)
1818 prune_parents(filerevlog, hasset, msngset)
1820
1819
1821 # A function generator function that sets up the a context for the
1820 # A function generator function that sets up the a context for the
1822 # inner function.
1821 # inner function.
1823 def lookup_filenode_link_func(fname):
1822 def lookup_filenode_link_func(fname):
1824 msngset = msng_filenode_set[fname]
1823 msngset = msng_filenode_set[fname]
1825 # Lookup the changenode the filenode belongs to.
1824 # Lookup the changenode the filenode belongs to.
1826 def lookup_filenode_link(fnode):
1825 def lookup_filenode_link(fnode):
1827 return msngset[fnode]
1826 return msngset[fnode]
1828 return lookup_filenode_link
1827 return lookup_filenode_link
1829
1828
1830 # Add the nodes that were explicitly requested.
1829 # Add the nodes that were explicitly requested.
1831 def add_extra_nodes(name, nodes):
1830 def add_extra_nodes(name, nodes):
1832 if not extranodes or name not in extranodes:
1831 if not extranodes or name not in extranodes:
1833 return
1832 return
1834
1833
1835 for node, linknode in extranodes[name]:
1834 for node, linknode in extranodes[name]:
1836 if node not in nodes:
1835 if node not in nodes:
1837 nodes[node] = linknode
1836 nodes[node] = linknode
1838
1837
1839 # Now that we have all theses utility functions to help out and
1838 # Now that we have all theses utility functions to help out and
1840 # logically divide up the task, generate the group.
1839 # logically divide up the task, generate the group.
1841 def gengroup():
1840 def gengroup():
1842 # The set of changed files starts empty.
1841 # The set of changed files starts empty.
1843 changedfiles = {}
1842 changedfiles = {}
1844 # Create a changenode group generator that will call our functions
1843 # Create a changenode group generator that will call our functions
1845 # back to lookup the owning changenode and collect information.
1844 # back to lookup the owning changenode and collect information.
1846 group = cl.group(msng_cl_lst, identity,
1845 group = cl.group(msng_cl_lst, identity,
1847 manifest_and_file_collector(changedfiles))
1846 manifest_and_file_collector(changedfiles))
1848 for chnk in group:
1847 for chnk in group:
1849 yield chnk
1848 yield chnk
1850
1849
1851 # The list of manifests has been collected by the generator
1850 # The list of manifests has been collected by the generator
1852 # calling our functions back.
1851 # calling our functions back.
1853 prune_manifests()
1852 prune_manifests()
1854 add_extra_nodes(1, msng_mnfst_set)
1853 add_extra_nodes(1, msng_mnfst_set)
1855 msng_mnfst_lst = msng_mnfst_set.keys()
1854 msng_mnfst_lst = msng_mnfst_set.keys()
1856 # Sort the manifestnodes by revision number.
1855 # Sort the manifestnodes by revision number.
1857 msng_mnfst_lst.sort(key=mnfst.rev)
1856 msng_mnfst_lst.sort(key=mnfst.rev)
1858 # Create a generator for the manifestnodes that calls our lookup
1857 # Create a generator for the manifestnodes that calls our lookup
1859 # and data collection functions back.
1858 # and data collection functions back.
1860 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1859 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1861 filenode_collector(changedfiles))
1860 filenode_collector(changedfiles))
1862 for chnk in group:
1861 for chnk in group:
1863 yield chnk
1862 yield chnk
1864
1863
1865 # These are no longer needed, dereference and toss the memory for
1864 # These are no longer needed, dereference and toss the memory for
1866 # them.
1865 # them.
1867 msng_mnfst_lst = None
1866 msng_mnfst_lst = None
1868 msng_mnfst_set.clear()
1867 msng_mnfst_set.clear()
1869
1868
1870 if extranodes:
1869 if extranodes:
1871 for fname in extranodes:
1870 for fname in extranodes:
1872 if isinstance(fname, int):
1871 if isinstance(fname, int):
1873 continue
1872 continue
1874 msng_filenode_set.setdefault(fname, {})
1873 msng_filenode_set.setdefault(fname, {})
1875 changedfiles[fname] = 1
1874 changedfiles[fname] = 1
1876 # Go through all our files in order sorted by name.
1875 # Go through all our files in order sorted by name.
1877 for fname in sorted(changedfiles):
1876 for fname in sorted(changedfiles):
1878 filerevlog = self.file(fname)
1877 filerevlog = self.file(fname)
1879 if not len(filerevlog):
1878 if not len(filerevlog):
1880 raise util.Abort(_("empty or missing revlog for %s") % fname)
1879 raise util.Abort(_("empty or missing revlog for %s") % fname)
1881 # Toss out the filenodes that the recipient isn't really
1880 # Toss out the filenodes that the recipient isn't really
1882 # missing.
1881 # missing.
1883 if fname in msng_filenode_set:
1882 if fname in msng_filenode_set:
1884 prune_filenodes(fname, filerevlog)
1883 prune_filenodes(fname, filerevlog)
1885 add_extra_nodes(fname, msng_filenode_set[fname])
1884 add_extra_nodes(fname, msng_filenode_set[fname])
1886 msng_filenode_lst = msng_filenode_set[fname].keys()
1885 msng_filenode_lst = msng_filenode_set[fname].keys()
1887 else:
1886 else:
1888 msng_filenode_lst = []
1887 msng_filenode_lst = []
1889 # If any filenodes are left, generate the group for them,
1888 # If any filenodes are left, generate the group for them,
1890 # otherwise don't bother.
1889 # otherwise don't bother.
1891 if len(msng_filenode_lst) > 0:
1890 if len(msng_filenode_lst) > 0:
1892 yield changegroup.chunkheader(len(fname))
1891 yield changegroup.chunkheader(len(fname))
1893 yield fname
1892 yield fname
1894 # Sort the filenodes by their revision #
1893 # Sort the filenodes by their revision #
1895 msng_filenode_lst.sort(key=filerevlog.rev)
1894 msng_filenode_lst.sort(key=filerevlog.rev)
1896 # Create a group generator and only pass in a changenode
1895 # Create a group generator and only pass in a changenode
1897 # lookup function as we need to collect no information
1896 # lookup function as we need to collect no information
1898 # from filenodes.
1897 # from filenodes.
1899 group = filerevlog.group(msng_filenode_lst,
1898 group = filerevlog.group(msng_filenode_lst,
1900 lookup_filenode_link_func(fname))
1899 lookup_filenode_link_func(fname))
1901 for chnk in group:
1900 for chnk in group:
1902 yield chnk
1901 yield chnk
1903 if fname in msng_filenode_set:
1902 if fname in msng_filenode_set:
1904 # Don't need this anymore, toss it to free memory.
1903 # Don't need this anymore, toss it to free memory.
1905 del msng_filenode_set[fname]
1904 del msng_filenode_set[fname]
1906 # Signal that no more groups are left.
1905 # Signal that no more groups are left.
1907 yield changegroup.closechunk()
1906 yield changegroup.closechunk()
1908
1907
1909 if msng_cl_lst:
1908 if msng_cl_lst:
1910 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1909 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1911
1910
1912 return util.chunkbuffer(gengroup())
1911 return util.chunkbuffer(gengroup())
1913
1912
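# Usage sketch (illustrative, not part of localrepo.py): the chunkbuffer
# returned above is file-like, so a caller can drain the changegroup stream
# in fixed-size blocks.  `repo`, `bases` and `heads` are assumed to be a
# localrepository object and two lists of binary changelog nodes.
def drain_changegroup(repo, bases, heads):
    cg = repo.changegroupsubset(bases, heads, 'bundle')
    blocks = []
    while True:
        block = cg.read(4096)
        if not block:            # read() returns '' once the stream is done
            break
        blocks.append(block)
    return ''.join(blocks)
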
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

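# Wire-format sketch (illustrative, not part of localrepo.py): the chunks
# yielded by gengroup() use the framing that changegroup.chunkheader() and
# changegroup.closechunk() produce: a 4-byte big-endian length that counts
# itself, with a length of 0 terminating a group.  A minimal reader over a
# file-like object might look like this.
import struct

def read_chunks(fp):
    '''yield one chunk payload at a time until an empty terminator chunk'''
    while True:
        header = fp.read(4)
        if len(header) < 4:
            break                     # truncated stream
        length = struct.unpack(">l", header)[0]
        if length <= 4:
            break                     # closechunk() marker
        yield fp.read(length - 4)     # payload excludes the 4 header bytes
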
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

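# Usage sketch (illustrative, not part of localrepo.py): decoding the return
# value documented in the docstring above.
def describe_addchangegroup(ret):
    if ret == 0:
        return 'nothing changed'
    if ret > 0:
        return '%d head(s) added' % (ret - 1)    # 1 means the count is stable
    return '%d head(s) removed' % (-ret - 1)
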
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

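# Usage sketch (illustrative, not part of localrepo.py): aftertrans() returns
# a plain closure, so the transaction machinery can hold it without keeping a
# reference back to the repository.  The journal/undo names are hypothetical
# here, and the files must exist when the callback finally runs.
def example_aftertrans():
    post = aftertrans([('journal', 'undo'), ('journal.branch', 'undo.branch')])
    post()   # renames journal -> undo and journal.branch -> undo.branch
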
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
@@ -1,1286 +1,1290 b''
# util.py - Mercurial utility functions and platform specific implementations
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

"""Mercurial utility functions and platform specific implementations.

This contains helper routines that are independent of the SCM core and
hide platform-specific details from the core.
"""

from i18n import _
-import error, osutil
+import error, osutil, encoding
import cStringIO, errno, re, shutil, sys, tempfile, traceback
import os, stat, time, calendar, random, textwrap
import imp

# Python compatibility

def sha1(s):
    return _fastsha1(s)

def _fastsha1(s):
    # This function will import sha1 from hashlib or sha (whichever is
    # available) and overwrite itself with it on the first call.
    # Subsequent calls will go directly to the imported function.
    try:
        from hashlib import sha1 as _sha1
    except ImportError:
        from sha import sha as _sha1
    global _fastsha1, sha1
    _fastsha1 = sha1 = _sha1
    return _sha1(s)

import subprocess
closefds = os.name == 'posix'

def popen2(cmd):
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    return p.stdin, p.stdout

def popen3(cmd):
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    return p.stdin, p.stdout, p.stderr

def version():
    """Return version information if available."""
    try:
        import __version__
        return __version__.version
    except ImportError:
        return 'unknown'

# used by parsedate
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S',  # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)

def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keyword args
    cache = {}
    if func.func_code.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f

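# Usage sketch (illustrative, not part of util.py): cachefunc() memoizes on
# positional arguments, so each distinct argument is computed only once.
def _square(n):
    return n * n

square = cachefunc(_square)
assert square(12) == 144
assert square(12) == 144   # second call is served from the cache
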
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = []
    if func.func_code.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f

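# Usage sketch (illustrative, not part of util.py): lrucachefunc() behaves
# like cachefunc() but keeps only the most recent results (the bound above is
# hardwired to about 20 entries), so it suits functions fed an unbounded
# stream of distinct arguments.
lower = lrucachefunc(lambda s: s.lower())
for word in ['Foo', 'Bar', 'Baz'] * 10:
    lower(word)              # older entries are evicted once the cache fills
assert lower('Foo') == 'foo'
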
class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        setattr(obj, self.name, result)
        return result

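# Usage sketch (illustrative, not part of util.py): propertycache computes a
# value on first attribute access, then replaces itself with the plain result
# via setattr(), so later accesses bypass the descriptor entirely.
class changecount(object):
    def __init__(self, items):
        self._items = items
    @propertycache
    def total(self):
        return len(self._items)   # runs exactly once per instance

c = changecount(['a', 'b', 'c'])
assert c.total == 3               # computed here
assert c.total == 3               # served from the instance dict here
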
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    return pout

def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code: raise Abort(_("command '%s' failed: %s") %
                             (cmd, explain_exit(code)))
        return open(outname, 'rb').read()
    finally:
        try:
            if inname: os.unlink(inname)
        except: pass
        try:
            if outname: os.unlink(outname)
        except: pass

filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}

def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)

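# Usage sketch (illustrative, not part of util.py): filter() dispatches on
# the command prefix; 'pipe:' and 'tempfile:' select the helpers above, and
# anything else falls through to pipefilter().  This assumes a POSIX system
# where tr(1) is available.
assert filter('kernel\n', 'pipe: tr a-z A-Z') == 'KERNEL\n'
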
def binary(s):
    """return true if a string is binary data"""
    return bool(s and '\0' in s)

def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield ''.join(buf)

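# Usage sketch (illustrative, not part of util.py): increasingchunks()
# coalesces many small strings into progressively larger buffers, keeping
# per-write overhead low when streaming lots of tiny chunks.
pieces = ['x' * 100] * 50     # fifty 100-byte strings
sizes = [len(c) for c in increasingchunks(pieces, min=1024, max=4096)]
assert sum(sizes) == 5000     # no data lost
assert all(s >= 1024 for s in sizes[:-1])   # only the tail may be short
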
Abort = error.Abort

def always(fn): return True
def never(fn): return False

def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1: return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'

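# Worked example (illustrative, not part of util.py): starting from 'a/b'
# (relative to the root) and targeting 'c/d', the common-prefix loop strips
# nothing, leaving two '..' hops plus the target components.
assert pathto('/repo', os.path.join('a', 'b'), 'c/d') == \
       os.path.join('..', '..', 'c', 'd')
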
def canonpath(root, cwd, myname):
    """return the canonical path of myname, given cwd and root"""
    if endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    audit_path = path_auditor(root)
    if name != rootsep and name.startswith(rootsep):
        name = name[len(rootsep):]
        audit_path(name)
        return pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). For each
        # `name', compare dev/inode numbers. If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                break
            if samestat(name_st, root_st):
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                audit_path(name)
                return pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                break
            name = dirname

        raise Abort('%s not under root' % myname)

_hgexecutable = None

def main_is_frozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (hasattr(sys, "frozen") or # new py2exe
            hasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        if hg:
            set_hgexecutable(hg)
        elif main_is_frozen():
            set_hgexecutable(sys.executable)
        else:
            set_hgexecutable(find_exe('hg') or 'hg')
    return _hgexecutable

def set_hgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path

def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if onerr is a ui
    object, print error message and return status, else raise onerr object
    as exception.'''
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    oldenv = {}
    for k in environ:
        oldenv[k] = os.environ.get(k)
    if cwd is not None:
        oldcwd = os.getcwd()
    origcmd = cmd
    if os.name == 'nt':
        cmd = '"%s"' % cmd
    try:
        for k, v in environ.iteritems():
            os.environ[k] = py2shell(v)
        os.environ['HG'] = hgexecutable()
        if cwd is not None and oldcwd != cwd:
            os.chdir(cwd)
        rc = os.system(cmd)
        if sys.platform == 'OpenVMS' and rc & 1:
            rc = 0
        if rc and onerr:
            errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                                explain_exit(rc)[0])
            if errprefix:
                errmsg = '%s: %s' % (errprefix, errmsg)
            try:
                onerr.warn(errmsg + '\n')
            except AttributeError:
                raise onerr(errmsg)
        return rc
    finally:
        for k, v in oldenv.iteritems():
            if v is None:
                del os.environ[k]
            else:
                os.environ[k] = v
        if cwd is not None and oldcwd != cwd:
            os.chdir(oldcwd)

def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check

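# Usage sketch (illustrative, not part of util.py): checksignature() converts
# a TypeError raised by the call itself (wrong number of arguments, a single
# traceback frame) into error.SignatureError; TypeErrors raised deeper inside
# the wrapped function still propagate unchanged.
def greet(name):
    return 'hello %s' % name

checked = checksignature(greet)
try:
    checked('a', 'b')             # wrong arity
except error.SignatureError:
    pass
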
# os.path.lexists is not available on python2.3
def lexists(filename):
    "test whether a file with this name exists. does not follow symlinks"
    try:
        os.lstat(filename)
    except:
        return False
    return True

def rename(src, dst):
    """forcibly rename a file"""
    try:
        os.rename(src, dst)
    except OSError, err: # FIXME: check err (EEXIST ?)

        # On windows, rename to existing file is not allowed, so we
        # must delete destination first. But if a file is open, unlink
        # schedules it for delete but does not delete it. Rename
        # happens immediately even for open files, so we rename
        # destination to a temporary name, then delete that. Then
        # rename is safe to do.
        # The temporary name is chosen at random to avoid the situation
        # where a file is left lying around from a previous aborted run.
        # The usual race condition this introduces can't be avoided as
        # we need the name to rename into, and not the file itself. Due
        # to the nature of the operation however, any races will at worst
        # lead to the rename failing and the current operation aborting.

        def tempname(prefix):
            for tries in xrange(10):
                temp = '%s-%08x' % (prefix, random.randint(0, 0xffffffff))
                if not os.path.exists(temp):
                    return temp
            raise IOError, (errno.EEXIST, "No usable temporary filename found")

        temp = tempname(dst)
        os.rename(dst, temp)
        os.unlink(temp)
        os.rename(src, dst)

def unlink(f):
    """unlink and remove the directory if it is empty"""
    os.unlink(f)
    # try removing directories that might now be empty
    try:
        os.removedirs(os.path.dirname(f))
    except OSError:
        pass

def copyfile(src, dest):
    "copy a file, preserving mode and atime/mtime"
    if os.path.islink(src):
        try:
            os.unlink(dest)
        except:
            pass
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copystat(src, dest)
        except shutil.Error, inst:
            raise Abort(str(inst))

def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""

    if hardlink is None:
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            copyfiles(srcname, dstname, hardlink)
    else:
        if hardlink:
            try:
                os_link(src, dst)
            except (IOError, OSError):
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)

495 class path_auditor(object):
495 class path_auditor(object):
496 '''ensure that a filesystem path contains no banned components.
496 '''ensure that a filesystem path contains no banned components.
497 the following properties of a path are checked:
497 the following properties of a path are checked:
498
498
499 - under top-level .hg
499 - under top-level .hg
500 - starts at the root of a windows drive
500 - starts at the root of a windows drive
501 - contains ".."
501 - contains ".."
502 - traverses a symlink (e.g. a/symlink_here/b)
502 - traverses a symlink (e.g. a/symlink_here/b)
503 - inside a nested repository'''
503 - inside a nested repository'''
504
504
505 def __init__(self, root):
505 def __init__(self, root):
506 self.audited = set()
506 self.audited = set()
507 self.auditeddir = set()
507 self.auditeddir = set()
508 self.root = root
508 self.root = root
509
509
510 def __call__(self, path):
510 def __call__(self, path):
511 if path in self.audited:
511 if path in self.audited:
512 return
512 return
513 normpath = os.path.normcase(path)
513 normpath = os.path.normcase(path)
514 parts = splitpath(normpath)
514 parts = splitpath(normpath)
515 if (os.path.splitdrive(path)[0]
515 if (os.path.splitdrive(path)[0]
516 or parts[0].lower() in ('.hg', '.hg.', '')
516 or parts[0].lower() in ('.hg', '.hg.', '')
517 or os.pardir in parts):
517 or os.pardir in parts):
518 raise Abort(_("path contains illegal component: %s") % path)
518 raise Abort(_("path contains illegal component: %s") % path)
519 if '.hg' in path.lower():
519 if '.hg' in path.lower():
520 lparts = [p.lower() for p in parts]
520 lparts = [p.lower() for p in parts]
521 for p in '.hg', '.hg.':
521 for p in '.hg', '.hg.':
522 if p in lparts[1:]:
522 if p in lparts[1:]:
523 pos = lparts.index(p)
523 pos = lparts.index(p)
524 base = os.path.join(*parts[:pos])
524 base = os.path.join(*parts[:pos])
525 raise Abort(_('path %r is inside repo %r') % (path, base))
525 raise Abort(_('path %r is inside repo %r') % (path, base))
526 def check(prefix):
526 def check(prefix):
527 curpath = os.path.join(self.root, prefix)
527 curpath = os.path.join(self.root, prefix)
528 try:
528 try:
529 st = os.lstat(curpath)
529 st = os.lstat(curpath)
530 except OSError, err:
530 except OSError, err:
531 # EINVAL can be raised as invalid path syntax under win32.
531 # EINVAL can be raised as invalid path syntax under win32.
532 # They must be ignored for patterns can be checked too.
532 # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise Abort(_('path %r traverses symbolic link %r') %
                                (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    raise Abort(_('path %r is inside repo %r') %
                                (path, prefix))
        parts.pop()
        prefixes = []
        while parts:
            prefix = os.sep.join(parts)
            if prefix in self.auditeddir:
                break
            check(prefix)
            prefixes.append(prefix)
            parts.pop()

        self.audited.add(path)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)

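# An illustrative usage sketch of path_auditor (editor's addition, not part
# of the original module); the repository root and paths are hypothetical.
#
#   audit = path_auditor('/repo')
#   audit('src/a.py')       # passes, and is remembered in audit.audited
#   audit('a/../b')         # raises Abort: path contains illegal component
#   audit('sub/.hg/hgrc')   # raises Abort: path is inside repo 'sub'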
def nlinks(pathname):
    """Return number of hardlinks for the given file."""
    return os.lstat(pathname).st_nlink

if hasattr(os, 'link'):
    os_link = os.link
else:
    def os_link(src, dst):
        raise OSError(0, _("Hardlinks not supported"))

def lookup_reg(key, name=None, scope=None):
    return None

if os.name == 'nt':
    from windows import *
else:
    from posix import *

def makelock(info, pathname):
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)

def readlock(pathname):
    try:
        return os.readlink(pathname)
    except OSError, why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    return posixfile(pathname).read()

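# An illustrative sketch (editor's addition, not part of the original
# module): the lock "info" string, conventionally something like
# "hostname:pid", is stored as a symlink target where the OS allows it,
# with a plain-file fallback; readlock retrieves it either way. The path
# below is hypothetical.
#
#   makelock('myhost:1234', '.hg/store/lock')
#   readlock('.hg/store/lock')   # -> 'myhost:1234'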
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)

# File system features

def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    p2 = os.path.join(d, b.upper())
    if path == p2:
        p2 = os.path.join(d, b.lower())
    try:
        s2 = os.stat(p2)
        if s2 == s1:
            return False
        return True
    except:
        return True

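# An illustrative sketch (editor's addition, not part of the original
# module); the mount points are hypothetical. checkcase() stats the path
# and a case-flipped sibling and compares the results:
#
#   checkcase('/repo/.hg')   # True on ext4 (case-sensitive)
#   checkcase('/repo/.hg')   # False on a default, case-folding HFS+ volume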
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name is either relative to root, or it is an absolute path starting
    with root. Note that this function is unnecessary on case-sensitive
    filesystems and should not be called there, simply because it is
    expensive.
    '''
    # If name is absolute, make it relative
    if name.lower().startswith(root.lower()):
        l = len(root)
        if name[l] == os.sep or name[l] == os.altsep:
            l = l + 1
        name = name[l:]

    if not os.path.exists(os.path.join(root, name)):
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    seps = seps.replace('\\', '\\\\')
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normcase(os.path.normpath(root))
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        lpart = part.lower()
        lenp = len(part)
        for n in contents:
            if lenp == len(n) and n.lower() == lpart:
                result.append(n)
                break
        else:
            # Cannot happen, as the file exists!
            result.append(part)
        dir = os.path.join(dir, lpart)

    return ''.join(result)

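# An illustrative sketch (editor's addition, not part of the original
# module); the paths are hypothetical.
#
#   fspath('foo/BAR', '/repo')   # -> 'foo/Bar' if the directory on disk
#                                #    is actually spelled /repo/foo/Bar
#   fspath('missing', '/repo')   # -> None (the path does not exist)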
def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)
    """

    # VFAT on some Linux versions can flip mode but it doesn't persist
    # across a FS remount. Frequently we can detect it if files are created
    # with exec bit on.

    try:
        EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        fh, fn = tempfile.mkstemp("", "", path)
        try:
            os.close(fh)
            m = os.stat(fn).st_mode & 0777
            new_file_has_exec = m & EXECFLAGS
            os.chmod(fn, m ^ EXECFLAGS)
            exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
        finally:
            os.unlink(fn)
    except (IOError, OSError):
        # we don't care, the user probably won't be able to commit anyway
        return False
    return not (new_file_has_exec or exec_flags_cannot_flip)

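# Editor's note on the check above (not part of the original module): it
# creates a scratch file, verifies the exec bits start cleared, flips them
# with chmod, and confirms the flip sticks. Only a filesystem passing both
# tests (e.g. ext4) yields True; VFAT-style filesystems, where st_mode is
# synthesized, yield False.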
def checklink(path):
    """check whether the given path is on a symlink-capable filesystem"""
    # mktemp is not racy because symlink creation will fail if the
    # file already exists
    name = tempfile.mktemp(dir=path)
    try:
        os.symlink(".", name)
        os.unlink(name)
        return True
    except (OSError, AttributeError):
        return False

def needbinarypatch():
    """return True if patches should be applied in binary mode by default."""
    return os.name == 'nt'

def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)

def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because it is meant
    as a simple alternative to "xxx.split(os.sep)".
    It is recommended to apply os.path.normpath() beforehand if needed.'''
    return path.split(os.sep)

def gui():
    '''Are we running in a GUI?'''
    return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")

def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents as name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    try:
        st_mode = os.lstat(name).st_mode & 0777
    except OSError, inst:
        if inst.errno != errno.ENOENT:
            raise
        st_mode = createmode
        if st_mode is None:
            st_mode = ~umask
        st_mode &= 0666
    os.chmod(temp, st_mode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:
        try: os.unlink(temp)
        except: pass
        raise
    return temp

class atomictempfile(object):
    """file-like object that atomically updates a file

    All writes will be redirected to a temporary copy of the original
    file. When rename is called, the copy is renamed to the original
    name, making the changes visible.
    """
    def __init__(self, name, mode, createmode):
        self.__name = name
        self._fp = None
        self.temp = mktempcopy(name, emptyok=('w' in mode),
                               createmode=createmode)
        self._fp = posixfile(self.temp, mode)

    def __getattr__(self, name):
        return getattr(self._fp, name)

    def rename(self):
        if not self._fp.closed:
            self._fp.close()
        rename(self.temp, localpath(self.__name))

    def __del__(self):
        if not self._fp:
            return
        if not self._fp.closed:
            try:
                os.unlink(self.temp)
            except: pass
            self._fp.close()

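# A minimal usage sketch (editor's addition, not part of the original
# module); the file name is hypothetical.
#
#   f = atomictempfile('somefile', 'w', createmode=None)
#   f.write('new contents')
#   f.rename()   # atomically replaces 'somefile'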
def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance"""
    try:
        os.mkdir(name)
        if mode is not None:
            os.chmod(name, mode)
        return
    except OSError, err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT:
            raise
    parent = os.path.abspath(os.path.dirname(name))
    makedirs(parent, mode)
    makedirs(name, mode)

class opener(object):
    """Open files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    """
    def __init__(self, base, audit=True):
        self.base = base
        if audit:
            self.audit_path = path_auditor(base)
        else:
            self.audit_path = always
        self.createmode = None

    @propertycache
    def _can_symlink(self):
        return checklink(self.base)

    def _fixfilemode(self, name):
        if self.createmode is None:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        self.audit_path(path)
        f = os.path.join(self.base, path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ("r", "rb"):
            try:
                nlink = nlinks(f)
            except OSError:
                nlink = 0
                d = os.path.dirname(f)
                if not os.path.isdir(d):
                    makedirs(d, self.createmode)
            if atomictemp:
                return atomictempfile(f, mode, self.createmode)
            if nlink > 1:
                rename(mktempcopy(f), f)
        fp = posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.audit_path(dst)
        linkname = os.path.join(self.base, dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        dirname = os.path.dirname(linkname)
        if not os.path.exists(dirname):
            makedirs(dirname, self.createmode)

        if self._can_symlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            f = self(dst, "w")
            f.write(src)
            f.close()
            self._fixfilemode(dst)

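# A minimal usage sketch (editor's addition, not part of the original
# module); the base directory and file name are hypothetical.
#
#   o = opener('/repo/.hg')
#   f = o('store/undo', 'w', atomictemp=True)   # path audited, parent
#   f.write('data')                             # dirs created on demand
#   f.rename()                                  # atomic replacement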
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        self.iter = iter(in_iter)
        self.buf = ''
        self.targetsize = 2**16 # internal read-ahead target, fixed at 64k

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        if l > len(self.buf) and self.iter:
            # Clamp to a multiple of self.targetsize
            targetsize = max(l, self.targetsize)
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            collected = len(self.buf)
            for chunk in self.iter:
                collector.write(chunk)
                collected += len(chunk)
                if collected >= targetsize:
                    break
            if collected < targetsize:
                self.iter = False
            self.buf = collector.getvalue()
        if len(self.buf) == l:
            s, self.buf = str(self.buf), ''
        else:
            s, self.buf = self.buf[:l], buffer(self.buf, l)
        return s

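# A doctest-style sketch (editor's addition, not part of the original
# module):
#
#   >>> cb = chunkbuffer(iter(['1234', '5678']))
#   >>> cb.read(3)
#   '123'
#   >>> cb.read(10)      # the iterator runs dry; returns what is left
#   '45678'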
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None: nbytes = size
        else: nbytes = min(limit, size)
        s = nbytes and f.read(nbytes)
        if not s: break
        if limit: limit -= len(s)
        yield s

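# A doctest-style sketch (editor's addition, not part of the original
# module):
#
#   >>> import cStringIO
#   >>> list(filechunkiter(cStringIO.StringIO('abcdefgh'), size=3))
#   ['abc', 'def', 'gh']
#   >>> list(filechunkiter(cStringIO.StringIO('abcdefgh'), size=3, limit=4))
#   ['abc', 'd']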
def makedate():
    lt = time.localtime()
    if lt[8] == 1 and time.daylight:
        tz = time.altzone
    else:
        tz = time.timezone
    return time.mktime(lt), tz

def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. %1 and %2 in the format are
    replaced by the hours and minutes of the offset; leave them out
    of the format to omit the time zone from the string."""
    t, tz = date or makedate()
    if "%1" in format or "%2" in format:
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    s = time.strftime(format, time.gmtime(float(t) - tz))
    return s

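# Doctest-style examples (editor's addition, not part of the original
# module; assumes the C locale for day/month names):
#
#   >>> datestr((0, 0))
#   'Thu Jan 01 00:00:00 1970 +0000'
#   >>> datestr((0, -3600))      # one hour east of UTC
#   'Thu Jan 01 01:00:00 1970 +0100'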
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date."""
    return datestr(date, format='%Y-%m-%d')

def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    def timezone(string):
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    for part in defaults:
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset

def parsedate(date, formats=None, defaults=None):
    """parse a localized date/time string and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()
    try:
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        if not defaults:
            defaults = {}
        now = makedate()
        for part in "d mb yY HI M S".split():
            if part not in defaults:
                if part[0] in "HMS":
                    defaults[part] = "00"
                else:
                    defaults[part] = datestr(now, "%" + part[0])

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset

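# Doctest-style examples (editor's addition, not part of the original
# module):
#
#   >>> parsedate('1000000 0')           # "unixtime offset" form
#   (1000000, 0)
#   >>> parsedate((1000000, 0))          # tuples pass straight through
#   (1000000, 0)
#   >>> parsedate('')
#   (0, 0)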
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    """

    def lower(date):
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in "31 30 29".split():
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()
    if date[0] == "<":
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop

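# An illustrative sketch (editor's addition, not part of the original
# module); results depend on the local time zone.
#
#   m = matchdate('>2006-02-01')
#   m(parsedate('2006-03-01 UTC')[0])    # -> True
#   m = matchdate('-30')                 # matches the last 30 days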
def shortuser(user):
    """Return a short representation of a user name or email address."""
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f+1:]
    f = user.find(' ')
    if f >= 0:
        user = user[:f]
    f = user.find('.')
    if f >= 0:
        user = user[:f]
    return user

def email(author):
    '''get email of author.'''
    r = author.find('>')
    if r == -1: r = None
    return author[author.find('<')+1:r]

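# Doctest-style examples (editor's addition, not part of the original
# module):
#
#   >>> shortuser('Foo Bar <foo.bar@example.com>')
#   'foo'
#   >>> email('Foo Bar <foo.bar@example.com>')
#   'foo.bar@example.com'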
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    if len(text) <= maxlength:
        return text
    else:
        return "%s..." % (text[:maxlength-3])

def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, recursively.'''
    def errhandler(err):
        if err.filename == path:
            raise err
    if followsym and hasattr(os.path, 'samestat'):
        def _add_dir_if_not_there(dirlst, dirname):
            match = False
            samestat = os.path.samestat
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        _add_dir_if_not_there(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if _add_dir_if_not_there(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs

_rcpath = None

def os_rcpath():
    '''return default os-specific hgrc search path'''
    path = system_rcpath()
    path.extend(user_rcpath())
    path = [os.path.normpath(f) for f in path]
    return path

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p: continue
                if os.path.isdir(p):
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = os_rcpath()
    return _rcpath

def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    units = (
        (100, 1<<30, _('%.0f GB')),
        (10, 1<<30, _('%.1f GB')),
        (1, 1<<30, _('%.2f GB')),
        (100, 1<<20, _('%.0f MB')),
        (10, 1<<20, _('%.1f MB')),
        (1, 1<<20, _('%.2f MB')),
        (100, 1<<10, _('%.0f KB')),
        (10, 1<<10, _('%.1f KB')),
        (1, 1<<10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
        )

    for multiplier, divisor, format in units:
        if nbytes >= divisor * multiplier:
            return format % (nbytes / float(divisor))
    return units[-1][2] % nbytes

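# Doctest-style examples (editor's addition, not part of the original
# module):
#
#   >>> bytecount(0)
#   '0 bytes'
#   >>> bytecount(1024)
#   '1.00 KB'
#   >>> bytecount(10 * (1 << 20))
#   '10.0 MB'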
def drop_scheme(scheme, path):
    sc = scheme + ':'
    if path.startswith(sc):
        path = path[len(sc):]
        if path.startswith('//'):
            path = path[2:]
    return path

def uirepr(s):
    # Avoid double backslash in Windows path repr()
    return repr(s).replace('\\\\', '\\')

def termwidth():
    if 'COLUMNS' in os.environ:
        try:
            return int(os.environ['COLUMNS'])
        except ValueError:
            pass
    try:
        import termios, array, fcntl
        for dev in (sys.stdout, sys.stdin):
            try:
                try:
                    fd = dev.fileno()
                except AttributeError:
                    continue
                if not os.isatty(fd):
                    continue
                arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
                return array.array('h', arri)[1]
            except ValueError:
                pass
    except ImportError:
        pass
    return 80

def wrap(line, hangindent, width=None):
    if width is None:
        width = termwidth() - 2
    if width <= hangindent:
        # adjust for weird terminal size
        width = max(78, hangindent + 1)
    padding = '\n' + ' ' * hangindent
    # To avoid corrupting multi-byte characters in line, we must wrap
    # a Unicode string instead of a bytestring.
    u = line.decode(encoding.encoding)
    w = padding.join(textwrap.wrap(u, width=width - hangindent))
    return w.encode(encoding.encoding)

def iterlines(iterator):
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line
@@ -1,126 +1,143 b''
#!/bin/sh

mkdir a
cd a
hg init
echo foo > t1
hg add t1
hg commit -m "1" -d "1000000 0"

cd ..
hg clone a b

cd a
echo foo > t2
hg add t2
hg commit -m "2" -d "1000000 0"

cd ../b
echo foo > t3
hg add t3
hg commit -m "3" -d "1000000 0"

hg push ../a
hg pull ../a
hg push ../a
hg merge
hg commit -m "4" -d "1000000 0"
hg push ../a
cd ..

hg init c
cd c
for i in 0 1 2; do
    echo $i >> foo
    hg ci -Am $i -d "1000000 0"
done
cd ..

hg clone c d
cd d
for i in 0 1; do
    hg co -C $i
    echo d-$i >> foo
    hg ci -m d-$i -d "1000000 0"
done

HGMERGE=true hg merge 3
hg ci -m c-d -d "1000000 0"

hg push ../c; echo $?
hg push -r 2 ../c; echo $?
hg push -r 3 ../c; echo $?
hg push -r 3 -r 4 ../c; echo $?
hg push -f -r 3 -r 4 ../c; echo $?
hg push -r 5 ../c; echo $?

# issue 450
hg init ../e
hg push -r 0 ../e ; echo $?
hg push -r 1 ../e ; echo $?

cd ..

# issue 736
echo % issue 736
hg init f
cd f
hg -q branch a
echo 0 > foo
hg -q ci -d "1000000 0" -Am 0
echo 1 > foo
hg -q ci -d "1000000 0" -m 1
hg -q up 0
echo 2 > foo
hg -q ci -d "1000000 0" -m 2
hg -q up 0
hg -q branch b
echo 3 > foo
hg -q ci -d "1000000 0" -m 3
cd ..

hg -q clone f g
cd g

echo % push on existing branch and new branch
hg -q up 1
echo 4 > foo
hg -q ci -d "1000000 0" -m 4
hg -q up 0
echo 5 > foo
hg -q branch c
hg -q ci -d "1000000 0" -m 5
hg push -r 4 -r 5 ../f; echo $?

echo % fail on multiple head push
hg -q up 1
echo 6 > foo
hg -q ci -d "1000000 0" -m 6
hg push -r 4 -r 6 ../f; echo $?

echo % push replacement head on existing branches
hg -q up 3
echo 7 > foo
hg -q ci -d "1000000 0" -m 7
hg push -r 6 -r 7 ../f; echo $?

echo % merge of branch a to other branch b followed by unrelated push on branch a
hg -q up 6
HGMERGE=true hg -q merge 7
hg -q ci -d "1000000 0" -m 8
hg -q up 7
echo 9 > foo
hg -q ci -d "1000000 0" -m 9
hg push -r 8 ../f; echo $?
hg push -r 9 ../f; echo $?

echo % cheating the counting algorithm
hg -q up 8
HGMERGE=true hg -q merge 2
hg -q ci -d "1000000 0" -m 10
hg -q up 1
echo 11 > foo
hg -q ci -d "1000000 0" -m 11
hg push -r 10 -r 11 ../f; echo $?

echo % checking prepush logic does not allow silently pushing multiple new heads
cd ..
hg init g
echo init > g/init
hg -R g ci -Am init
echo a > g/a
hg -R g ci -Am a
hg clone g h
hg -R g up 0
echo b > g/b
hg -R g ci -Am b
hg -R h up 0
echo c > h/c
hg -R h ci -Am c
hg -R h push g
echo

exit 0
@@ -1,126 +1,143 b''
updating working directory
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
pushing to ../a
searching for changes
abort: push creates new remote heads!
(did you forget to merge? use push -f to force)
pulling from ../a
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files (+1 heads)
(run 'hg heads' to see heads, 'hg merge' to merge)
pushing to ../a
searching for changes
abort: push creates new remote heads!
(did you forget to merge? use push -f to force)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
pushing to ../a
searching for changes
adding changesets
adding manifests
adding file changes
added 2 changesets with 1 changes to 1 files
adding foo
updating working directory
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
created new head
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
created new head
merging foo
0 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
pushing to ../c
searching for changes
abort: push creates new remote heads!
(did you forget to merge? use push -f to force)
1
pushing to ../c
searching for changes
no changes found
0
pushing to ../c
searching for changes
abort: push creates new remote heads!
(did you forget to merge? use push -f to force)
1
pushing to ../c
searching for changes
abort: push creates new remote heads!
(did you forget to merge? use push -f to force)
1
pushing to ../c
searching for changes
adding changesets
adding manifests
adding file changes
added 2 changesets with 2 changes to 1 files (+2 heads)
0
pushing to ../c
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files (-1 heads)
0
pushing to ../e
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
0
pushing to ../e
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
0
% issue 736
% push on existing branch and new branch
pushing to ../f
searching for changes
abort: push creates new remote branch 'c'!
(did you forget to merge? use push -f to force)
1
% fail on multiple head push
pushing to ../f
searching for changes
abort: push creates new remote heads!
(did you forget to merge? use push -f to force)
1
% push replacement head on existing branches
pushing to ../f
searching for changes
adding changesets
adding manifests
adding file changes
added 2 changesets with 2 changes to 1 files
0
% merge of branch a to other branch b followed by unrelated push on branch a
pushing to ../f
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files (-1 heads)
0
pushing to ../f
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files (+1 heads)
0
% cheating the counting algorithm
pushing to ../f
searching for changes
adding changesets
adding manifests
adding file changes
added 2 changesets with 2 changes to 1 files
0
% checking prepush logic does not allow silently pushing multiple new heads
abort: repository g already exists!
adding init
adding a
updating working directory
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1 files updated, 0 files merged, 2 files removed, 0 files unresolved
adding b
created new head
1 files updated, 0 files merged, 2 files removed, 0 files unresolved
adding c
created new head
pushing to g
searching for changes
abort: push creates new remote heads!
(did you forget to merge? use push -f to force)
