tags: reverse and simplify head-walking
Matt Mackall
r8852:a81652fc default
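
The change below is confined to the tagnodes() helper inside tags(). The old code walked the heads oldest-first, appended every (node, fnode) pair, and blanked out an earlier entry whenever a later head reused the same .hgtags filenode; the new code walks the heads tip-first, keeps only the first occurrence of each filenode, and reverses the result, so the tags files are still read from oldest head to tip. A minimal standalone sketch of the two strategies, assuming heads() yields tip-most heads first (the head and filenode values here are invented for illustration):

# Sketch only: compares the old and new dedup strategies on made-up data.
# 'heads' stands in for repo.heads(), assumed ordered tip-most first;
# the fnode strings stand in for .hgtags filenodes.
heads = [('head3', 'fnode-b'), ('head2', 'fnode-b'), ('head1', 'fnode-a')]

def tagnodes_old(heads):
    # before: walk oldest-first, remember each fnode's last slot in 'last',
    # null out superseded duplicates, then filter out the holes
    last, ret = {}, []
    for node, fnode in reversed(heads):
        ret.append((node, fnode))
        if fnode in last:
            ret[last[fnode]] = None
        last[fnode] = len(ret) - 1
    return [item for item in ret if item]

def tagnodes_new(heads):
    # after: walk tip-first, keep only the first occurrence of each fnode,
    # then reverse so callers still read from oldest head to tip
    seen, ret = set(), []
    for node, fnode in heads:
        if fnode not in seen:
            ret.append((node, fnode))
            seen.add(fnode)
    return list(reversed(ret))

assert tagnodes_old(heads) == tagnodes_new(heads)

Both produce the same (node, fnode) sequence; the new form simply avoids the placeholder-and-filter pass.
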
@@ -1,2178 +1,2177 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as match_
import merge as merge_
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
    supported = set('revlogv1 store fncache shared'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s' % s))
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.defversion = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        m = match_.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = encoding.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.iteritems():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        def tagnodes():
-           last = {}
+           seen = set()
            ret = []
-           for node in reversed(self.heads()):
+           for node in self.heads():
                c = self[node]
                try:
                    fnode = c.filenode('.hgtags')
                except error.LookupError:
                    continue
-               ret.append((node, fnode))
-               if fnode in last:
-                   ret[last[fnode]] = None
-               last[fnode] = len(ret) - 1
-           return [item for item in ret if item]
+               if fnode not in seen:
+                   ret.append((node, fnode))
+                   seen.add(fnode)
+           return reversed(ret)

        # read the tags file from each head, ending with the tip
        f = None
        for node, fnode in tagnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = encoding.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.iteritems():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchmap(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._ubranchcache = partial

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.iteritems():
            self.branchcache[encoding.tolocal(k)] = v
        return self.branchcache


    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            head = None
            for i in range(len(heads)-1, -1, -1):
                h = heads[i]
                if 'close' not in self.changelog.read(h)[5]:
                    head = h
                    break
            # no open heads were found
            if head is None:
                head = heads[-1]
            bt[bn] = head
        return bt


    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self[r]
            b = c.branch()
            bheads = partial.setdefault(b, [])
            bheads.append(c.node())
            for p in c.parents():
                pn = p.node()
                if pn in bheads:
                    bheads.remove(pn)

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoError(_("unknown revision '%s'") % key)

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = match_.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4    as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = match_.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()
            wctx = self[None]

            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            for s in wctx.substate:
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if subs and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                self.ui.status(_("nothing changed\n"))
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                cctx._text = editor(self, cctx)

            # commit subs
            if subs:
                state = wctx.substate.copy()
                for s in subs:
                    self.ui.status(_('committing subrepository %s\n') % s)
                    sr = wctx.sub(s).commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            ret = self.commitctx(cctx, True)

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()

            return ret

        finally:
            wlock.release()

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

901 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
900 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
902 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
901 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
903
902
904 lock = self.lock()
903 lock = self.lock()
905 try:
904 try:
906 tr = self.transaction()
905 tr = self.transaction()
907 trp = weakref.proxy(tr)
906 trp = weakref.proxy(tr)
908
907
909 # check in files
908 # check in files
910 new = {}
909 new = {}
911 changed = []
910 changed = []
912 linkrev = len(self)
911 linkrev = len(self)
913 for f in sorted(ctx.modified() + ctx.added()):
912 for f in sorted(ctx.modified() + ctx.added()):
914 self.ui.note(f + "\n")
913 self.ui.note(f + "\n")
915 try:
914 try:
916 fctx = ctx[f]
915 fctx = ctx[f]
917 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
916 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
918 changed)
917 changed)
919 m1.set(f, fctx.flags())
918 m1.set(f, fctx.flags())
920 except (OSError, IOError):
919 except (OSError, IOError):
921 if error:
920 if error:
922 self.ui.warn(_("trouble committing %s!\n") % f)
921 self.ui.warn(_("trouble committing %s!\n") % f)
923 raise
922 raise
924 else:
923 else:
925 removed.append(f)
924 removed.append(f)
926
925
927 # update manifest
926 # update manifest
928 m1.update(new)
927 m1.update(new)
929 removed = [f for f in sorted(removed) if f in m1 or f in m2]
928 removed = [f for f in sorted(removed) if f in m1 or f in m2]
930 drop = [f for f in removed if f in m1]
929 drop = [f for f in removed if f in m1]
931 for f in drop:
930 for f in drop:
932 del m1[f]
931 del m1[f]
933 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
932 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
934 p2.manifestnode(), (new, drop))
933 p2.manifestnode(), (new, drop))
935
934
936 # update changelog
935 # update changelog
937 self.changelog.delayupdate()
936 self.changelog.delayupdate()
938 n = self.changelog.add(mn, changed + removed, ctx.description(),
937 n = self.changelog.add(mn, changed + removed, ctx.description(),
939 trp, p1.node(), p2.node(),
938 trp, p1.node(), p2.node(),
940 user, ctx.date(), ctx.extra().copy())
939 user, ctx.date(), ctx.extra().copy())
941 p = lambda: self.changelog.writepending() and self.root or ""
940 p = lambda: self.changelog.writepending() and self.root or ""
942 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
941 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
943 parent2=xp2, pending=p)
942 parent2=xp2, pending=p)
944 self.changelog.finalize(trp)
943 self.changelog.finalize(trp)
945 tr.close()
944 tr.close()
946
945
947 if self.branchcache:
946 if self.branchcache:
948 self.branchtags()
947 self.branchtags()
949
948
950 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
949 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
951 return n
950 return n
952 finally:
951 finally:
953 del tr
952 del tr
954 lock.release()
953 lock.release()
955
954
956 def walk(self, match, node=None):
955 def walk(self, match, node=None):
957 '''
956 '''
958 walk recursively through the directory tree or a given
957 walk recursively through the directory tree or a given
959 changeset, finding all files matched by the match
958 changeset, finding all files matched by the match
960 function
959 function
961 '''
960 '''
962 return self[node].walk(match)
961 return self[node].walk(match)
963
962
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        for l in r:
            l.sort()
        return r
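    # Illustrative unpacking of the status() result, not part of the original
    # module; the seven lists always come back in this order:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)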
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                   % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
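                # dirstate states: 'n' normal, 'a' added, 'r' removed,
                # 'm' merged from two parents; anything else is untracked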
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]
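    # Decorate-sort-undecorate: negating each revision number makes a plain
    # ascending sort return heads newest-first, e.g. heads at revs 2, 5 and 7
    # come back in the order 7, 5, 2.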

    def branchheads(self, branch=None, start=None, closed=False):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
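        # changelog.read(h)[5] is the changeset's 'extra' dict; the head of a
        # closed branch carries a 'close' key there.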
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
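    # between() samples each top->bottom chain at exponentially growing
    # distances: the list returned for a pair holds the nodes 1, 2, 4, 8, ...
    # first-parent steps below top. The binary search in findcommonincoming()
    # below consumes exactly this shape.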
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist in
        self and remote but have no children that exist in both self and
        remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist in
        self and remote but have no children that exist in both self and
        remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
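        # i.e. each entry is a 4-tuple (head, root, root_p1, root_p2), the
        # shape produced by the branches() method above.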
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads
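    # Rough shape of the discovery protocol implemented above: start from the
    # remote heads, pull back unknown linear segments with remote.branches(),
    # then narrow each partially-known segment with remote.between(), whose
    # power-of-two sampling keeps the number of round trips roughly
    # logarithmic in the segment length.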

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        def checkbranch(lheads, rheads, updatelh):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelh: outgoing local branch heads
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelh]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                 self[updatelh[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelh = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelh:
                            continue
                        if not checkbranch(lheads, rheads, updatelh):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
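    # prepush() returns (None, status) when there is nothing to push or the
    # head checks fail, and (changegroup, remote_heads) when the push may
    # proceed; both push_* methods below dispatch on ret[0].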

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev
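        # e.g. nodes.sort(cmp_by_rev_func(cl)) puts changelog nodes into
        # revision order, which is also a topological order.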
1708
1707
1709 # If we determine that a particular file or manifest node must be a
1708 # If we determine that a particular file or manifest node must be a
1710 # node that the recipient of the changegroup will already have, we can
1709 # node that the recipient of the changegroup will already have, we can
1711 # also assume the recipient will have all the parents. This function
1710 # also assume the recipient will have all the parents. This function
1712 # prunes them from the set of missing nodes.
1711 # prunes them from the set of missing nodes.
1713 def prune_parents(revlog, hasset, msngset):
1712 def prune_parents(revlog, hasset, msngset):
1714 haslst = list(hasset)
1713 haslst = list(hasset)
1715 haslst.sort(cmp_by_rev_func(revlog))
1714 haslst.sort(cmp_by_rev_func(revlog))
1716 for node in haslst:
1715 for node in haslst:
1717 parentlst = [p for p in revlog.parents(node) if p != nullid]
1716 parentlst = [p for p in revlog.parents(node) if p != nullid]
1718 while parentlst:
1717 while parentlst:
1719 n = parentlst.pop()
1718 n = parentlst.pop()
1720 if n not in hasset:
1719 if n not in hasset:
1721 hasset.add(n)
1720 hasset.add(n)
1722 p = [p for p in revlog.parents(n) if p != nullid]
1721 p = [p for p in revlog.parents(n) if p != nullid]
1723 parentlst.extend(p)
1722 parentlst.extend(p)
1724 for n in hasset:
1723 for n in hasset:
1725 msngset.pop(n, None)
1724 msngset.pop(n, None)
1726
1725
1727 # This is a function generating function used to set up an environment
1726 # This is a function generating function used to set up an environment
1728 # for the inner function to execute in.
1727 # for the inner function to execute in.
1729 def manifest_and_file_collector(changedfileset):
1728 def manifest_and_file_collector(changedfileset):
1730 # This is an information gathering function that gathers
1729 # This is an information gathering function that gathers
1731 # information from each changeset node that goes out as part of
1730 # information from each changeset node that goes out as part of
1732 # the changegroup. The information gathered is a list of which
1731 # the changegroup. The information gathered is a list of which
1733 # manifest nodes are potentially required (the recipient may
1732 # manifest nodes are potentially required (the recipient may
1734 # already have them) and total list of all files which were
1733 # already have them) and total list of all files which were
1735 # changed in any changeset in the changegroup.
1734 # changed in any changeset in the changegroup.
1736 #
1735 #
1737 # We also remember the first changenode we saw any manifest
1736 # We also remember the first changenode we saw any manifest
1738 # referenced by so we can later determine which changenode 'owns'
1737 # referenced by so we can later determine which changenode 'owns'
1739 # the manifest.
1738 # the manifest.
1740 def collect_manifests_and_files(clnode):
1739 def collect_manifests_and_files(clnode):
1741 c = cl.read(clnode)
1740 c = cl.read(clnode)
1742 for f in c[3]:
1741 for f in c[3]:
1743 # This is to make sure we only have one instance of each
1742 # This is to make sure we only have one instance of each
1744 # filename string for each filename.
1743 # filename string for each filename.
1745 changedfileset.setdefault(f, f)
1744 changedfileset.setdefault(f, f)
1746 msng_mnfst_set.setdefault(c[0], clnode)
1745 msng_mnfst_set.setdefault(c[0], clnode)
1747 return collect_manifests_and_files
1746 return collect_manifests_and_files
1748
1747
1749 # Figure out which manifest nodes (of the ones we think might be part
1748 # Figure out which manifest nodes (of the ones we think might be part
1750 # of the changegroup) the recipient must know about and remove them
1749 # of the changegroup) the recipient must know about and remove them
1751 # from the changegroup.
1750 # from the changegroup.
1752 def prune_manifests():
1751 def prune_manifests():
1753 has_mnfst_set = set()
1752 has_mnfst_set = set()
1754 for n in msng_mnfst_set:
1753 for n in msng_mnfst_set:
1755 # If a 'missing' manifest thinks it belongs to a changenode
1754 # If a 'missing' manifest thinks it belongs to a changenode
1756 # the recipient is assumed to have, obviously the recipient
1755 # the recipient is assumed to have, obviously the recipient
1757 # must have that manifest.
1756 # must have that manifest.
1758 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1757 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1759 if linknode in has_cl_set:
1758 if linknode in has_cl_set:
1760 has_mnfst_set.add(n)
1759 has_mnfst_set.add(n)
1761 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1760 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1762
1761
1763 # Use the information collected in collect_manifests_and_files to say
1762 # Use the information collected in collect_manifests_and_files to say
1764 # which changenode any manifestnode belongs to.
1763 # which changenode any manifestnode belongs to.
1765 def lookup_manifest_link(mnfstnode):
1764 def lookup_manifest_link(mnfstnode):
1766 return msng_mnfst_set[mnfstnode]
1765 return msng_mnfst_set[mnfstnode]
1767
1766
1768 # A function generating function that sets up the initial environment
1767 # A function generating function that sets up the initial environment
1769 # the inner function.
1768 # the inner function.
1770 def filenode_collector(changedfiles):
1769 def filenode_collector(changedfiles):
1771 next_rev = [0]
1770 next_rev = [0]
1772 # This gathers information from each manifestnode included in the
1771 # This gathers information from each manifestnode included in the
1773 # changegroup about which filenodes the manifest node references
1772 # changegroup about which filenodes the manifest node references
1774 # so we can include those in the changegroup too.
1773 # so we can include those in the changegroup too.
1775 #
1774 #
1776 # It also remembers which changenode each filenode belongs to. It
1775 # It also remembers which changenode each filenode belongs to. It
1777 # does this by assuming the a filenode belongs to the changenode
1776 # does this by assuming the a filenode belongs to the changenode
1778 # the first manifest that references it belongs to.
1777 # the first manifest that references it belongs to.
1779 def collect_msng_filenodes(mnfstnode):
1778 def collect_msng_filenodes(mnfstnode):
1780 r = mnfst.rev(mnfstnode)
1779 r = mnfst.rev(mnfstnode)
1781 if r == next_rev[0]:
1780 if r == next_rev[0]:
1782 # If the last rev we looked at was the one just previous,
1781 # If the last rev we looked at was the one just previous,
1783 # we only need to see a diff.
1782 # we only need to see a diff.
1784 deltamf = mnfst.readdelta(mnfstnode)
1783 deltamf = mnfst.readdelta(mnfstnode)
1785 # For each line in the delta
1784 # For each line in the delta
1786 for f, fnode in deltamf.iteritems():
1785 for f, fnode in deltamf.iteritems():
1787 f = changedfiles.get(f, None)
1786 f = changedfiles.get(f, None)
1788 # And if the file is in the list of files we care
1787 # And if the file is in the list of files we care
1789 # about.
1788 # about.
1790 if f is not None:
1789 if f is not None:
1791 # Get the changenode this manifest belongs to
1790 # Get the changenode this manifest belongs to
1792 clnode = msng_mnfst_set[mnfstnode]
1791 clnode = msng_mnfst_set[mnfstnode]
1793 # Create the set of filenodes for the file if
1792 # Create the set of filenodes for the file if
1794 # there isn't one already.
1793 # there isn't one already.
1795 ndset = msng_filenode_set.setdefault(f, {})
1794 ndset = msng_filenode_set.setdefault(f, {})
1796 # And set the filenode's changelog node to the
1795 # And set the filenode's changelog node to the
1797 # manifest's if it hasn't been set already.
1796 # manifest's if it hasn't been set already.
1798 ndset.setdefault(fnode, clnode)
1797 ndset.setdefault(fnode, clnode)
1799 else:
1798 else:
1800 # Otherwise we need a full manifest.
1799 # Otherwise we need a full manifest.
1801 m = mnfst.read(mnfstnode)
1800 m = mnfst.read(mnfstnode)
1802 # For every file in we care about.
1801 # For every file in we care about.
1803 for f in changedfiles:
1802 for f in changedfiles:
1804 fnode = m.get(f, None)
1803 fnode = m.get(f, None)
1805 # If it's in the manifest
1804 # If it's in the manifest
1806 if fnode is not None:
1805 if fnode is not None:
1807 # See comments above.
1806 # See comments above.
1808 clnode = msng_mnfst_set[mnfstnode]
1807 clnode = msng_mnfst_set[mnfstnode]
1809 ndset = msng_filenode_set.setdefault(f, {})
1808 ndset = msng_filenode_set.setdefault(f, {})
1810 ndset.setdefault(fnode, clnode)
1809 ndset.setdefault(fnode, clnode)
1811 # Remember the revision we hope to see next.
1810 # Remember the revision we hope to see next.
1812 next_rev[0] = r + 1
1811 next_rev[0] = r + 1
1813 return collect_msng_filenodes
1812 return collect_msng_filenodes
1814
1813
1815 # We have a list of filenodes we think we need for a file, lets remove
1814 # We have a list of filenodes we think we need for a file, lets remove
1816 # all those we know the recipient must have.
1815 # all those we know the recipient must have.
1817 def prune_filenodes(f, filerevlog):
1816 def prune_filenodes(f, filerevlog):
1818 msngset = msng_filenode_set[f]
1817 msngset = msng_filenode_set[f]
1819 hasset = set()
1818 hasset = set()
1820 # If a 'missing' filenode thinks it belongs to a changenode we
1819 # If a 'missing' filenode thinks it belongs to a changenode we
1821 # assume the recipient must have, then the recipient must have
1820 # assume the recipient must have, then the recipient must have
1822 # that filenode.
1821 # that filenode.
1823 for n in msngset:
1822 for n in msngset:
1824 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1823 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1825 if clnode in has_cl_set:
1824 if clnode in has_cl_set:
1826 hasset.add(n)
1825 hasset.add(n)
1827 prune_parents(filerevlog, hasset, msngset)
1826 prune_parents(filerevlog, hasset, msngset)
1828
1827
1829 # A function generator function that sets up the a context for the
1828 # A function generator function that sets up the a context for the
1830 # inner function.
1829 # inner function.
1831 def lookup_filenode_link_func(fname):
1830 def lookup_filenode_link_func(fname):
1832 msngset = msng_filenode_set[fname]
1831 msngset = msng_filenode_set[fname]
1833 # Lookup the changenode the filenode belongs to.
1832 # Lookup the changenode the filenode belongs to.
1834 def lookup_filenode_link(fnode):
1833 def lookup_filenode_link(fnode):
1835 return msngset[fnode]
1834 return msngset[fnode]
1836 return lookup_filenode_link
1835 return lookup_filenode_link
1837
1836
1838 # Add the nodes that were explicitly requested.
1837 # Add the nodes that were explicitly requested.
1839 def add_extra_nodes(name, nodes):
1838 def add_extra_nodes(name, nodes):
1840 if not extranodes or name not in extranodes:
1839 if not extranodes or name not in extranodes:
1841 return
1840 return
1842
1841
1843 for node, linknode in extranodes[name]:
1842 for node, linknode in extranodes[name]:
1844 if node not in nodes:
1843 if node not in nodes:
1845 nodes[node] = linknode
1844 nodes[node] = linknode
1846
1845
        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to look up the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed; dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them;
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function, as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore; toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

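gengroup only ever yields chunks; util.chunkbuffer then wraps the generator so callers get an ordinary read(n) interface while the changegroup is produced lazily. A rough sketch of that adapter under those assumptions (this is an illustration, not Mercurial's actual implementation):

# Rough sketch of wrapping a chunk generator in a read(n) interface,
# the way util.chunkbuffer is used above (illustration only).
class ChunkBuffer(object):
    def __init__(self, gen):
        self.gen = iter(gen)
        self.buf = ''
    def read(self, n):
        # Pull chunks until n bytes are buffered or the generator ends.
        while len(self.buf) < n:
            try:
                self.buf += next(self.gen)
            except StopIteration:
                break
        data, self.buf = self.buf[:n], self.buf[n:]
        return data

cb = ChunkBuffer(iter(['abc', 'def']))
assert cb.read(4) == 'abcd'
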
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

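gennodelst walks a revlog in revision order and keeps only nodes whose linkrev falls in the outgoing revision set, so each group is emitted in revlog order. The filter in isolation, with dicts standing in for revlogs (hypothetical data):

# gennodelst's filter in isolation: keep nodes whose linkrev is in the
# outgoing set, preserving revlog (revision) order. Data is made up.
linkrevs = {0: 0, 1: 2, 2: 5}        # revlog rev -> changelog rev
nodes = {0: 'n0', 1: 'n1', 2: 'n2'}  # revlog rev -> node
revset = set([2, 5])
wanted = [nodes[r] for r in sorted(nodes) if linkrevs[r] in revset]
assert wanted == ['n1', 'n2']
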
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

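Both gengroup variants frame the stream the same way: length-prefixed chunks, a name chunk introducing each file's group, and a zero-length "close" chunk terminating a group. A toy framing of the same shape (this sketches the chunk idea only; it is not guaranteed to match the exact changegroup encoding):

# Toy length-prefixed framing of the same shape as the chunk stream
# above (illustration only).
import struct

def chunk(data):
    # The 4-byte big-endian length header counts itself.
    return struct.pack('>l', len(data) + 4) + data

def closechunk():
    # A zero-length chunk marks the end of a group.
    return struct.pack('>l', 0)

stream = chunk('hello') + closechunk()
size, = struct.unpack('>l', stream[:4])
assert stream[4:size] == 'hello'
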
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

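The head-count encoding deliberately avoids 0 so callers can treat the result as a boolean "anything added?". A small check of that arithmetic under the docstring's rules (hypothetical head counts; this does not call the real method):

# Decoding addchangegroup's return value per its docstring
# (hypothetical head counts).
def retcode(oldheads, newheads):
    if newheads < oldheads:
        return newheads - oldheads - 1
    return newheads - oldheads + 1

assert retcode(1, 3) == 3    # two heads added   -> 1 + 2
assert retcode(3, 1) == -3   # two heads removed -> -1 - 2
assert retcode(2, 2) == 1    # same head count, still truthy
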
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

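In the streaming protocol parsed above, each file is announced by one header line of the form "name\0size\n", followed by exactly size bytes of raw store data. Parsing that header in isolation (the file name below is made up):

# Parsing one stream_in per-file header line in isolation; the format
# is 'name\0size\n' followed by `size` bytes of data. Name is made up.
header = 'data/foo.i\x00123\n'
name, size = header.split('\0', 1)
size = int(size)  # int() tolerates the trailing newline
assert (name, size) == ('data/foo.i', 123)
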
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

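clone takes the streaming path only when the caller asked for it, no head subset was requested, and the server advertises the capability; otherwise it falls back to pull. The gating in isolation (stand-in objects, hypothetical names):

# clone's gating logic in isolation (hypothetical stand-in function).
def pick_clone_path(stream, heads, server_caps):
    if stream and not heads and 'stream' in server_caps:
        return 'stream_in'
    return 'pull'

assert pick_clone_path(True, [], set(['stream'])) == 'stream_in'
assert pick_clone_path(True, ['abc123'], set(['stream'])) == 'pull'
assert pick_clone_path(False, [], set(['stream'])) == 'pull'
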
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

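aftertrans snapshots the rename list into a fresh local before building the closure, so the returned callback holds no reference back to the object that created it and reference cycles cannot delay destructors. A self-contained sketch of the same pattern (paths are hypothetical, using os.rename in place of util.rename):

# Self-contained sketch of the aftertrans pattern: snapshot the rename
# list, return a closure with no back-references. Paths are hypothetical.
import os, tempfile

def make_renamer(files):
    renamefiles = [tuple(t) for t in files]
    def run():
        for src, dest in renamefiles:
            os.rename(src, dest)
    return run

d = tempfile.mkdtemp()
open(os.path.join(d, 'journal'), 'w').close()
make_renamer([(os.path.join(d, 'journal'), os.path.join(d, 'undo'))])()
assert os.path.exists(os.path.join(d, 'undo'))
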
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True