findincoming: build the correct list from the start
Benoit Boissinot
r7237:b9bf3690 default
@@ -1,2127 +1,2126 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context, weakref
import lock, transaction, stat, errno, ui, store
import os, revlog, time, util, extensions, hook, inspect
import match as match_
import merge as merge_

class localrepository(repo.repository):
    capabilities = util.set(('lookup', 'changegroupsubset'))
    supported = ('revlogv1', 'store', 'fncache')

    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if parentui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = []
            try:
                requirements = self.opener("requires").read().splitlines()
                for r in requirements:
                    if r not in self.supported:
                        raise repo.RepoError(_("requirement '%s' not supported") % r)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.store = store.store(requirements, self.path, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def __getattr__(self, name):
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError(name)

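The `__getattr__` hook above implements lazy loading: the changelog, manifest, and dirstate are only constructed on first access, and the method caches each one as an instance attribute so `__getattr__` is never consulted for that name again (the bare `self.changelog` in the manifest branch forces the changelog to load first, so `defversion` is set before the manifest revlog is opened). A minimal standalone sketch of the same idiom, with a list standing in for the real changelog object:

class LazyRepo(object):
    loads = 0
    def __getattr__(self, name):
        # only reached when normal attribute lookup fails, i.e. on first access
        if name == 'changelog':
            LazyRepo.loads += 1
            self.changelog = ['rev0', 'rev1']  # stands in for changelog.changelog(...)
            return self.changelog
        raise AttributeError(name)

r = LazyRepo()
r.changelog          # triggers the load and caches the result
r.changelog          # plain instance attribute now; __getattr__ not called
assert LazyRepo.loads == 1
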
    def __getitem__(self, changeid):
        if changeid == None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

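Together, `__getitem__`, `__len__`, and `__iter__` make the repository behave like a sequence of revisions: `repo[rev]` returns a changectx (a workingctx for `None`), `len(repo)` is the number of changesets, and iteration yields revision numbers in order. A sketch of the same container protocol on a toy class:

class RevList(object):
    def __init__(self, revs):
        self._revs = revs
    def __getitem__(self, idx):
        return self._revs[idx]
    def __len__(self):
        return len(self._revs)
    def __iter__(self):
        for i in xrange(len(self)):
            yield i

repo = RevList(['a', 'b', 'c'])
assert len(repo) == 3
assert [repo[i] for i in repo] == ['a', 'b', 'c']
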
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, parent=None,
             extra={}):
        use_dirstate = parent is None

        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetags(fp, names, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supersedes us OR
                #  mutual supersedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache

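Each `.hgtags` (and `localtags`) line is `<40-hex-digit node> <tag name>`: `readtags` splits on the first space, validates the node against the changelog nodemap, and keeps a per-tag history list so the tag file from a later head can supersede an earlier one. A simplified, standalone version of just the parsing step, using `binascii.unhexlify` (which is what `node.bin` wraps) and skipping the nodemap check:

from binascii import unhexlify

def parsetags(lines):
    '''map tag name -> binary node from "<hexnode> <name>" lines'''
    tags = {}
    for count, l in enumerate(lines, 1):
        if not l:
            continue
        s = l.split(" ", 1)
        if len(s) != 2:
            print "line %d: cannot parse entry" % count
            continue
        node, key = s
        try:
            tags[key.strip()] = unhexlify(node)
        except TypeError:
            print "line %d: node %r is not well formed" % (count, node)
    return tags

sample = ["1" * 40 + " v1.0", "garbage", "2" * 40 + " v1.1"]
assert sorted(parsetags(sample)) == ['v1.0', 'v1.1']
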
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self[node]
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in util.sort(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

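The branch cache is a plain text file: the first line is `<tip hex node> <tip rev>`, used by `_readbranchcache` to detect a stale cache, followed by one `<hex node> <branch label>` line per branch. A minimal round-trip of that format, with a `cStringIO` buffer standing in for the repository opener (an assumption for the sketch; nodes are kept as hex strings here):

from cStringIO import StringIO

def writecache(f, tip, tiprev, branches):
    f.write("%s %s\n" % (tip, tiprev))
    for label, node in branches.iteritems():
        f.write("%s %s\n" % (node, label))

def readcache(f):
    lines = f.read().split('\n')
    last, lrev = lines.pop(0).split(" ", 1)
    partial = {}
    for l in lines:
        if not l:
            continue
        node, label = l.split(" ", 1)
        partial[label.strip()] = node
    return partial, last, int(lrev)

buf = StringIO()
writecache(buf, "ab" * 20, 42, {'default': "cd" * 20})
buf.seek(0)
partial, last, lrev = readcache(buf)
assert partial == {'default': "cd" * 20} and lrev == 42
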
    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self[r]
            b = c.branch()
            partial[b] = c.node()

    def lookup(self, key):
        if key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)

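`lookup` resolves a user-supplied key by trying progressively looser namespaces: the special names `.` and `null`, an exact changelog match, tag names, branch names, then an unambiguous hex prefix, and finally raises RepoError. The same chain-of-resolvers shape, sketched abstractly:

def make_lookup(resolvers):
    '''resolvers: functions returning a node, or None on no match'''
    def lookup(key):
        for resolve in resolvers:
            n = resolve(key)
            if n is not None:
                return n
        raise KeyError("unknown revision '%s'" % key)
    return lookup

tags = {'tip': 'node-t'}
branches = {'default': 'node-b'}
lookup = make_lookup([tags.get, branches.get])
assert lookup('tip') == 'node-t'       # found in the first namespace
assert lookup('default') == 'node-b'   # falls through to the second
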
    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

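`_filter` compiles the `[encode]`/`[decode]` config sections once per repository: each entry maps a file pattern to either a registered data filter (matched by name prefix) or a shell command run through `util.filter`, and the first pattern that matches the filename wins. A reduced sketch of the same dispatch, with `fnmatch` as a stand-in for `util.matcher` (an assumption) and a string tag in place of actually shelling out:

import fnmatch

def compilefilters(items, datafilters):
    l = []
    for pat, cmd in items:
        if cmd == '!':
            continue
        fn = None
        params = cmd
        for name, filterfn in datafilters.iteritems():
            if cmd.startswith(name):
                fn, params = filterfn, cmd[len(name):].lstrip()
                break
        if fn is None:
            # stand-in for util.filter, which pipes data through a command
            fn = lambda data, c: "shell:%s(%s)" % (c, data)
        l.append((lambda f, p=pat: fnmatch.fnmatch(f, p), fn, params))
    return l

def applyfilters(filters, filename, data):
    for mf, fn, cmd in filters:
        if mf(filename):
            return fn(data, cmd)   # first matching pattern wins
    return data

filters = compilefilters([('*.txt', 'upper:')],
                         {'upper:': lambda data, params: data.upper()})
assert applyfilters(filters, 'a.txt', 'hi') == 'HI'
assert applyfilters(filters, 'a.bin', 'hi') == 'hi'
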
    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

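Before opening a transaction, the current dirstate and branch are copied to `journal.*` files; `aftertrans(renames)` (defined later in this file, beyond the excerpt shown) is a callback that renames the journal files to `undo.*` once the transaction closes, which is what makes `hg rollback` possible. A sketch of that rename-on-success pattern with plain files in a temp directory:

import os, tempfile

def aftertrans(renames):
    def a():
        for src, dest in renames:
            os.rename(src, dest)
    return a

d = tempfile.mkdtemp()
journal = os.path.join(d, "journal")
undo = os.path.join(d, "undo")
open(journal, "w").write("pending writes\n")

onclose = aftertrans([(journal, undo)])
onclose()   # would run when the transaction commits successfully
assert not os.path.exists(journal) and os.path.exists(undo)
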
    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

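`lock` and `wlock` keep only a `weakref.ref` to the lock object: callers hold the strong reference, so a nested caller gets the same live lock back, and when the last holder drops it (the `del lock, wlock` seen above) the lock's destructor releases it and the cached weakref goes dead. A self-contained sketch of weakref-based handle caching:

import weakref

class Handle(object):
    '''stands in for lock.lock; released when the last reference dies'''
    pass

class Owner(object):
    _ref = None
    def acquire(self):
        if self._ref and self._ref():
            return self._ref()         # a caller still holds it: reuse
        h = Handle()
        self._ref = weakref.ref(h)     # cache weakly; callers keep it alive
        return h

o = Owner()
h1 = o.acquire()
assert o.acquire() is h1   # nested acquire sees the live handle
del h1                     # last strong reference dropped; weakref goes dead
h2 = o.acquire()           # so a fresh handle is created here
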
    def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fn = fctx.path()
        t = fctx.data()
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = fctx.renamed()
        if cp and cp[0] != fn:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cf = cp[0]
            cr = manifest1.get(cf)
            nfp = fp2

            if manifest2: # branch merge
                if fp2 == nullid: # copied on remote side
                    if fp1 != nullid or cf in manifest2:
                        cr = manifest2[cf]
                        nfp = fp1

            # find source in nearest ancestor if we've lost track
            if not cr:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fn, cf))
                for a in self['.'].ancestors():
                    if cf in a:
                        cr = a[cf].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
            meta["copy"] = cf
            meta["copyrev"] = hex(cr)
            fp1, fp2 = nullid, nfp
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

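The `elif fp2 != nullid` branch normalizes the filelog parents: if one parent is an ancestor of the other, only the descendant is kept, since a merge where one side already contains the other is not a real file merge. A toy illustration with a hypothetical ancestor function over a linear history (revision numbers stand in for nodes):

nullid = 0

def ancestor(a, b):
    # hypothetical: in a linear history 1 -> 2 -> 3, the common
    # ancestor of two revisions is simply the smaller one
    return min(a, b)

def reduceparents(fp1, fp2):
    if fp2 == nullid:
        return fp1, fp2
    fpa = ancestor(fp1, fp2)
    if fpa == fp1:        # fp1 is an ancestor of fp2: keep fp2 only
        fp1, fp2 = fp2, nullid
    elif fpa == fp2:      # fp2 is an ancestor of fp1: keep fp1 only
        fp2 = nullid
    return fp1, fp2

assert reduceparents(1, 3) == (3, nullid)
assert reduceparents(3, 1) == (3, nullid)
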
    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

    def commit(self, files=None, text="", user=None, date=None,
               match=None, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = None
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            use_dirstate = (p1 is None) # not rawcommit

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                if (not force and p2 != nullid and
                    (match and (match.files() or match.anypats()))):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))

                if files:
                    modified, removed = [], []
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            modified.append(f)
                        elif s == 'r':
                            removed.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                    changes = [modified, [], removed, [], []]
                else:
                    changes = self.status(match=match)
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)
                changes = [files, [], [], [], []]

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))
            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            return self._commitctx(wctx, force, force_editor, empty_ok,
                                   use_dirstate, update_dirstate)
        finally:
            del lock, wlock

    def commitctx(self, ctx):
        """Add a new revision to current repository.

        Revision information is passed in the context.memctx argument.
        commitctx() does not touch the working directory.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            return self._commitctx(ctx, force=True, force_editor=False,
                                   empty_ok=True, use_dirstate=False,
                                   update_dirstate=False)
        finally:
            del lock, wlock

    def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
                   use_dirstate=True, update_dirstate=True):
        tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = util.sort(wctx.modified() + wctx.added())
            remove = wctx.removed()
            extra = wctx.extra().copy()
            branchname = extra['branch']
            user = wctx.user()
            text = wctx.description()

            p1, p2 = [p.node() for p in wctx.parents()]
            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    fctx = wctx.filectx(f)
                    newflags = fctx.flags()
                    new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        if m1.flags(f) != newflags:
                            changed.append(f)
                    m1.set(f, newflags)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            updated, added = [], []
            for f in util.sort(changed):
                if f in m1 or f in m2:
                    updated.append(f)
                else:
                    added.append(f)

            # update manifest
            m1.update(new)
            removed = []

            for f in util.sort(remove):
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                    edittext.append("")
                edittext.append("") # Empty line between message and comments.
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: added %s" % f for f in added])
                edittext.extend(["HG: changed %s" % f for f in updated])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not added and not updated and not removed:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
            if use_dirstate:
                for f in removed:
                    self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2 == self[None]
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except lock.LockException:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
1043 mf2 = mfmatches(ctx2)
1043 mf2 = mfmatches(ctx2)
1044
1044
1045 modified, added, clean = [], [], []
1045 modified, added, clean = [], [], []
1046 for fn in mf2:
1046 for fn in mf2:
1047 if fn in mf1:
1047 if fn in mf1:
1048 if (mf1.flags(fn) != mf2.flags(fn) or
1048 if (mf1.flags(fn) != mf2.flags(fn) or
1049 (mf1[fn] != mf2[fn] and
1049 (mf1[fn] != mf2[fn] and
1050 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1050 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1051 modified.append(fn)
1051 modified.append(fn)
1052 elif listclean:
1052 elif listclean:
1053 clean.append(fn)
1053 clean.append(fn)
1054 del mf1[fn]
1054 del mf1[fn]
1055 else:
1055 else:
1056 added.append(fn)
1056 added.append(fn)
1057 removed = mf1.keys()
1057 removed = mf1.keys()
1058
1058
1059 r = modified, added, removed, deleted, unknown, ignored, clean
1059 r = modified, added, removed, deleted, unknown, ignored, clean
1060 [l.sort() for l in r]
1060 [l.sort() for l in r]
1061 return r
1061 return r
1062
1062
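# An illustrative sketch of consuming the seven lists returned by
# status(); `repo` stands for any localrepository instance:
#
#   modified, added, removed, deleted, unknown, ignored, clean = \
#       repo.status(unknown=True, clean=True)
#   for prefix, names in zip('MAR!?C', (modified, added, removed,
#                                       deleted, unknown, clean)):
#       for f in names:
#           print '%s %s' % (prefix, f)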
1063 def add(self, list):
1063 def add(self, list):
1064 wlock = self.wlock()
1064 wlock = self.wlock()
1065 try:
1065 try:
1066 rejected = []
1066 rejected = []
1067 for f in list:
1067 for f in list:
1068 p = self.wjoin(f)
1068 p = self.wjoin(f)
1069 try:
1069 try:
1070 st = os.lstat(p)
1070 st = os.lstat(p)
1071 except OSError:
1071 except OSError:
1072 self.ui.warn(_("%s does not exist!\n") % f)
1072 self.ui.warn(_("%s does not exist!\n") % f)
1073 rejected.append(f)
1073 rejected.append(f)
1074 continue
1074 continue
1075 if st.st_size > 10000000:
1075 if st.st_size > 10000000:
1076 self.ui.warn(_("%s: files over 10MB may cause memory and"
1076 self.ui.warn(_("%s: files over 10MB may cause memory and"
1077 " performance problems\n"
1077 " performance problems\n"
1078 "(use 'hg revert %s' to unadd the file)\n")
1078 "(use 'hg revert %s' to unadd the file)\n")
1079 % (f, f))
1079 % (f, f))
1080 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1080 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1081 self.ui.warn(_("%s not added: only files and symlinks "
1081 self.ui.warn(_("%s not added: only files and symlinks "
1082 "supported currently\n") % f)
1082 "supported currently\n") % f)
1083 rejected.append(p)
1083 rejected.append(p)
1084 elif self.dirstate[f] in 'amn':
1084 elif self.dirstate[f] in 'amn':
1085 self.ui.warn(_("%s already tracked!\n") % f)
1085 self.ui.warn(_("%s already tracked!\n") % f)
1086 elif self.dirstate[f] == 'r':
1086 elif self.dirstate[f] == 'r':
1087 self.dirstate.normallookup(f)
1087 self.dirstate.normallookup(f)
1088 else:
1088 else:
1089 self.dirstate.add(f)
1089 self.dirstate.add(f)
1090 return rejected
1090 return rejected
1091 finally:
1091 finally:
1092 del wlock
1092 del wlock
1093
1093
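# For reference, the one-letter dirstate states tested above: 'n' is
# normal (tracked), 'a' added, 'r' removed, 'm' merged, '?' untracked.
# A sketch of the 'r' branch, which resurrects a file scheduled for
# removal instead of re-adding it (`repo` is assumed):
#
#   repo.remove(['foo'])   # dirstate['foo'] becomes 'r'
#   repo.add(['foo'])      # takes the normallookup() branch above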
1094 def forget(self, list):
1094 def forget(self, list):
1095 wlock = self.wlock()
1095 wlock = self.wlock()
1096 try:
1096 try:
1097 for f in list:
1097 for f in list:
1098 if self.dirstate[f] != 'a':
1098 if self.dirstate[f] != 'a':
1099 self.ui.warn(_("%s not added!\n") % f)
1099 self.ui.warn(_("%s not added!\n") % f)
1100 else:
1100 else:
1101 self.dirstate.forget(f)
1101 self.dirstate.forget(f)
1102 finally:
1102 finally:
1103 del wlock
1103 del wlock
1104
1104
1105 def remove(self, list, unlink=False):
1105 def remove(self, list, unlink=False):
1106 wlock = None
1106 wlock = None
1107 try:
1107 try:
1108 if unlink:
1108 if unlink:
1109 for f in list:
1109 for f in list:
1110 try:
1110 try:
1111 util.unlink(self.wjoin(f))
1111 util.unlink(self.wjoin(f))
1112 except OSError, inst:
1112 except OSError, inst:
1113 if inst.errno != errno.ENOENT:
1113 if inst.errno != errno.ENOENT:
1114 raise
1114 raise
1115 wlock = self.wlock()
1115 wlock = self.wlock()
1116 for f in list:
1116 for f in list:
1117 if unlink and os.path.exists(self.wjoin(f)):
1117 if unlink and os.path.exists(self.wjoin(f)):
1118 self.ui.warn(_("%s still exists!\n") % f)
1118 self.ui.warn(_("%s still exists!\n") % f)
1119 elif self.dirstate[f] == 'a':
1119 elif self.dirstate[f] == 'a':
1120 self.dirstate.forget(f)
1120 self.dirstate.forget(f)
1121 elif f not in self.dirstate:
1121 elif f not in self.dirstate:
1122 self.ui.warn(_("%s not tracked!\n") % f)
1122 self.ui.warn(_("%s not tracked!\n") % f)
1123 else:
1123 else:
1124 self.dirstate.remove(f)
1124 self.dirstate.remove(f)
1125 finally:
1125 finally:
1126 del wlock
1126 del wlock
1127
1127
1128 def undelete(self, list):
1128 def undelete(self, list):
1129 wlock = None
1129 wlock = None
1130 try:
1130 try:
1131 manifests = [self.manifest.read(self.changelog.read(p)[0])
1131 manifests = [self.manifest.read(self.changelog.read(p)[0])
1132 for p in self.dirstate.parents() if p != nullid]
1132 for p in self.dirstate.parents() if p != nullid]
1133 wlock = self.wlock()
1133 wlock = self.wlock()
1134 for f in list:
1134 for f in list:
1135 if self.dirstate[f] != 'r':
1135 if self.dirstate[f] != 'r':
1136 self.ui.warn(_("%s not removed!\n") % f)
1136 self.ui.warn(_("%s not removed!\n") % f)
1137 else:
1137 else:
1138 m = f in manifests[0] and manifests[0] or manifests[1]
1138 m = f in manifests[0] and manifests[0] or manifests[1]
1139 t = self.file(f).read(m[f])
1139 t = self.file(f).read(m[f])
1140 self.wwrite(f, t, m.flags(f))
1140 self.wwrite(f, t, m.flags(f))
1141 self.dirstate.normal(f)
1141 self.dirstate.normal(f)
1142 finally:
1142 finally:
1143 del wlock
1143 del wlock
1144
1144
1145 def copy(self, source, dest):
1145 def copy(self, source, dest):
1146 wlock = None
1146 wlock = None
1147 try:
1147 try:
1148 p = self.wjoin(dest)
1148 p = self.wjoin(dest)
1149 if not (os.path.exists(p) or os.path.islink(p)):
1149 if not (os.path.exists(p) or os.path.islink(p)):
1150 self.ui.warn(_("%s does not exist!\n") % dest)
1150 self.ui.warn(_("%s does not exist!\n") % dest)
1151 elif not (os.path.isfile(p) or os.path.islink(p)):
1151 elif not (os.path.isfile(p) or os.path.islink(p)):
1152 self.ui.warn(_("copy failed: %s is not a file or a "
1152 self.ui.warn(_("copy failed: %s is not a file or a "
1153 "symbolic link\n") % dest)
1153 "symbolic link\n") % dest)
1154 else:
1154 else:
1155 wlock = self.wlock()
1155 wlock = self.wlock()
1156 if self.dirstate[dest] in '?r':
1156 if self.dirstate[dest] in '?r':
1157 self.dirstate.add(dest)
1157 self.dirstate.add(dest)
1158 self.dirstate.copy(source, dest)
1158 self.dirstate.copy(source, dest)
1159 finally:
1159 finally:
1160 del wlock
1160 del wlock
1161
1161
1162 def heads(self, start=None):
1162 def heads(self, start=None):
1163 heads = self.changelog.heads(start)
1163 heads = self.changelog.heads(start)
1164 # sort the output in rev descending order
1164 # sort the output in rev descending order
1165 heads = [(-self.changelog.rev(h), h) for h in heads]
1165 heads = [(-self.changelog.rev(h), h) for h in heads]
1166 return [n for (r, n) in util.sort(heads)]
1166 return [n for (r, n) in util.sort(heads)]
1167
1167
1168 def branchheads(self, branch=None, start=None):
1168 def branchheads(self, branch=None, start=None):
1169 if branch is None:
1169 if branch is None:
1170 branch = self[None].branch()
1170 branch = self[None].branch()
1171 branches = self.branchtags()
1171 branches = self.branchtags()
1172 if branch not in branches:
1172 if branch not in branches:
1173 return []
1173 return []
1174 # The basic algorithm is this:
1174 # The basic algorithm is this:
1175 #
1175 #
1176 # Start from the branch tip since there are no later revisions that can
1176 # Start from the branch tip since there are no later revisions that can
1177 # possibly be in this branch, and the tip is a guaranteed head.
1177 # possibly be in this branch, and the tip is a guaranteed head.
1178 #
1178 #
1179 # Remember the tip's parents as the first ancestors, since these by
1179 # Remember the tip's parents as the first ancestors, since these by
1180 # definition are not heads.
1180 # definition are not heads.
1181 #
1181 #
1182 # Step backwards from the branch tip through all the revisions. We are
1182 # Step backwards from the branch tip through all the revisions. We are
1183 # guaranteed by the rules of Mercurial that we will now be visiting the
1183 # guaranteed by the rules of Mercurial that we will now be visiting the
1184 # nodes in reverse topological order (children before parents).
1184 # nodes in reverse topological order (children before parents).
1185 #
1185 #
1186 # If a revision is one of the ancestors of a head then we can toss it
1186 # If a revision is one of the ancestors of a head then we can toss it
1187 # out of the ancestors set (we've already found it and won't be
1187 # out of the ancestors set (we've already found it and won't be
1188 # visiting it again) and put its parents in the ancestors set.
1188 # visiting it again) and put its parents in the ancestors set.
1189 #
1189 #
1190 # Otherwise, if a revision is in the branch it's another head, since it
1190 # Otherwise, if a revision is in the branch it's another head, since it
1191 # wasn't in the ancestor list of an existing head. So add it to the
1191 # wasn't in the ancestor list of an existing head. So add it to the
1192 # head list, and add its parents to the ancestor list.
1192 # head list, and add its parents to the ancestor list.
1193 #
1193 #
1194 # If it is not in the branch ignore it.
1194 # If it is not in the branch ignore it.
1195 #
1195 #
1196 # Once we have a list of heads, use nodesbetween to filter out all the
1196 # Once we have a list of heads, use nodesbetween to filter out all the
1197 # heads that cannot be reached from startrev. There may be a more
1197 # heads that cannot be reached from startrev. There may be a more
1198 # efficient way to do this as part of the previous algorithm.
1198 # efficient way to do this as part of the previous algorithm.
1199
1199
1200 set = util.set
1200 set = util.set
1201 heads = [self.changelog.rev(branches[branch])]
1201 heads = [self.changelog.rev(branches[branch])]
1202 # Don't care if ancestors contains nullrev or not.
1202 # Don't care if ancestors contains nullrev or not.
1203 ancestors = set(self.changelog.parentrevs(heads[0]))
1203 ancestors = set(self.changelog.parentrevs(heads[0]))
1204 for rev in xrange(heads[0] - 1, nullrev, -1):
1204 for rev in xrange(heads[0] - 1, nullrev, -1):
1205 if rev in ancestors:
1205 if rev in ancestors:
1206 ancestors.update(self.changelog.parentrevs(rev))
1206 ancestors.update(self.changelog.parentrevs(rev))
1207 ancestors.remove(rev)
1207 ancestors.remove(rev)
1208 elif self[rev].branch() == branch:
1208 elif self[rev].branch() == branch:
1209 heads.append(rev)
1209 heads.append(rev)
1210 ancestors.update(self.changelog.parentrevs(rev))
1210 ancestors.update(self.changelog.parentrevs(rev))
1211 heads = [self.changelog.node(rev) for rev in heads]
1211 heads = [self.changelog.node(rev) for rev in heads]
1212 if start is not None:
1212 if start is not None:
1213 heads = self.changelog.nodesbetween([start], heads)[2]
1213 heads = self.changelog.nodesbetween([start], heads)[2]
1214 return heads
1214 return heads
1215
1215
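# A self-contained sketch of the sweep above on a toy history; `parents`
# maps rev -> (p1, p2) and -1 plays the role of nullrev:
#
#   parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1), 4: (2, -1)}
#   onbranch = set([0, 1, 2, 3, 4])  # pretend every rev is on the branch
#   heads = [4]                      # start at the branch tip
#   ancestors = set(parents[4])
#   for rev in xrange(3, -1, -1):
#       if rev in ancestors:         # known non-head: swap in its parents
#           ancestors.update(parents[rev])
#           ancestors.remove(rev)
#       elif rev in onbranch:        # unreached rev on the branch: a head
#           heads.append(rev)
#           ancestors.update(parents[rev])
#   assert heads == [4, 3]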
1216 def branches(self, nodes):
1216 def branches(self, nodes):
1217 if not nodes:
1217 if not nodes:
1218 nodes = [self.changelog.tip()]
1218 nodes = [self.changelog.tip()]
1219 b = []
1219 b = []
1220 for n in nodes:
1220 for n in nodes:
1221 t = n
1221 t = n
1222 while 1:
1222 while 1:
1223 p = self.changelog.parents(n)
1223 p = self.changelog.parents(n)
1224 if p[1] != nullid or p[0] == nullid:
1224 if p[1] != nullid or p[0] == nullid:
1225 b.append((t, n, p[0], p[1]))
1225 b.append((t, n, p[0], p[1]))
1226 break
1226 break
1227 n = p[0]
1227 n = p[0]
1228 return b
1228 return b
1229
1229
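# Illustrative output shape (placeholder nodes): for a linear history
# whose root has parents (nullid, nullid), branches([tipnode]) follows
# first parents down from the tip and yields one 4-tuple per segment:
#
#   [(tipnode, rootnode, nullid, nullid)]
#
# i.e. (segment head, segment root, root's p1, root's p2); a merge or
# the null parent is what terminates a segment.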
1230 def between(self, pairs):
1230 def between(self, pairs):
1231 r = []
1231 r = []
1232
1232
1233 for top, bottom in pairs:
1233 for top, bottom in pairs:
1234 n, l, i = top, [], 0
1234 n, l, i = top, [], 0
1235 f = 1
1235 f = 1
1236
1236
1237 while n != bottom:
1237 while n != bottom:
1238 p = self.changelog.parents(n)[0]
1238 p = self.changelog.parents(n)[0]
1239 if i == f:
1239 if i == f:
1240 l.append(n)
1240 l.append(n)
1241 f = f * 2
1241 f = f * 2
1242 n = p
1242 n = p
1243 i += 1
1243 i += 1
1244
1244
1245 r.append(l)
1245 r.append(l)
1246
1246
1247 return r
1247 return r
1248
1248
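# A runnable sketch of the sampling above: walking from `top` toward
# `bottom`, nodes are reported at exponentially growing distances
# (1, 2, 4, ...), which is what feeds the binary search in
# findincoming() below. Integers stand in for first-parent traversal:
#
#   def spaced(top, bottom):
#       n, l, i, f = top, [], 0, 1
#       while n != bottom:
#           if i == f:
#               l.append(n)
#               f *= 2
#           n -= 1  # "first parent" of n in this toy chain
#           i += 1
#       return l
#
#   spaced(10, 0)  # -> [9, 8, 6, 2]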
1249 def findincoming(self, remote, base=None, heads=None, force=False):
1249 def findincoming(self, remote, base=None, heads=None, force=False):
1250 """Return list of roots of the subsets of missing nodes from remote
1250 """Return list of roots of the subsets of missing nodes from remote
1251
1251
1252 If base dict is specified, assume that these nodes and their parents
1252 If base dict is specified, assume that these nodes and their parents
1253 exist on the remote side and that no child of a node of base exists
1253 exist on the remote side and that no child of a node of base exists
1254 in both remote and self.
1254 in both remote and self.
1255 Furthermore base will be updated to include the nodes that exist
1255 Furthermore base will be updated to include the nodes that exist
1256 in self and remote but none of whose children exist in both.
1256 in self and remote but none of whose children exist in both.
1257 If a list of heads is specified, return only nodes which are heads
1257 If a list of heads is specified, return only nodes which are heads
1258 or ancestors of these heads.
1258 or ancestors of these heads.
1259
1259
1260 All the ancestors of base are in self and in remote.
1260 All the ancestors of base are in self and in remote.
1261 All the descendants of the list returned are missing in self.
1261 All the descendants of the list returned are missing in self.
1262 (and so we know that the rest of the nodes are missing in remote, see
1262 (and so we know that the rest of the nodes are missing in remote, see
1263 outgoing)
1263 outgoing)
1264 """
1264 """
1265 m = self.changelog.nodemap
1265 m = self.changelog.nodemap
1266 search = []
1266 search = []
1267 fetch = {}
1267 fetch = {}
1268 seen = {}
1268 seen = {}
1269 seenbranch = {}
1269 seenbranch = {}
1270 if base is None:
1270 if base is None:
1271 base = {}
1271 base = {}
1272
1272
1273 if not heads:
1273 if not heads:
1274 heads = remote.heads()
1274 heads = remote.heads()
1275
1275
1276 if self.changelog.tip() == nullid:
1276 if self.changelog.tip() == nullid:
1277 base[nullid] = 1
1277 base[nullid] = 1
1278 if heads != [nullid]:
1278 if heads != [nullid]:
1279 return [nullid]
1279 return [nullid]
1280 return []
1280 return []
1281
1281
1282 # assume we're closer to the tip than the root
1282 # assume we're closer to the tip than the root
1283 # and start by examining the heads
1283 # and start by examining the heads
1284 self.ui.status(_("searching for changes\n"))
1284 self.ui.status(_("searching for changes\n"))
1285
1285
1286 unknown = []
1286 unknown = []
1287 for h in heads:
1287 for h in heads:
1288 if h not in m:
1288 if h not in m:
1289 unknown.append(h)
1289 unknown.append(h)
1290 else:
1290 else:
1291 base[h] = 1
1291 base[h] = 1
1292
1292
1293 if not unknown:
1293 if not unknown:
1294 return []
1294 return []
1295
1295
1296 req = dict.fromkeys(unknown)
1296 req = dict.fromkeys(unknown)
1297 reqcnt = 0
1297 reqcnt = 0
1298
1298
1299 # search through remote branches
1299 # search through remote branches
1300 # a 'branch' here is a linear segment of history, with four parts:
1300 # a 'branch' here is a linear segment of history, with four parts:
1301 # head, root, first parent, second parent
1301 # head, root, first parent, second parent
1302 # (a branch always has two parents (or none) by definition)
1302 # (a branch always has two parents (or none) by definition)
1303 unknown = remote.branches(unknown)
1303 unknown = remote.branches(unknown)
1304 while unknown:
1304 while unknown:
1305 r = []
1305 r = []
1306 while unknown:
1306 while unknown:
1307 n = unknown.pop(0)
1307 n = unknown.pop(0)
1308 if n[0] in seen:
1308 if n[0] in seen:
1309 continue
1309 continue
1310
1310
1311 self.ui.debug(_("examining %s:%s\n")
1311 self.ui.debug(_("examining %s:%s\n")
1312 % (short(n[0]), short(n[1])))
1312 % (short(n[0]), short(n[1])))
1313 if n[0] == nullid: # found the end of the branch
1313 if n[0] == nullid: # found the end of the branch
1314 pass
1314 pass
1315 elif n in seenbranch:
1315 elif n in seenbranch:
1316 self.ui.debug(_("branch already found\n"))
1316 self.ui.debug(_("branch already found\n"))
1317 continue
1317 continue
1318 elif n[1] and n[1] in m: # do we know the base?
1318 elif n[1] and n[1] in m: # do we know the base?
1319 self.ui.debug(_("found incomplete branch %s:%s\n")
1319 self.ui.debug(_("found incomplete branch %s:%s\n")
1320 % (short(n[0]), short(n[1])))
1320 % (short(n[0]), short(n[1])))
1321 search.append(n) # schedule branch range for scanning
1321 search.append(n[0:2]) # schedule branch range for scanning
1322 seenbranch[n] = 1
1322 seenbranch[n] = 1
1323 else:
1323 else:
1324 if n[1] not in seen and n[1] not in fetch:
1324 if n[1] not in seen and n[1] not in fetch:
1325 if n[2] in m and n[3] in m:
1325 if n[2] in m and n[3] in m:
1326 self.ui.debug(_("found new changeset %s\n") %
1326 self.ui.debug(_("found new changeset %s\n") %
1327 short(n[1]))
1327 short(n[1]))
1328 fetch[n[1]] = 1 # earliest unknown
1328 fetch[n[1]] = 1 # earliest unknown
1329 for p in n[2:4]:
1329 for p in n[2:4]:
1330 if p in m:
1330 if p in m:
1331 base[p] = 1 # latest known
1331 base[p] = 1 # latest known
1332
1332
1333 for p in n[2:4]:
1333 for p in n[2:4]:
1334 if p not in req and p not in m:
1334 if p not in req and p not in m:
1335 r.append(p)
1335 r.append(p)
1336 req[p] = 1
1336 req[p] = 1
1337 seen[n[0]] = 1
1337 seen[n[0]] = 1
1338
1338
1339 if r:
1339 if r:
1340 reqcnt += 1
1340 reqcnt += 1
1341 self.ui.debug(_("request %d: %s\n") %
1341 self.ui.debug(_("request %d: %s\n") %
1342 (reqcnt, " ".join(map(short, r))))
1342 (reqcnt, " ".join(map(short, r))))
1343 for p in xrange(0, len(r), 10):
1343 for p in xrange(0, len(r), 10):
1344 for b in remote.branches(r[p:p+10]):
1344 for b in remote.branches(r[p:p+10]):
1345 self.ui.debug(_("received %s:%s\n") %
1345 self.ui.debug(_("received %s:%s\n") %
1346 (short(b[0]), short(b[1])))
1346 (short(b[0]), short(b[1])))
1347 unknown.append(b)
1347 unknown.append(b)
1348
1348
1349 # do binary search on the branches we found
1349 # do binary search on the branches we found
1350 search = [(t, b) for (t, b, p1, p2) in search]
1351 while search:
1350 while search:
1352 newsearch = []
1351 newsearch = []
1353 reqcnt += 1
1352 reqcnt += 1
1354 for n, l in zip(search, remote.between(search)):
1353 for n, l in zip(search, remote.between(search)):
1355 l.append(n[1])
1354 l.append(n[1])
1356 p = n[0]
1355 p = n[0]
1357 f = 1
1356 f = 1
1358 for i in l:
1357 for i in l:
1359 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1358 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1360 if i in m:
1359 if i in m:
1361 if f <= 2:
1360 if f <= 2:
1362 self.ui.debug(_("found new branch changeset %s\n") %
1361 self.ui.debug(_("found new branch changeset %s\n") %
1363 short(p))
1362 short(p))
1364 fetch[p] = 1
1363 fetch[p] = 1
1365 base[i] = 1
1364 base[i] = 1
1366 else:
1365 else:
1367 self.ui.debug(_("narrowed branch search to %s:%s\n")
1366 self.ui.debug(_("narrowed branch search to %s:%s\n")
1368 % (short(p), short(i)))
1367 % (short(p), short(i)))
1369 newsearch.append((p, i))
1368 newsearch.append((p, i))
1370 break
1369 break
1371 p, f = i, f * 2
1370 p, f = i, f * 2
1372 search = newsearch
1371 search = newsearch
1373
1372
1374 # sanity check our fetch list
1373 # sanity check our fetch list
1375 for f in fetch.keys():
1374 for f in fetch.keys():
1376 if f in m:
1375 if f in m:
1377 raise repo.RepoError(_("already have changeset ") + short(f))
1376 raise repo.RepoError(_("already have changeset ") + short(f))
1378
1377
1379 if base.keys() == [nullid]:
1378 if base.keys() == [nullid]:
1380 if force:
1379 if force:
1381 self.ui.warn(_("warning: repository is unrelated\n"))
1380 self.ui.warn(_("warning: repository is unrelated\n"))
1382 else:
1381 else:
1383 raise util.Abort(_("repository is unrelated"))
1382 raise util.Abort(_("repository is unrelated"))
1384
1383
1385 self.ui.debug(_("found new changesets starting at ") +
1384 self.ui.debug(_("found new changesets starting at ") +
1386 " ".join([short(f) for f in fetch]) + "\n")
1385 " ".join([short(f) for f in fetch]) + "\n")
1387
1386
1388 self.ui.debug(_("%d total queries\n") % reqcnt)
1387 self.ui.debug(_("%d total queries\n") % reqcnt)
1389
1388
1390 return fetch.keys()
1389 return fetch.keys()
1391
1390
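# An illustrative call sequence (`repo` and `remote` are assumed
# repository objects):
#
#   base = {}
#   missing = repo.findincoming(remote, base=base)
#   # base now maps the common frontier nodes; missing holds the roots
#   # of the remote-only changesets, ready for
#   # remote.changegroup(missing, 'pull') as pull() does below.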
1392 def findoutgoing(self, remote, base=None, heads=None, force=False):
1391 def findoutgoing(self, remote, base=None, heads=None, force=False):
1393 """Return list of nodes that are roots of subsets not in remote
1392 """Return list of nodes that are roots of subsets not in remote
1394
1393
1395 If base dict is specified, assume that these nodes and their parents
1394 If base dict is specified, assume that these nodes and their parents
1396 exist on the remote side.
1395 exist on the remote side.
1397 If a list of heads is specified, return only nodes which are heads
1396 If a list of heads is specified, return only nodes which are heads
1398 or ancestors of these heads, and return a second element which
1397 or ancestors of these heads, and return a second element which
1399 contains all remote heads which get new children.
1398 contains all remote heads which get new children.
1400 """
1399 """
1401 if base is None:
1400 if base is None:
1402 base = {}
1401 base = {}
1403 self.findincoming(remote, base, heads, force=force)
1402 self.findincoming(remote, base, heads, force=force)
1404
1403
1405 self.ui.debug(_("common changesets up to ")
1404 self.ui.debug(_("common changesets up to ")
1406 + " ".join(map(short, base.keys())) + "\n")
1405 + " ".join(map(short, base.keys())) + "\n")
1407
1406
1408 remain = dict.fromkeys(self.changelog.nodemap)
1407 remain = dict.fromkeys(self.changelog.nodemap)
1409
1408
1410 # prune everything remote has from the tree
1409 # prune everything remote has from the tree
1411 del remain[nullid]
1410 del remain[nullid]
1412 remove = base.keys()
1411 remove = base.keys()
1413 while remove:
1412 while remove:
1414 n = remove.pop(0)
1413 n = remove.pop(0)
1415 if n in remain:
1414 if n in remain:
1416 del remain[n]
1415 del remain[n]
1417 for p in self.changelog.parents(n):
1416 for p in self.changelog.parents(n):
1418 remove.append(p)
1417 remove.append(p)
1419
1418
1420 # find every node whose parents have been pruned
1419 # find every node whose parents have been pruned
1421 subset = []
1420 subset = []
1422 # find every remote head that will get new children
1421 # find every remote head that will get new children
1423 updated_heads = {}
1422 updated_heads = {}
1424 for n in remain:
1423 for n in remain:
1425 p1, p2 = self.changelog.parents(n)
1424 p1, p2 = self.changelog.parents(n)
1426 if p1 not in remain and p2 not in remain:
1425 if p1 not in remain and p2 not in remain:
1427 subset.append(n)
1426 subset.append(n)
1428 if heads:
1427 if heads:
1429 if p1 in heads:
1428 if p1 in heads:
1430 updated_heads[p1] = True
1429 updated_heads[p1] = True
1431 if p2 in heads:
1430 if p2 in heads:
1432 updated_heads[p2] = True
1431 updated_heads[p2] = True
1433
1432
1434 # this is the set of all roots we have to push
1433 # this is the set of all roots we have to push
1435 if heads:
1434 if heads:
1436 return subset, updated_heads.keys()
1435 return subset, updated_heads.keys()
1437 else:
1436 else:
1438 return subset
1437 return subset
1439
1438
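# The outbound counterpart, as prepush() uses it below (objects again
# assumed); note that findoutgoing() seeds `base` itself via
# findincoming():
#
#   base = {}
#   roots = repo.findoutgoing(remote, base)
#   cg = repo.changegroup(roots, 'push')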
1440 def pull(self, remote, heads=None, force=False):
1439 def pull(self, remote, heads=None, force=False):
1441 lock = self.lock()
1440 lock = self.lock()
1442 try:
1441 try:
1443 fetch = self.findincoming(remote, heads=heads, force=force)
1442 fetch = self.findincoming(remote, heads=heads, force=force)
1444 if fetch == [nullid]:
1443 if fetch == [nullid]:
1445 self.ui.status(_("requesting all changes\n"))
1444 self.ui.status(_("requesting all changes\n"))
1446
1445
1447 if not fetch:
1446 if not fetch:
1448 self.ui.status(_("no changes found\n"))
1447 self.ui.status(_("no changes found\n"))
1449 return 0
1448 return 0
1450
1449
1451 if heads is None:
1450 if heads is None:
1452 cg = remote.changegroup(fetch, 'pull')
1451 cg = remote.changegroup(fetch, 'pull')
1453 else:
1452 else:
1454 if 'changegroupsubset' not in remote.capabilities:
1453 if 'changegroupsubset' not in remote.capabilities:
1455 raise util.Abort(_("Partial pull cannot be done because the other repository doesn't support changegroupsubset."))
1454 raise util.Abort(_("Partial pull cannot be done because the other repository doesn't support changegroupsubset."))
1456 cg = remote.changegroupsubset(fetch, heads, 'pull')
1455 cg = remote.changegroupsubset(fetch, heads, 'pull')
1457 return self.addchangegroup(cg, 'pull', remote.url())
1456 return self.addchangegroup(cg, 'pull', remote.url())
1458 finally:
1457 finally:
1459 del lock
1458 del lock
1460
1459
1461 def push(self, remote, force=False, revs=None):
1460 def push(self, remote, force=False, revs=None):
1462 # there are two ways to push to remote repo:
1461 # there are two ways to push to remote repo:
1463 #
1462 #
1464 # addchangegroup assumes local user can lock remote
1463 # addchangegroup assumes local user can lock remote
1465 # repo (local filesystem, old ssh servers).
1464 # repo (local filesystem, old ssh servers).
1466 #
1465 #
1467 # unbundle assumes local user cannot lock remote repo (new ssh
1466 # unbundle assumes local user cannot lock remote repo (new ssh
1468 # servers, http servers).
1467 # servers, http servers).
1469
1468
1470 if remote.capable('unbundle'):
1469 if remote.capable('unbundle'):
1471 return self.push_unbundle(remote, force, revs)
1470 return self.push_unbundle(remote, force, revs)
1472 return self.push_addchangegroup(remote, force, revs)
1471 return self.push_addchangegroup(remote, force, revs)
1473
1472
1474 def prepush(self, remote, force, revs):
1473 def prepush(self, remote, force, revs):
1475 base = {}
1474 base = {}
1476 remote_heads = remote.heads()
1475 remote_heads = remote.heads()
1477 inc = self.findincoming(remote, base, remote_heads, force=force)
1476 inc = self.findincoming(remote, base, remote_heads, force=force)
1478
1477
1479 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1478 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1480 if revs is not None:
1479 if revs is not None:
1481 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1480 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1482 else:
1481 else:
1483 bases, heads = update, self.changelog.heads()
1482 bases, heads = update, self.changelog.heads()
1484
1483
1485 if not bases:
1484 if not bases:
1486 self.ui.status(_("no changes found\n"))
1485 self.ui.status(_("no changes found\n"))
1487 return None, 1
1486 return None, 1
1488 elif not force:
1487 elif not force:
1489 # check if we're creating new remote heads
1488 # check if we're creating new remote heads
1490 # to be a remote head after push, node must be either
1489 # to be a remote head after push, node must be either
1491 # - unknown locally
1490 # - unknown locally
1492 # - a local outgoing head descended from update
1491 # - a local outgoing head descended from update
1493 # - a remote head that's known locally and not
1492 # - a remote head that's known locally and not
1494 # ancestral to an outgoing head
1493 # ancestral to an outgoing head
1495
1494
1496 warn = 0
1495 warn = 0
1497
1496
1498 if remote_heads == [nullid]:
1497 if remote_heads == [nullid]:
1499 warn = 0
1498 warn = 0
1500 elif not revs and len(heads) > len(remote_heads):
1499 elif not revs and len(heads) > len(remote_heads):
1501 warn = 1
1500 warn = 1
1502 else:
1501 else:
1503 newheads = list(heads)
1502 newheads = list(heads)
1504 for r in remote_heads:
1503 for r in remote_heads:
1505 if r in self.changelog.nodemap:
1504 if r in self.changelog.nodemap:
1506 desc = self.changelog.heads(r, heads)
1505 desc = self.changelog.heads(r, heads)
1507 l = [h for h in heads if h in desc]
1506 l = [h for h in heads if h in desc]
1508 if not l:
1507 if not l:
1509 newheads.append(r)
1508 newheads.append(r)
1510 else:
1509 else:
1511 newheads.append(r)
1510 newheads.append(r)
1512 if len(newheads) > len(remote_heads):
1511 if len(newheads) > len(remote_heads):
1513 warn = 1
1512 warn = 1
1514
1513
1515 if warn:
1514 if warn:
1516 self.ui.warn(_("abort: push creates new remote heads!\n"))
1515 self.ui.warn(_("abort: push creates new remote heads!\n"))
1517 self.ui.status(_("(did you forget to merge?"
1516 self.ui.status(_("(did you forget to merge?"
1518 " use push -f to force)\n"))
1517 " use push -f to force)\n"))
1519 return None, 0
1518 return None, 0
1520 elif inc:
1519 elif inc:
1521 self.ui.warn(_("note: unsynced remote changes!\n"))
1520 self.ui.warn(_("note: unsynced remote changes!\n"))
1522
1521
1523
1522
1524 if revs is None:
1523 if revs is None:
1525 cg = self.changegroup(update, 'push')
1524 cg = self.changegroup(update, 'push')
1526 else:
1525 else:
1527 cg = self.changegroupsubset(update, revs, 'push')
1526 cg = self.changegroupsubset(update, revs, 'push')
1528 return cg, remote_heads
1527 return cg, remote_heads
1529
1528
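# A toy check mirroring the warn logic above (strings are placeholder
# node names, not real hashes):
#
#   remote_heads = ['r1']      # the remote has one head today
#   newheads = ['l1', 'l2']    # remote heads if this push succeeded
#   if len(newheads) > len(remote_heads):
#       print 'abort: push creates new remote heads!'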
1530 def push_addchangegroup(self, remote, force, revs):
1529 def push_addchangegroup(self, remote, force, revs):
1531 lock = remote.lock()
1530 lock = remote.lock()
1532 try:
1531 try:
1533 ret = self.prepush(remote, force, revs)
1532 ret = self.prepush(remote, force, revs)
1534 if ret[0] is not None:
1533 if ret[0] is not None:
1535 cg, remote_heads = ret
1534 cg, remote_heads = ret
1536 return remote.addchangegroup(cg, 'push', self.url())
1535 return remote.addchangegroup(cg, 'push', self.url())
1537 return ret[1]
1536 return ret[1]
1538 finally:
1537 finally:
1539 del lock
1538 del lock
1540
1539
1541 def push_unbundle(self, remote, force, revs):
1540 def push_unbundle(self, remote, force, revs):
1542 # local repo finds heads on server, finds out what revs it
1541 # local repo finds heads on server, finds out what revs it
1543 # must push. once revs transferred, if server finds it has
1542 # must push. once revs transferred, if server finds it has
1544 # different heads (someone else won commit/push race), server
1543 # different heads (someone else won commit/push race), server
1545 # aborts.
1544 # aborts.
1546
1545
1547 ret = self.prepush(remote, force, revs)
1546 ret = self.prepush(remote, force, revs)
1548 if ret[0] is not None:
1547 if ret[0] is not None:
1549 cg, remote_heads = ret
1548 cg, remote_heads = ret
1550 if force: remote_heads = ['force']
1549 if force: remote_heads = ['force']
1551 return remote.unbundle(cg, remote_heads, 'push')
1550 return remote.unbundle(cg, remote_heads, 'push')
1552 return ret[1]
1551 return ret[1]
1553
1552
1554 def changegroupinfo(self, nodes, source):
1553 def changegroupinfo(self, nodes, source):
1555 if self.ui.verbose or source == 'bundle':
1554 if self.ui.verbose or source == 'bundle':
1556 self.ui.status(_("%d changesets found\n") % len(nodes))
1555 self.ui.status(_("%d changesets found\n") % len(nodes))
1557 if self.ui.debugflag:
1556 if self.ui.debugflag:
1558 self.ui.debug(_("List of changesets:\n"))
1557 self.ui.debug(_("List of changesets:\n"))
1559 for node in nodes:
1558 for node in nodes:
1560 self.ui.debug("%s\n" % hex(node))
1559 self.ui.debug("%s\n" % hex(node))
1561
1560
1562 def changegroupsubset(self, bases, heads, source, extranodes=None):
1561 def changegroupsubset(self, bases, heads, source, extranodes=None):
1563 """This function generates a changegroup consisting of all the nodes
1562 """This function generates a changegroup consisting of all the nodes
1564 that are descendants of any of the bases, and ancestors of any of
1563 that are descendants of any of the bases, and ancestors of any of
1565 the heads.
1564 the heads.
1566
1565
1567 It is fairly complex as determining which filenodes and which
1566 It is fairly complex as determining which filenodes and which
1568 manifest nodes need to be included for the changeset to be complete
1567 manifest nodes need to be included for the changeset to be complete
1569 is non-trivial.
1568 is non-trivial.
1570
1569
1571 Another wrinkle is doing the reverse, figuring out which changeset in
1570 Another wrinkle is doing the reverse, figuring out which changeset in
1572 the changegroup a particular filenode or manifestnode belongs to.
1571 the changegroup a particular filenode or manifestnode belongs to.
1573
1572
1574 The caller can specify some nodes that must be included in the
1573 The caller can specify some nodes that must be included in the
1575 changegroup using the extranodes argument. It should be a dict
1574 changegroup using the extranodes argument. It should be a dict
1576 where the keys are the filenames (or 1 for the manifest), and the
1575 where the keys are the filenames (or 1 for the manifest), and the
1577 values are lists of (node, linknode) tuples, where node is a wanted
1576 values are lists of (node, linknode) tuples, where node is a wanted
1578 node and linknode is the changelog node that should be transmitted as
1577 node and linknode is the changelog node that should be transmitted as
1579 the linkrev.
1578 the linkrev.
1580 """
1579 """
1581
1580
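# Illustrative shape of the extranodes argument described in the
# docstring above (node values are placeholders):
#
#   extranodes = {
#       1: [(manifestnode, linknode)],          # key 1 == the manifest
#       'foo/bar.txt': [(filenode, linknode)],  # wanted filenodes
#   }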
1582 if extranodes is None:
1581 if extranodes is None:
1583 # can we go through the fast path?
1582 # can we go through the fast path?
1584 heads.sort()
1583 heads.sort()
1585 allheads = self.heads()
1584 allheads = self.heads()
1586 allheads.sort()
1585 allheads.sort()
1587 if heads == allheads:
1586 if heads == allheads:
1588 common = []
1587 common = []
1589 # parents of bases are known from both sides
1588 # parents of bases are known from both sides
1590 for n in bases:
1589 for n in bases:
1591 for p in self.changelog.parents(n):
1590 for p in self.changelog.parents(n):
1592 if p != nullid:
1591 if p != nullid:
1593 common.append(p)
1592 common.append(p)
1594 return self._changegroup(common, source)
1593 return self._changegroup(common, source)
1595
1594
1596 self.hook('preoutgoing', throw=True, source=source)
1595 self.hook('preoutgoing', throw=True, source=source)
1597
1596
1598 # Set up some initial variables
1597 # Set up some initial variables
1599 # Make it easy to refer to self.changelog
1598 # Make it easy to refer to self.changelog
1600 cl = self.changelog
1599 cl = self.changelog
1601 # msng is short for missing - compute the list of changesets in this
1600 # msng is short for missing - compute the list of changesets in this
1602 # changegroup.
1601 # changegroup.
1603 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1602 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1604 self.changegroupinfo(msng_cl_lst, source)
1603 self.changegroupinfo(msng_cl_lst, source)
1605 # Some bases may turn out to be superfluous, and some heads may be
1604 # Some bases may turn out to be superfluous, and some heads may be
1606 # too. nodesbetween will return the minimal set of bases and heads
1605 # too. nodesbetween will return the minimal set of bases and heads
1607 # necessary to re-create the changegroup.
1606 # necessary to re-create the changegroup.
1608
1607
1609 # Known heads are the list of heads that it is assumed the recipient
1608 # Known heads are the list of heads that it is assumed the recipient
1610 # of this changegroup will know about.
1609 # of this changegroup will know about.
1611 knownheads = {}
1610 knownheads = {}
1612 # We assume that all parents of bases are known heads.
1611 # We assume that all parents of bases are known heads.
1613 for n in bases:
1612 for n in bases:
1614 for p in cl.parents(n):
1613 for p in cl.parents(n):
1615 if p != nullid:
1614 if p != nullid:
1616 knownheads[p] = 1
1615 knownheads[p] = 1
1617 knownheads = knownheads.keys()
1616 knownheads = knownheads.keys()
1618 if knownheads:
1617 if knownheads:
1619 # Now that we know what heads are known, we can compute which
1618 # Now that we know what heads are known, we can compute which
1620 # changesets are known. The recipient must know about all
1619 # changesets are known. The recipient must know about all
1621 # changesets required to reach the known heads from the null
1620 # changesets required to reach the known heads from the null
1622 # changeset.
1621 # changeset.
1623 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1622 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1624 junk = None
1623 junk = None
1625 # Transform the list into an ersatz set.
1624 # Transform the list into an ersatz set.
1626 has_cl_set = dict.fromkeys(has_cl_set)
1625 has_cl_set = dict.fromkeys(has_cl_set)
1627 else:
1626 else:
1628 # If there were no known heads, the recipient cannot be assumed to
1627 # If there were no known heads, the recipient cannot be assumed to
1629 # know about any changesets.
1628 # know about any changesets.
1630 has_cl_set = {}
1629 has_cl_set = {}
1631
1630
1632 # Make it easy to refer to self.manifest
1631 # Make it easy to refer to self.manifest
1633 mnfst = self.manifest
1632 mnfst = self.manifest
1634 # We don't know which manifests are missing yet
1633 # We don't know which manifests are missing yet
1635 msng_mnfst_set = {}
1634 msng_mnfst_set = {}
1636 # Nor do we know which filenodes are missing.
1635 # Nor do we know which filenodes are missing.
1637 msng_filenode_set = {}
1636 msng_filenode_set = {}
1638
1637
1639 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1638 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1640 junk = None
1639 junk = None
1641
1640
1642 # A changeset always belongs to itself, so the changenode lookup
1641 # A changeset always belongs to itself, so the changenode lookup
1643 # function for a changenode is identity.
1642 # function for a changenode is identity.
1644 def identity(x):
1643 def identity(x):
1645 return x
1644 return x
1646
1645
1647 # A function generating function. Sets up an environment for the
1646 # A function generating function. Sets up an environment for the
1648 # inner function.
1647 # inner function.
1649 def cmp_by_rev_func(revlog):
1648 def cmp_by_rev_func(revlog):
1650 # Compare two nodes by their revision number in the environment's
1649 # Compare two nodes by their revision number in the environment's
1651 # revision history. Since the revision number both represents the
1650 # revision history. Since the revision number both represents the
1652 # most efficient order to read the nodes in, and represents a
1651 # most efficient order to read the nodes in, and represents a
1653 # topological sorting of the nodes, this function is often useful.
1652 # topological sorting of the nodes, this function is often useful.
1654 def cmp_by_rev(a, b):
1653 def cmp_by_rev(a, b):
1655 return cmp(revlog.rev(a), revlog.rev(b))
1654 return cmp(revlog.rev(a), revlog.rev(b))
1656 return cmp_by_rev
1655 return cmp_by_rev
1657
1656
1658 # If we determine that a particular file or manifest node must be a
1657 # If we determine that a particular file or manifest node must be a
1659 # node that the recipient of the changegroup will already have, we can
1658 # node that the recipient of the changegroup will already have, we can
1660 # also assume the recipient will have all the parents. This function
1659 # also assume the recipient will have all the parents. This function
1661 # prunes them from the set of missing nodes.
1660 # prunes them from the set of missing nodes.
1662 def prune_parents(revlog, hasset, msngset):
1661 def prune_parents(revlog, hasset, msngset):
1663 haslst = hasset.keys()
1662 haslst = hasset.keys()
1664 haslst.sort(cmp_by_rev_func(revlog))
1663 haslst.sort(cmp_by_rev_func(revlog))
1665 for node in haslst:
1664 for node in haslst:
1666 parentlst = [p for p in revlog.parents(node) if p != nullid]
1665 parentlst = [p for p in revlog.parents(node) if p != nullid]
1667 while parentlst:
1666 while parentlst:
1668 n = parentlst.pop()
1667 n = parentlst.pop()
1669 if n not in hasset:
1668 if n not in hasset:
1670 hasset[n] = 1
1669 hasset[n] = 1
1671 p = [p for p in revlog.parents(n) if p != nullid]
1670 p = [p for p in revlog.parents(n) if p != nullid]
1672 parentlst.extend(p)
1671 parentlst.extend(p)
1673 for n in hasset:
1672 for n in hasset:
1674 msngset.pop(n, None)
1673 msngset.pop(n, None)
1675
1674
1676 # This is a function generating function used to set up an environment
1675 # This is a function generating function used to set up an environment
1677 # for the inner function to execute in.
1676 # for the inner function to execute in.
1678 def manifest_and_file_collector(changedfileset):
1677 def manifest_and_file_collector(changedfileset):
1679 # This is an information gathering function that gathers
1678 # This is an information gathering function that gathers
1680 # information from each changeset node that goes out as part of
1679 # information from each changeset node that goes out as part of
1681 # the changegroup. The information gathered is a list of which
1680 # the changegroup. The information gathered is a list of which
1682 # manifest nodes are potentially required (the recipient may
1681 # manifest nodes are potentially required (the recipient may
1683 # already have them) and total list of all files which were
1682 # already have them) and total list of all files which were
1684 # changed in any changeset in the changegroup.
1683 # changed in any changeset in the changegroup.
1685 #
1684 #
1686 # We also remember the first changenode we saw any manifest
1685 # We also remember the first changenode we saw any manifest
1687 # referenced by so we can later determine which changenode 'owns'
1686 # referenced by so we can later determine which changenode 'owns'
1688 # the manifest.
1687 # the manifest.
1689 def collect_manifests_and_files(clnode):
1688 def collect_manifests_and_files(clnode):
1690 c = cl.read(clnode)
1689 c = cl.read(clnode)
1691 for f in c[3]:
1690 for f in c[3]:
1692 # This is to make sure we only have one instance of each
1691 # This is to make sure we only have one instance of each
1693 # filename string for each filename.
1692 # filename string for each filename.
1694 changedfileset.setdefault(f, f)
1693 changedfileset.setdefault(f, f)
1695 msng_mnfst_set.setdefault(c[0], clnode)
1694 msng_mnfst_set.setdefault(c[0], clnode)
1696 return collect_manifests_and_files
1695 return collect_manifests_and_files
1697
1696
1698 # Figure out which manifest nodes (of the ones we think might be part
1697 # Figure out which manifest nodes (of the ones we think might be part
1699 # of the changegroup) the recipient must know about and remove them
1698 # of the changegroup) the recipient must know about and remove them
1700 # from the changegroup.
1699 # from the changegroup.
1701 def prune_manifests():
1700 def prune_manifests():
1702 has_mnfst_set = {}
1701 has_mnfst_set = {}
1703 for n in msng_mnfst_set:
1702 for n in msng_mnfst_set:
1704 # If a 'missing' manifest thinks it belongs to a changenode
1703 # If a 'missing' manifest thinks it belongs to a changenode
1705 # the recipient is assumed to have, obviously the recipient
1704 # the recipient is assumed to have, obviously the recipient
1706 # must have that manifest.
1705 # must have that manifest.
1707 linknode = cl.node(mnfst.linkrev(n))
1706 linknode = cl.node(mnfst.linkrev(n))
1708 if linknode in has_cl_set:
1707 if linknode in has_cl_set:
1709 has_mnfst_set[n] = 1
1708 has_mnfst_set[n] = 1
1710 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1709 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1711
1710
1712 # Use the information collected in collect_manifests_and_files to say
1711 # Use the information collected in collect_manifests_and_files to say
1713 # which changenode any manifestnode belongs to.
1712 # which changenode any manifestnode belongs to.
1714 def lookup_manifest_link(mnfstnode):
1713 def lookup_manifest_link(mnfstnode):
1715 return msng_mnfst_set[mnfstnode]
1714 return msng_mnfst_set[mnfstnode]
1716
1715
1717 # A function generating function that sets up the initial environment
1716 # A function generating function that sets up the initial environment
1718 # for the inner function.
1717 # for the inner function.
1719 def filenode_collector(changedfiles):
1718 def filenode_collector(changedfiles):
1720 next_rev = [0]
1719 next_rev = [0]
1721 # This gathers information from each manifestnode included in the
1720 # This gathers information from each manifestnode included in the
1722 # changegroup about which filenodes the manifest node references
1721 # changegroup about which filenodes the manifest node references
1723 # so we can include those in the changegroup too.
1722 # so we can include those in the changegroup too.
1724 #
1723 #
1725 # It also remembers which changenode each filenode belongs to. It
1724 # It also remembers which changenode each filenode belongs to. It
1726 # does this by assuming that a filenode belongs to the changenode
1725 # does this by assuming that a filenode belongs to the changenode
1727 # the first manifest that references it belongs to.
1726 # the first manifest that references it belongs to.
1728 def collect_msng_filenodes(mnfstnode):
1727 def collect_msng_filenodes(mnfstnode):
1729 r = mnfst.rev(mnfstnode)
1728 r = mnfst.rev(mnfstnode)
1730 if r == next_rev[0]:
1729 if r == next_rev[0]:
1731 # If the last rev we looked at was the one just previous,
1730 # If the last rev we looked at was the one just previous,
1732 # we only need to see a diff.
1731 # we only need to see a diff.
1733 deltamf = mnfst.readdelta(mnfstnode)
1732 deltamf = mnfst.readdelta(mnfstnode)
1734 # For each line in the delta
1733 # For each line in the delta
1735 for f, fnode in deltamf.items():
1734 for f, fnode in deltamf.items():
1736 f = changedfiles.get(f, None)
1735 f = changedfiles.get(f, None)
1737 # And if the file is in the list of files we care
1736 # And if the file is in the list of files we care
1738 # about.
1737 # about.
1739 if f is not None:
1738 if f is not None:
1740 # Get the changenode this manifest belongs to
1739 # Get the changenode this manifest belongs to
1741 clnode = msng_mnfst_set[mnfstnode]
1740 clnode = msng_mnfst_set[mnfstnode]
1742 # Create the set of filenodes for the file if
1741 # Create the set of filenodes for the file if
1743 # there isn't one already.
1742 # there isn't one already.
1744 ndset = msng_filenode_set.setdefault(f, {})
1743 ndset = msng_filenode_set.setdefault(f, {})
1745 # And set the filenode's changelog node to the
1744 # And set the filenode's changelog node to the
1746 # manifest's if it hasn't been set already.
1745 # manifest's if it hasn't been set already.
1747 ndset.setdefault(fnode, clnode)
1746 ndset.setdefault(fnode, clnode)
1748 else:
1747 else:
1749 # Otherwise we need a full manifest.
1748 # Otherwise we need a full manifest.
1750 m = mnfst.read(mnfstnode)
1749 m = mnfst.read(mnfstnode)
1751 # For every file we care about.
1750 # For every file we care about.
1752 for f in changedfiles:
1751 for f in changedfiles:
1753 fnode = m.get(f, None)
1752 fnode = m.get(f, None)
1754 # If it's in the manifest
1753 # If it's in the manifest
1755 if fnode is not None:
1754 if fnode is not None:
1756 # See comments above.
1755 # See comments above.
1757 clnode = msng_mnfst_set[mnfstnode]
1756 clnode = msng_mnfst_set[mnfstnode]
1758 ndset = msng_filenode_set.setdefault(f, {})
1757 ndset = msng_filenode_set.setdefault(f, {})
1759 ndset.setdefault(fnode, clnode)
1758 ndset.setdefault(fnode, clnode)
1760 # Remember the revision we hope to see next.
1759 # Remember the revision we hope to see next.
1761 next_rev[0] = r + 1
1760 next_rev[0] = r + 1
1762 return collect_msng_filenodes
1761 return collect_msng_filenodes
1763
1762
1764 # We have a list of filenodes we think we need for a file, let's remove
1763 # We have a list of filenodes we think we need for a file, let's remove
1765 # all those we know the recipient must have.
1764 # all those we know the recipient must have.
1766 def prune_filenodes(f, filerevlog):
1765 def prune_filenodes(f, filerevlog):
1767 msngset = msng_filenode_set[f]
1766 msngset = msng_filenode_set[f]
1768 hasset = {}
1767 hasset = {}
1769 # If a 'missing' filenode thinks it belongs to a changenode we
1768 # If a 'missing' filenode thinks it belongs to a changenode we
1770 # assume the recipient must have, then the recipient must have
1769 # assume the recipient must have, then the recipient must have
1771 # that filenode.
1770 # that filenode.
1772 for n in msngset:
1771 for n in msngset:
1773 clnode = cl.node(filerevlog.linkrev(n))
1772 clnode = cl.node(filerevlog.linkrev(n))
1774 if clnode in has_cl_set:
1773 if clnode in has_cl_set:
1775 hasset[n] = 1
1774 hasset[n] = 1
1776 prune_parents(filerevlog, hasset, msngset)
1775 prune_parents(filerevlog, hasset, msngset)
1777
1776
1778 # A function generator function that sets up a context for the
1777 # A function generator function that sets up a context for the
1779 # inner function.
1778 # inner function.
1780 def lookup_filenode_link_func(fname):
1779 def lookup_filenode_link_func(fname):
1781 msngset = msng_filenode_set[fname]
1780 msngset = msng_filenode_set[fname]
1782 # Lookup the changenode the filenode belongs to.
1781 # Lookup the changenode the filenode belongs to.
1783 def lookup_filenode_link(fnode):
1782 def lookup_filenode_link(fnode):
1784 return msngset[fnode]
1783 return msngset[fnode]
1785 return lookup_filenode_link
1784 return lookup_filenode_link
1786
1785
1787 # Add the nodes that were explicitly requested.
1786 # Add the nodes that were explicitly requested.
1788 def add_extra_nodes(name, nodes):
1787 def add_extra_nodes(name, nodes):
1789 if not extranodes or name not in extranodes:
1788 if not extranodes or name not in extranodes:
1790 return
1789 return
1791
1790
1792 for node, linknode in extranodes[name]:
1791 for node, linknode in extranodes[name]:
1793 if node not in nodes:
1792 if node not in nodes:
1794 nodes[node] = linknode
1793 nodes[node] = linknode
1795
1794
1796 # Now that we have all these utility functions to help out and
1795 # Now that we have all these utility functions to help out and
1797 # logically divide up the task, generate the group.
1796 # logically divide up the task, generate the group.
1798 def gengroup():
1797 def gengroup():
1799 # The set of changed files starts empty.
1798 # The set of changed files starts empty.
1800 changedfiles = {}
1799 changedfiles = {}
1801 # Create a changenode group generator that will call our functions
1800 # Create a changenode group generator that will call our functions
1802 # back to lookup the owning changenode and collect information.
1801 # back to lookup the owning changenode and collect information.
1803 group = cl.group(msng_cl_lst, identity,
1802 group = cl.group(msng_cl_lst, identity,
1804 manifest_and_file_collector(changedfiles))
1803 manifest_and_file_collector(changedfiles))
1805 for chnk in group:
1804 for chnk in group:
1806 yield chnk
1805 yield chnk
1807
1806
1808 # The list of manifests has been collected by the generator
1807 # The list of manifests has been collected by the generator
1809 # calling our functions back.
1808 # calling our functions back.
1810 prune_manifests()
1809 prune_manifests()
1811 add_extra_nodes(1, msng_mnfst_set)
1810 add_extra_nodes(1, msng_mnfst_set)
1812 msng_mnfst_lst = msng_mnfst_set.keys()
1811 msng_mnfst_lst = msng_mnfst_set.keys()
1813 # Sort the manifestnodes by revision number.
1812 # Sort the manifestnodes by revision number.
1814 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1813 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1815 # Create a generator for the manifestnodes that calls our lookup
1814 # Create a generator for the manifestnodes that calls our lookup
1816 # and data collection functions back.
1815 # and data collection functions back.
1817 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1816 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1818 filenode_collector(changedfiles))
1817 filenode_collector(changedfiles))
1819 for chnk in group:
1818 for chnk in group:
1820 yield chnk
1819 yield chnk
1821
1820
1822 # These are no longer needed, dereference and toss the memory for
1821 # These are no longer needed, dereference and toss the memory for
1823 # them.
1822 # them.
1824 msng_mnfst_lst = None
1823 msng_mnfst_lst = None
1825 msng_mnfst_set.clear()
1824 msng_mnfst_set.clear()
1826
1825
1827 if extranodes:
1826 if extranodes:
1828 for fname in extranodes:
1827 for fname in extranodes:
1829 if isinstance(fname, int):
1828 if isinstance(fname, int):
1830 continue
1829 continue
1831 msng_filenode_set.setdefault(fname, {})
1830 msng_filenode_set.setdefault(fname, {})
1832 changedfiles[fname] = 1
1831 changedfiles[fname] = 1
1833 # Go through all our files in order sorted by name.
1832 # Go through all our files in order sorted by name.
1834 for fname in util.sort(changedfiles):
1833 for fname in util.sort(changedfiles):
1835 filerevlog = self.file(fname)
1834 filerevlog = self.file(fname)
1836 if not len(filerevlog):
1835 if not len(filerevlog):
1837 raise util.Abort(_("empty or missing revlog for %s") % fname)
1836 raise util.Abort(_("empty or missing revlog for %s") % fname)
1838 # Toss out the filenodes that the recipient isn't really
1837 # Toss out the filenodes that the recipient isn't really
1839 # missing.
1838 # missing.
1840 if fname in msng_filenode_set:
1839 if fname in msng_filenode_set:
1841 prune_filenodes(fname, filerevlog)
1840 prune_filenodes(fname, filerevlog)
1842 add_extra_nodes(fname, msng_filenode_set[fname])
1841 add_extra_nodes(fname, msng_filenode_set[fname])
1843 msng_filenode_lst = msng_filenode_set[fname].keys()
1842 msng_filenode_lst = msng_filenode_set[fname].keys()
1844 else:
1843 else:
1845 msng_filenode_lst = []
1844 msng_filenode_lst = []
1846 # If any filenodes are left, generate the group for them,
1845 # If any filenodes are left, generate the group for them,
1847 # otherwise don't bother.
1846 # otherwise don't bother.
1848 if len(msng_filenode_lst) > 0:
1847 if len(msng_filenode_lst) > 0:
1849 yield changegroup.chunkheader(len(fname))
1848 yield changegroup.chunkheader(len(fname))
1850 yield fname
1849 yield fname
1851 # Sort the filenodes by their revision #
1850 # Sort the filenodes by their revision #
1852 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1851 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1853 # Create a group generator and only pass in a changenode
1852 # Create a group generator and only pass in a changenode
1854 # lookup function as we need to collect no information
1853 # lookup function as we need to collect no information
1855 # from filenodes.
1854 # from filenodes.
1856 group = filerevlog.group(msng_filenode_lst,
1855 group = filerevlog.group(msng_filenode_lst,
1857 lookup_filenode_link_func(fname))
1856 lookup_filenode_link_func(fname))
1858 for chnk in group:
1857 for chnk in group:
1859 yield chnk
1858 yield chnk
1860 if fname in msng_filenode_set:
1859 if fname in msng_filenode_set:
1861 # Don't need this anymore, toss it to free memory.
1860 # Don't need this anymore, toss it to free memory.
1862 del msng_filenode_set[fname]
1861 del msng_filenode_set[fname]
1863 # Signal that no more groups are left.
1862 # Signal that no more groups are left.
1864 yield changegroup.closechunk()
1863 yield changegroup.closechunk()
1865
1864
1866 if msng_cl_lst:
1865 if msng_cl_lst:
1867 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1866 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1868
1867
1869 return util.chunkbuffer(gengroup())
1868 return util.chunkbuffer(gengroup())
1870
1869
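    # The generator above produces one flat chunk stream: the changelog
    # group, then the manifest group, then for each changed file a chunk
    # holding the file name followed by that file's delta group, and
    # finally the empty chunk from changegroup.closechunk() that ends the
    # stream. A minimal reader sketch, mirroring how addchangegroup()
    # below consumes it ('cg', 'bases' and 'heads' are hypothetical names):
    #
    #   cg = repo.changegroupsubset(bases, heads, 'pull')
    #   for chunk in changegroup.chunkiter(cg):      # changelog group
    #       pass
    #   for chunk in changegroup.chunkiter(cg):      # manifest group
    #       pass
    #   while True:
    #       fname = changegroup.getchunk(cg)         # empty once the
    #       if not fname:                            # closing chunk hits
    #           break
    #       for chunk in changegroup.chunkiter(cg):  # file delta group
    #           pass
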
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

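    # changegroupsubset() pins the head list at call time, so a commit
    # landing while the group is generated cannot leak extra changesets
    # into the stream; that appears to be the race issue1320 refers to.
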
    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                n = log.node(r)
                if log.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

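    # util.chunkbuffer wraps the generator in a file-like object, so both
    # changegroup methods hand callers something they can read like a
    # bundle file. A minimal caller sketch (hypothetical names; 'send' is
    # any byte sink):
    #
    #   cg = repo._changegroup(common, 'push')
    #   while True:
    #       data = cg.read(4096)
    #       if not data:
    #           break
    #       send(data)
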
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will
        # not see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

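    # The integer returned above folds head movement into one value; a
    # minimal caller sketch (hypothetical names) translating it into user
    # hints, similar in spirit to what the pull-style commands do:
    #
    #   modheads = repo.addchangegroup(source, 'pull', url)
    #   if modheads == 0:
    #       ui.status('no changes found\n')
    #   elif modheads > 1:
    #       ui.status("(run 'hg heads' to see new heads)\n")
    #   elif modheads == 1:
    #       ui.status("(run 'hg update' to get a working copy)\n")
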
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

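    # The wire format parsed above is line-oriented headers plus raw file
    # data: a status line ('0' on success, '1'/'2' for errors), a
    # '<total_files> <total_bytes>' line, then per file a '<name>\0<size>'
    # line followed by exactly <size> bytes of store data. A minimal
    # sketch of the producing side, assuming a writable 'out' and a list
    # of (name, size, data) triples (hypothetical names):
    #
    #   out.write('0\n')
    #   out.write('%d %d\n' % (total_files, total_bytes))
    #   for name, size, data in entries:
    #       out.write('%s\0%d\n' % (name, size))
    #       out.write(data)
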
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

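    # Streaming is only attempted for a full clone (no head subset)
    # against a server advertising the 'stream' capability; everything
    # else falls back to a normal pull. For example (hypothetical names):
    #
    #   repo.clone(remote, stream=True)       # may stream
    #   repo.clone(remote, heads=[node])      # always pulls
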
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

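# aftertrans() deliberately returns a closure that captures only the
# rename list, never the repository, so the transaction's after-close
# callback cannot create a repo <-> transaction reference cycle. A
# minimal usage sketch with hypothetical journal/undo paths:
#
#   renames = [('journal', 'undo'), ('journal.dirstate', 'undo.dirstate')]
#   onclose = aftertrans(renames)
#   onclose()   # performs the renames; holds nothing but path tuples
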
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True