commit: refactor salvage calculation to a different function...
Pulkit Goyal - r46297:61454026 default
@@ -1,471 +1,480 @@
# commit.py - function to perform commit
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
)

from . import (
    context,
    mergestate,
    metadata,
    phases,
    scmutil,
    subrepoutil,
)


def _write_copy_meta(repo):
    """return a (changelog, filelog) boolean tuple

    changelog: copy related information should be stored in the changeset
    filelog: copy related information should be written in the file revision
    """
    if repo.filecopiesmode == b'changeset-sidedata':
        writechangesetcopy = True
        writefilecopymeta = True
    else:
        writecopiesto = repo.ui.config(b'experimental', b'copies.write-to')
        writefilecopymeta = writecopiesto != b'changeset-only'
        writechangesetcopy = writecopiesto in (
            b'changeset-only',
            b'compatibility',
        )
    return writechangesetcopy, writefilecopymeta


def commitctx(repo, ctx, error=False, origctx=None):
    """Add a new revision to the target repository.
    Revision information is passed via the context argument.

    ctx.files() should list all files involved in this commit, i.e.
    modified/added/removed files. On merge, it may be wider than the
    ctx.files() to be committed, since any file nodes derived directly
    from p1 or p2 are excluded from the committed ctx.files().

    origctx is for convert to work around the problem that bug
    fixes to the files list in changesets change hashes. For
    convert to be the identity, it can pass an origctx and this
    function will use the same files list when it makes sense to
    do so.
    """
    repo = repo.unfiltered()

    p1, p2 = ctx.p1(), ctx.p2()
    user = ctx.user()

    with repo.lock(), repo.transaction(b"commit") as tr:
        mn, files = _prepare_files(tr, ctx, error=error, origctx=origctx)

        extra = ctx.extra().copy()

        if extra is not None:
            for name in (
                b'p1copies',
                b'p2copies',
                b'filesadded',
                b'filesremoved',
            ):
                extra.pop(name, None)
        if repo.changelog._copiesstorage == b'extra':
            extra = _extra_with_copies(repo, extra, files)

        # update changelog
        repo.ui.note(_(b"committing changelog\n"))
        repo.changelog.delayupdate(tr)
        n = repo.changelog.add(
            mn,
            files,
            ctx.description(),
            tr,
            p1.node(),
            p2.node(),
            user,
            ctx.date(),
            extra,
        )
        xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
        repo.hook(
            b'pretxncommit', throw=True, node=hex(n), parent1=xp1, parent2=xp2,
        )
        # set the new commit in its proper phase
        targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
        if targetphase:
            # retracting the boundary does not alter the parent changesets.
            # if a parent has a higher phase, the resulting phase will
            # be compliant anyway
            #
            # if the minimal phase was 0 we don't need to retract anything
            phases.registernew(repo, tr, targetphase, [n])
        return n


def _prepare_files(tr, ctx, error=False, origctx=None):
    repo = ctx.repo()
    p1 = ctx.p1()

    writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)

    if ctx.manifestnode():
        # reuse an existing manifest revision
        repo.ui.debug(b'reusing known manifest\n')
        mn = ctx.manifestnode()
        files = metadata.ChangingFiles()
        files.update_touched(ctx.files())
        if writechangesetcopy:
            files.update_added(ctx.filesadded())
            files.update_removed(ctx.filesremoved())
    elif not ctx.files():
        repo.ui.debug(b'reusing manifest from p1 (no file change)\n')
        mn = p1.manifestnode()
        files = metadata.ChangingFiles()
    else:
        mn, files = _process_files(tr, ctx, error=error)

    if origctx and origctx.manifestnode() == mn:
        origfiles = origctx.files()
        assert files.touched.issubset(origfiles)
        files.update_touched(origfiles)

    if writechangesetcopy:
        files.update_copies_from_p1(ctx.p1copies())
        files.update_copies_from_p2(ctx.p2copies())

-    copy_sd = ctx.repo().filecopiesmode == b'changeset-sidedata'
-    if copy_sd and len(ctx.parents()) > 1:
-        # XXX this `mergestate.read` could be duplicated with the merge state
-        # reading in _process_files, so we could refactor further to reuse it
-        # in some cases.
-        ms = mergestate.mergestate.read(repo)
-        if ms.active():
-            for fname in sorted(ms._stateextras.keys()):
-                might_removed = ms.extras(fname).get(b'merge-removal-candidate')
-                if might_removed == b'yes':
-                    if fname in ctx:
-                        files.mark_salvaged(fname)
-
-    return mn, files
+    ms = mergestate.mergestate.read(repo)
+    salvaged = _get_salvaged(ctx.repo(), ms, ctx)
+    for s in salvaged:
+        files.mark_salvaged(s)
+
+    return mn, files
+
+
+def _get_salvaged(repo, ms, ctx):
+    """returns a list of salvaged files
+
+    returns an empty list if the config option which processes salvaged
+    files is not enabled"""
+    salvaged = []
+    copy_sd = repo.filecopiesmode == b'changeset-sidedata'
+    if copy_sd and len(ctx.parents()) > 1:
+        if ms.active():
+            for fname in sorted(ms._stateextras.keys()):
+                might_removed = ms.extras(fname).get(b'merge-removal-candidate')
+                if might_removed == b'yes':
+                    if fname in ctx:
+                        salvaged.append(fname)
+    return salvaged


def _process_files(tr, ctx, error=False):
    repo = ctx.repo()
    p1 = ctx.p1()
    p2 = ctx.p2()

    writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)

    m1ctx = p1.manifestctx()
    m2ctx = p2.manifestctx()
    mctx = m1ctx.copy()

    m = mctx.read()
    m1 = m1ctx.read()
    m2 = m2ctx.read()
    ms = mergestate.mergestate.read(repo)

    files = metadata.ChangingFiles()

    # check in files
    added = []
    removed = list(ctx.removed())
    linkrev = len(repo)
    repo.ui.note(_(b"committing files:\n"))
    uipathfn = scmutil.getuipathfn(repo)
    for f in sorted(ctx.modified() + ctx.added()):
        repo.ui.note(uipathfn(f) + b"\n")
        try:
            fctx = ctx[f]
            if fctx is None:
                removed.append(f)
            else:
                added.append(f)
                m[f], is_touched = _filecommit(
                    repo, fctx, m1, m2, linkrev, tr, writefilecopymeta, ms
                )
                if is_touched:
                    if is_touched == 'added':
                        files.mark_added(f)
                    elif is_touched == 'merged':
                        files.mark_merged(f)
                    else:
                        files.mark_touched(f)
                m.setflag(f, fctx.flags())
        except OSError:
            repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
            raise
        except IOError as inst:
            errcode = getattr(inst, 'errno', errno.ENOENT)
            if error or errcode and errcode != errno.ENOENT:
                repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
                raise

    # update manifest
    removed = [f for f in removed if f in m1 or f in m2]
    drop = sorted([f for f in removed if f in m])
    for f in drop:
        del m[f]
    if p2.rev() == nullrev:
        files.update_removed(removed)
    else:
        rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
        for f in removed:
            if not rf(f):
                files.mark_removed(f)

    mn = _commit_manifest(tr, linkrev, ctx, mctx, m, files.touched, added, drop)

    return mn, files


def _filecommit(
    repo, fctx, manifest1, manifest2, linkrev, tr, includecopymeta, ms,
):
    """
    commit an individual file as part of a larger transaction

    input:

        fctx:       a file context with the content we are trying to commit
        manifest1:  manifest of changeset first parent
        manifest2:  manifest of changeset second parent
        linkrev:    revision number of the changeset being created
        tr:         current transaction
        includecopymeta: boolean, set to False to skip storing the copy data
                         (only used by the Google specific feature of using
                         changeset extra as copy source of truth).
        ms:         mergestate object

    output: (filenode, touched)

        filenode: the filenode that should be used by this changeset
        touched:  one of: None (means untouched), 'added' or 'modified'
    """

    fname = fctx.path()
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = manifest2.get(fname, nullid)
    touched = None
    if fparent1 == fparent2 == nullid:
        touched = 'added'

    if isinstance(fctx, context.filectx):
        # This block fast-paths most comparisons which are usually done. It
        # assumes that a bare filectx is used and no merge happened, hence no
        # need to create a new file revision in this case.
        node = fctx.filenode()
        if node in [fparent1, fparent2]:
            repo.ui.debug(b'reusing %s filelog entry\n' % fname)
            if (
                fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
            ) or (
                fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
            ):
                touched = 'modified'
            return node, touched

    flog = repo.file(fname)
    meta = {}
    cfname = fctx.copysource()
    fnode = None

    if cfname and cfname != fname:
        # Mark the new revision of this file as a copy of another
        # file. This copy data will effectively act as a parent
        # of this new revision. If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent. For example:
        #
        #    0 --- 1 --- 3   rev1 changes file foo
        #      \       /     rev2 renames foo to bar and changes it
        #       \- 2 -/      rev3 should have bar with all changes and
        #                        should record that bar descends from
        #                        bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        #    0 --- 1 --- 3   rev4 reverts the content change from rev2
        #      \       /     merging rev3 and rev4 should use bar@rev2
        #       \- 2 --- 4       as the merge base
        #

        cnode = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2:  # branch merge
            if fparent2 == nullid or cnode is None:  # copied on remote side
                if cfname in manifest2:
                    cnode = manifest2[cfname]
                    newfparent = fparent1

        # Here, we used to search backwards through history to try to find
        # where the file copy came from if the source of a copy was not in
        # the parent directory. However, this doesn't actually make sense to
        # do (what does a copy from something not in your working copy even
        # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
        # the user that copy information was dropped, so if they didn't
        # expect this outcome it can be fixed, but this is the correct
        # behavior in this circumstance.

        if cnode:
            repo.ui.debug(b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
            if includecopymeta:
                meta[b"copy"] = cfname
                meta[b"copyrev"] = hex(cnode)
            fparent1, fparent2 = nullid, newfparent
        else:
            repo.ui.warn(
                _(
                    b"warning: can't find ancestor for '%s' "
                    b"copied from '%s'!\n"
                )
                % (fname, cfname)
            )

    elif fparent1 == nullid:
        fparent1, fparent2 = fparent2, nullid
    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
        if fparent1 in fparentancestors:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 in fparentancestors:
            fparent2 = nullid
        elif not fparentancestors:
            # TODO: this whole if-else might be simplified much more
            if (
                ms.active()
                and ms.extras(fname).get(b'filenode-source') == b'other'
            ):
                fparent1, fparent2 = fparent2, nullid

    force_new_node = False
    # The file might have been deleted by merge code and the user explicitly
    # chose to revert the file and keep it. The other case can be where there
    # is a change-delete or delete-change conflict and the user explicitly
    # chose to keep the file. The goal is to create a new filenode for the
    # user's explicit choices.
    if (
        repo.ui.configbool(b'experimental', b'merge-track-salvaged')
        and ms.active()
        and ms.extras(fname).get(b'merge-removal-candidate') == b'yes'
    ):
        force_new_node = True
    # is the file changed?
    text = fctx.data()
    if fparent2 != nullid or meta or flog.cmp(fparent1, text) or force_new_node:
        if touched is None:  # do not overwrite added
            if fparent2 == nullid:
                touched = 'modified'
            else:
                touched = 'merged'
        fnode = flog.add(text, meta, tr, linkrev, fparent1, fparent2)
    # are just the flags changed during merge?
    elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
        touched = 'modified'
        fnode = fparent1
    else:
        fnode = fparent1
    return fnode, touched


def _commit_manifest(tr, linkrev, ctx, mctx, manifest, files, added, drop):
    """make a new manifest entry (or reuse an existing one)

    given an initialised manifest context and precomputed list of
    - files: files affected by the commit
    - added: new entries in the manifest
    - drop: entries present in parents but absent from this one

    Create a new manifest revision, reuse existing ones if possible.

    Return the nodeid of the manifest revision.
    """
    repo = ctx.repo()

    md = None

    # all this is cached, so it is fine to get them all from the ctx.
    p1 = ctx.p1()
    p2 = ctx.p2()
    m1ctx = p1.manifestctx()

    m1 = m1ctx.read()

    if not files:
        # if no "files" actually changed in terms of the changelog,
        # try hard to detect unmodified manifest entry so that the
        # exact same commit can be reproduced later on convert.
        md = m1.diff(manifest, scmutil.matchfiles(repo, ctx.files()))
    if not files and md:
        repo.ui.debug(
            b'not reusing manifest (no file change in '
            b'changelog, but manifest differs)\n'
        )
    if files or md:
        repo.ui.note(_(b"committing manifest\n"))
        # we're using narrowmatch here since it's already applied at
        # other stages (such as dirstate.walk), so we're already
        # ignoring things outside of narrowspec in most cases. The
        # one case where we might have files outside the narrowspec
        # at this point is merges, and we already error out in the
        # case where the merge has files outside of the narrowspec,
        # so this is safe.
        mn = mctx.write(
            tr,
            linkrev,
            p1.manifestnode(),
            p2.manifestnode(),
            added,
            drop,
            match=repo.narrowmatch(),
        )
    else:
        repo.ui.debug(
            b'reusing manifest from p1 (listed files actually unchanged)\n'
        )
        mn = p1.manifestnode()

    return mn


def _extra_with_copies(repo, extra, files):
    """encode copy information into an `extra` dictionary"""
    p1copies = files.copied_from_p1
    p2copies = files.copied_from_p2
    filesadded = files.added
    filesremoved = files.removed
    files = sorted(files.touched)
    if not _write_copy_meta(repo)[1]:
        # If writing only to changeset extras, use None to indicate that
        # no entry should be written. If writing to both, write an empty
        # entry to prevent the reader from falling back to reading
        # filelogs.
        p1copies = p1copies or None
        p2copies = p2copies or None
        filesadded = filesadded or None
        filesremoved = filesremoved or None

    extrasentries = p1copies, p2copies, filesadded, filesremoved
    if extra is None and any(x is not None for x in extrasentries):
        extra = {}
    if p1copies is not None:
        p1copies = metadata.encodecopies(files, p1copies)
        extra[b'p1copies'] = p1copies
    if p2copies is not None:
        p2copies = metadata.encodecopies(files, p2copies)
        extra[b'p2copies'] = p2copies
    if filesadded is not None:
        filesadded = metadata.encodefileindices(files, filesadded)
        extra[b'filesadded'] = filesadded
    if filesremoved is not None:
        filesremoved = metadata.encodefileindices(files, filesremoved)
        extra[b'filesremoved'] = filesremoved
    return extra
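
As a rough illustration of the rule the new `_get_salvaged` helper encodes (only when copies are tracked in changeset sidedata and the commit has two parents, a file is salvaged when the merge state flags it as a `merge-removal-candidate` and it is still present in the committed context), here is a minimal, self-contained sketch. `FakeMergeState` and `get_salvaged` are hypothetical stand-ins for Mercurial's mergestate/repo/ctx objects, not the real API:

# Minimal sketch of the salvage rule from _get_salvaged above, using
# hypothetical stub objects in place of Mercurial's repo/mergestate/ctx.
class FakeMergeState:
    def __init__(self, extras):
        self._stateextras = extras  # {filename: {key: value}}, as in mergestate

    def active(self):
        return bool(self._stateextras)

    def extras(self, fname):
        return self._stateextras.get(fname, {})


def get_salvaged(filecopiesmode, num_parents, ms, files_in_ctx):
    """Same decision logic as _get_salvaged, with plain values in place of
    repo/ctx objects (an illustration, not the real signature)."""
    salvaged = []
    copy_sd = filecopiesmode == b'changeset-sidedata'
    if copy_sd and num_parents > 1 and ms.active():
        for fname in sorted(ms._stateextras.keys()):
            if ms.extras(fname).get(b'merge-removal-candidate') == b'yes':
                if fname in files_in_ctx:
                    salvaged.append(fname)
    return salvaged


ms = FakeMergeState(
    {
        b'kept.txt': {b'merge-removal-candidate': b'yes'},
        b'gone.txt': {b'merge-removal-candidate': b'yes'},
        b'other.txt': {},
    }
)
# 'gone.txt' was flagged but is no longer present in the commit, so only
# 'kept.txt' is reported as salvaged.
print(get_salvaged(b'changeset-sidedata', 2, ms, {b'kept.txt', b'other.txt'}))
# [b'kept.txt']

One benefit of the extraction is that this predicate now lives behind a single function boundary, which the removed XXX comment suggests could later let the merge-state read be shared with `_process_files`.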